Example #1
 def insert_docs(self, num_of_docs, prefix, extra_values={}, wait_for_persistence=True, return_docs=False):
     rest = RestConnection(self.master)
     smart = VBucketAwareMemcached(rest, self.bucket)
     doc_names = []
     for i in range(0, num_of_docs):
         key = doc_name = "{0}-{1}".format(prefix, i)
         geom = {"type": "Point", "coordinates": [random.randrange(-180, 180), random.randrange(-90, 90)]}
         value = {"name": doc_name, "age": random.randrange(1, 1000), "geometry": geom}
         value.update(extra_values)
         if not return_docs:
             doc_names.append(doc_name)
         else:
             doc_names.append(value)
         # loop till value is set
         fail_count = 0
         while True:
             try:
                 smart.set(key, 0, 0, json.dumps(value))
                 break
             except MemcachedError as e:
                 fail_count += 1
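                 # statuses 132 and 133 appear to be transient engine errors
                 # (EINTERNAL/EBUSY in the binary protocol), so retry for a while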
                 if (e.status == 133 or e.status == 132) and fail_count < 60:
                     if i == 0:
                         self.log.error("waiting 5 seconds. error {0}".format(e))
                         time.sleep(5)
                     else:
                         self.log.error(e)
                         time.sleep(1)
                 else:
                     raise e
     if wait_for_persistence:
         self.wait_for_persistence()
     self.log.info("inserted {0} json documents".format(num_of_docs))
     return doc_names
Example #2
    def key_not_exists_test(self):
        self.assertTrue(len(self.buckets) > 0, 'at least 1 bucket required')
        bucket = self.buckets[0].name
        client = VBucketAwareMemcached(RestConnection(self.master), bucket)
        KEY_NAME = 'key'

        for i in range(1500):
            client.set(KEY_NAME, 0, 0, "x")
            # delete and verify get fails
            client.delete(KEY_NAME)
            err = None
            try:
                rc = client.get(KEY_NAME)
            except MemcachedError as error:
                # It is expected to raise MemcachedError because the key is deleted.
                err = error.status
            self.assertTrue(err == ERR_NOT_FOUND,
                            'expected key to be deleted {0}'.format(KEY_NAME))

            # CAS errors should not make the test sleep for 10 seconds; we also
            # need to check that the correct error is being thrown
            err = None
            try:
                # For some reason, replace instead of cas would not reproduce the bug
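                # memcached(key) resolves the key's vbucket and returns the
                # direct client for the node that currently owns it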
                mc_active = client.memcached(KEY_NAME)
                mc_active.replace(KEY_NAME, 0, 10, "value")
            except MemcachedError as error:
                err = error.status
            self.assertTrue(
                err == ERR_NOT_FOUND,
                'was able to replace cas on removed key {0}'.format(KEY_NAME))
Example #3
    def key_not_exists_test(self):
        self.assertTrue(len(self.buckets) > 0, 'at least 1 bucket required')
        bucket = self.buckets[0].name
        client = VBucketAwareMemcached(RestConnection(self.master), bucket)
        KEY_NAME = 'key'

        for i in range(1500):
            client.set(KEY_NAME, 0, 0, "x")
            # delete and verify get fails
            client.delete(KEY_NAME)
            err = None
            try:
                rc = client.get(KEY_NAME)
            except MemcachedError as error:
                # It is expected to raise MemcachedError because the key is deleted.
                err = error.status
            self.assertTrue(err == ERR_NOT_FOUND, 'expected key to be deleted {0}'.format(KEY_NAME))

            # CAS errors should not make the test sleep for 10 seconds; we also
            # need to check that the correct error is being thrown
            err = None
            try:
                # For some reason, replace instead of cas would not reproduce the bug
                mc_active = client.memcached(KEY_NAME)
                mc_active.replace(KEY_NAME, 0, 10, "value")
            except MemcachedError as error:
                err = error.status
            self.assertTrue(err == ERR_NOT_FOUND, 'was able to replace cas on removed key {0}'.format(KEY_NAME))
Example #4
    def test_CASnotzero(self):
        # MB-31149
        # observe.observeseqnotests.ObserveSeqNoTests.test_CASnotzero
        # set value, append and check CAS value
        self.log.info('Starting test_CASnotzero')

        # without hello(mutationseqencenumber)
        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        KEY_NAME = "test1key"
        client.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
        client.generic_request(
            client.memcached(KEY_NAME).append, 'test1key', 'appended data')
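        # getMeta responses carry (deleted, flags, exp, seqno, cas),
        # so index 4 below is the CAS value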
        get_meta_resp = client.generic_request(
            client.memcached(KEY_NAME).getMeta, 'test1key')
        self.log.info(
            'the CAS value without hello(mutationseqencenumber): {} '.format(
                get_meta_resp[4]))
        self.assertNotEqual(get_meta_resp[4], 0)

        # with hello(mutationseqencenumber)
        KEY_NAME = "test2key"
        client.set(KEY_NAME, 0, 0, json.dumps({'value': 'value1'}))
        h = client.sendHellos(
            memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO)
        client.generic_request(
            client.memcached(KEY_NAME).append, 'test2key', 'appended data456')

        get_meta_resp = client.generic_request(
            client.memcached(KEY_NAME).getMeta, 'test2key')
        self.log.info(
            'the CAS value with hello(mutationseqencenumber): {} '.format(
                get_meta_resp[4]))
        self.assertNotEqual(get_meta_resp[4], 0)
Example #5
 def insert_docs(self,
                 num_of_docs,
                 prefix='doc',
                 extra_values={},
                 return_docs=False,
                 scope=None,
                 collection=None):
     random.seed(12345)
     rest = RestConnection(self.master)
     smart = VBucketAwareMemcached(rest, self.bucket)
     doc_names = []
     for i in range(0, num_of_docs):
         key = doc_name = "{0}-{1}".format(prefix, i)
         geom = {
             "type":
             "Point",
             "coordinates":
             [random.randrange(-180, 180),
              random.randrange(-90, 90)]
         }
         value = {
             "name": doc_name,
             "age": random.randrange(1, 1000),
             "geometry": geom,
             "height": random.randrange(1, 13000),
             "bloom": random.randrange(1, 6),
             "shed_leaves": random.randrange(6, 13)
         }
         value.update(extra_values)
         if not return_docs:
             doc_names.append(doc_name)
         else:
             doc_names.append(value)
         # loop till value is set
         fail_count = 0
         while True:
             try:
                 smart.set(key,
                           0,
                           0,
                           json.dumps(value),
                           scope=scope,
                           collection=collection)
                 break
             except MemcachedError as e:
                 fail_count += 1
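                  # unlike the older variants, this version also retries on
                  # status 134, which appears to be ETMPFAIL (temporary failure)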
                 if (e.status == 133 or e.status == 132
                         or e.status == 134) and fail_count < 60:
                     if i == 0:
                         self.log.error(
                             "waiting 5 seconds. error {0}".format(e))
                         time.sleep(5)
                     else:
                         self.log.error(e)
                         time.sleep(1)
                 else:
                     raise e
     self.log.info("Inserted {0} json documents".format(num_of_docs))
     return doc_names
Example #6
    def insert_nested_docs(self,
                           num_of_docs,
                           prefix='doc',
                           levels=16,
                           size=512,
                           return_docs=False,
                           long_path=False,
                           scope=None,
                           collection=None):
        rest = RestConnection(self.master)
        smart = VBucketAwareMemcached(rest, self.bucket)
        doc_names = []

        dict = {'doc': {}, 'levels': levels}
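        # NOTE: 'dict' shadows the builtin; the _createNestedJson helpers
        # build the nested document in place under dict['doc']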

        for i in range(0, num_of_docs):
            key = doc_name = "{0}-{1}".format(prefix, i)
            if long_path:
                self._createNestedJson_longPath(key, dict)
            else:
                self._createNestedJson(key, dict)
            value = dict['doc']
            if not return_docs:
                doc_names.append(doc_name)
            else:
                doc_names.append(value)
            # loop till value is set
            fail_count = 0
            while True:
                try:
                    smart.set(key,
                              0,
                              0,
                              json.dumps(value),
                              scope=scope,
                              collection=collection)
                    break
                except MemcachedError as e:
                    fail_count += 1
                    if (e.status == 133
                            or e.status == 132) and fail_count < 60:
                        if i == 0:
                            self.log.error(
                                "waiting 5 seconds. error {0}".format(e))
                            time.sleep(5)
                        else:
                            self.log.error(e)
                            time.sleep(1)
                    else:
                        raise e
        self.log.info("Inserted {0} json documents".format(num_of_docs))
        return doc_names
Example #7
 def insert_docs(self,
                 num_of_docs,
                 prefix,
                 extra_values={},
                 wait_for_persistence=True,
                 return_docs=False):
     rest = RestConnection(self.master)
     smart = VBucketAwareMemcached(rest, self.bucket)
     doc_names = []
     for i in range(0, num_of_docs):
         key = doc_name = "{0}-{1}".format(prefix, i)
         geom = {
             "type":
             "Point",
             "coordinates":
             [random.randrange(-180, 180),
              random.randrange(-90, 90)]
         }
         value = {
             "name": doc_name,
             "age": random.randrange(1, 1000),
             "geometry": geom
         }
         value.update(extra_values)
         if not return_docs:
             doc_names.append(doc_name)
         else:
             doc_names.append(value)
         # loop till value is set
         fail_count = 0
         while True:
             try:
                 smart.set(key, 0, 0, json.dumps(value))
                 break
             except MemcachedError as e:
                 fail_count += 1
                 if (e.status == 133
                         or e.status == 132) and fail_count < 60:
                     if i == 0:
                         self.log.error(
                             "waiting 5 seconds. error {0}".format(e))
                         time.sleep(5)
                     else:
                         self.log.error(e)
                         time.sleep(1)
                 else:
                     raise e
     if wait_for_persistence:
         self.wait_for_persistence()
     self.log.info("inserted {0} json documents".format(num_of_docs))
     return doc_names
Example #8
    def _test_view_on_multiple_docs(self, num_docs, params={"stale":"update_after"}, delay=10):
        self.log.info("description : create a view on {0} documents".format(num_docs))
        master = self.servers[0]
        rest = RestConnection(master)
        bucket = "default"
        view_name = "dev_test_view_on_{1}_docs-{0}".format(str(uuid.uuid4())[:7], self.num_docs)
        map_fn = "function (doc) {if(doc.name.indexOf(\"" + view_name + "\") != -1) { emit(doc.name, doc);}}"
        rest.create_view(view_name, bucket, [View(view_name, map_fn, dev_view=False)])
        self.created_views[view_name] = bucket
        rest = RestConnection(self.servers[0])
        smart = VBucketAwareMemcached(rest, bucket)
        doc_names = []
        prefix = str(uuid.uuid4())[:7]
        total_time = 0
        self.log.info("inserting {0} json objects".format(num_docs))
        for i in range(0, num_docs):
            key = doc_name = "{0}-{1}-{2}".format(view_name, prefix, i)
            doc_names.append(doc_name)
            value = {"name": doc_name, "age": 1000}
            smart.set(key, 0, 0, json.dumps(value))
        self.log.info("inserted {0} json documents".format(len(doc_names)))
        time.sleep(10)
        results = ViewBaseTests._get_view_results(self, rest, bucket, view_name, len(doc_names), extra_params=params)
        view_time = results['view_time']

        keys = ViewBaseTests._get_keys(self, results)

        RebalanceHelper.wait_for_persistence(master, bucket, 0)

        total_time = view_time
        # Keep polling the view until it returns every key, for at most
        # 900 seconds (the timeout was raised for slower Windows runs)
        start_time = time.time()
        while (len(keys) != len(doc_names)) and (time.time() - start_time < 900):
            msg = "view returned {0} items , expected to return {1} items"
            self.log.info(msg.format(len(keys), len(doc_names)))
            self.log.info("trying again in {0} seconds".format(delay))
            time.sleep(delay)
            results = ViewBaseTests._get_view_results(self, rest, bucket, view_name, len(doc_names), extra_params=params)
            view_time = results['view_time']
            total_time += view_time
            keys = ViewBaseTests._get_keys(self, results)

        self.log.info("View time: {0} secs".format(total_time))

        # Only if the lengths are not equal, look for missing keys
        if len(keys) != len(doc_names):
            not_found = list(set(doc_names) - set(keys))
            ViewBaseTests._print_keys_not_found(self, not_found, 10)
            self.fail("map function did not return docs for {0} keys".format(len(not_found)))
Example #9
    def load_docs(self, node, num_docs, bucket = 'default', password = '',
                  exp = 0, flags = 0):

        client = VBucketAwareMemcached(RestConnection(node), bucket)
        for i in range(num_docs):
            key = "key%s"%i
            rc = client.set(key, 0, 0, "value")
Example #10
    def _load_docs(self, num_docs, prefix, verify=True, bucket='default', expire=0, flag=0):
        master = self.servers[0]
        rest = RestConnection(master)
        smart = VBucketAwareMemcached(rest, bucket)
        doc_names = []
        for i in range(0, num_docs):
            key = doc_name = "{0}-{1}".format(prefix, i)
            doc_names.append(doc_name)
            value = {"name": doc_name, "age": i}
            smart.set(key, expire, flag, json.dumps(value))
        RebalanceHelper.wait_for_persistence(master, bucket)
        self.log.info("inserted {0} json documents".format(num_docs))
        if verify:
            ViewBaseTests._verify_keys(self, doc_names, prefix)

        return doc_names
Example #11
 def insert_docs(self, num_of_docs, prefix='doc', extra_values={},
                 return_docs=False):
     random.seed(12345)
     rest = RestConnection(self.master)
     smart = VBucketAwareMemcached(rest, self.bucket)
     doc_names = []
     for i in range(0, num_of_docs):
         key = doc_name = "{0}-{1}".format(prefix, i)
         geom = {"type": "Point", "coordinates":
             [random.randrange(-180, 180),
              random.randrange(-90, 90)]}
         value = {
             "name": doc_name,
             "age": random.randrange(1, 1000),
             "geometry": geom,
             "array" :[0,1,2,3,4,5,6,7,8,9,20],
             "isDict" : True,
             "dict_value" : {"name":"abc", "age":1},
             "height": random.randrange(1, 13000),
             "bloom": random.randrange(1, 6),
             "shed_leaves": random.randrange(6, 13)}
         value.update(extra_values)
         if not return_docs:
             doc_names.append(doc_name)
         else:
              doc_names.append(value)
          # loop till value is set
         fail_count = 0
         while True:
             try:
                 smart.set(key, 0, 0, json.dumps(value))
                 break
             except MemcachedError as e:
                 fail_count += 1
                 if (e.status == 133 or e.status == 132) and fail_count < 60:
                     if i == 0:
                         self.log.error("waiting 5 seconds. error {0}"
                         .format(e))
                         time.sleep(5)
                     else:
                         self.log.error(e)
                         time.sleep(1)
                 else:
                     raise e
     self.log.info("Inserted {0} json documents".format(num_of_docs))
     return doc_names
Example #12
    def load_docs(self,
                  node,
                  num_docs,
                  bucket='default',
                  password='',
                  exp=0,
                  flags=0):

        client = VBucketAwareMemcached(RestConnection(node), bucket)
        for i in range(num_docs):
            key = "key%s" % i
            rc = client.set(key, 0, 0, "value")
Example #13
 def load(self, path, bucket, prefix='test'):
     client = VBucketAwareMemcached(RestConnection(self.master), bucket)
      for file_name in os.listdir(path):
          with open(path + '/' + file_name) as f:
              rq_s = f.read()
          rq_json = json.loads(rq_s)
          key = file_name
          try:
              o, c, d = client.set(key, 0, 0, json.dumps(rq_json))
          except Exception as ex:
              print('WARN=======================')
              print(ex)
Example #14
    def insert_nested_docs(self, num_of_docs, prefix='doc', levels=16, size=512, return_docs=False, long_path=False):
        rest = RestConnection(self.master)
        smart = VBucketAwareMemcached(rest, self.bucket)
        doc_names = []

        dict = {'doc' : {}, 'levels' : levels }

        for i in range(0, num_of_docs):
            key = doc_name = "{0}-{1}".format(prefix, i)
            if long_path:
                self._createNestedJson_longPath(key, dict)
            else:
                self._createNestedJson(key, dict)
            value = dict['doc']
            if not return_docs:
                doc_names.append(doc_name)
            else:
                doc_names.append(value)
            # loop till value is set
            fail_count = 0
            while True:
                try:
                    smart.set(key, 0, 0, json.dumps(value))
                    break
                except MemcachedError as e:
                    fail_count += 1
                    if (e.status == 133 or e.status == 132) and fail_count < 60:
                        if i == 0:
                            self.log.error("waiting 5 seconds. error {0}"
                            .format(e))
                            time.sleep(5)
                        else:
                            self.log.error(e)
                            time.sleep(1)
                    else:
                        raise e
        self.log.info("Inserted {0} json documents".format(num_of_docs))
        return doc_names
Example #15
    def test_new_response_fields(self):

        self.log.info('\n\nStarting test_new_response_fields')

        client = VBucketAwareMemcached(RestConnection(self.master), 'default')

        h = client.sendHellos(
            memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO)

        set_resp = self.extract_vbucket_uuid_and_seqno(
            client.set('test1key', 0, 0, '123456789'))

        # test the inplace operations
        test = client.generic_request(
            client.memcached('test1key').set, 'test1key', 0, 0,
            'totally new value')
        replace_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request(
                client.memcached('test1key').replace, 'test1key', 0, 0,
                'totally new value'))
        self.verify_vbucket_and_seqno(set_resp, replace_resp, 'replace')

        append_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request(
                client.memcached('test1key').append, 'test1key',
                'appended data'))
        self.verify_vbucket_and_seqno(replace_resp, append_resp, 'append')

        prepend_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request(
                client.memcached('test1key').prepend, 'test1key',
                'prepended data'))
        self.verify_vbucket_and_seqno(append_resp, prepend_resp, 'prepend')

        # and finally do the delete
        delete_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request(
                client.memcached('test1key').delete, 'test1key'))
        self.verify_vbucket_and_seqno(set_resp, delete_resp, 'delete')

        #meta commands under construction
        # test the 'meta' commands
        TEST_SEQNO = 123
        TEST_CAS = 456

        set_with_meta_resp = client.generic_request(
            client.memcached('test1keyformeta').set_with_meta,
            'test1keyformeta', 0, 0, TEST_SEQNO, TEST_CAS, '123456789')
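        # the response extras hold two big-endian uint64s: vbucket uuid, then seqno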
        set_meta_vbucket_uuid, set_meta_seqno = struct.unpack(
            '>QQ', set_with_meta_resp[2])
        set_with_meta_dict = {
            'vbucket_uuid': set_meta_vbucket_uuid,
            'seqno': set_meta_seqno
        }

        get_meta_resp = client.generic_request(
            client.memcached('test1keyformeta').getMeta, 'test1keyformeta')
        self.assertTrue(TEST_SEQNO == get_meta_resp[3], \
               msg='get meta seqno does not match as set. Expected {0}, actual {1}'.format(TEST_SEQNO, get_meta_resp[3]) )
        self.assertTrue(TEST_CAS == get_meta_resp[4], \
               msg='get meta cas does not match as set. Expected {0}, actual {1}'.format(TEST_CAS, get_meta_resp[4]) )

        #   def del_with_meta(self, key, exp, flags, seqno, old_cas, new_cas, vbucket= -1):
        del_with_meta_resp = client.generic_request(
            client.memcached('test1keyformeta').del_with_meta,
            'test1keyformeta', 0, 0, TEST_SEQNO, TEST_CAS, TEST_CAS + 1)
        vbucket_uuid, seqno = struct.unpack('>QQ', del_with_meta_resp[2])
        del_with_meta_dict = {'vbucket_uuid': vbucket_uuid, 'seqno': seqno}

        self.verify_vbucket_and_seqno(set_with_meta_dict, del_with_meta_dict,
                                      'set/del with meta')

        #  do some integer operations
        set_resp = self.extract_vbucket_uuid_and_seqno(
            client.set('key-for-integer-value', 0, 0, '123'))
        incr_resp = client.generic_request(
            client.memcached('key-for-integer-value').incr,
            'key-for-integer-value')
        incr_resp_dict = {'vbucket_uuid': incr_resp[2], 'seqno': incr_resp[3]}
        self.verify_vbucket_and_seqno(set_resp, incr_resp_dict, 'incr')

        decr_resp = client.generic_request(
            client.memcached('key-for-integer-value').decr,
            'key-for-integer-value')
        decr_resp_dict = {'vbucket_uuid': decr_resp[2], 'seqno': decr_resp[3]}
        self.verify_vbucket_and_seqno(incr_resp_dict, decr_resp_dict, 'decr')

        add_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request(
                client.memcached('totally new key').add, 'totally new key', 0,
                0, 'totally new value'))

        self.assertTrue(add_resp['vbucket_uuid'] > 0,
                        msg='Add request vbucket uuid is zero')

        self.log.info('\n\nComplete test_new_response_fields\n\n')
Example #16
    def _run_observe(self):
        tasks = []
        query_set = "true"
        persisted = 0
        mutated = False
        count = 0
        for bucket in self.buckets:
            self.cluster.create_view(self.master, self.default_design_doc,
                                     self.default_view, bucket,
                                     self.wait_timeout * 2)
            client = VBucketAwareMemcached(RestConnection(self.master), bucket)
            self.max_time = timedelta(microseconds=0)
            if self.mutate_by == "multi_set":
                key_val = self._create_multi_set_batch()
                client.setMulti(0, 0, key_val)
            keys = ["observe%s" % (i) for i in range(self.num_items)]
            for key in keys:
                mutated = False
                while not mutated and count < 60:
                    try:
                        if self.mutate_by == "set":
                            # client.memcached(key).set(key, 0, 0, "set")
                            client.set(key, 0, 0, "setvalue")
                        elif self.mutate_by == "append":
                            client.memcached(key).append(key, "append")
                        elif self.mutate_by == "prepend":
                            client.memcached(key).prepend(key, "prepend")
                        elif self.mutate_by == "incr":
                            client.memcached(key).incr(key, 1)
                        elif self.mutate_by == "decr":
                            client.memcached(key).decr(key)
                        mutated = True
                        t_start = datetime.now()
                    except MemcachedError as error:
                        if error.status == 134:
                            loaded = False
                            self.log.error(
                                "Memcached error 134, wait for 5 seconds and then try again"
                            )
                            count += 1
                            time.sleep(5)
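                # NOTE: 'persisted' is never reset inside the key loop, so only
                # the first key is actually polled for persistence here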
                while persisted == 0:
                    opaque, rep_time, persist_time, persisted, cas = client.observe(
                        key)
                t_end = datetime.now()
                #self.log.info("##########key:-%s################" % (key))
                #self.log.info("Persisted:- %s" % (persisted))
                #self.log.info("Persist_Time:- %s" % (rep_time))
                #self.log.info("Time2:- %s" % (t_end - t_start))
                if self.max_time <= (t_end - t_start):
                    self.max_time = (t_end - t_start)
                    self.log.info("Max Time taken for observe is :- %s" %
                                  self.max_time)
                    self.log.info("Cas Value:- %s" % (cas))
            query = {
                "stale": "false",
                "full_set": "true",
                "connection_timeout": 600000
            }
            self.cluster.query_view(self.master,
                                    "dev_Doc1",
                                    self.default_view.name,
                                    query,
                                    self.num_items,
                                    bucket,
                                    timeout=self.wait_timeout)
            self.log.info(
                "Observe Validation:- view: %s in design doc dev_Doc1 and in bucket %s"
                % (self.default_view, bucket))
            # check whether observe has to run with delete and delete parallel with observe or not
            if len(self.observe_with) > 0:
                if self.observe_with == "delete":
                    self.log.info("Deleting 0- %s number of items" %
                                  (self.num_items // 2))
                    self._load_doc_data_all_buckets('delete', 0,
                                                    self.num_items // 2)
                    query_set = "true"
                elif self.observe_with == "delete_parallel":
                    self.log.info("Deleting Parallel 0- %s number of items" %
                                  (self.num_items // 2))
                    tasks = self._async_load_doc_data_all_buckets(
                        'delete', 0, self.num_items // 2)
                    query_set = "false"
                for key in keys:
                    opaque, rep_time, persist_time, persisted, cas = client.memcached(
                        key).observe(key)
                    self.log.info("##########key:-%s################" % (key))
                    self.log.info("Persisted:- %s" % (persisted))
                if self.observe_with == "delete_parallel":
                    for task in tasks:
                        task.result()

                query = {
                    "stale": "false",
                    "full_set": query_set,
                    "connection_timeout": 600000
                }
                self.cluster.query_view(self.master,
                                        "dev_Doc1",
                                        self.default_view.name,
                                        query,
                                        self.num_items // 2,
                                        bucket,
                                        timeout=self.wait_timeout)
                self.log.info(
                    "Observe Validation:- view: %s in design doc dev_Doc1 and in bucket %s"
                    % (self.default_view, self.default_bucket_name))
        """test_observe_basic_data_load_delete will test observer basic scenario
    def test_failover(self):

        self.log.info('\n\nStarting test_failover')

        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        h = client.sendHellos(
            memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO)

        self.log.info(
            '\n\nVerify responses are correct after graceful failover')

        op_data = self.extract_vbucket_uuid_and_seqno(
            client.set('failoverkey', 0, 0, 'failovervalue'))
        op_data['format_type'] = 'no_failover'

        # don't really need to do this so it is commented
        #pre_failover_results = self.observe_seqno_response_to_dict( client.observe_seqno('failoverkey', vbucket_uuid) )

        # which server did the key go to and gracefully fail that server

        self.log.info('\n\nstarting graceful failover scenario')
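        # locate the node that owns the key's vbucket so that exact node can be failed over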
        server_with_key = client.memcached('failoverkey').host
        self.log.info(
            '\n\nserver {0} has the key and it will be failed over'.format(
                server_with_key))

        RebalanceHelper.wait_for_persistence(self.master,
                                             self.default_bucket_name)

        # now failover
        RestConnection(self.master).fail_over(otpNode='ns_1@' +
                                              server_with_key,
                                              graceful=True)

        if server_with_key in self.servers:
            self.servers.remove(server_with_key)

        self.log.info('server should be failed over now')

        time.sleep(5)
        # reinstantiate the client so we get the new view of the world
        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        server_with_key = client.memcached('failoverkey').host
        self.log.info('\n\nkey is now on server {0}'.format(server_with_key))

        after_failover_results = self.observe_seqno_response_to_dict(
            client.observe_seqno('failoverkey', op_data['vbucket_uuid']))

        # verify: no (hard) failover, everything else as before
        self.check_results(op_data, after_failover_results)
        self.log.info('Test complete')

        # now do a hard failover

        # which server did the key go to and gracefully fail that server

        time.sleep(30)
        self.log.info('\n\nstarting hard failover scenario')

        client.sendHellos(
            memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO)
        op_data = self.extract_vbucket_uuid_and_seqno(
            client.set('hardfailoverkey', 0, 0, 'failovervalue'))
        op_data['format_type'] = 'hard_failover'

        server_with_key = client.memcached('hardfailoverkey').host
        self.log.info(
            '\n\nserver {0} has the key and it will be hard failed over'.
            format(server_with_key))

        # now failover
        RestConnection(self.master).fail_over(otpNode='ns_1@' +
                                              server_with_key,
                                              graceful=False)

        if server_with_key in self.servers:
            self.servers.remove(server_with_key)

        self.log.info('\n\nserver should be failed over now')

        time.sleep(10)
        # reinstantiate the client so we get the new view of the world
        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        server_with_key = client.memcached('hardfailoverkey').host
        self.log.info('\n\nkey is now on server {0}'.format(server_with_key))

        time.sleep(10)

        after_failover_results = self.observe_seqno_response_to_dict(
            client.observe_seqno('hardfailoverkey', op_data['vbucket_uuid']))

        self.check_results(op_data, after_failover_results)

        self.log.info('Test complete')
Example #18
    def test_basic_operations(self):
        self.log.info('\n\nStarting test_basic_operations')

        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        h = client.sendHellos(
            memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO)
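        # with the MUTATION_SEQNO feature negotiated, mutation responses should
        # carry the vbucket uuid and seqno in their extras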

        all_clients = []
        for s in self.servers:
            all_clients.append(
                MemcachedClientHelper.direct_client(s,
                                                    self.default_bucket_name))

        # basic case
        op_data = self.extract_vbucket_uuid_and_seqno(
            client.set('test1key', 0, 0, 'test1value'))
        op_data['format_type'] = 'no_failover'

        for s in self.servers:
            RebalanceHelper.wait_for_persistence(s, self.default_bucket_name)

        o = client.observe_seqno('test1key', op_data['vbucket_uuid'])
        results = self.observe_seqno_response_to_dict(o)
        self.check_results(op_data, results)

        # 2. Disable persistence, Set a key, observe seqno, should not be persisted,
        #    enable and wait for persistence, observe seqno and check everything

        self.log.info(
            '\n\nVerify responses are correct when keys are not persisted')
        for i in all_clients:
            i.stop_persistence()
        mc = MemcachedClientHelper.direct_client(self.master, "default")

        self.log.info('setting the kv')
        op_data = self.extract_vbucket_uuid_and_seqno(
            client.set('test2key', 0, 0, 'test2value'))
        op_data['format_type'] = 'no_failover'

        self.log.info('calling observe seq no')
        o = client.observe_seqno('test2key', op_data['vbucket_uuid'])
        results = self.observe_seqno_response_to_dict(o)
        # can't use check_results because the key has not been persisted yet
        self.assertTrue(
            op_data['vbucket_uuid'] == results['new_vbucket_uuid'],
            msg='Observe Vbucket uuid does not match. Expected: {0}. Actual {1}'
            .format(hex(op_data['vbucket_uuid']),
                    hex(results['new_vbucket_uuid'])))

        self.assertTrue(
            op_data['seqno'] == results['current_seqno'],
            msg='Observe seqno does not match. Expected: {0}. Actual {1}'.
            format(op_data['seqno'], results['current_seqno']))

        self.assertTrue(
            op_data['seqno'] > results['last_persisted_seqno'],
            msg='Persisted seqno is too big. Expected: {0}. Actual {1}'.format(
                op_data['seqno'], results['last_persisted_seqno']))

        self.log.info('starting persistence')

        for s in all_clients:
            s.start_persistence()

        for s in self.servers:
            RebalanceHelper.wait_for_persistence(s, self.default_bucket_name)

        results = self.observe_seqno_response_to_dict(
            client.observe_seqno('test2key', op_data['vbucket_uuid']))
        self.check_results(op_data, results)

        # error case - broken
        """
        mc.set('badbuckettestkey', 0, 0, 'testvalue',1)
        try:
           o = client.observe_seqno('badbuckettestkey', 2)
           self.fail('bucket is incorrect, should have returned an error')
        except AssertionError:    # this is to catch the above fail, should it ever happen
            raise
        except Exception as ex:
            traceback.print_exc()
            if ex.status != memcacheConstants.ERR_NOT_FOUND:
                self.log.info('Observe seqno incorrect error code for invalid bucket. Expected: {0}. Actual {1}'.format(
                  memcacheConstants.ERR_NOT_FOUND, ex.status))
                raise Exception(ex)
        """

        self.log.info('\n\nComplete test_basic_operations')
Example #19
    def _run_observe(self):
        tasks = []
        query_set = "true"
        persisted = 0
        mutated = False
        count = 0
        for bucket in self.buckets:
            self.cluster.create_view(self.master, self.default_design_doc,
                                      self.default_view, bucket , self.wait_timeout * 2)
            client = VBucketAwareMemcached(RestConnection(self.master), bucket)
            self.max_time = timedelta(microseconds=0)
            if self.mutate_by == "multi_set":
                key_val = self._create_multi_set_batch()
                client.setMulti(0, 0, key_val)
            keys = ["observe%s" % (i) for i in xrange(self.num_items)]
            for key in keys:
                mutated = False
                while not mutated and count < 60:
                    try:
                        if self.mutate_by == "set":
                            # client.memcached(key).set(key, 0, 0, "set")
                            client.set(key, 0, 0, "setvalue")
                        elif self.mutate_by == "append":
                            client.memcached(key).append(key, "append")
                        elif self.mutate_by == "prepend" :
                            client.memcached(key).prepend(key, "prepend")
                        elif self.mutate_by == "incr":
                            client.memcached(key).incr(key, 1)
                        elif self.mutate_by == "decr":
                            client.memcached(key).decr(key)
                        mutated = True
                        t_start = datetime.now()
                    except MemcachedError as error:
                        if error.status == 134:
                            loaded = False
                            self.log.error("Memcached error 134, wait for 5 seconds and then try again")
                            count += 1
                            time.sleep(5)
                while persisted == 0:
                    opaque, rep_time, persist_time, persisted, cas = client.observe(key)
                t_end = datetime.now()
                self.log.info("##########key:-%s################" % (key))
                self.log.info("Persisted:- %s" % (persisted))
                self.log.info("Persist_Time:- %s" % (rep_time))
                self.log.info("Time2:- %s" % (t_end - t_start))
                if self.max_time <= (t_end - t_start):
                    self.max_time = (t_end - t_start)
                    self.log.info("Max Time taken for observe is :- %s" % self.max_time)
                    self.log.info("Cas Value:- %s" % (cas))
            query = {"stale" : "false", "full_set" : "true", "connection_timeout" : 60000}
            self.cluster.query_view(self.master, "dev_Doc1", self.default_view.name, query, self.num_items, bucket, timeout=self.wait_timeout)
            self.log.info("Observe Validation:- view: %s in design doc dev_Doc1 and in bucket %s" % (self.default_view, bucket))
            # check whether observe has to run with delete and delete parallel with observe or not
            if len(self.observe_with) > 0:
                if self.observe_with == "delete":
                    self.log.info("Deleting 0- %s number of items" % (self.num_items // 2))
                    self._load_doc_data_all_buckets('delete', 0, self.num_items // 2)
                    query_set = "true"
                elif self.observe_with == "delete_parallel":
                    self.log.info("Deleting Parallel 0- %s number of items" % (self.num_items // 2))
                    tasks = self._async_load_doc_data_all_buckets('delete', 0, self.num_items // 2)
                    query_set = "false"
                for key in keys:
                    opaque, rep_time, persist_time, persisted, cas = client.memcached(key).observe(key)
                    self.log.info("##########key:-%s################" % (key))
                    self.log.info("Persisted:- %s" % (persisted))
                if self.observe_with == "delete_parallel":
                    for task in tasks:
                        task.result()

                query = {"stale" : "false", "full_set" : query_set, "connection_timeout" : 60000}
                self.cluster.query_view(self.master, "dev_Doc1", self.default_view.name, query, self.num_items // 2, bucket, timeout=self.wait_timeout)
                self.log.info("Observe Validation:- view: %s in design doc dev_Doc1 and in bucket %s" % (self.default_view, self.default_bucket_name))

        """test_observe_basic_data_load_delete will test observer basic scenario
Example #20
 def insert_nested_specific_docs(self, num_of_docs, prefix='doc', extra_values={},
                 return_docs=False,collection=None):
     random.seed(12345)
     rest = RestConnection(self.master)
     smart = VBucketAwareMemcached(rest, self.bucket)
     doc_names = []
     for i in range(0, num_of_docs):
         key = doc_name = "{0}-{1}".format(prefix, i)
         geom = {"type": "Point", "coordinates":
                     [random.randrange(-180, 180),
                      random.randrange(-90, 90)]}
         value = {
             "padding": None,
             "d1" :{
                 "padding": None,
                 "d2" :{
                     "int_array" : [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                     "str_array" :["john", "doe", "john", "block", "jim", "john"],
                     "mix_array" : [1, 2, True, False, 'bird', 5.0, 6.0, `123`],
                     "d3" : {
                         "d4_01" : 1,
                         "d4_02" : [21, 22, 23, 24, 25 ],
                         "d4_03" : False,
                         "d4_04" : "San Francisco",
                         "d4_05" : {
                             "d5_01" : random.randrange(6, 13),
                             "d5_02" : [ random.randrange(5, 10), random.randrange(6, 13)],
                             "d5_03" : "abcdefghi",
                             "d5_04" : {
                                "d6_01" : random.randrange(6, 13),
                                "d6_02" : [1, 2, True, False, 'bird', 5.0, 6.0, `123`]
                             }
                         }
                     },
                     "d2_02" : {"d2_02_01":"name"},
                     "d2_03" :geom
                 },
                 "d1_02" :[1, 2, True, False, 'bird', 5.0, 6.0, `123`],
                 "d1_03" : False
             },
             "age": random.randrange(1, 1000),
             "geometry": geom,
             "array" :[0,1,2,3,4,5,6,7,8,9,20],
             "isDict" : True,
             "dict_value" : {"name":"abc", "age":1},
             "height": random.randrange(1, 13000),
             "bloom": random.randrange(1, 6),
             "shed_leaves": random.randrange(6, 13)}
         value.update(extra_values)
         if not return_docs:
             doc_names.append(doc_name)
         else:
             doc_names.append(value)
         # loop till value is set
         fail_count = 0
         while True:
             try:
                 smart.set(key, 0, 0, json.dumps(value),collection=collection)
                 break
             except MemcachedError as e:
                 fail_count += 1
                 if (e.status == 133 or e.status == 132) and fail_count < 60:
                     if i == 0:
                         self.log.error("waiting 5 seconds. error {0}"
                                        .format(e))
                         time.sleep(5)
                     else:
                         self.log.error(e)
                         time.sleep(1)
                 else:
                     raise e
     self.log.info("Inserted {0} json documents".format(num_of_docs))
     return doc_names
Example #21
class GenericLoadingTask(Thread, Task):
    def __init__(self, server, bucket, kv_store):
        Thread.__init__(self)
        Task.__init__(self, "load_gen_task")
        self.kv_store = kv_store
        self.client = VBucketAwareMemcached(RestConnection(server), bucket)
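        # NOTE: subclasses implement has_next()/next(); self.exp is also
        # expected to be set elsewhere, since it is never initialized here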

    def execute(self, task_manager):
        self.start()
        self.state = EXECUTING

    def check(self, task_manager):
        pass

    def run(self):
        while self.has_next() and not self.done():
            self.next()
        self.state = FINISHED
        self.set_result(True)

    def has_next(self):
        raise NotImplementedError

    def next(self):
        raise NotImplementedError

    def _unlocked_create(self, partition, key, value):
        try:
            value_json = json.loads(value)
            value_json['mutated'] = 0
            value = json.dumps(value_json)
        except ValueError:
            index = random.choice(range(len(value)))
            value = value[0:index] + random.choice(string.ascii_uppercase) + value[index+1:]

        try:
            self.client.set(key, self.exp, 0, value)
            partition.set(key, value, self.exp)
        except MemcachedError as error:
            self.state = FINISHED
            self.set_exception(error)

    def _unlocked_read(self, partition, key):
        try:
            o, c, d = self.client.get(key)
        except MemcachedError as error:
            if error.status == ERR_NOT_FOUND and partition.get_valid(key) is None:
                pass
            else:
                self.state = FINISHED
                self.set_exception(error)

    def _unlocked_update(self, partition, key):
        value = partition.get_valid(key)
        if value is None:
            return
        try:
            value_json = json.loads(value)
            value_json['mutated'] += 1
            value = json.dumps(value_json)
        except ValueError:
            index = random.choice(range(len(value)))
            value = value[0:index] + random.choice(string.ascii_uppercase) + value[index+1:]

        try:
            self.client.set(key, self.exp, 0, value)
            partition.set(key, value, self.exp)
        except MemcachedError as error:
            self.state = FINISHED
            self.set_exception(error)

    def _unlocked_delete(self, partition, key):
        try:
            self.client.delete(key)
            partition.delete(key)
        except MemcachedError as error:
            if error.status == ERR_NOT_FOUND and partition.get_valid(key) is None:
                pass
            else:
                self.state = FINISHED
                self.set_exception(error)
Example #22
    def test_basic_operations(self):
        self.log.info('\n\nStarting test_basic_operations')


        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        h = client.sendHellos(memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO)

        all_clients = []
        for s in self.servers:
            all_clients.append( MemcachedClientHelper.direct_client(s, self.default_bucket_name))


        # basic case
        op_data = self.extract_vbucket_uuid_and_seqno( client.set('test1key', 0, 0, 'test1value') )
        op_data['format_type'] = 'no_failover'


        for s in self.servers:
            RebalanceHelper.wait_for_persistence(s, self.default_bucket_name)

        o = client.observe_seqno('test1key', op_data['vbucket_uuid'])
        results = self.observe_seqno_response_to_dict( o )
        self.check_results( op_data, results)

        # 2. Disable persistence, Set a key, observe seqno, should not be persisted,
        #    enable and wait for persistence, observe seqno and check everything


        self.log.info('\n\nVerify responses are correct when keys are not persisted')
        for i in all_clients:
            i.stop_persistence()
        mc = MemcachedClientHelper.direct_client(self.master, "default")


        self.log.info('setting the kv')
        op_data = self.extract_vbucket_uuid_and_seqno( client.set('test2key', 0, 0, 'test2value') )
        op_data['format_type'] = 'no_failover'

        self.log.info('calling observe seq no')
        o = client.observe_seqno('test2key', op_data['vbucket_uuid'])
        results = self.observe_seqno_response_to_dict( o )
        # can't use check_results because the key has not been persisted yet
        self.assertTrue(op_data['vbucket_uuid'] == results['new_vbucket_uuid'],
           msg='Observe Vbucket uuid does not match. Expected: {0}. Actual {1}'.format(
                    hex(op_data['vbucket_uuid']), hex(results['new_vbucket_uuid'])) )

        self.assertTrue(op_data['seqno'] == results['current_seqno'],
           msg='Observe seqno does not match. Expected: {0}. Actual {1}'.format(
                    op_data['seqno'], results['current_seqno']) )


        self.assertTrue(op_data['seqno'] > results['last_persisted_seqno'],
           msg='Persisted seqno is too big. Expected: {0}. Actual {1}'.format(
                    op_data['seqno'], results['last_persisted_seqno']) )



        self.log.info('starting persistence')

        for s in all_clients:
            s.start_persistence()

        for s in self.servers:
            RebalanceHelper.wait_for_persistence(s, self.default_bucket_name)

        results = self.observe_seqno_response_to_dict( client.observe_seqno('test2key', op_data['vbucket_uuid']) )
        self.check_results( op_data, results)



        # error case
        mc.set('badbuckettestkey', 0, 0, 'testvalue', 1)
        try:
            o = client.observe_seqno('badbuckettestkey', 2)
            self.fail('bucket is incorrect, should have returned an error')
        except AssertionError:    # this is to catch the above fail, should it ever happen
            raise
Example #23
    def test_failover(self):

        self.log.info('\n\nStarting test_failover')

        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        h = client.sendHellos(memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO)


        self.log.info('\n\nVerify responses are correct after graceful failover')

        op_data = self.extract_vbucket_uuid_and_seqno( client.set('failoverkey', 0, 0, 'failovervalue') )
        op_data['format_type'] = 'no_failover'

        # don't really need to do this so it is commented
        #pre_failover_results = self.observe_seqno_response_to_dict( client.observe_seqno('failoverkey', vbucket_uuid) )

        # which server did the key go to and gracefully fail that server

        self.log.info('\n\nstarting graceful failover scenario')
        server_with_key = client.memcached( 'failoverkey').host
        self.log.info('\n\nserver {0} has the key and it will be failed over'.format(server_with_key))


        RebalanceHelper.wait_for_persistence(self.master, self.default_bucket_name)

        # now failover
        RestConnection(self.master).fail_over(otpNode = 'ns_1@' + server_with_key, graceful=True)

        if server_with_key in self.servers:
            self.servers.remove(server_with_key)



        self.log.info('server should be failed over now')

        time.sleep(5)
        # reinstantiate the client so we get the new view of the world
        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        server_with_key = client.memcached( 'failoverkey').host
        self.log.info('\n\nkey is now on server {0}'.format(server_with_key))

        after_failover_results = self.observe_seqno_response_to_dict(
            client.observe_seqno('failoverkey', op_data['vbucket_uuid']) )


        # verify: no (hard) failover, everything else as before
        self.check_results( op_data, after_failover_results)
        self.log.info('Test complete')

        # now do a hard failover

        # which server did the key go to and gracefully fail that server

        self.log.info('\n\nstarting hard failover scenario')

        client.sendHellos(memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO)
        op_data = self.extract_vbucket_uuid_and_seqno( client.set('hardfailoverkey', 0, 0, 'failovervalue') )
        op_data['format_type'] = 'hard_failover'


        server_with_key = client.memcached( 'hardfailoverkey').host
        self.log.info('\n\nserver {0} has the key and it will be hard failed over'.format(server_with_key))


        # now failover
        RestConnection(self.master).fail_over(otpNode = 'ns_1@' + server_with_key, graceful=False)

        if server_with_key in self.servers:
            self.servers.remove(server_with_key)



        self.log.info('\n\nserver should be failed over now')

        time.sleep(5)
        # reinstantiate the client so we get the new view of the world
        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        server_with_key = client.memcached( 'hardfailoverkey').host
        self.log.info('\n\nkey is now on server {0}'.format(server_with_key))

        after_failover_results = self.observe_seqno_response_to_dict(
            client.observe_seqno('hardfailoverkey',op_data['vbucket_uuid']) )

        self.check_results( op_data, after_failover_results)

        self.log.info('Test complete')
Example #24
    def test_new_response_fields(self):

        self.log.info('\n\nStarting test_new_response_fields')

        client = VBucketAwareMemcached(RestConnection(self.master), 'default')


        h = client.sendHellos(memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO)



        set_resp = self.extract_vbucket_uuid_and_seqno( client.set('test1key', 0, 0, '123456789') )

        # test the inplace operations
        test = client.generic_request(client.memcached('test1key').set, 'test1key', 0, 0,'totally new value')
        replace_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request( client.memcached('test1key').replace,  'test1key', 0, 0,'totally new value') )
        self.verify_vbucket_and_seqno( set_resp, replace_resp, 'replace')

        append_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request( client.memcached('test1key').append, 'test1key', 'appended data') )
        self.verify_vbucket_and_seqno(replace_resp, append_resp, 'append')

        prepend_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request( client.memcached('test1key').prepend, 'test1key', 'prepended data') )
        self.verify_vbucket_and_seqno(append_resp, prepend_resp, 'prepend')


        # and finally do the delete
        delete_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request( client.memcached('test1key').delete,'test1key') )
        self.verify_vbucket_and_seqno( set_resp, delete_resp, 'delete')


        #meta commands under construction
        # test the 'meta' commands
        TEST_SEQNO = 123
        TEST_CAS = 456

        set_with_meta_resp = client.generic_request(
            client.memcached('test1keyformeta').set_with_meta, 'test1keyformeta', 0, 0, TEST_SEQNO, TEST_CAS, '123456789')
        set_meta_vbucket_uuid, set_meta_seqno = struct.unpack('>QQ', set_with_meta_resp[2])
        set_with_meta_dict = {'vbucket_uuid':set_meta_vbucket_uuid, 'seqno': set_meta_seqno}



        get_meta_resp = client.generic_request(client.memcached( 'test1keyformeta').getMeta, 'test1keyformeta')
        self.assertTrue(TEST_SEQNO == get_meta_resp[3], \
               msg='get meta seqno does not match as set. Expected {0}, actual {1}'.format(TEST_SEQNO,get_meta_resp[3]) )
        self.assertTrue(TEST_CAS == get_meta_resp[4], \
               msg='get meta cas does not match as set. Expected {0}, actual {1}'.format(TEST_CAS,get_meta_resp[4]) )


        #   def del_with_meta(self, key, exp, flags, seqno, old_cas, new_cas, vbucket= -1):
        del_with_meta_resp = client.generic_request(
            client.memcached('test1keyformeta').del_with_meta,'test1keyformeta', 0, 0, TEST_SEQNO, TEST_CAS, TEST_CAS+1)
        vbucket_uuid, seqno = struct.unpack('>QQ', del_with_meta_resp[2])
        del_with_meta_dict = {'vbucket_uuid':vbucket_uuid, 'seqno': seqno}

        self.verify_vbucket_and_seqno( set_with_meta_dict, del_with_meta_dict, 'set/del with meta')

        #  do some integer operations
        set_resp = self.extract_vbucket_uuid_and_seqno( client.set('key-for-integer-value', 0, 0, '123') )
        incr_resp = client.generic_request(client.memcached('key-for-integer-value').incr, 'key-for-integer-value')
        incr_resp_dict = {'vbucket_uuid':incr_resp[2], 'seqno':incr_resp[3]}
        self.verify_vbucket_and_seqno(set_resp, incr_resp_dict, 'incr')


        decr_resp = client.generic_request(client.memcached('key-for-integer-value').decr,'key-for-integer-value')
        decr_resp_dict = {'vbucket_uuid':decr_resp[2], 'seqno':decr_resp[3]}
        self.verify_vbucket_and_seqno(incr_resp_dict, decr_resp_dict, 'decr')


        add_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request( client.memcached('totally new key').add, 'totally new key', 0, 0,'totally new value') )

        self.assertTrue( add_resp['vbucket_uuid'] > 0, msg='Add request vbucket uuid is zero')

        self.log.info('\n\nComplete test_new_response_fields\n\n')
Example #25
 def insert_nested_specific_docs(self, num_of_docs, prefix='doc', extra_values={},
                 return_docs=False):
     random.seed(12345)
     rest = RestConnection(self.master)
     smart = VBucketAwareMemcached(rest, self.bucket)
     doc_names = []
     for i in range(0, num_of_docs):
         key = doc_name = "{0}-{1}".format(prefix, i)
         geom = {"type": "Point", "coordinates":
                     [random.randrange(-180, 180),
                      random.randrange(-90, 90)]}
         value = {
             "padding": None,
             "d1" :{
                 "padding": None,
                 "d2" :{
                     "int_array" : [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                     "str_array" :["john", "doe", "john", "block", "jim", "john"],
                     "mix_array" : [1, 2, True, False, 'bird', 5.0, 6.0, `123`],
                     "d3" : {
                         "d4_01" : 1,
                         "d4_02" : [21, 22, 23, 24, 25 ],
                         "d4_03" : False,
                         "d4_04" : "San Francisco",
                         "d4_05" : {
                             "d5_01" : random.randrange(6, 13),
                             "d5_02" : [ random.randrange(5, 10), random.randrange(6, 13)],
                             "d5_03" : "abcdefghi",
                             "d5_04" : {
                                "d6_01" : random.randrange(6, 13),
                                "d6_02" : [1, 2, True, False, 'bird', 5.0, 6.0, `123`]
                             }
                         }
                     },
                     "d2_02" : {"d2_02_01":"name"},
                     "d2_03" :geom
                 },
                 "d1_02" :[1, 2, True, False, 'bird', 5.0, 6.0, `123`],
                 "d1_03" : False
             },
             "age": random.randrange(1, 1000),
             "geometry": geom,
             "array" :[0,1,2,3,4,5,6,7,8,9,20],
             "isDict" : True,
             "dict_value" : {"name":"abc", "age":1},
             "height": random.randrange(1, 13000),
             "bloom": random.randrange(1, 6),
             "shed_leaves": random.randrange(6, 13)}
         value.update(extra_values)
         if not return_docs:
             doc_names.append(doc_name)
         else:
             doc_names.append(value)
         # loop till value is set
         fail_count = 0
         while True:
             try:
                 smart.set(key, 0, 0, json.dumps(value))
                 break
             except MemcachedError as e:
                 fail_count += 1
                 if (e.status == 133 or e.status == 132) and fail_count < 60:
                     if i == 0:
                         self.log.error("waiting 5 seconds. error {0}"
                                        .format(e))
                         time.sleep(5)
                     else:
                         self.log.error(e)
                         time.sleep(1)
                 else:
                     raise e
     self.log.info("Inserted {0} json documents".format(num_of_docs))
     return doc_names