Example #1
    def direct_mc_bin_client(self, server, bucket, timeout=30):
        # USE MC BIN CLIENT WHEN NOT USING SDK CLIENT
        rest = RestConnection(server)
        node = None
        try:
            node = rest.get_nodes_self()
        except ValueError:
            self.log.info("could not connect to server {0}, will try scanning all nodes".format(server))
        if not node:
            nodes = rest.get_nodes()
            for n in nodes:
                if n.ip == server.ip and n.port == server.port:
                    node = n
                    break

        if isinstance(server, dict):
            self.log.info("dict:{0}".format(server))
            self.log.info("creating direct client {0}:{1} {2}".format(server["ip"], node.memcached, bucket))
        else:
            self.log.info("creating direct client {0}:{1} {2}".format(server.ip, node.memcached, bucket))
        RestHelper(rest).vbucket_map_ready(bucket, 60)
        vBuckets = RestConnection(server).get_vbuckets(bucket)
        if isinstance(server, dict):
            client = MemcachedClient(server["ip"], node.memcached, timeout=timeout)
        else:
            client = MemcachedClient(server.ip, node.memcached, timeout=timeout)
        if vBuckets is not None:
            client.vbucket_count = len(vBuckets)
        else:
            client.vbucket_count = 0
        bucket_info = rest.get_bucket(bucket)
        return client
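The helper above resolves the node's memcached port over REST and returns a raw binary client sized to the bucket's vBucket map. For orientation, a minimal sketch of driving such a client directly, assuming a reachable data-service endpoint (the host and port below are placeholders, not values taken from the snippet):

import json
from mc_bin_client import MemcachedClient

client = MemcachedClient(host="127.0.0.1", port=11210)  # hypothetical endpoint
client.bucket_select("default")
client.set("greeting", 0, 0, json.dumps({"msg": "hello"}))  # key, expiry, flags, value
_, _, value = client.get("greeting")  # get() returns (flags, cas, value)
print(value)
client.close()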
class SubdocSinglePathTests(SubdocBaseTest):
    def setUp(self):
        super(SubdocSinglePathTests, self).setUp()
        self.client = MemcachedClient(host=self.server.ip)
        self.jsonSchema = {
            "id": "0",
            "number": 0,
            "array": [],
            "child": {},
            "isDict": True,
            "padding": None
        }

    def tearDown(self):
        super(SubdocSinglePathTests, self).tearDown()

    def insertBinaryDocument(self, key):
        pass

    def _createNestedJson(self, key, data):
        if data['levels'] == 0:
            return
        if data['doc'] == {}:
            data['doc'] = copy.copy(self.jsonSchema)
        else:
            data['doc']['child'] = copy.copy(self.jsonSchema)
            data['doc']['child']['array'] = []
            data['doc'] = data['doc']['child']

        data['doc']['id'] = key
        data['doc']['number'] = data['levels']

        for level in range(0, data['levels']):
            data['doc']['array'].append(level)
        return self._createNestedJson(key, {
            'doc': data['doc'],
            'levels': data['levels'] - 1
        })

    def insertJsonDocument(self, key, levels, expiry, size=512):
        data = {'doc': {}, 'levels': levels}
        self._createNestedJson(key, data)
        jsonDump = json.dumps(data['doc'])
        self.client.set(key, expiry, 0, jsonDump)

    def deleteDoc(self, key):
        self.client.delete(key)

    def _load_all_docs(self, key=None, levels=0, num_items=0):
        data = {'doc': {}, 'levels': levels}
        self._createNestedJson(key, data)
        template = data['doc']
        gen_load = SubdocDocumentGenerator(key,
                                           template,
                                           start=0,
                                           end=num_items)

        print("Inserting json data into bucket")
        self._load_all_buckets(self.server, gen_load, "create", 0)
        self._wait_for_stats_all_buckets([self.server])

    def collect_vbucket_num_stats(self, servers, buckets):
        """
            Method to extract the active/replica vbucket counts given by the cbstats tool

            Parameters:

            servers: server information
            buckets: bucket information

            Returns:

            Two maps, active and replica vbucket counts per node:
            {bucket : {node ip : count}}
        """
        active_bucketMap = {}
        replica_bucketMap = {}
        for bucket in buckets:
            active_map_data = {}
            replica_map_data = {}
            for server in servers:
                rest = RestConnection(server)
                port = rest.get_memcached_port()
                client = MemcachedClient(host=server.ip, port=port)
                stats = client.stats('')
                for key in stats.keys():
                    if key == 'vb_active_num':
                        active_map_data[server.ip] = int(stats[key])
                    if key == 'vb_replica_num':
                        replica_map_data[server.ip] = int(stats[key])
            active_bucketMap[bucket.name] = active_map_data
            replica_bucketMap[bucket.name] = replica_map_data
        return active_bucketMap, replica_bucketMap
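Both maps returned above share the shape {bucket : {node ip : count}}. A small consumer sketch with illustrative numbers rather than live cbstats output:

# Hypothetical return values shaped like collect_vbucket_num_stats() output.
active_map = {"default": {"10.0.0.1": 512, "10.0.0.2": 512}}
replica_map = {"default": {"10.0.0.1": 512, "10.0.0.2": 512}}

for bucket, per_node in active_map.items():
    total_active = sum(per_node.values())
    total_replica = sum(replica_map[bucket].values())
    # With one replica configured, replica vbuckets should mirror active ones.
    assert total_active == 1024, "unexpected active vbucket total"
    assert total_replica == total_active, "replica count should match active"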
Example #4
    def __init__(self,
                 serverip="localhost",
                 port=11211,
                 bucket="default",
                 password=""):

        self.client = MemcachedClient(serverip, port)
        self.client.sasl_auth_plain(bucket, password)
Example #5
 def wait_for_warmup(host, port):
     while True:
         client = McdClient(host, port)
         try:
             response = client.stats()
             if response['ep_degraded_mode'] == '0':
                 break
         except Exception:
             # stats can fail while the node is still warming up; retry
             pass
         time.sleep(1)
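A hedged usage sketch for the poller above: block until the node reports it has left degraded (warmup) mode before starting any workload. The endpoint is a placeholder:

wait_for_warmup("127.0.0.1", 11210)  # hypothetical host/port
print("warmup finished, safe to start the workload")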
    def collect_failovers_stats(self, buckets, servers, perNode=True):
        """
            Method to extract the failovers stats given by the cbstats tool

            Parameters:

            buckets: bucket information
            servers: server information
            perNode: if set, collect per-node information; else aggregate across nodes

            Returns:

            Failover stats as follows:
            if not collecting per node :: {bucket : [{key:value}]}
            if collecting per node :: {bucket : {node:[{key:value}]}}
        """
        bucketMap = {}
        for bucket in buckets:
            bucketMap[bucket.name] = {}
        for bucket in buckets:
            dataMap = {}
            for server in servers:
                rest = RestConnection(server)
                port = rest.get_memcached_port()
                client = MemcachedClient(host=server.ip, port=port)
                stats = client.stats('failovers')
                map_data = {}
                num_map = {}
                for o in stats.keys():
                    tokens = o.split(":")
                    vb = tokens[0]
                    key = tokens[1]
                    value = stats[o].split()
                    num = -1
                    if len(tokens) == 3:
                        vb = tokens[0]
                        num = int(tokens[1])
                        key = tokens[2]
                    if vb in map_data and num >= num_map[vb]:
                        map_data[vb][key] = value[0]
                        num_map[vb] = num
                    elif vb in map_data and key == "num_entries":
                        map_data[vb][key] = value[0]
                    elif vb not in map_data:
                        map_data[vb] = {key: value[0]}
                        num_map[vb] = num
                if perNode:
                    dataMap[server.ip] = map_data
                else:
                    dataMap.update(map_data)
            bucketMap[bucket.name] = dataMap
        return bucketMap
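The per-node shape returned above nests failover-log entries under bucket, node, and vbucket. A sketch that walks it, using illustrative values rather than live output:

# Hypothetical output shaped like collect_failovers_stats(..., perNode=True).
failover_map = {
    "default": {
        "10.0.0.1": {
            "vb_0": {"id": "133798953192848", "seq": "0", "num_entries": "1"},
        },
    },
}

for bucket, nodes in failover_map.items():
    for node, vbuckets in nodes.items():
        for vb, entry in vbuckets.items():
            # Each vbucket keeps its newest failover-log entry plus num_entries.
            print(bucket, node, vb, entry["num_entries"])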
Example #8
    def collect_vbucket_stats(self, buckets, servers, collect_vbucket=True,
                              collect_vbucket_seqno=True,
                              collect_vbucket_details=True, perNode=True):
        """
            Method to extract the vbuckets stats given by cbstats tool

            Parameters:

            buckets: bucket information
            servers: server information
            collect_vbucket: take vbucket type stats
            collect_vbucket_seqno: take vbucket-seqno type stats
            collect_vbucket_details: take vbucket-details type stats
            perNode: if True collects data per node else takes a union across nodes

            Returns:

            The output can be in two formats

            if we are doing per node data collection
            Vbucket Information :: {bucket { node : [vbucket_seqno {key:value} U vbucket_details {key:value} U vbucket {key:value}]}}

            if we are not doing per node data collection
            Vbucket Information :: {bucket : [vbucket_seqno {key:value} U vbucket_details {key:value} U vbucket {key:value}]}
        """
        bucketMap = {}
        vbucket = []
        vbucket_seqno = []
        vbucket_details = []
        for bucket in buckets:
            bucketMap[bucket.name] = {}
        for bucket in buckets:
            dataMap = {}
            for server in servers:
                map_data = {}
                rest = RestConnection(server)
                port = rest.get_memcached_port()
                client = MemcachedClient(host=server.ip, port=port)
                if collect_vbucket:
                    vbucket = client.stats('vbucket')
                    self.createMapVbucket(vbucket, map_data)
                if collect_vbucket_seqno:
                    vbucket_seqno = client.stats('vbucket-seqno')
                    self.createMapVbucket(vbucket_seqno, map_data)
                if collect_vbucket_details:
                    vbucket_details = client.stats('vbucket-details')
                    self.createMapVbucket(vbucket_details, map_data)
                if perNode:
                    dataMap[server.ip] = map_data
                else:
                    dataMap.update(map_data)
            bucketMap[bucket.name] = dataMap
        return bucketMap
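The perNode flag above only changes how each node's map_data is folded into dataMap: kept under the node's ip, or unioned via dict.update. A toy illustration with placeholder stat keys:

map_a = {"vb_0:high_seqno": "42"}
map_b = {"vb_1:high_seqno": "17"}

# perNode=True keeps a node level; perNode=False unions keys across nodes.
per_node = {"10.0.0.1": map_a, "10.0.0.2": map_b}

unioned = {}
unioned.update(map_a)
unioned.update(map_b)  # later nodes overwrite any duplicate keys

print(per_node)
print(unioned)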
Example #10
 def wait_for_warmup(self, host, port):
     if self.bucket_type == 'ephemeral':
         return
     while True:
         client = McdClient(host, port)
         try:
             client.bucket_select("default")
             response = client.stats()
             # check the old style or new style (as of 4.5) results
             mode = response.get('ep_degraded_mode')
             if mode in ('0', 'false'):
                 break
         except Exception:
             # stats/bucket_select can fail mid-warmup; keep polling
             pass
         time.sleep(1)
Example #12
 def __init__(self, testcase, bucket):
     self.testcase = testcase
     self.bucket = bucket
     self.input = TestInputSingleton.input
     self.servers = self.input.servers
     self.master = self.servers[0]
     self.rest = RestConnection(self.master)
     self.log = logger.Logger.get_logger()
     self.client = MemcachedClient(host=self.master.ip)
     self.jsonSchema = {
         "id": "0",
         "number": 0,
         "array": [],
         "child": {},
         "isDict": True,
         "padding": None
     }
     self.jsonSchema_longPath = {
         "id": "0",
         "number": 0,
         "array12345678901234567890123456789": [],
         "child12345678901234567890123456789": {},
         "isDict": True,
         "padding": None
     }
Example #13
class MemcachedHelper(object):
    def __init__(self,
                 serverip="localhost",
                 port=11211,
                 bucket="default",
                 password=""):

        self.client = MemcachedClient(serverip, port)
        self.client.sasl_auth_plain(bucket, password)

    def write_one_json(self, key, doc):

        count = 0
        loaded = False
        while count < 60 and not loaded:
            try:
                self.client.set(key, 0, 0, json.dumps(doc))
                loaded = True
            except MemcachedError as error:
                if error.status == 134:
                    print("Memcached TMP_OOM, retrying in 5 seconds...")
                    count += 1
                    time.sleep(5)
                elif error.status == 7:
                    print("Not-my-vbucket error. If on Mac, please specify vbuckets as 64.")
                    print("If rebalance is in progress, please wait for it to finish.\n")
                    break
                else:
                    print(error)
                    break

    def read_one_json(self, key):

        doc = ""
        try:
            _, _, doc = self.client.get(key)
        except MemcachedError as error:
            print(error)

        return doc

    def write_batch(self, batch_id, data):
        print "Batch Insert not implemented for Memcached"
    def collect_compare_dcp_stats(self, buckets, servers, perNode=True,
                                  stat_name='unacked_bytes', compare_value=0,
                                  flow_control_buffer_size=20971520,
                                  filter_list=None):
        """
            Method to compare dcp stats given by the cbstats tool against an expected value

            Parameters:

            buckets: bucket information
            servers: server information
            stat_name: stat we are searching to compare
            compare_value: the comparison value to be satisfied
            flow_control_buffer_size: threshold applied to mapreduce view streams
            filter_list: substrings of stat keys to skip

            Returns:

            map of buckets indicating whether the stat comparison was satisfied

            example:: unacked_bytes in dcp
        """
        filter_list = filter_list or []
        bucketMap = {}
        for bucket in buckets:
            bucketMap[bucket.name] = True
        for bucket in buckets:
            dataMap = {}
            for server in servers:
                rest = RestConnection(server)
                port = rest.get_memcached_port()
                client = MemcachedClient(host=server.ip, port=port)
                stats = client.stats('dcp')
                map_data = {}
                for key in stats.keys():
                    skip = False
                    if stat_name in key:
                        for filter_key in filter_list:
                            if filter_key in key:
                                skip = True
                        value = int(stats[key])
                        if not skip:
                            if value != compare_value:
                                if "eq_dcpq:mapreduce_view" in key:
                                    if value >= flow_control_buffer_size:
                                        bucketMap[bucket.name] = False
                                else:
                                    bucketMap[bucket.name] = False
        return bucketMap
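The returned map can then gate a test, for example failing when any bucket still reports unacknowledged DCP bytes. A sketch against an illustrative return value:

# Hypothetical output of collect_compare_dcp_stats(stat_name='unacked_bytes').
dcp_ok = {"default": True, "sasl_bucket": True}

stuck = [bucket for bucket, ok in dcp_ok.items() if not ok]
assert not stuck, "buckets with unacked dcp bytes: {0}".format(stuck)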
Example #16
    def _add_conn(self, server):
        if not self.store:
            print("<%s> failed to add conn, invalid store object"\
                % self.__class__.__name__)
            return False

        if self.store.__class__.__name__ == "StoreMembaseBinary":
            print("<%s> _add_conn: %s"\
                % (self.__class__.__name__, server))
            host, port = server.split(":")
            conn = MemcachedClient(host, int(port))
            self.conns[server] = conn

        return True
Example #17
    def _build_conns(self):
        """build separate connections based on store"""
        if not self.store:
            print("<%s> failed to build connections, invalid store object"
                  % self.__class__.__name__)
            return False

        if self.store.__class__.__name__ == "StoreMemcachedBinary":
            conn = MemcachedClient(self.store.conn.host, self.store.conn.port)
            server_str = "{0}:{1}".format(self.store.conn.host,
                                          self.store.conn.port)
            self.conns[server_str] = conn
        elif self.store.__class__.__name__ == "StoreMembaseBinary":
            for memcached in self.store.awareness.memcacheds.values():
                conn = MemcachedClient(memcached.host, memcached.port)
                server_str = "{0}:{1}".format(conn.host, conn.port)
                self.conns[server_str] = conn
            self.awareness = self.store.awareness
        else:
            print("<%s> error: unsupported store object %s" %
                  (self.__class__.__name__, self.store.__class__.__name__))
            return False

        return True
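Once _build_conns() succeeds, self.conns maps "host:port" strings to per-node MemcachedClient connections, which is what allows stats to be polled node by node. A toy walk over such a map (endpoints are placeholders):

conns = {"10.0.0.1:11210": None, "10.0.0.2:11210": None}  # hypothetical map

for server_str in conns:
    host, port = server_str.split(":")
    print("would poll stats on %s:%d" % (host, int(port)))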
Example #19
    def test_xattr_compression(self):
        # MB-32669
        # subdoc.subdoc_simple_dataset.SubdocSimpleDataset.test_xattr_compression,compression=active
        mc = MemcachedClient(self.master.ip, 11210)
        mc.sasl_auth_plain(self.master.rest_username,
                           self.master.rest_password)
        mc.bucket_select('default')

        self.key = "test_xattr_compression"
        self.nesting_level = 5
        array = {'i_add': 0, 'i_sub': 1, 'a_i_a': [0, 1], 'ai_sub': [0, 1]}
        base_json = self.generate_json_for_nesting()
        nested_json = self.generate_nested(base_json, array,
                                           self.nesting_level)
        jsonDump = json.dumps(nested_json)
        stats = mc.stats()
        self.assertEqual(stats['ep_compression_mode'], 'active')

        scheme = "http"
        host = "{0}:{1}".format(self.master.ip, self.master.port)
        self.sdk_client = SDKClient(scheme=scheme,
                                    hosts=[host],
                                    bucket="default")

        self.sdk_client.set(self.key, value=jsonDump, ttl=60)
        rv = self.sdk_client.cb.mutate_in(self.key,
                                          SD.upsert('my.attr',
                                                    "value",
                                                    xattr=True,
                                                    create_parents=True),
                                          ttl=60)
        self.assertTrue(rv.success)

        # wait for it to persist and then evict the key
        persisted = 0
        while persisted == 0:
            opaque, rep_time, persist_time, persisted, cas = mc.observe(
                self.key)

        mc.evict_key(self.key)
        time.sleep(65)
        try:
            self.client.get(self.key)
            self.fail("the key should get expired")
        except mc_bin_client.MemcachedError as error:
            self.assertEqual(error.status, 1)

        stats = mc.stats()
        self.assertEqual(int(stats['curr_items']), 0)
        self.assertEqual(int(stats['curr_temp_items']), 0)