Beispiel #1
0
    def change_password(self, new_password="******"):
        """Change the cluster admin password via couchbase-cli and record
        the new password on every matching in-memory server object."""
        master = self.cluster.master
        known_nodes = RestConnection(master).node_statuses()

        cli = CouchbaseCLI(master, master.rest_username, master.rest_password)
        output, err, result = cli.setting_cluster(data_ramsize=False,
                                                  index_ramsize=False,
                                                  fts_ramsize=False,
                                                  cluster_name=None,
                                                  cluster_username=None,
                                                  cluster_password=new_password,
                                                  cluster_port=False)

        self.log.info(output)
        # MB-10136 & MB-9991
        if not result:
            raise Exception("Password didn't change!")
        self.log.info("new password '%s' on nodes: %s" %
                      (new_password, [n.ip for n in known_nodes]))
        # Sync the stored rest_password of the first server object that
        # matches each live node (ip + port).
        for node in known_nodes:
            match = next((srv for srv in self.cluster.servers
                          if srv.ip == node.ip
                          and int(srv.port) == int(node.port)), None)
            if match is not None:
                match.rest_password = new_password
 def _initialize_node_with_new_data_location(self,
                                             server,
                                             data_location,
                                             services=None):
     """Point *server* at *data_location* via couchbase-cli node-init,
     then asynchronously (re)initialize the node and wait for it."""
     init_port = server.port or '8091'
     cli = CouchbaseCLI(server, server.rest_username, server.rest_password)
     output, error, status = cli.node_init(data_location, None, None)
     self.log.info(output)
     if error or "ERROR" in output:
         self.log.info(error)
         self.fail("Failed to set new data location. Check error message.")
     # Single node: kick off the async init and block on its result.
     task = self.cluster.async_init_node(
         server,
         self.disabled_consistent_view,
         self.rebalanceIndexWaitingDisabled,
         self.rebalanceIndexPausingDisabled,
         self.maxParallelIndexers,
         self.maxParallelReplicaIndexers,
         init_port,
         self.quota_percent,
         services=services,
         index_quota_percent=self.index_quota_percent,
         gsi_type=self.gsi_type)
     task.result()
Beispiel #3
0
    def __reset_node(self, cluster_util, cluster, node, crash_warning=False):
        """Reset *node* to a clean state.

        FQDN / raw-IPv6 nodes only get their address family re-applied via
        couchbase-cli. Plain-IP nodes are stopped, have their data dir (and,
        unless crash_warning is set, the crash-dump dir) wiped, and are
        started again.

        :param cluster_util: helper exposing stop_server / start_server
        :param cluster: cluster object passed through to cluster_util
        :param node: server object to reset
        :param crash_warning: when True, keep the crash dir for later checks
        """
        shell = RemoteMachineShellConnection(node)
        rest = RestConnection(node)
        try:
            if '.com' in node.ip or ':' in node.ip:
                # Auto-failover is disabled around the family switch so the
                # node is not failed over while briefly unreachable.
                _ = rest.update_autofailover_settings(False, 120)
                cli = CouchbaseCLI(node, node.rest_username,
                                   node.rest_password)
                output, err, result = cli.set_address_family("ipv6")
                if not result:
                    raise Exception("Addr family was not changed to ipv6")
                _ = rest.update_autofailover_settings(True, 120)
            else:
                data_path = rest.get_data_path()
                # Derive the crash-dump dir from the already-fetched data
                # path instead of issuing a second, redundant REST call.
                core_path = str(data_path).split("data")[0] + "crash/"
                if not os.path.isdir(core_path):
                    core_path = "/opt/couchbase/var/lib/couchbase/crash/"

                # Stop node
                cluster_util.stop_server(cluster, node)
                # Delete Path
                shell.cleanup_data_config(data_path)
                if not crash_warning:
                    shell.cleanup_data_config(core_path)

                cluster_util.start_server(cluster, node)
                if not RestConnection(node).is_ns_server_running():
                    self.log.error("%s ns_server not running" % node.ip)
        except Exception as e:
            # Best-effort reset: log and continue so teardown can proceed.
            self.log.critical(e)
        finally:
            shell.disconnect()
 def setUp(self):
     """Run the base-class setup, read input params and wire the
     REST/CLI helpers (enabling dp mode on the CLI)."""
     super(basic_collections, self).setUp()
     self.log = logger.Logger.get_logger()
     self.input = TestInputSingleton.input
     param = self.input.param
     self.default_bucket_name = param("default_bucket_name", "default")
     self.servers = self.input.servers
     self.master = self.servers[0]
     self.use_rest = param("use_rest", True)
     self.use_cli = param("use_cli", False)
     self.rest = Collections_Rest(self.master)
     self.cli = CouchbaseCLI(self.master,
                             self.master.rest_username,
                             self.master.rest_password)
     self.cli.enable_dp()
Beispiel #5
0
    def test_add_delete_cbas_nodes_CLI(self):
        '''
        Description: Test add/remove nodes via CLI.

        Steps:
        1. Add nodes by randomly picking up the services from the service_list.
        2. Check that correct services are running after the node is added.

        Author: Ritesh Agarwal
        '''
        import random
        service_list = {"data,analytics,index": ["kv", "cbas", "index"],
                        "analytics,query,index": ["cbas", "n1ql", "index"],
                        "data,analytics,query": ["kv", "cbas", "n1ql"],
                        "analytics,query,fts": ["cbas", "n1ql", "fts"],
                        }
        for cbas_server in self.cluster.cbas_nodes:
            if cbas_server.ip == self.cluster.master.ip:
                continue
            # dict views are not sequences in Python 3; random.choice()
            # raises TypeError on dict.keys(), so materialize a list first.
            service = random.choice(list(service_list))
            self.log.info("Adding %s to the cluster with services %s to cluster %s" % (cbas_server, service, self.cluster.master))

            cli = CouchbaseCLI(self.cluster.master,
                               self.cluster.master.rest_username,
                               self.cluster.master.rest_password)
            stdout, stderr, result = cli.server_add(
                "http://" + cbas_server.ip + ":" + cbas_server.port,
                cbas_server.rest_username, cbas_server.rest_password,
                None, service, None)
            self.assertTrue(result, "Server %s is not added to the cluster %s . Error: %s" % (cbas_server, self.cluster.master, stdout + stderr))
            self.assertTrue(self.cluster_util.rebalance(self.cluster),
                            "Rebalance Failed")

            # Check that the node got exactly the services we requested.
            nodes = self.rest.get_nodes_data_from_cluster()
            for node in nodes:
                if node["otpNode"].find(cbas_server.ip) != -1:
                    actual_services = set(node["services"])
                    expected_services = set(service_list[service])
                    self.log.info("Expected:%s Actual:%s" % (expected_services, actual_services))
                    self.assertTrue(actual_services == expected_services, "Service setting failed")
                    self.log.info("Successfully added %s to the cluster with services %s" % (node["otpNode"], service))

        # Remove every non-master cbas node in one rebalance-out.
        to_remove = [srv.ip for srv in self.cluster.cbas_nodes
                     if srv.ip != self.cluster.master.ip]
        self.log.info("Removing: %s from the cluster: %s" % (to_remove, self.cluster.master))
        cli = CouchbaseCLI(self.cluster.master,
                           self.cluster.master.rest_username,
                           self.cluster.master.rest_password)
        stdout, stderr, result = cli.rebalance(",".join(to_remove))
        if not result:
            # Known server bug: a single retry works around it.
            self.log.info(15 * "#" + "THIS IS A BUG: MB-24968. REMOVE THIS TRY-CATCH ONCE BUG IS FIXED." + 15 * "#")
            stdout, stderr, result = cli.rebalance(",".join(to_remove))
        self.assertTrue(result, "Server %s are not removed from the cluster %s . Console Output: %s , Error: %s" % (to_remove, self.cluster.master, stdout, stderr))
    def setUp(self):
        """Read input params, wire the REST/CLI helpers, and recreate a
        single default bucket for the test."""
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.default_bucket_name = self.input.param("default_bucket_name",
                                                    "default")
        self.servers = self.input.servers
        self.master = self.servers[0]
        self.use_rest = self.input.param("use_rest", True)
        self.use_cli = self.input.param("use_cli", False)
        self.rest = Collections_Rest(self.master)
        self.cli = CouchbaseCLI(self.master,
                                self.master.rest_username,
                                self.master.rest_password)
        self.cli.enable_dp()
        # Start from a clean slate: drop everything, then make one bucket.
        RestConnection(self.master).delete_all_buckets()
        RestConnection(self.master).create_bucket(
            bucket=self.default_bucket_name,
            ramQuotaMB=256,
            proxyPort=11220)
class basic_collections(BaseTestCase):
    """Collection smoke tests: a doc-load/verify cycle against the
    default bucket, driven over REST or couchbase-cli."""

    def setUp(self):
        super(basic_collections, self).setUp()
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        param = self.input.param
        self.default_bucket_name = param("default_bucket_name", "default")
        self.servers = self.input.servers
        self.master = self.servers[0]
        self.use_rest = param("use_rest", True)
        self.use_cli = param("use_cli", False)
        self.rest = Collections_Rest(self.master)
        self.cli = CouchbaseCLI(self.master, self.master.rest_username,
                                self.master.rest_password)
        self.cli.enable_dp()

    def tearDown(self):
        # Leave the cluster bucket-free for the next test.
        RestConnection(self.master).delete_all_buckets()

    def generate_docs_bigdata(self, docs_per_day, start=0,
                              document_size=1024000):
        """Return a generator of large JSON docs of *document_size* bytes
        covering keys [start, docs_per_day)."""
        return JsonGenerator().generate_docs_bigdata(end=docs_per_day,
                                                     start=start,
                                                     value_size=document_size)

    def test_load_collection(self):
        #epengine.basic_collections.basic_collections.test_load_collection,value_size=200,num_items=100,collection=True
        self.value_size = 200
        self.enable_bloom_filter = False
        self.buckets = RestConnection(self.master).get_buckets()
        self.active_resident_threshold = float(
            self.input.param("active_resident_threshold", 100))

        doc_gen = BlobGenerator('eviction', 'eviction-', self.value_size,
                                end=self.num_items)

        # Create all docs, wait for persistence, then verify.
        self._load_all_buckets(self.master, doc_gen, "create", 0)
        self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
        self._verify_all_buckets(self.master)
        self._verify_stats_all_buckets(self.servers[:self.nodes_init])
 def __init__(self, node):
     """Bind a couchbase-cli wrapper (and a logger) to *node*."""
     self.cli = CouchbaseCLI(node)
     self.log = logger.Logger.get_logger()
class CollectionsCLI(object):
    """Couchbase-cli facade for scope/collection management.

    Mutating calls log on success and raise on failure; the get_* calls
    delegate straight to the underlying CLI wrapper.
    """

    def __init__(self, node):
        self.log = logger.Logger.get_logger()
        self.cli = CouchbaseCLI(node)

    def create_collection(self, bucket="default", scope="scope0",
                          collection="mycollection0"):
        """Create a collection, raising on CLI failure."""
        status, content, success = self.cli.create_collection(
            bucket, scope, collection)
        if not success:
            raise Exception(
                "Create collection failed : status:{0},content:{1}".format(
                    status, content))
        self.log.info("Collection created {}->{}->{}".format(
            bucket, scope, collection))
        return success

    def create_scope(self, bucket="default", scope="scope0"):
        """Create a scope, raising on CLI failure."""
        status, content, success = self.cli.create_scope(bucket, scope)
        if not success:
            raise Exception(
                "Create scope failed : status:{0},content:{1}".format(
                    status, content))
        self.log.info("Scope created {}->{}".format(bucket, scope))
        return success

    def delete_collection(self, bucket="default", scope='_default',
                          collection='_default'):
        """Drop a collection, raising on CLI failure."""
        status, content, success = self.cli.delete_collection(
            bucket, scope, collection)
        if not success:
            raise Exception(
                "Drop collection failed : status:{0},content:{1}".format(
                    status, content))
        self.log.info("Collection dropped {}->{}->{}".format(
            bucket, scope, collection))
        return success

    def delete_scope(self, scope, bucket="default"):
        """Drop a scope, raising on CLI failure."""
        status, content, success = self.cli.delete_scope(bucket, scope)
        if not success:
            raise Exception(
                "Drop scope failed : status:{0},content:{1}".format(
                    status, content))
        self.log.info("Scope dropped {}->{}".format(bucket, scope))
        return success

    def create_scope_collection(self, bucket, scope, collection):
        # Both steps must succeed; the collection call is skipped if the
        # scope creation returns falsy.
        return bool(self.cli.create_scope(bucket, scope)
                    and self.cli.create_collection(bucket, scope, collection))

    def delete_scope_collection(self, bucket, scope, collection):
        # Drop the collection first, then its scope; short-circuits on
        # a falsy collection-drop result.
        return bool(self.cli.delete_collection(bucket, scope, collection)
                    and self.cli.delete_scope(bucket, scope))

    def get_bucket_scopes(self, bucket):
        return self.cli.get_bucket_scopes(bucket)

    def get_bucket_collections(self, bucket):
        return self.cli.get_bucket_collections(bucket)

    def get_scope_collections(self, bucket, scope):
        return self.cli.get_scope_collections(bucket, scope)
class basic_collections(BaseTestCase):
    """Scope/collection basics over REST or couchbase-cli: name
    validation, memcached collection access, and default-collection
    deletion. A single fresh bucket is created per test in setUp."""

    def setUp(self):
        # NOTE(review): BaseTestCase.setUp is not invoked here; this class
        # builds its fixture state from TestInputSingleton directly.

        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.default_bucket_name = self.input.param("default_bucket_name", "default")
        self.servers = self.input.servers
        self.master = self.servers[0]
        # use_rest/use_cli select the driver for scope/collection calls.
        # NOTE(review): if both are False, `status` in the tests below is
        # never assigned and would raise NameError — confirm intended.
        self.use_rest = self.input.param("use_rest", True)
        self.use_cli = self.input.param("use_cli", False)
        self.rest = Collections_Rest(self.master)
        self.cli = CouchbaseCLI(self.master, self.master.rest_username, self.master.rest_password)
        self.cli.enable_dp()
        # Start every test from one freshly created default bucket.
        RestConnection(self.master).delete_all_buckets()
        RestConnection(self.master).create_bucket(bucket=self.default_bucket_name,
                               ramQuotaMB=256,
                               proxyPort=11220)

    def tearDown(self):
        # Leave the cluster bucket-free for the next test.
        RestConnection(self.master).delete_all_buckets()

    def test_valid_scope_name(self):
        # epengine.basic_collections.basic_collections.test_valid_scope_name
        # bucket is created, create scope with valid and invalid names
        valid_scope_name = ["MYSCPOE", "MY_SCOPE", "MY-Scope_27%", "Scope_With-Largename_81%Scope%", "8", "A", "a",
                            "aaa9999%"]
        for name in valid_scope_name:
            if self.use_rest:
                status = self.rest.create_scope(self.default_bucket_name, scope=name)
            elif self.use_cli:
                status = self.cli.create_scope(self.default_bucket_name, scope=name)

            if status is True:
                self.log.info("Scope creation passed, name={}".format(name))
            else:
                self.fail("Scope creation failed, name={}".format(name))

        # Names with leading $ % _ { ! or other special characters are
        # expected to be rejected by the server.
        invalid_scope_name = ["$scope", "%scope", "_myscope", "{scope", "s{[]/,.", "scope!@#^&*()", "!SCOPE"]
        for name in invalid_scope_name:
            if self.use_rest:
                status = self.rest.create_scope(scope=name, bucket=self.default_bucket_name)
            elif self.use_cli:
                status = self.cli.create_scope(scope=name, bucket=self.default_bucket_name)
            if status is True:
                self.fail("Scope creation passed for invalid name={}".format(name))
            else:
                self.log.info("Scope creation failed as expected for name={}".format(name))

    def test_valid_collection_name(self):
        # epengine.basic_collections.basic_collections.test_valid_collection_name
        # bucket is created, create scope and then chec for valid collection name.
        scope_name = "myscope"
        self.rest.create_scope(scope=scope_name, bucket=self.default_bucket_name)

        valid_collection_name = ["MYCOLLECTION", "MY_COLLECTION", "MY-Collection_27%", "CollectionsWithLargeNamechecki",
                                 "Colle_With-Largename_%Scope81%", "8", "A", "a", "aaa9999%"]
        for name in valid_collection_name:
            if self.use_rest:
                status = self.rest.create_collection(scope=scope_name, collection=name, bucket=self.default_bucket_name)
            elif self.use_cli:
                status = self.cli.create_collection(scope=scope_name, collection=name, bucket=self.default_bucket_name)
            if status is True:
                self.log.info("Collection creation passed, name={}".format(name))
            else:
                self.fail("Collection creation failed, name={}".format(name))

        invalid_collection_name = ["$collection", "%collection", "_mycollection", "{collection", "s{[]/,.",
                                   "collection!@#^&*()", "!COLLECTIONS"]
        for name in invalid_collection_name:
            # Rejection may surface either as a falsy status or as a raised
            # exception, depending on the driver — both count as expected.
            # NOTE(review): the bare except below likely also swallows the
            # failure raised by self.fail() above — confirm intended.
            try:
                if self.use_rest:
                    status = self.rest.create_collection(scope=scope_name, collection=name,
                                                         bucket=self.default_bucket_name)
                elif self.use_cli:
                    status = self.cli.create_collection(scope=scope_name, collection=name,
                                                        bucket=self.default_bucket_name)
                if status is True:
                    self.fail("Collection creation passed for invalid name={}".format(name))
                else:
                    self.log.info("Collection creation failed as expected for name={}".format(name))
            except:
                self.log.info("Collection creation failed as expected for name={}".format(name))

    def test_memecached_basic_api(self):
        # epengine.basic_collections.basic_collections.test_memecached_basic_api
        scope_name = "ScopeWith30CharactersinName123"
        Collection_name = "CollectionsWithLargeNamechecki"
        self.rest.create_scope(scope=scope_name)
        self.rest.create_collection(scope=scope_name, collection=Collection_name, bucket=self.default_bucket_name)

        # Fully-qualified "scope.collection" path used by the mc client.
        collection = scope_name + "." + Collection_name
        self.log.info("collection name is {}".format(collection))

        self.sleep(10)

        # create memcached client
        mc = MemcachedClient(self.master.ip, 11210)
        mc.sasl_auth_plain(self.master.rest_username, self.master.rest_password)

        # enable collection and get collections
        mc.enable_collections()
        mc.bucket_select('default')
        # mc.hello(memcacheConstants.FEATURE_COLLECTIONS)
        mc.hello("set_collection")

        mc.get_collections(True)
        self.log.info("get collections completed")

        # A set/get round-trip against the new collection must succeed.
        try:
            mc.set("key", 0, 0, "value", collection=collection)
            flag, keyx, value = mc.get(key="key", collection=collection)
            print("flag:{} keyx:{} value:{}".format(flag, keyx, value))

        except MemcachedError as exp:
            self.fail("Exception with setting and getting the key in collections {0}".format(exp))



    def test_delete_default_collection(self):
        # epengine.basic_collections.basic_collections.test_delete_default_collection
        if self.use_rest:
            status = self.rest.delete_collection(self.default_bucket_name, "_default", "_default")
        elif self.use_cli:
            status = self.cli.delete_collection(self.default_bucket_name, "_default", "_default")
        # Outcome is only logged; the test never fails on delete failure.
        if status is True:
            self.log.info("default collection deleted")
        else:
            self.log.info("default collection delete failed")
Beispiel #11
0
 def test_opposite_address_family_is_blocked(self):
     """While a rebalance runs, flip the cluster ip-family to the
     opposite setting and back, verifying the configured family is
     enforced before, during and after rebalance, and after a master
     reboot."""
     services_in = [svc.split(":")[0] for svc in self.services_in.split("-")]
     # Validate before the test starts
     self._validate_ip_addrress_family()
     nodes_in = self.servers[self.nodes_init:]
     rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                              nodes_in, [],
                                              services=services_in)
     self.sleep(2)
     rest = RestConnection(self.master)
     reached = RestHelper(rest).rebalance_reached(percentage=30)
     if self.change_addr_family:
         # The two branches were copy-paste duplicates; both now share
         # _flip_ip_family (opposite family first, then back to target).
         if self.ipv4_only:
             self._flip_ip_family("ipv6", "ipv4")
         if self.ipv6_only:
             self._flip_ip_family("ipv4", "ipv6")
     self.assertTrue(reached, "rebalance failed, stuck or did not complete")
     # Validate during rebalance
     self._validate_ip_addrress_family()
     rebalance.result()
     self.sleep(20)
     # Validate post rebalance
     self._validate_ip_addrress_family()
     # Reboot the master node
     shell = RemoteMachineShellConnection(self.master)
     shell.reboot_node()
     self.sleep(180)
     # Validate post reboot
     self._validate_ip_addrress_family()

 def _flip_ip_family(self, opposite, target):
     """Switch the cluster ip-family to *opposite*, verify enforcement,
     then switch back to *target* and verify again.

     Auto-failover is disabled during the flip so nodes are not failed
     over while briefly unreachable; it is re-enabled before the final
     enforcement check (same ordering as the original inline code).
     """
     cli = CouchbaseCLI(self.master, self.master.rest_username,
                        self.master.rest_password)
     cli.setting_autofailover(0, 60)
     _, _, success = cli.set_ip_family(opposite + "only")
     if not success:
         self.fail("Unable to change ip-family to %sonly" % opposite)
     self.check_ip_family_enforcement(ip_family=opposite + "_only")
     self.sleep(2)
     _, _, success = cli.set_ip_family(target + "only")
     if not success:
         self.fail("Unable to change ip-family to %sonly" % target)
     cli.setting_autofailover(1, 60)
     self.check_ip_family_enforcement(ip_family=target + "_only")
class Collections_CLI(object):
    """Raw pass-through couchbase-cli wrapper for scope/collection ops.

    Unlike logging facades, results are returned exactly as the CLI
    wrapper produces them, with no error translation.
    """

    def __init__(self, node):
        self.cli = CouchbaseCLI(node)
        self.log = logger.Logger.get_logger()

    def create_collection(self, bucket="default", scope="scope0",
                          collection="mycollection0"):
        return self.cli.create_collection(bucket, scope, collection)

    def create_scope(self, bucket="default", scope="scope0"):
        return self.cli.create_scope(bucket, scope)

    def delete_collection(self, bucket="default", scope='_default',
                          collection='_default'):
        return self.cli.delete_collection(bucket, scope, collection)

    def delete_scope(self, scope, bucket="default"):
        return self.cli.delete_scope(bucket, scope)

    def create_scope_collection(self, bucket, scope, collection):
        # Fire both calls unconditionally; results are deliberately ignored.
        self.cli.create_scope(bucket, scope)
        self.cli.create_collection(bucket, scope, collection)

    def delete_scope_collection(self, bucket, scope, collection):
        # Drop the collection first, then its parent scope.
        self.cli.delete_collection(bucket, scope, collection)
        self.cli.delete_scope(bucket, scope)

    def get_bucket_scopes(self, bucket):
        return self.cli.get_bucket_scopes(bucket)

    def get_bucket_collections(self, bucket):
        return self.cli.get_bucket_collections(bucket)

    def get_scope_collections(self, bucket, scope):
        return self.cli.get_scope_collections(bucket, scope)
Beispiel #13
0
class basic_collections(BaseTestCase):
    """Scope/collection basics: valid/invalid name handling, memcached
    collection-aware access, and a create/delete/read/update doc-load
    cycle against a single bucket recreated per test."""

    def setUp(self):
        # NOTE(review): BaseTestCase.setUp is commented out — attributes
        # used below (nodes_init, collection_name, ...) presumably come
        # from elsewhere; confirm before relying on this class.
        #super(basic_collections, self).setUp()
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.default_bucket_name = self.input.param("default_bucket_name",
                                                    "default")
        self.servers = self.input.servers
        self.master = self.servers[0]
        # use_rest/use_cli select the driver for scope/collection calls.
        # NOTE(review): if both are False, `status` in the tests below is
        # never assigned and would raise NameError — confirm intended.
        self.use_rest = self.input.param("use_rest", True)
        self.use_cli = self.input.param("use_cli", False)
        self.rest = Collections_Rest(self.master)
        self.cli = CouchbaseCLI(self.master, self.master.rest_username,
                                self.master.rest_password)
        self.cli.enable_dp()
        # Start every test from one freshly created default bucket.
        RestConnection(self.master).delete_all_buckets()
        RestConnection(self.master).create_bucket(
            bucket=self.default_bucket_name, ramQuotaMB=256, proxyPort=11220)

    def tearDown(self):
        # Leave the cluster bucket-free for the next test.
        RestConnection(self.master).delete_all_buckets()

    def test_valid_scope_name(self):
        # epengine.basic_collections.basic_collections.test_valid_scope_name
        # bucket is created, create scope with valid and invalid names
        valid_scope_name = [
            "MYSCPOE", "MY_SCOPE", "MY-Scope_27%",
            "Scope_With-Largename_81%Scope%", "8", "A", "a", "aaa9999%"
        ]
        for name in valid_scope_name:
            if self.use_rest:
                status = self.rest.create_scope(self.default_bucket_name,
                                                scope=name)
            elif self.use_cli:
                status = self.cli.create_scope(self.default_bucket_name,
                                               scope=name)

            if status is True:
                self.log.info("Scope creation passed, name={}".format(name))
            else:
                self.fail("Scope creation failed, name={}".format(name))

        # Names with leading $ % _ { ! or other special characters are
        # expected to be rejected by the server.
        invalid_scope_name = [
            "$scope", "%scope", "_myscope", "{scope", "s{[]/,.",
            "scope!@#^&*()", "!SCOPE"
        ]
        for name in invalid_scope_name:
            if self.use_rest:
                status = self.rest.create_scope(
                    scope=name, bucket=self.default_bucket_name)
            elif self.use_cli:
                status = self.cli.create_scope(scope=name,
                                               bucket=self.default_bucket_name)
            if status is True:
                self.fail(
                    "Scope creation passed for invalid name={}".format(name))
            else:
                self.log.info(
                    "Scope creation failed as expected for name={}".format(
                        name))

    def test_valid_collection_name(self):
        # epengine.basic_collections.basic_collections.test_valid_collection_name
        # bucket is created, create scope and then chec for valid collection name.
        scope_name = "myscope"
        self.rest.create_scope(scope=scope_name,
                               bucket=self.default_bucket_name)

        valid_collection_name = [
            "MYCOLLECTION", "MY_COLLECTION", "MY-Collection_27%",
            "CollectionsWithLargeNamechecki", "Colle_With-Largename_%Scope81%",
            "8", "A", "a", "aaa9999%"
        ]
        for name in valid_collection_name:
            if self.use_rest:
                status = self.rest.create_collection(
                    scope=scope_name,
                    collection=name,
                    bucket=self.default_bucket_name)
            elif self.use_cli:
                status = self.cli.create_collection(
                    scope=scope_name,
                    collection=name,
                    bucket=self.default_bucket_name)
            if status is True:
                self.log.info(
                    "Collection creation passed, name={}".format(name))
            else:
                self.fail("Collection creation failed, name={}".format(name))

        invalid_collection_name = [
            "$collection", "%collection", "_mycollection", "{collection",
            "s{[]/,.", "collection!@#^&*()", "!COLLECTIONS"
        ]
        for name in invalid_collection_name:
            # Rejection may surface either as a falsy status or as a raised
            # exception, depending on the driver — both count as expected.
            # NOTE(review): the bare except below likely also swallows the
            # failure raised by self.fail() above — confirm intended.
            try:
                if self.use_rest:
                    status = self.rest.create_collection(
                        scope=scope_name,
                        collection=name,
                        bucket=self.default_bucket_name)
                elif self.use_cli:
                    status = self.cli.create_collection(
                        scope=scope_name,
                        collection=name,
                        bucket=self.default_bucket_name)
                if status is True:
                    self.fail("Collection creation passed for invalid name={}".
                              format(name))
                else:
                    self.log.info(
                        "Collection creation failed as expected for name={}".
                        format(name))
            except:
                self.log.info(
                    "Collection creation failed as expected for name={}".
                    format(name))

    def test_memecached_basic_api(self):
        # epengine.basic_collections.basic_collections.test_memecached_basic_api
        scope_name = "ScopeWith30CharactersinName123"
        Collection_name = "CollectionsWithLargeNamechecki"
        self.rest.create_scope(scope=scope_name)
        self.rest.create_collection(scope=scope_name,
                                    collection=Collection_name,
                                    bucket=self.default_bucket_name)

        # Fully-qualified "scope.collection" path used by the mc client.
        collection = scope_name + "." + Collection_name
        self.log.info("collection name is {}".format(collection))

        self.sleep(10)

        # create memcached client
        mc = MemcachedClient(self.master.ip, 11210)
        mc.sasl_auth_plain(self.master.rest_username,
                           self.master.rest_password)

        # enable collection and get collections
        mc.enable_collections()
        mc.bucket_select('default')
        # mc.hello(memcacheConstants.FEATURE_COLLECTIONS)
        mc.hello("set_collection")

        mc.get_collections(True)
        self.log.info("get collections completed")

        # A set/get round-trip against the new collection must succeed.
        try:
            mc.set("key", 0, 0, "value", collection=collection)
            flag, keyx, value = mc.get(key="key", collection=collection)
            print("flag:{} keyx:{} value:{}".format(flag, keyx, value))

        except MemcachedError as exp:
            self.fail(
                "Exception with setting and getting the key in collections {0}"
                .format(exp))

    def generate_docs_bigdata(self,
                              docs_per_day,
                              start=0,
                              document_size=1024000):
        # Returns a generator of large JSON docs (document_size bytes each)
        # covering keys [start, docs_per_day).
        json_generator = JsonGenerator()
        return json_generator.generate_docs_bigdata(end=docs_per_day,
                                                    start=start,
                                                    value_size=document_size)

    def test_load_collection(self):
        #epengine.basic_collections.basic_collections.test_load_collection,value_size=200,num_items=100,collection=True
        self.value_size = 200
        self.enable_bloom_filter = False
        self.buckets = RestConnection(self.master).get_buckets()
        self.active_resident_threshold = float(
            self.input.param("active_resident_threshold", 100))

        # NOTE(review): collection_name is assumed to be a dict attribute
        # provided by the base class — confirm, since super().setUp() is
        # commented out in this variant.
        name = self.default_bucket_name + '0'
        self.collection_name[name] = []

        gen_create = BlobGenerator('eviction',
                                   'eviction-',
                                   self.value_size,
                                   end=100)

        # Create docs, wait for persistence, then verify contents/stats.
        self._load_all_buckets(self.master, gen_create, "create", 0)

        self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])

        self._verify_all_buckets(self.master)
        self._verify_stats_all_buckets(self.servers[:self.nodes_init])

        # Delete the same docs and re-verify stats.
        self._load_all_buckets(self.master, gen_create, "delete", 0)
        self._verify_stats_all_buckets(self.servers[:self.nodes_init])

        # Recreate, read back, update, and verify after each phase.
        self._load_all_buckets(self.master, gen_create, "create", 0)
        self._load_all_buckets(self.master, gen_create, "read", 0)
        self._verify_stats_all_buckets(self.servers[:self.nodes_init])

        self._load_all_buckets(self.master, gen_create, "update", 0)
        self._verify_stats_all_buckets(self.servers[:self.nodes_init])
        self._verify_all_buckets(self.master)

    def test_delete_default_collection(self):
        # epengine.basic_collections.basic_collections.test_delete_default_collection
        if self.use_rest:
            status = self.rest.delete_collection(self.default_bucket_name,
                                                 "_default", "_default")
        elif self.use_cli:
            status = self.cli.delete_collection(self.default_bucket_name,
                                                "_default", "_default")
        # Outcome is only logged; the test never fails on delete failure.
        if status is True:
            self.log.info("default collection deleted")
        else:
            self.log.info("default collection delete failed")