def cluster_server_groups_write(self, username, password, host, port=8091, servers=None, cluster=None, httpCode=None, user_role=None):
        _cluster_server_groups_write = {
            "create_server_group":"pools/default/serverGroups;POST",
            "delete_server_group":"pools/default/serverGroups/<uuid>;DELETE",
            "edit_server_group":"pools/default/serverGroups;PUT",
            "edit_server_group_id":"pools/default/serverGroups/<uuid>;PUT"
        }

        rest = RestConnection(servers[0])
        try:
            # Clean up any server groups left over from previous runs
            rest.delete_zone('rbacNewGroup')
            rest.delete_zone('rbacGroup')
            rest.delete_zone('rbacNewGroupUpdated')

            create_server_group = {"create_server_group":"pools/default/serverGroups;POST;" + "{'name':'rbacGroup'}"}
            result = self._return_http_code(create_server_group,username,password,host=host,port=port, httpCode=httpCode, user_role=user_role)

            zones = rest.get_zone_names()
            create_server_group = {"delete_server_group":"pools/default/serverGroups/" + zones['rbacGroup'] + ";DELETE"}
            result = self._return_http_code(create_server_group,username,password,host=host,port=port, httpCode=httpCode, user_role=user_role)

            rest.add_zone('rbacNewGroup')
            zones = rest.get_zone_names()
            create_server_group = {"create_server_group":"pools/default/serverGroups/" + zones['rbacNewGroup'] + ";PUT;" + "{'name':'rbacNewGroupUpdated'}"}
            result = self._return_http_code(create_server_group,username,password,host=host,port=port, httpCode=httpCode, user_role=user_role)

        except Exception as e:
            print("Issues with Server Group add test case: {0}".format(e))
Example #2
 def test_delete_empty_defautl_zone(self):
     zone_name = "test1"
     default_zone = "Group 1"
     moved_node = []
     serverInfo = self.servers[0]
     moved_node.append(serverInfo.ip)
     rest = RestConnection(serverInfo)
     try:
         self.log.info("create zone {0}".format(zone_name))
         rest.add_zone(zone_name)
         if rest.is_zone_exist(zone_name):
             self.log.info("Move node {0} from zone {1} to zone {2}" \
                           .format(moved_node, default_zone, zone_name))
             status = rest.shuffle_nodes_in_zones(moved_node, default_zone,
                                                  zone_name)
             if status:
                 rest.delete_zone(default_zone)
             else:
                 self.fail("Failed to move node {0} from zone {1} to zone {2}" \
                           .format(moved_node, default_zone, zone_name))
             if not rest.is_zone_exist(default_zone):
                 self.log.info("successful delete default zone")
             else:
                 raise Exception("Failed to delete default zone")
         rest.rename_zone(zone_name, default_zone)
     except Exception as e:
         print(e)
Example #3
 def shuffle_nodes_between_two_zones(self):
     """
     Creates 'Group 2' zone and shuffles nodes between
     Group 1 and Group 2 in an alternating manner, i.e.
     1st node in Group 1, 2nd node in Group 2, 3rd node in Group 1 and so on
     and finally rebalances the resulting cluster
     :return: nodes of 2nd zone
     """
     serverinfo = self.cluster.master
     rest = RestConnection(serverinfo)
     zones = ["Group 1", "Group 2"]
     rest.add_zone("Group 2")
     nodes_in_zone = {"Group 1": [serverinfo.ip], "Group 2": []}
     second_zone_servers = list()  # Keep track of second zone's nodes
     # Divide the nodes between zones.
     for i in range(1, len(self.nodes_in_cluster)):
         server_group = i % 2
         nodes_in_zone[zones[server_group]].append(
             self.nodes_in_cluster[i].ip)
         if zones[server_group] == "Group 2":
             second_zone_servers.append(self.nodes_in_cluster[i])
     # Shuffle the nodes
     node_in_zone = list(
         set(nodes_in_zone[zones[1]]) -
         set([node for node in rest.get_nodes_in_zone(zones[1])]))
     rest.shuffle_nodes_in_zones(node_in_zone, zones[0], zones[1])
     self.task.rebalance(self.nodes_in_cluster, [], [])
     return second_zone_servers
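A standalone sketch of the alternating assignment the docstring above describes: node i goes to zone i % 2, so the master and even-indexed nodes stay in "Group 1" while odd-indexed nodes land in "Group 2". The IPs are made-up placeholders.

zone_names = ["Group 1", "Group 2"]
nodes = ["10.0.0.1", "10.0.0.2", "10.0.0.3", "10.0.0.4"]   # hypothetical IPs
nodes_in_zone = {"Group 1": [nodes[0]], "Group 2": []}
for i in range(1, len(nodes)):
    # 1st extra node -> Group 2, 2nd -> Group 1, and so on
    nodes_in_zone[zone_names[i % 2]].append(nodes[i])
print(nodes_in_zone)
# {'Group 1': ['10.0.0.1', '10.0.0.3'], 'Group 2': ['10.0.0.2', '10.0.0.4']}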
Example #4
 def test_delete_empty_defautl_zone(self):
     zone_name ="test1"
     default_zone = "Group 1"
     moved_node = []
     serverInfo = self.servers[0]
     moved_node.append(serverInfo.ip)
     rest = RestConnection(serverInfo)
     try:
         self.log.info("create zone {0}".format(zone_name))
         rest.add_zone(zone_name)
         if rest.is_zone_exist(zone_name):
             self.log.info("Move node {0} from zone {1} to zone {2}" \
                           .format(moved_node, default_zone, zone_name))
             status = rest.shuffle_nodes_in_zones(moved_node, default_zone, zone_name)
             if status:
                 rest.delete_zone(default_zone)
             else:
                 self.fail("Failed to move node {0} from zone {1} to zone {2}" \
                           .format(moved_node, default_zone, zone_name))
             if not rest.is_zone_exist(default_zone):
                 self.log.info("successful delete default zone")
             else:
                 raise Exception("Failed to delete default zone")
         rest.rename_zone(zone_name, default_zone)
     except Exception as e:
         print(e)
Example #5
 def shuffle_nodes_between_zones_and_rebalance(self, to_remove=None):
     """
     Shuffle the nodes present in the cluster if zone > 1.
     Rebalance the nodes in the end.
     Nodes are divided into groups iteratively. i.e: 1st node in Group 1,
     2nd in Group 2, 3rd in Group 1 & so on, when zone=2
     :param to_remove: List of nodes to be removed.
     """
     if not to_remove:
         to_remove = []
     serverinfo = self.servers[0]
     rest = RestConnection(serverinfo)
     zones = ["Group 1"]
     nodes_in_zone = {"Group 1": [serverinfo.ip]}
     # Create zones, if not existing, based on params zone in test.
     # Shuffle the nodes between zones.
     if int(self.zone) > 1:
         for i in range(1, int(self.zone)):
             a = "Group "
             zones.append(a + str(i + 1))
             if not rest.is_zone_exist(zones[i]):
                 rest.add_zone(zones[i])
             nodes_in_zone[zones[i]] = []
         # Divide the nodes between zones.
         nodes_in_cluster = \
             [node.ip for node in self.cluster_util.get_nodes_in_cluster(
                 self.cluster)]
         nodes_to_remove = [node.ip for node in to_remove]
         for i in range(1, len(self.servers)):
             if self.servers[i].ip in nodes_in_cluster \
                     and self.servers[i].ip not in nodes_to_remove:
                 server_group = i % int(self.zone)
                 nodes_in_zone[zones[server_group]].append(
                     self.servers[i].ip)
         # Shuffle the nodes
         for i in range(1, self.zone):
             node_in_zone = list(
                 set(nodes_in_zone[zones[i]]) -
                 set([node for node in rest.get_nodes_in_zone(zones[i])]))
             rest.shuffle_nodes_in_zones(node_in_zone, zones[0], zones[i])
     otpnodes = [node.id for node in rest.node_statuses()]
     nodes_to_remove = [
         node.id for node in rest.node_statuses()
         if node.ip in [t.ip for t in to_remove]
     ]
     # Start rebalance and monitor it.
     started = rest.rebalance(otpNodes=otpnodes,
                              ejectedNodes=nodes_to_remove)
     if started:
         result = rest.monitorRebalance()
         self.assertTrue(result, msg="Rebalance failed: {}".format(result))
         msg = "successfully rebalanced cluster {0}"
         self.log.info(msg.format(result))
     # Verify replicas of one node should not be in the same zone
     # as active vbuckets of the node.
     if self.zone > 1:
         self.cluster_util.verify_replica_distribution_in_zones(
             self.cluster, nodes_in_zone)
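The final verification asserts that no replica lives in the same server group as its active copy. A minimal illustration of that invariant (this is not the repo's verify_replica_distribution_in_zones helper; the vbucket map and node-to-zone mapping are placeholders):

node_zone = {"10.0.0.1": "Group 1", "10.0.0.2": "Group 2"}   # hypothetical mapping
# Each entry is [active_node, replica_node, ...] for one vbucket.
vbucket_map = [["10.0.0.1", "10.0.0.2"], ["10.0.0.2", "10.0.0.1"]]
for vb, chain in enumerate(vbucket_map):
    active_zone = node_zone[chain[0]]
    for replica in chain[1:]:
        assert node_zone[replica] != active_zone, \
            "vbucket {0}: replica in same zone as active".format(vb)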
Example #6
 def test_create_zone_with_lower_case_name(self):
     zone_name = "allwithlowercaseeeeeee"
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     try:
         self.log.info("create zone {0}".format(zone_name))
         rest.add_zone(zone_name)
     except Exception as e:
         print(e)
Example #7
 def test_create_zone_with_upper_case_name(self):
     zone_name = "ALLWITHUPTERCASE"
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     try:
         self.log.info("create zone {0}".format(zone_name))
         rest.add_zone(zone_name)
     except Exception as e:
         print(e)
Example #8
 def test_create_zone_with_all_number_name(self):
     zone_name = "3223345557666760"
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     try:
         self.log.info("create zone {0}".format(zone_name))
         rest.add_zone(zone_name)
     except Exception as e:
         print(e)
Example #9
 def test_create_zone_with_all_number_name(self):
     zone_name = "3223345557666760"
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     try:
         self.log.info("create zone {0}".format(zone_name))
         rest.add_zone(zone_name)
     except Exception as e:
         print(e)
Example #10
 def test_create_second_default_zone(self):
     zone_name = "Group 1"
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     try:
         self.log.info("create additional default zone")
         rest.add_zone(zone_name)
     except Exception as e:
         print(e)
Example #11
 def test_create_zone_with_upper_lower_number_and_space_name(self):
     zone_name = " AAAB BBCCC aakkk kmmm3 456 72 "
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     try:
         self.log.info("create zone {0}".format(zone_name))
         rest.add_zone(zone_name)
     except Exception as e:
         print(e)
Example #12
 def test_create_second_default_zone(self):
     zone_name = "Group 1"
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     try:
         self.log.info("create additional default zone")
         rest.add_zone(zone_name)
     except Exception as e:
         print(e)
Example #13
 def test_create_zone_with_upper_case_name(self):
     zone_name = "ALLWITHUPTERCASE"
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     try:
         self.log.info("create zone {0}".format(zone_name))
         rest.add_zone(zone_name)
     except Exception as e:
         print(e)
Example #14
 def test_create_zone_with_lower_case_name(self):
     zone_name = "allwithlowercaseeeeeee"
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     try:
         self.log.info("create zone {0}".format(zone_name))
         rest.add_zone(zone_name)
     except Exception as e:
         print(e)
Example #15
 def test_create_zone_with_upper_lower_number_and_space_name(self):
     zone_name = " AAAB BBCCC aakkk kmmm3 456 72 "
     serverInfo = self.cluster.servers[0]
     rest = RestConnection(serverInfo)
     try:
         self.log.info("create zone {0}".format(zone_name))
         rest.add_zone(zone_name)
     except Exception as e:
         print(e)
Example #16
 def test_create_zone_with_none_ascii_name(self):
     # zone name is limited to 64 bytes
     zone_name = "abcdGHIJKLMNOPQRSTUVWXYZ0123456789efghijklmnopqrstuvwyABCDEF_-.%"
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     try:
         self.log.info("create zone {0}".format(zone_name))
         rest.add_zone(zone_name)
     except Exception as e:
         print(e)
Example #17
 def test_create_zone_with_none_ascii_name(self):
     # zone name is limited to 64 bytes
     zone_name = "abcdGHIJKLMNOPQRSTUVWXYZ0123456789efghijklmnopqrstuvwyABCDEF_-.%"
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     try:
         self.log.info("create zone {0}".format(zone_name))
         rest.add_zone(zone_name)
     except Exception as e:
         print(e)
Example #18
 def shuffle_nodes_between_zones_and_rebalance(self, to_remove=None):
     """
     Shuffle the nodes present in the cluster if zone > 1. Rebalance the nodes in the end.
     Nodes are divided into groups iteratively i.e. 1st node in Group 1, 2nd in Group 2, 3rd in Group 1 and so on, when
     zone=2.
     :param to_remove: List of nodes to be removed.
     """
     if not to_remove:
         to_remove = []
     serverinfo = self.orchestrator
     rest = RestConnection(serverinfo)
     zones = ["Group 1"]
     nodes_in_zone = {"Group 1": [serverinfo.ip]}
     # Create zones, if not existing, based on params zone in test.
     # Shuffle the nodes between zones.
     if int(self.zone) > 1:
         for i in range(1, int(self.zone)):
             a = "Group "
             zones.append(a + str(i + 1))
             if not rest.is_zone_exist(zones[i]):
                 rest.add_zone(zones[i])
             nodes_in_zone[zones[i]] = []
         # Divide the nodes between zones.
         nodes_in_cluster = [
             node.ip for node in self.get_nodes_in_cluster()
         ]
         nodes_to_remove = [node.ip for node in to_remove]
         for i in range(1, len(self.servers)):
             if self.servers[i].ip in nodes_in_cluster and self.servers[
                     i].ip not in nodes_to_remove:
                 server_group = i % int(self.zone)
                 nodes_in_zone[zones[server_group]].append(
                     self.servers[i].ip)
         # Shuffle the nodes
         for i in range(1, self.zone):
             node_in_zone = list(
                 set(nodes_in_zone[zones[i]]) -
                 {node
                  for node in rest.get_nodes_in_zone(zones[i])})
             rest.shuffle_nodes_in_zones(node_in_zone, zones[0], zones[i])
     self.zones = nodes_in_zone
     otpnodes = [node.id for node in rest.node_statuses()]
     nodes_to_remove = [
         node.id for node in rest.node_statuses()
         if node.ip in [t.ip for t in to_remove]
     ]
     # Start rebalance and monitor it.
     started = rest.rebalance(otpNodes=otpnodes,
                              ejectedNodes=nodes_to_remove)
     if started:
         result = rest.monitorRebalance()
         msg = "successfully rebalanced cluster {0}"
         self.log.info(msg.format(result))
Example #19
    def test_zone_enable_after_upgrade_from_ce_to_ee(self):
        params = {}
        params['product'] = self.product
        params['version'] = self.version
        params['vbuckets'] = [self.vbuckets]
        params['type'] = self.type
        """ install couchbasse server community edition to run the test """
        InstallerJob().parallel_install(self.servers[:3], params)

        params["type"] = "enterprise"
        zone_name = "AAABBBCCCaakkkkmmm345672"
        serverInfo = self.servers[0]
        ini_servers = self.servers[:self.nodes_init]
        rest = RestConnection(serverInfo)
        self.user = serverInfo.rest_username
        self.password = serverInfo.rest_password
        if len(ini_servers) > 1:
            self.cluster.rebalance([ini_servers[0]], ini_servers[1:], [])
        rest = RestConnection(self.master)
        self._bucket_creation()

        """ verify all nodes in cluster in CE """
        if rest.is_enterprise_edition():
            raise Exception("This test needs couchbase server community edition to run")

        self._load_all_buckets(self.servers[0], self.gen_load, "create", 0)
        try:
            self.log.info("create zone {0}".format(zone_name))
            result = rest.add_zone(zone_name)
            if result:
                raise Exception("Zone feature should not be available in CE version")
        except Exception as e:
            if "Failed" in str(e):
                pass
Example #20
    def test_zone_enable_after_upgrade_from_ce_to_ee(self):
        params = {}
        params['product'] = self.product
        params['version'] = self.version
        params['vbuckets'] = [self.vbuckets]
        params['type'] = self.type
        """ install couchbasse server community edition to run the test """
        InstallerJob().parallel_install(self.servers[:3], params)

        params["type"] = "enterprise"
        zone_name = "AAABBBCCCaakkkkmmm345672"
        serverInfo = self.servers[0]
        ini_servers = self.servers[:self.nodes_init]
        rest = RestConnection(serverInfo)
        self.user = serverInfo.rest_username
        self.password = serverInfo.rest_password
        if len(ini_servers) > 1:
            self.cluster.rebalance([ini_servers[0]], ini_servers[1:], [])
        rest = RestConnection(self.master)
        self._bucket_creation()
        """ verify all nodes in cluster in CE """
        if rest.is_enterprise_edition():
            raise Exception(
                "This test needs couchbase server community edition to run")

        self._load_all_buckets(self.servers[0], self.gen_load, "create", 0)
        try:
            self.log.info("create zone {0}".format(zone_name))
            result = rest.add_zone(zone_name)
            if result:
                raise Exception(
                    "Zone feature should not be available in CE version")
        except Exception as e:
            if "Failed" in str(e):
                pass
Example #21
 def shuffle_nodes_between_zones_and_rebalance(self, to_remove=None):
     """
     Shuffle the nodes present in the cluster if zone > 1. Rebalance the nodes in the end.
     Nodes are divided into groups iteratively i.e. 1st node in Group 1, 2nd in Group 2, 3rd in Group 1 and so on, when
     zone=2.
     :param to_remove: List of nodes to be removed.
     """
     if not to_remove:
         to_remove = []
     serverinfo = self.servers[0]
     rest = RestConnection(serverinfo)
     zones = ["Group 1"]
     nodes_in_zone = {"Group 1": [serverinfo.ip]}
     # Create zones, if not existing, based on params zone in test.
     # Shuffle the nodes between zones.
     if int(self.zone) > 1:
         for i in range(1, int(self.zone)):
             a = "Group "
             zones.append(a + str(i + 1))
             if not rest.is_zone_exist(zones[i]):
                 rest.add_zone(zones[i])
             nodes_in_zone[zones[i]] = []
         # Divide the nodes between zones.
         nodes_in_cluster = [node.ip for node in self.get_nodes_in_cluster()]
         nodes_to_remove = [node.ip for node in to_remove]
         for i in range(1, len(self.servers)):
             if self.servers[i].ip in nodes_in_cluster and self.servers[i].ip not in nodes_to_remove:
                 server_group = i % int(self.zone)
                 nodes_in_zone[zones[server_group]].append(self.servers[i].ip)
         # Shuffle the nodes
         for i in range(1, self.zone):
             node_in_zone = list(set(nodes_in_zone[zones[i]]) -
                                 set([node for node in rest.get_nodes_in_zone(zones[i])]))
             rest.shuffle_nodes_in_zones(node_in_zone, zones[0], zones[i])
     otpnodes = [node.id for node in rest.node_statuses()]
     nodes_to_remove = [node.id for node in rest.node_statuses() if node.ip in [t.ip for t in to_remove]]
     # Start rebalance and monitor it.
     started = rest.rebalance(otpNodes=otpnodes, ejectedNodes=nodes_to_remove)
     if started:
         result = rest.monitorRebalance()
         msg = "successfully rebalanced cluster {0}"
         self.log.info(msg.format(result))
     # Verify replicas of one node should not be in the same zone as active vbuckets of the node.
     if self.zone > 1:
         self._verify_replica_distribution_in_zones(nodes_in_zone)
Example #22
                 if result:
                     raise Exception(\
                      "Zone feature should not be available in CE version")
             except Exception as e:
                 if "Failed" in str(e):
                     pass
 serverInfo = self.servers[1]
 rest = RestConnection(serverInfo)
 self.user = serverInfo.rest_username
 self.password = serverInfo.rest_password
 if not rest.is_enterprise_edition():
     raise Exception("Test failed to upgrade cluster from CE to EE")
 self.log.info("try to create zone {0} "
               "when cluster {1} is completely EE".format(
                   zone_name, serverInfo.ip))
 result = rest.add_zone(zone_name)
 self.log.info("sleep  5 seconds")
 time.sleep(5)
 if result:
     self.log.info("Zone feature is available in this cluster")
 else:
     raise Exception("Could not create zone with name: %s in cluster.  "
                     "It's a bug" % zone_name)
 if rest.is_zone_exist(zone_name.strip()):
     self.log.info("verified! zone '{0}' is existed".format(
         zone_name.strip()))
 else:
     raise Exception("There is not zone with name: %s in cluster.  "
                     "It's a bug" % zone_name)
 """ re-install enterprise edition for next test if there is any """
 InstallerJob().parallel_install([self.servers[0]], params)
Example #23
class CommunityTests(CommunityBaseTest):
    def setUp(self):
        super(CommunityTests, self).setUp()
        self.command = self.input.param("command", "")
        self.zone = self.input.param("zone", 1)
        self.replica = self.input.param("replica", 1)
        self.command_options = self.input.param("command_options", '')
        self.set_get_ratio = self.input.param("set_get_ratio", 0.9)
        self.item_size = self.input.param("item_size", 128)
        self.shutdown_zone = self.input.param("shutdown_zone", 1)
        self.do_verify = self.input.param("do-verify", True)
        self.num_node = self.input.param("num_node", 4)
        self.services = self.input.param("services", None)
        self.start_node_services = self.input.param("start_node_services",
                                                    "kv")
        self.add_node_services = self.input.param("add_node_services", "kv")
        self.timeout = 6000
        self.user_add = self.input.param("user_add", None)
        self.user_role = self.input.param("user_role", None)

    def tearDown(self):
        super(CommunityTests, self).tearDown()

    def test_disabled_zone(self):
        disabled_zone = False
        zone_name = "group1"
        serverInfo = self.servers[0]
        self.rest = RestConnection(serverInfo)
        try:
            self.log.info("create zone name 'group1'!")
            result = self.rest.add_zone(zone_name)
            print("result  ", result)
        except Exception as e:
            if e:
                print(e)
                disabled_zone = True
                pass
        if not disabled_zone:
            self.fail("CE version should not have zone feature")

    def check_audit_available(self):
        audit_available = False
        try:
            self.rest.getAuditSettings()
            audit_available = True
        except Exception as e:
            if e:
                print(e)
        if audit_available:
            self.fail("This feature 'audit' only available on "
                      "Enterprise Edition")

    def check_ldap_available(self):
        ldap_available = False
        self.rest = RestConnection(self.master)
        try:
            s, c, h = self.rest.clearLDAPSettings()
            if s:
                ldap_available = True
        except Exception as e:
            if e:
                print(e)
        if ldap_available:
            self.fail("This feature 'ldap' only available on "
                      "Enterprise Edition")

    def check_set_services(self):
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        try:
            status = self.rest.init_node_services(hostname=self.master.ip,
                                                  services=[self.services])
        except Exception as e:
            if e:
                print(e)
        if self.services == "kv":
            if status:
                self.log.info("CE could set {0} only service.".format(
                    self.services))
            else:
                self.fail("Failed to set {0} only service.".format(
                    self.services))
        elif self.services == "index,kv":
            if status:
                self.fail("CE does not support kv and index on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "kv,n1ql":
            if status:
                self.fail("CE does not support kv and n1ql on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "kv,eventing":
            if status:
                self.fail("CE does not support kv and eventing on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "index,n1ql":
            if status:
                self.fail("CE does not support index and n1ql on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "index,kv,n1ql":
            if status:
                self.log.info(
                    "CE could set all services {0} on same nodes.".format(
                        self.services))
            else:
                self.fail("Failed to set kv, index and query services on CE")
        elif self.version[:5] in COUCHBASE_FROM_WATSON:
            if self.version[:5] in COUCHBASE_FROM_VULCAN \
                    and "eventing" in self.services:
                if status:
                    self.fail("CE does not support eventing in vulcan")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,index,kv":
                if status:
                    self.fail(
                        "CE does not support fts, index and kv on same node")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,index,n1ql":
                if status:
                    self.fail(
                        "CE does not support fts, index and n1ql on same node")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,kv,n1ql":
                if status:
                    self.fail(
                        "CE does not support fts, kv and n1ql on same node")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,index,kv,n1ql":
                if status:
                    self.log.info(
                        "CE could set all services {0} on same nodes.".format(
                            self.services))
                else:
                    self.fail("Failed to set "
                              "fts, index, kv, and query services on CE")
        else:
            self.fail("some services don't support")

    def check_set_services_when_add_node(self):
        self.rest.force_eject_node()
        sherlock_services_in_ce = ["kv", "index,kv,n1ql"]
        watson_services_in_ce = ["kv", "index,kv,n1ql", "fts,index,kv,n1ql"]
        self.sleep(5, "wait for node reset done")
        try:
            self.log.info("Initialize node with services {0}".format(
                self.start_node_services))
            status = self.rest.init_node_services(
                hostname=self.master.ip, services=[self.start_node_services])
            init_node = self.cluster.async_init_node(
                self.master, services=[self.start_node_services])
        except Exception as e:
            if e:
                print(e)
        if not status:
            if self.version not in COUCHBASE_FROM_WATSON and \
                         self.start_node_services not in sherlock_services_in_ce:
                self.log.info(
                    "initial services setting enforced in Sherlock CE")
            elif self.version in COUCHBASE_FROM_WATSON and \
                         self.start_node_services not in watson_services_in_ce:
                self.log.info("initial services setting enforced in Watson CE")

        elif status and init_node.result() != 0:
            add_node = False
            try:
                self.log.info("node with services {0} try to add".format(
                    self.add_node_services))
                add_node = self.cluster.rebalance(
                    self.servers[:2],
                    self.servers[1:2], [],
                    services=[self.add_node_services])
            except Exception:
                pass
            if add_node:
                self.get_services_map()
                list_nodes = self.get_nodes_from_services_map(
                    get_all_nodes=True)
                map = self.get_nodes_services()
                if map[self.master.ip] == self.start_node_services and \
                    map[self.servers[1].ip] == self.add_node_services:
                    self.log.info(
                        "services set correctly when node added & rebalance")
                else:
                    self.fail("services set incorrectly when node added & rebalance. "
                        "cluster expected services: {0}; set cluster services {1} ."
                        "add node expected srv: {2}; set add node srv {3}"\
                        .format(map[self.master.ip], self.start_node_services, \
                         map[self.servers[1].ip], self.add_node_services))
            else:
                if self.version not in COUCHBASE_FROM_WATSON:
                    if self.start_node_services in ["kv", "index,kv,n1ql"] and \
                          self.add_node_services not in ["kv", "index,kv,n1ql"]:
                        self.log.info("services are enforced in CE")
                    elif self.start_node_services not in [
                            "kv", "index,kv,n1ql"
                    ]:
                        self.log.info("services are enforced in CE")
                    else:
                        self.fail("maybe bug in add node")
                elif self.version in COUCHBASE_FROM_WATSON:
                    if self.start_node_services in ["kv", "index,kv,n1ql",
                         "fts,index,kv,n1ql"] and self.add_node_services not in \
                                    ["kv", "index,kv,n1ql", "fts,index,kv,n1ql"]:
                        self.log.info("services are enforced in CE")
                    elif self.start_node_services not in [
                            "kv", "index,kv,n1ql", "fts,index,kv,n1ql"
                    ]:
                        self.log.info("services are enforced in CE")
                    else:
                        self.fail("maybe bug in add node")
        else:
            self.fail("maybe bug in node initialization")

    def check_full_backup_only(self):
        """ for windows vm, ask IT to put uniq.exe at
            /cygdrive/c/Program Files (x86)/ICW/bin directory """

        self.remote = RemoteMachineShellConnection(self.master)
        """ put params items=0 in test param so that init items = 0 """
        self.remote.execute_command("{0}cbworkloadgen -n {1}:8091 -j -i 1000 " \
                                    "-u Administrator -p password" \
                                            .format(self.bin_path, self.master.ip))
        """ delete backup location before run backup """
        self.remote.execute_command("rm -rf {0}*".format(self.backup_location))
        output, error = self.remote.execute_command("ls -lh {0}".format(
            self.backup_location))
        self.remote.log_command_output(output, error)
        """ first full backup """
        self.remote.execute_command("{0}cbbackup http://{1}:8091 {2} -m full " \
                                    "-u Administrator -p password"\
                                    .format(self.bin_path,
                                            self.master.ip,
                                            self.backup_c_location))
        output, error = self.remote.execute_command("ls -lh {0}*/".format(
            self.backup_location))
        self.remote.log_command_output(output, error)
        output, error = self.remote.execute_command("{0}cbtransfer -u Administrator "\
                                           "-p password {1}*/*-full/ " \
                                           "stdout: | grep set | uniq | wc -l"\
                                           .format(self.bin_path,
                                                   self.backup_c_location))
        self.remote.log_command_output(output, error)
        if int(output[0]) != 1000:
            self.fail("full backup did not work in CE. "
                      "Expected 1000, actual: {0}".format(output[0]))
        self.remote.execute_command("{0}cbworkloadgen -n {1}:8091 -j -i 1000 "\
                                    " -u Administrator -p password --prefix=t_"
                                    .format(self.bin_path, self.master.ip))
        """ do different backup mode """
        self.remote.execute_command("{0}cbbackup -u Administrator -p password "\
                                    "http://{1}:8091 {2} -m {3}"\
                                    .format(self.bin_path,
                                            self.master.ip,
                                            self.backup_c_location,
                                            self.backup_option))
        output, error = self.remote.execute_command("ls -lh {0}".format(
            self.backup_location))
        self.remote.log_command_output(output, error)
        output, error = self.remote.execute_command("{0}cbtransfer -u Administrator "\
                                           "-p password {1}*/*-{2}/ stdout: "\
                                           "| grep set | uniq | wc -l"\
                                           .format(self.bin_path,
                                                   self.backup_c_location,
                                                   self.backup_option))
        self.remote.log_command_output(output, error)
        if int(output[0]) == 2000:
            self.log.info("backup option 'diff' is enforced in CE")
        elif int(output[0]) == 1000:
            self.fail("backup option 'diff' is not enforced in CE. "
                      "Expected 2000, actual: {0}".format(output[0]))
        else:
            self.fail("backup failed to backup correct items")
        self.remote.disconnect()

    def check_ent_backup(self):
        """ for CE version from Watson, cbbackupmgr exe file should not in bin """
        command = "cbbackupmgr"
        self.remote = RemoteMachineShellConnection(self.master)
        self.log.info("check if {0} in {1} directory".format(
            command, self.bin_path))
        found = self.remote.file_exists(self.bin_path, command)
        if found:
            self.log.info("found {0} in {1} directory".format(
                command, self.bin_path))
            self.fail("CE from Watson should not contain {0}".format(command))
        elif not found:
            self.log.info("Ent. backup in CE is enforced, not in bin!")
        self.remote.disconnect()

    def check_memory_optimized_storage_mode(self):
        """ from Watson, CE should not have option 'memory_optimized' to set """
        self.rest.force_eject_node()
        self.sleep(5, "wait for node reset done")
        try:
            self.log.info("Initialize node with 'Memory Optimized' option")
            status = self.rest.set_indexer_storage_mode(
                username=self.input.membase_settings.rest_username,
                password=self.input.membase_settings.rest_password,
                storageMode='memory_optimized')
        except Exception as ex:
            if ex:
                print(ex)
        if not status:
            self.log.info("Memory Optimized setting enforced in CE "
                          "Could not set memory_optimized option")
        else:
            self.fail("Memory Optimzed setting does not enforced in CE "
                      "We could set this option in")

    def check_x509_cert(self):
        """ from Watson, X509 certificate only support in EE """
        api = self.rest.baseUrl + "pools/default/certificate?extended=true"
        self.log.info("request to get certificate at "
                      "'pools/default/certificate?extended=true' "
                      "should return False")
        try:
            status, content, header = self.rest._http_request(api, 'GET')
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("This X509 certificate feature only available in EE")
        elif not status:
            if "requires enterprise edition" in content:
                self.log.info("X509 cert is enforced in CE")

    def check_roles_base_access(self):
        """ from Watson, roles base access for admin should not in in CE """
        if self.user_add is None:
            self.fail(
                "We need to pass user name (user_add) to run this test. ")
        if self.user_role is None:
            self.fail(
                "We need to pass user roles (user_role) to run this test. ")
        api = self.rest.baseUrl + "settings/rbac/users/" + self.user_add
        self.log.info("url to run this test: %s" % api)
        """ add admin user """
        param = "name=%s&roles=%s" % (self.user_add, self.user_role)
        try:
            status, content, header = self.rest._http_request(
                api, 'PUT', param)
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("CE should not allow to add admin users")
        else:
            self.log.info("roles base is enforced in CE! ")

    def check_root_certificate(self):
        """ from watson, ce should not see root certificate
            manual test:
            curl -u Administrator:password -X GET
                            http://localhost:8091/pools/default/certificate """
        api = self.rest.baseUrl + "pools/default/certificate"
        try:
            status, content, header = self.rest._http_request(api, 'GET')
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("CE should not see root certificate!")
        elif "requires enterprise edition" in content:
            self.log.info("root certificate is enforced in CE! ")

    def check_settings_audit(self):
        """ from watson, ce should not set audit
            manual test:
            curl -u Administrator:password -X GET
                            http://localhost:8091/settings/audit """
        api = self.rest.baseUrl + "settings/audit"
        try:
            status, content, header = self.rest._http_request(api, 'GET')
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("CE should not allow to set audit !")
        elif "requires enterprise edition" in content:
            self.log.info("settings audit is enforced in CE! ")

    def check_infer(self):
        """ from watson, ce should not see infer
            manual test:
            curl -H "Content-Type: application/json" -X POST
                 -d '{"statement":"infer `bucket_name`;"}'
                       http://localhost:8093/query/service
            test params: new_services=kv-index-n1ql,default_bucket=False """
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        self.rest.init_node()
        bucket = "default"
        self.rest.create_bucket(bucket, ramQuotaMB=200)
        api = self.rest.query_baseUrl + "query/service"
        param = urllib.parse.urlencode({"statement": "infer `%s` ;" % bucket})
        try:
            status, content, header = self.rest._http_request(
                api, 'POST', param)
            json_parsed = json.loads(content)
        except Exception as ex:
            if ex:
                print(ex)
        if json_parsed["status"] == "success":
            self.fail("CE should not allow to run INFER !")
        elif json_parsed["status"] == "fatal":
            self.log.info("INFER is enforced in CE! ")

    def check_auto_complete(self):
        """ this feature has not complete to block in CE """

    """ Check new features from spock start here """

    def check_cbbackupmgr(self):
        """ cbbackupmgr should not available in CE from spock """
        if self.cb_version[:5] in COUCHBASE_FROM_SPOCK:
            file_name = "cbbackupmgr" + self.file_extension
            self.log.info("check if cbbackupmgr in bin dir in CE")
            result = self.remote.file_exists(self.bin_path, file_name)
            if result:
                self.fail("cbbackupmgr should not in bin dir of CE")
            else:
                self.log.info("cbbackupmgr is enforced in CE")
        self.remote.disconnect()

    def test_max_ttl_bucket(self):
        """
            From vulcan, an EE bucket has the option --max-ttl; CE does not.
            This test makes sure CE cannot create a bucket with the --max-ttl option.
            This test must pass default_bucket=False
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        cmd = 'curl -X POST -u Administrator:password \
                                    http://{0}:8091/pools/default/buckets \
                                 -d name=bucket0 \
                                 -d maxTTL=100 \
                                 -d authType=sasl \
                                 -d ramQuotaMB=100 '.format(self.master.ip)
        if self.cli_test:
            cmd = "{0}couchbase-cli bucket-create -c {1}:8091 --username Administrator \
                --password password --bucket bucket0 --bucket-type couchbase \
                --bucket-ramsize 512 --bucket-replica 1 --bucket-priority high \
                --bucket-eviction-policy fullEviction --enable-flush 0 \
                --enable-index-replica 1 --max-ttl 200".format(
                self.bin_path, self.master.ip)
        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "Max TTL is supported in enterprise edition only"
        if self.cli_test:
            mesg = "Maximum TTL can only be configured on enterprise edition"
        if output and mesg not in str(output[0]):
            self.fail("max ttl feature should not in Community Edition")
        buckets = RestConnection(self.master).get_buckets()
        if buckets:
            for bucket in buckets:
                self.log.info("bucekt in cluser: {0}".format(bucket.name))
                if bucket.name == "bucket0":
                    self.fail("Failed to enforce feature max ttl in CE.")
        conn.disconnect()

    def test_setting_audit(self):
        """
           CE does not allow to set audit from vulcan 5.5.0
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        cmd = 'curl -X POST -u Administrator:password \
              http://{0}:8091/settings/audit \
              -d auditdEnabled=true '.format(self.master.ip)
        if self.cli_test:
            cmd = "{0}couchbase-cli setting-audit -c {1}:8091 -u Administrator \
                -p password --audit-enabled 1 --audit-log-rotate-interval 604800 \
                --audit-log-path /opt/couchbase/var/lib/couchbase/logs "\
                .format(self.bin_path, self.master.ip)

        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "This http API endpoint requires enterprise edition"
        if output and mesg not in str(output[0]):
            self.fail("setting-audit feature should not in Community Edition")
        conn.disconnect()

    def test_setting_autofailover_enterprise_only(self):
        """
           CE does not allow setting auto failover on data disk issues
           or failover of server groups, from vulcan 5.5.0
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        self.failover_disk_period = self.input.param("failover_disk_period",
                                                     False)
        self.failover_server_group = self.input.param("failover_server_group",
                                                      False)

        failover_disk_period = ""
        if self.failover_disk_period:
            if self.cli_test:
                failover_disk_period = "--failover-data-disk-period 300"
            else:
                failover_disk_period = "-d failoverOnDataDiskIssues[timePeriod]=300"
        failover_server_group = ""
        if self.failover_server_group and self.cli_test:
            failover_server_group = "--enable-failover-of-server-group 1"

        cmd = 'curl -X POST -u Administrator:password \
              http://{0}:8091/settings/autoFailover -d enabled=true -d timeout=120 \
              -d maxCount=1 \
              -d failoverOnDataDiskIssues[enabled]=true {1} \
              -d failoverServerGroup={2}'.format(self.master.ip,
                                                 failover_disk_period,
                                                 self.failover_server_group)
        if self.cli_test:
            cmd = "{0}couchbase-cli setting-autofailover -c {1}:8091 \
                   -u Administrator -p password \
                   --enable-failover-on-data-disk-issues 1 {2} {3} "\
                  .format(self.bin_path, self.master.ip,
                          failover_disk_period,
                          failover_server_group)
        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "Auto failover on Data Service disk issues can only be " + \
               "configured on enterprise edition"
        if not self.cli_test:
            if self.failover_disk_period or \
                                   self.failover_server_group:
                if output and not error:
                    self.fail("setting autofailover disk issues feature\
                               should not in Community Edition")
        else:
            if self.failover_server_group:
                mesg = "--enable-failover-of-server-groups can only be " + \
                       "configured on enterprise edition"

        if output and mesg not in str(output[0]):
            self.fail("Setting EE autofailover features \
                       should not in Community Edition")
        else:
            self.log.info("EE setting autofailover are disable in CE")
        conn.disconnect()

    def test_set_bucket_compression(self):
        """
           CE does not allow setting bucket compression on a bucket
           from vulcan 5.5.0.   Compression modes: off, active, passive
           Note: must set defaultbucket=False for this test
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        self.compression_mode = self.input.param("compression_mode", "off")
        cmd = 'curl -X POST -u Administrator:password \
                                    http://{0}:8091/pools/default/buckets \
                                 -d name=bucket0 \
                                 -d compressionMode={1} \
                                 -d authType=sasl \
                                 -d ramQuotaMB=100 '.format(
            self.master.ip, self.compression_mode)
        if self.cli_test:
            cmd = "{0}couchbase-cli bucket-create -c {1}:8091 --username Administrator \
                --password password --bucket bucket0 --bucket-type couchbase \
                --bucket-ramsize 512 --bucket-replica 1 --bucket-priority high \
                --bucket-eviction-policy fullEviction --enable-flush 0 \
                --enable-index-replica 1 --compression-mode {2}".format(
                self.bin_path, self.master.ip, self.compression_mode)
        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "Compression mode is supported in enterprise edition only"
        if self.cli_test:
            mesg = "Compression mode can only be configured on enterprise edition"
        if output and mesg not in str(output[0]):
            self.fail("Setting bucket compression should not in CE")
        conn.disconnect()
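The last two tests shell out to curl to confirm that EE-only bucket options are rejected on CE. A rough equivalent using the `requests` package is sketched below; the node address is a placeholder and the response handling is an assumption, while the expected error text is the one asserted in test_set_bucket_compression above.

import requests

resp = requests.post(
    "http://10.0.0.1:8091/pools/default/buckets",           # hypothetical node
    auth=("Administrator", "password"),
    data={"name": "bucket0", "ramQuotaMB": 100, "compressionMode": "active"},
)
if "Compression mode is supported in enterprise edition only" not in resp.text:
    raise AssertionError("compressionMode should be rejected on Community Edition")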
Example #24
    def test_zone_enable_after_upgrade_from_ce_to_ee(self):
        self.services = self.input.param("services", "kv")
        params = {}
        params['product'] = self.product
        params['version'] = self.version
        params['vbuckets'] = [self.vbuckets]
        params['type'] = self.type
        """ install couchbasse server community edition to run the test """
        InstallerJob().parallel_install(self.servers[:3], params)

        params["type"] = "enterprise"
        zone_name = "AAABBBCCCaakkkkmmm345672"
        serverInfo = self.servers[0]
        ini_servers = self.servers[:self.nodes_init]
        rest = RestConnection(serverInfo)
        self.user = serverInfo.rest_username
        self.password = serverInfo.rest_password
        if len(ini_servers) > 1:
            self.cluster.rebalance([ini_servers[0]], ini_servers[1:], [],\
                                                 services = self.services)
        rest = RestConnection(self.master)
        self._bucket_creation()
        """ verify all nodes in cluster in CE """
        if rest.is_enterprise_edition():
            raise Exception(
                "This test needs couchbase server community edition to run")

        self._load_all_buckets(self.servers[0], self.gen_load, "create", 0)
        try:
            self.log.info("create zone {0}".format(zone_name))
            result = rest.add_zone(zone_name)
            if result:
                raise Exception(
                    "Zone feature should not be available in CE version")
        except Exception as e:
            if "Failed" in e:
                pass

        for i in range(1, int(self.nodes_init) + 1):
            if i == 1:
                """ install EE on one node to do swap rebalance """
                InstallerJob().parallel_install(self.servers[3:], params)
                self.cluster.rebalance([ini_servers[0]], \
                                       [self.servers[int(self.nodes_init)]],\
                                       [self.servers[int(self.nodes_init) - i]],
                                                       services = self.services)
                self.log.info("sleep  5 seconds")
                time.sleep(5)
                try:
                    self.log.info(
                        "try to create zone {0} "
                        "when cluster is not completely EE".format(zone_name))
                    result = rest.add_zone(zone_name)
                    if result:
                        raise Exception(\
                            "Zone feature should not be available in CE version")
                except Exception as e:
                    if "Failed" in e:
                        pass
            else:
                InstallerJob().parallel_install([self.servers[int(self.nodes_init)\
                                                               - (i - 1)]], params)
                self.cluster.rebalance([ini_servers[0]],\
                                    [self.servers[int(self.nodes_init) - (i -1)]],
                                         [self.servers[int(self.nodes_init) - i]],
                                                         services = self.services)
                self.sleep(12, "wait 12 seconds after rebalance")
                if i < int(self.nodes_init):
                    try:
                        self.log.info(
                            "try to create zone {0} "
                            "when cluster is not completely EE".format(
                                zone_name))
                        result = rest.add_zone(zone_name)
                        if result:
                            raise Exception(\
                             "Zone feature should not be available in CE version")
                    except Exception as e:
                        if "Failed" in e:
                            pass
        serverInfo = self.servers[1]
        rest = RestConnection(serverInfo)
        self.user = serverInfo.rest_username
        self.password = serverInfo.rest_password
        if not rest.is_enterprise_edition():
            raise Exception("Test failed to upgrade cluster from CE to EE")
        self.log.info("try to create zone {0} "
                      "when cluster {1} is completely EE".format(
                          zone_name, serverInfo.ip))
        result = rest.add_zone(zone_name)
        self.log.info("sleep  5 seconds")
        time.sleep(5)
        if result:
            self.log.info("Zone feature is available in this cluster")
        else:
            raise Exception("Could not create zone with name: %s in cluster.  "
                            "It's a bug" % zone_name)
        if rest.is_zone_exist(zone_name.strip()):
            self.log.info("verified! zone '{0}' is existed".format(
                zone_name.strip()))
        else:
            raise Exception("There is not zone with name: %s in cluster.  "
                            "It's a bug" % zone_name)
        """ re-install enterprise edition for next test if there is any """
        InstallerJob().parallel_install([self.servers[0]], params)
        """ reset master node to new node to teardown cluster """
        self.log.info("Start to clean up cluster")
        self.master = self.servers[1]
        self.servers = self.servers[1:]
Example #25
    def test_replica_distribution_in_zone(self):
        if len(self.servers) < int(self.num_node):
            msg = "This test needs minimum {1} servers to run.\n  Currently in ini file \
                   has only {0} servers".format(len(self.servers),
                                                self.num_node)
            self.log.error("{0}".format(msg))
            raise Exception(msg)
        if self.shutdown_zone >= self.zone:
            msg = "shutdown zone should smaller than zone"
            raise Exception(msg)
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        zones = []
        zones.append("Group 1")
        nodes_in_zone = {}
        nodes_in_zone["Group 1"] = [serverInfo.ip]
        """ Create zone base on params zone in test"""
        try:
            if int(self.zone) > 1:
                for i in range(1, int(self.zone)):
                    a = "Group "
                    zone_name = a + str(i + 1)
                    zones.append(zone_name)
                    rest.add_zone(zone_name)
            servers_rebalanced = []
            self.user = serverInfo.rest_username
            self.password = serverInfo.rest_password
            if len(self.servers) % int(self.zone) != 0:
                msg = "unbalance zone.  Recaculate to make balance ratio node/zone"
                raise Exception(msg)
            """ Add node to each zone """
            k = 1
            for i in range(0, self.zone):
                if "Group 1" in zones[i]:
                    total_node_per_zone = int(len(self.servers)) // int(
                        self.zone) - 1
                else:
                    nodes_in_zone[zones[i]] = []
                    total_node_per_zone = int(len(self.servers)) // int(
                        self.zone)
                for n in range(0, total_node_per_zone):
                    nodes_in_zone[zones[i]].append(self.servers[k].ip)
                    rest.add_node(user=self.user, password=self.password, \
                        remoteIp=self.servers[k].ip, port='8091', zone_name=zones[i])
                    k += 1
            otpNodes = [node.id for node in rest.node_statuses()]
            """ Start rebalance and monitor it. """
            started = rest.rebalance(otpNodes, [])

            if started:
                try:
                    result = rest.monitorRebalance()
                except RebalanceFailedException as e:
                    self.log.error("rebalance failed: {0}".format(e))
                    return False, servers_rebalanced
                msg = "successfully rebalanced cluster {0}"
                self.log.info(msg.format(result))
            """ Verify replica of one node should not in same zone of active. """
            self._verify_replica_distribution_in_zones(nodes_in_zone, "tap")
            """ Simulate entire nodes down in zone(s) by killing erlang process"""
            shutdown_nodes = []
            if self.shutdown_zone >= 1 and self.zone >= 2:
                self.log.info("Start to shutdown nodes in zone to failover")
                for down_zone in range(1, self.shutdown_zone + 1):
                    down_zone = "Group " + str(down_zone + 1)
                    for sv in nodes_in_zone[down_zone]:
                        for si in self.servers:
                            if si.ip == sv:
                                server = si
                                shutdown_nodes.append(si)

                        shell = RemoteMachineShellConnection(server)
                        shell.kill_erlang(self.os_name)
                        """ Failover down node(s)"""
                        self.log.info("----> start failover node %s" %
                                      server.ip)
                        failed_over = rest.fail_over("ns_1@" + server.ip)
                        if not failed_over:
                            self.log.info(
                                "unable to failover the node the first time. \
                                           try again in 75 seconds..")
                            time.sleep(75)
                            failed_over = rest.fail_over("ns_1@" + server.ip)
                        self.assertTrue(
                            failed_over,
                            "unable to failover node after erlang was killed")
            otpNodes = [node.id for node in rest.node_statuses()]
            self.log.info("----> start rebalance after failover.")
            """ Start rebalance and monitor it. """
            started = rest.rebalance(otpNodes, [])
            if started:
                try:
                    result = rest.monitorRebalance()
                except RebalanceFailedException as e:
                    self.log.error("rebalance failed: {0}".format(e))
                    return False, servers_rebalanced
                msg = "successfully rebalanced in selected nodes from the cluster ? {0}"
                self.log.info(msg.format(result))
            """ Compare current keys in bucekt with initial loaded keys count. """
            self._verify_total_keys(self.servers[0], self.num_items)
        except Exception as e:
            self.log.error(e)
            raise
        finally:
            self.log.info("---> remove all nodes in all zones")
            if shutdown_nodes:
                for node in shutdown_nodes:
                    conn = RemoteMachineShellConnection(node)
                    self.log.info(
                        "---> re-start nodes %s after erlang killed " %
                        node.ip)
                    conn.start_couchbase()
                    time.sleep(5)
            BucketOperationHelper.delete_all_buckets_or_assert(
                self.servers, self)
            ClusterOperationHelper.cleanup_cluster(self.servers,
                                                   master=self.master)
            self.log.info("---> remove all zones in cluster")
            rm_zones = rest.get_zone_names()
            for zone in rm_zones:
                if zone != "Group 1":
                    rest.delete_zone(zone)
class AutoRetryFailedRebalance(RebalanceBaseTest):
    def setUp(self):
        super(AutoRetryFailedRebalance, self).setUp()
        self.rest = RestConnection(self.servers[0])
        self.sleep_time = self.input.param("sleep_time", 15)
        self.enabled = self.input.param("enabled", True)
        self.afterTimePeriod = self.input.param("afterTimePeriod", 300)
        self.maxAttempts = self.input.param("maxAttempts", 1)
        self.log.info("Changing the retry rebalance settings ....")
        self.change_retry_rebalance_settings(
            enabled=self.enabled,
            afterTimePeriod=self.afterTimePeriod,
            maxAttempts=self.maxAttempts)
        self.rebalance_operation = self.input.param("rebalance_operation",
                                                    "rebalance_out")
        self.disable_auto_failover = self.input.param("disable_auto_failover",
                                                      True)
        self.auto_failover_timeout = self.input.param("auto_failover_timeout",
                                                      120)
        if self.disable_auto_failover:
            self.rest.update_autofailover_settings(False, 120)
        else:
            self.rest.update_autofailover_settings(True,
                                                   self.auto_failover_timeout)

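    # A minimal sketch, not part of the original suite: the knobs applied in
    # setUp() via change_retry_rebalance_settings() correspond to Couchbase's
    # POST /settings/retryRebalance REST endpoint. The helper name below and
    # the direct use of the 'requests' package (instead of RestConnection)
    # are illustrative assumptions.
    def _set_retry_rebalance_settings_directly(self, enabled=True,
                                               after_time_period=300,
                                               max_attempts=1):
        import requests
        url = "http://{0}:8091/settings/retryRebalance".format(
            self.servers[0].ip)
        payload = {"enabled": str(enabled).lower(),
                   "afterTimePeriod": after_time_period,
                   "maxAttempts": max_attempts}
        resp = requests.post(url, data=payload,
                             auth=(self.servers[0].rest_username,
                                   self.servers[0].rest_password))
        # A non-2xx status means the server rejected the settings.
        resp.raise_for_status()
        return resp.status_code
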
    def tearDown(self):
        self.reset_retry_rebalance_settings()
        # Reset to default value
        super(AutoRetryFailedRebalance, self).tearDown()
        rest = RestConnection(self.servers[0])
        zones = rest.get_zone_names()
        for zone in zones:
            if zone != "Group 1":
                rest.delete_zone(zone)

    def test_auto_retry_of_failed_rebalance_where_failure_happens_before_rebalance(
            self):
        before_rebalance_failure = self.input.param("before_rebalance_failure",
                                                    "stop_server")
        # induce the failure before the rebalance starts
        self._induce_error(before_rebalance_failure)
        self.sleep(self.sleep_time)
        try:
            operation = self._rebalance_operation(self.rebalance_operation)
            operation.result()
        except Exception as e:
            self.log.info("Rebalance failed with : {0}".format(str(e)))
            # Recover from the error
            self._recover_from_error(before_rebalance_failure)
            self.check_retry_rebalance_succeeded()
        else:
            self.fail(
                "Rebalance did not fail as expected. Hence could not validate auto-retry feature.."
            )
        finally:
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self.start_server(self.servers[1])
            self.stop_firewall_on_node(self.servers[1])

    def test_auto_retry_of_failed_rebalance_where_failure_happens_during_rebalance(
            self):
        during_rebalance_failure = self.input.param("during_rebalance_failure",
                                                    "stop_server")
        try:
            operation = self._rebalance_operation(self.rebalance_operation)
            self.sleep(self.sleep_time)
            # induce the failure during the rebalance
            self._induce_error(during_rebalance_failure)
            operation.result()
        except Exception as e:
            self.log.info("Rebalance failed with : {0}".format(str(e)))
            # Recover from the error
            self._recover_from_error(during_rebalance_failure)
            self.check_retry_rebalance_succeeded()
        else:
            # This is added as the failover task is not throwing exception
            if self.rebalance_operation == "graceful_failover":
                # Recover from the error
                self._recover_from_error(during_rebalance_failure)
                self.check_retry_rebalance_succeeded()
            else:
                self.fail(
                    "Rebalance did not fail as expected. Hence could not validate auto-retry feature.."
                )
        finally:
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self.start_server(self.servers[1])
            self.stop_firewall_on_node(self.servers[1])

    def test_auto_retry_of_failed_rebalance_does_not_get_triggered_when_rebalance_is_stopped(
            self):
        operation = self._rebalance_operation(self.rebalance_operation)
        reached = RestHelper(self.rest).rebalance_reached(30)
        self.assertTrue(reached,
                        "Rebalance failed or did not reach {0}%".format(30))
        self.rest.stop_rebalance(wait_timeout=self.sleep_time)
        result = json.loads(self.rest.get_pending_rebalance_info())
        self.log.info(result)
        retry_rebalance = result["retry_rebalance"]
        if retry_rebalance != "not_pending":
            self.fail(
                "Auto-retry succeeded even when Rebalance was stopped by user")

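    # A minimal sketch, not part of the original suite: the
    # get_pending_rebalance_info() calls in these tests read
    # GET /pools/default/pendingRetryRebalance, which reports
    # "retry_rebalance" ("pending" / "not_pending") and, when pending, the
    # "rebalance_id" that cancel_pending_rebalance() takes. The helper name
    # and the direct use of the 'requests' package are illustrative
    # assumptions.
    def _get_pending_retry_rebalance_directly(self):
        import requests
        url = "http://{0}:8091/pools/default/pendingRetryRebalance".format(
            self.servers[0].ip)
        resp = requests.get(url, auth=(self.servers[0].rest_username,
                                       self.servers[0].rest_password))
        resp.raise_for_status()
        return json.loads(resp.text)
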
    def test_negative_auto_retry_of_failed_rebalance_where_rebalance_will_be_cancelled(
            self):
        during_rebalance_failure = self.input.param("during_rebalance_failure",
                                                    "stop_server")
        post_failure_operation = self.input.param("post_failure_operation",
                                                  "cancel_pending_rebalance")
        try:
            operation = self._rebalance_operation(self.rebalance_operation)
            self.sleep(self.sleep_time)
            # induce the failure during the rebalance
            self._induce_error(during_rebalance_failure)
            operation.result()
        except Exception as e:
            self.log.info("Rebalance failed with : {0}".format(str(e)))
            # Recover from the error
            self._recover_from_error(during_rebalance_failure)
            result = json.loads(self.rest.get_pending_rebalance_info())
            self.log.info(result)
            retry_rebalance = result["retry_rebalance"]
            rebalance_id = result["rebalance_id"]
            if retry_rebalance != "pending":
                self.fail("Auto-retry of failed rebalance is not triggered")
            if post_failure_operation == "cancel_pending_rebalance":
                # cancel pending rebalance
                self.log.info(
                    "Cancelling rebalance Id: {0}".format(rebalance_id))
                self.rest.cancel_pending_rebalance(rebalance_id)
            elif post_failure_operation == "disable_auto_retry":
                # disable the auto retry of the failed rebalance
                self.log.info(
                    "Disable the the auto retry of the failed rebalance")
                self.change_retry_rebalance_settings(enabled=False)
            elif post_failure_operation == "retry_failed_rebalance_manually":
                # retry failed rebalance manually
                self.log.info(
                    "Retrying failed rebalance Id: {0}".format(rebalance_id))
                self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
            else:
                self.fail("Invalid post_failure_operation option")
            # Now check and ensure retry won't happen
            result = json.loads(self.rest.get_pending_rebalance_info())
            self.log.info(result)
            retry_rebalance = result["retry_rebalance"]
            if retry_rebalance != "not_pending":
                self.fail("Auto-retry of failed rebalance is not cancelled")
        else:
            self.fail(
                "Rebalance did not fail as expected. Hence could not validate auto-retry feature.."
            )
        finally:
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self.start_server(self.servers[1])
            self.stop_firewall_on_node(self.servers[1])

    def test_negative_auto_retry_of_failed_rebalance_where_rebalance_will_not_be_cancelled(
            self):
        during_rebalance_failure = self.input.param("during_rebalance_failure",
                                                    "stop_server")
        post_failure_operation = self.input.param("post_failure_operation",
                                                  "create_delete_buckets")
        zone_name = "Group_{0}_{1}".format(random.randint(1, 1000000000),
                                           self._testMethodName)
        zone_name = zone_name[0:60]
        default_zone = "Group 1"
        moved_node = []
        moved_node.append(self.servers[1].ip)
        try:
            operation = self._rebalance_operation(self.rebalance_operation)
            self.sleep(self.sleep_time)
            # induce the failure during the rebalance
            self._induce_error(during_rebalance_failure)
            operation.result()
        except Exception as e:
            self.log.info("Rebalance failed with : {0}".format(str(e)))
            # Recover from the error
            self._recover_from_error(during_rebalance_failure)
            result = json.loads(self.rest.get_pending_rebalance_info())
            self.log.info(result)
            retry_rebalance = result["retry_rebalance"]
            if retry_rebalance != "pending":
                self.fail("Auto-retry of failed rebalance is not triggered")
            if post_failure_operation == "create_delete_buckets":
                # delete buckets and create new one
                BucketOperationHelper.delete_all_buckets_or_assert(
                    servers=self.servers, test_case=self)
                self.sleep(self.sleep_time)
                BucketOperationHelper.create_bucket(self.master,
                                                    test_case=self)
            elif post_failure_operation == "change_replica_count":
                # change replica count
                self.log.info("Changing replica count of buckets")
                for bucket in self.buckets:
                    self.rest.change_bucket_props(bucket, replicaNumber=2)
            elif post_failure_operation == "change_server_group":
                # change server group
                self.log.info("Creating new zone " + zone_name)
                self.rest.add_zone(zone_name)
                self.log.info("Moving {0} to new zone {1}".format(
                    moved_node, zone_name))
                status = self.rest.shuffle_nodes_in_zones(
                    moved_node, default_zone, zone_name)
            else:
                self.fail("Invalid post_failure_operation option")
            # In these failure scenarios, while the retry is pending, the retry will be attempted but will fail
            try:
                self.check_retry_rebalance_succeeded()
            except Exception as e:
                self.log.info(e)
                if "Retrying of rebalance still did not help. All the retries exhausted" not in str(
                        e):
                    self.fail(
                        "Auto retry of failed rebalance succeeded when it was expected to fail"
                    )
        else:
            self.fail(
                "Rebalance did not fail as expected. Hence could not validate auto-retry feature.."
            )
        finally:
            if post_failure_operation == "change_server_group":
                status = self.rest.shuffle_nodes_in_zones(
                    moved_node, zone_name, default_zone)
                self.log.info(
                    "Shuffle the node back to default group . Status : {0}".
                    format(status))
                self.sleep(self.sleep_time)
                self.log.info("Deleting new zone " + zone_name)
                try:
                    self.rest.delete_zone(zone_name)
                except:
                    self.log.info("Errors in deleting zone")
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self.start_server(self.servers[1])
            self.stop_firewall_on_node(self.servers[1])

    def test_auto_retry_of_failed_rebalance_with_rebalance_test_conditions(
            self):
        test_failure_condition = self.input.param("test_failure_condition")
        # induce the failure before the rebalance starts
        self._induce_rebalance_test_condition(test_failure_condition)
        self.sleep(self.sleep_time)
        try:
            operation = self._rebalance_operation(self.rebalance_operation)
            operation.result()
        except Exception as e:
            self.log.info("Rebalance failed with : {0}".format(str(e)))
            # Delete the rebalance test condition so that we recover from the error
            self._delete_rebalance_test_condition(test_failure_condition)
            self.check_retry_rebalance_succeeded()
        else:
            self.fail(
                "Rebalance did not fail as expected. Hence could not validate auto-retry feature.."
            )
        finally:
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self._delete_rebalance_test_condition(test_failure_condition)

    def test_auto_retry_of_failed_rebalance_with_autofailvoer_enabled(self):
        before_rebalance_failure = self.input.param("before_rebalance_failure",
                                                    "stop_server")
        # induce the failure before the rebalance starts
        self._induce_error(before_rebalance_failure)
        try:
            operation = self._rebalance_operation(self.rebalance_operation)
            operation.result()
        except Exception as e:
            self.log.info("Rebalance failed with : {0}".format(str(e)))
            if self.auto_failover_timeout < self.afterTimePeriod:
                self.sleep(self.auto_failover_timeout)
                result = json.loads(self.rest.get_pending_rebalance_info())
                self.log.info(result)
                retry_rebalance = result["retry_rebalance"]
                if retry_rebalance != "not_pending":
                    self.fail(
                        "Auto-failover did not cancel pending retry of the failed rebalance"
                    )
            else:
                try:
                    self.check_retry_rebalance_succeeded()
                except Exception as e:
                    if "Retrying of rebalance still did not help" not in str(
                            e):
                        self.fail(
                            "retry rebalance succeeded even without failover")
                    self.sleep(self.auto_failover_timeout)
                    self.cluster.rebalance(self.servers[:self.nodes_init], [],
                                           [])
        else:
            self.fail(
                "Rebalance did not fail as expected. Hence could not validate auto-retry feature.."
            )
        finally:
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self.start_server(self.servers[1])
            self.stop_firewall_on_node(self.servers[1])

    def _rebalance_operation(self, rebalance_operation):
        self.log.info("Starting rebalance operation of type : {0}".format(
            rebalance_operation))
        if rebalance_operation == "rebalance_out":
            operation = self.cluster.async_rebalance(
                self.servers[:self.nodes_init], [], self.servers[1:])
        elif rebalance_operation == "rebalance_in":
            operation = self.cluster.async_rebalance(
                self.servers[:self.nodes_init],
                [self.servers[self.nodes_init]], [])
        elif rebalance_operation == "swap_rebalance":
            self.rest.add_node(self.master.rest_username,
                               self.master.rest_password,
                               self.servers[self.nodes_init].ip,
                               self.servers[self.nodes_init].port)
            operation = self.cluster.async_rebalance(
                self.servers[:self.nodes_init], [],
                [self.servers[self.nodes_init - 1]])
        elif rebalance_operation == "graceful_failover":
            # TODO : retry for graceful failover is not yet implemented
            operation = self.cluster.async_failover(
                [self.master],
                failover_nodes=[self.servers[1]],
                graceful=True,
                wait_for_pending=120)
        return operation

    def _induce_error(self, error_condition):
        if error_condition == "stop_server":
            self.stop_server(self.servers[1])
        elif error_condition == "enable_firewall":
            self.start_firewall_on_node(self.servers[1])
        elif error_condition == "kill_memcached":
            self.kill_server_memcached(self.servers[1])
        elif error_condition == "reboot_server":
            shell = RemoteMachineShellConnection(self.servers[1])
            shell.reboot_node()
        elif error_condition == "kill_erlang":
            shell = RemoteMachineShellConnection(self.servers[1])
            shell.kill_erlang()
            self.sleep(self.sleep_time * 3)
        else:
            self.fail("Invalid error induce option")

    def _recover_from_error(self, error_condition):
        if error_condition == "stop_server" or error_condition == "kill_erlang":
            self.start_server(self.servers[1])
            self.sleep(self.sleep_time * 4)
        elif error_condition == "enable_firewall":
            self.stop_firewall_on_node(self.servers[1])
        elif error_condition == "reboot_server":
            self.sleep(self.sleep_time * 4)
            # wait till node is ready after warmup
            ClusterOperationHelper.wait_for_ns_servers_or_assert(
                [self.servers[1]], self, wait_if_warmup=True)

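    # Note on the two helpers below (assumed behaviour, inferred from how the
    # commands are used in this suite): testconditions:set(...) is sent to
    # ns_server over diag/eval to register a failure to be injected at a
    # specific rebalance stage (e.g. while verifying replication for the
    # "default" bucket, or after backfill of vbucket 1 completes),
    # testconditions:get(...) echoes the currently registered condition, and
    # testconditions:delete(...) clears it so the retried rebalance can pass.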
    def _induce_rebalance_test_condition(self, test_failure_condition):
        if test_failure_condition == "verify_replication":
            set_command = "testconditions:set(verify_replication, {fail, \"" + "default" + "\"})"
        elif test_failure_condition == "backfill_done":
            set_command = "testconditions:set(backfill_done, {for_vb_move, \"" + "default\", 1 , " + "fail})"
        else:
            set_command = "testconditions:set({0}, fail)".format(
                test_failure_condition)
        get_command = "testconditions:get({0})".format(test_failure_condition)
        for server in self.servers:
            rest = RestConnection(server)
            _, content = rest.diag_eval(set_command)
            self.log.info("Command : {0} Return : {1}".format(
                set_command, content))

        for server in self.servers:
            rest = RestConnection(server)
            _, content = rest.diag_eval(get_command)
            self.log.info("Command : {0} Return : {1}".format(
                get_command, content))

    def _delete_rebalance_test_condition(self, test_failure_condition):
        delete_command = "testconditions:delete({0})".format(
            test_failure_condition)
        get_command = "testconditions:get({0})".format(test_failure_condition)
        for server in self.servers:
            rest = RestConnection(server)
            _, content = rest.diag_eval(delete_command)
            self.log.info("Command : {0} Return : {1}".format(
                delete_command, content))

        for server in self.servers:
            rest = RestConnection(server)
            _, content = rest.diag_eval(get_command)
            self.log.info("Command : {0} Return : {1}".format(
                get_command, content))
Ejemplo n.º 27
    def test_replica_distribution_in_zone(self):
        if len(self.servers) < int(self.num_node):
            msg = "This test needs minimum {1} servers to run.\n  Currently in ini file \
                   has only {0} servers".format(len(self.servers), self.num_node)
            self.log.error("{0}".format(msg))
            raise Exception(msg)
        if self.shutdown_zone >= self.zone:
            msg = "shutdown zone should smaller than zone"
            raise Exception(msg)
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        zones = []
        zones.append("Group 1")
        nodes_in_zone = {}
        nodes_in_zone["Group 1"] = [serverInfo.ip]
        """ Create zone base on params zone in test"""
        if int(self.zone) > 1:
            for i in range(1,int(self.zone)):
                a = "Group "
                zones.append(a + str(i + 1))
                rest.add_zone(a + str(i + 1))
        servers_rebalanced = []
        self.user = serverInfo.rest_username
        self.password = serverInfo.rest_password
        if len(self.servers)%int(self.zone) != 0:
            msg = "unbalance zone.  Recaculate to make balance ratio node/zone"
            raise Exception(msg)
        """ Add node to each zone """
        k = 1
        for i in range(0, self.zone):
            if "Group 1" in zones[i]:
                total_node_per_zone = int(len(self.servers)) // int(self.zone) - 1
            else:
                nodes_in_zone[zones[i]] = []
                total_node_per_zone = int(len(self.servers)) // int(self.zone)
            for n in range(0,total_node_per_zone):
                nodes_in_zone[zones[i]].append(self.servers[k].ip)
                rest.add_node(user=self.user, password=self.password, \
                    remoteIp=self.servers[k].ip, port='8091', zone_name=zones[i])
                k += 1
        otpNodes = [node.id for node in rest.node_statuses()]
        """ Start rebalance and monitor it. """
        started = rest.rebalance(otpNodes, [])

        if started:
            try:
                result = rest.monitorRebalance()
            except RebalanceFailedException as e:
                self.log.error("rebalance failed: {0}".format(e))
                return False, servers_rebalanced
            msg = "successfully rebalanced cluster {0}"
            self.log.info(msg.format(result))
        """ Verify replica of one node should not in same zone of active. """
        self._verify_replica_distribution_in_zones(nodes_in_zone, "tap")

        """ Simulate entire nodes down in zone(s) by killing erlang process"""
        if self.shutdown_zone >= 1 and self.zone >=2:
            self.log.info("Start to shutdown nodes in zone to failover")
            for down_zone in range(1, self.shutdown_zone + 1):
                down_zone = "Group " + str(down_zone + 1)
                for sv in nodes_in_zone[down_zone]:
                    for si in self.servers:
                        if si.ip == sv:
                            server = si

                    shell = RemoteMachineShellConnection(server)
                    os_info = shell.extract_remote_info()
                    shell.kill_erlang(os_info)
                    """ Failover down node(s)"""
                    failed_over = rest.fail_over("ns_1@" + server.ip)
                    if not failed_over:
                        self.log.info("unable to failover the node the first time. \
                                       try again in 75 seconds..")
                        time.sleep(75)
                        failed_over = rest.fail_over("ns_1@" + server.ip)
                    self.assertTrue(failed_over, "unable to failover node after erlang killed")
        otpNodes = [node.id for node in rest.node_statuses()]
        self.log.info("start rebalance after failover.")
        """ Start rebalance and monitor it. """
        started = rest.rebalance(otpNodes, [])
        if started:
            try:
                result = rest.monitorRebalance()
            except RebalanceFailedException as e:
                self.log.error("rebalance failed: {0}".format(e))
                return False, servers_rebalanced
            msg = "successfully rebalanced in selected nodes from the cluster ? {0}"
            self.log.info(msg.format(result))
        """ Compare current keys in bucekt with initial loaded keys count. """
        self._verify_total_keys(self.servers[0], self.num_items)
Ejemplo n.º 28
class CommunityTests(CommunityBaseTest):
    def setUp(self):
        super(CommunityTests, self).setUp()
        self.command = self.input.param("command", "")
        self.zone = self.input.param("zone", 1)
        self.replica = self.input.param("replica", 1)
        self.command_options = self.input.param("command_options", '')
        self.set_get_ratio = self.input.param("set_get_ratio", 0.9)
        self.item_size = self.input.param("item_size", 128)
        self.shutdown_zone = self.input.param("shutdown_zone", 1)
        self.do_verify = self.input.param("do-verify", True)
        self.num_node = self.input.param("num_node", 4)
        self.services = self.input.param("services", None)
        self.start_node_services = self.input.param("start_node_services",
                                                    "kv")
        self.add_node_services = self.input.param("add_node_services", "kv")
        self.timeout = 6000
        self.user_add = self.input.param("user_add", None)
        self.user_role = self.input.param("user_role", None)

    def tearDown(self):
        super(CommunityTests, self).tearDown()

    def test_disabled_zone(self):
        disabled_zone = False
        zone_name = "group1"
        serverInfo = self.servers[0]
        self.rest = RestConnection(serverInfo)
        try:
            self.log.info("create zone name 'group1'!")
            result = self.rest.add_zone(zone_name)
            print("result  ", result)
        except Exception as e:
            if e:
                print(e)
                disabled_zone = True
                pass
        if not disabled_zone:
            self.fail("CE version should not have zone feature")

    def check_audit_available(self):
        audit_available = False
        try:
            self.rest.getAuditSettings()
            audit_available = True
        except Exception as e:
            if e:
                print(e)
        if audit_available:
            self.fail("This feature 'audit' only available on "
                      "Enterprise Edition")

    def check_ldap_available(self):
        ldap_available = False
        self.rest = RestConnection(self.master)
        try:
            s, c, h = self.rest.clearLDAPSettings()
            if s:
                ldap_available = True
        except Exception as e:
            if e:
                print(e)
        if ldap_available:
            self.fail("This feature 'ldap' only available on "
                      "Enterprise Edition")

    def check_set_services(self):
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        try:
            status = self.rest.init_node_services(hostname=self.master.ip,
                                                  services=[self.services])
        except Exception as e:
            if e:
                print(e)
        if self.services == "kv":
            if status:
                self.log.info("CE could set {0} only service.".format(
                    self.services))
            else:
                self.fail("Failed to set {0} only service.".format(
                    self.services))
        elif self.services == "index,kv":
            if status:
                self.fail("CE does not support kv and index on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "kv,n1ql":
            if status:
                self.fail("CE does not support kv and n1ql on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "kv,eventing":
            if status:
                self.fail("CE does not support kv and eventing on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "index,n1ql":
            if status:
                self.fail("CE does not support index and n1ql on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "index,kv,n1ql":
            if status:
                self.log.info(
                    "CE could set all services {0} on same nodes.".format(
                        self.services))
            else:
                self.fail("Failed to set kv, index and query services on CE")
        elif self.version[:5] in COUCHBASE_FROM_WATSON:
            if self.version[:5] in COUCHBASE_FROM_VULCAN and \
                    "eventing" in self.services:
                if status:
                    self.fail("CE does not support eventing in vulcan")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,index,kv":
                if status:
                    self.fail(
                        "CE does not support fts, index and kv on same node")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,index,n1ql":
                if status:
                    self.fail(
                        "CE does not support fts, index and n1ql on same node")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,kv,n1ql":
                if status:
                    self.fail(
                        "CE does not support fts, kv and n1ql on same node")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,index,kv,n1ql":
                if status:
                    self.log.info(
                        "CE could set all services {0} on same nodes.".format(
                            self.services))
                else:
                    self.fail("Failed to set "
                              "fts, index, kv, and query services on CE")
        else:
            self.fail("some services don't support")

    def check_set_services_when_add_node(self):
        self.rest.force_eject_node()
        sherlock_services_in_ce = ["kv", "index,kv,n1ql"]
        watson_services_in_ce = ["kv", "index,kv,n1ql", "fts,index,kv,n1ql"]
        self.sleep(5, "wait for node reset done")
        kv_quota = 0
        while kv_quota == 0:
            time.sleep(1)
            kv_quota = int(self.rest.get_nodes_self().mcdMemoryReserved)
        info = self.rest.get_nodes_self()
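        # The cluster RAM quota is derived from the node's mcdMemoryReserved
        # scaled by CLUSTER_QUOTA_RATIO; the index and FTS quotas are then
        # carved out of it, with an extra 100 MB of headroom left when the
        # cluster-wide memory quota is initialized below.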
        kv_quota = int(info.mcdMemoryReserved * (CLUSTER_QUOTA_RATIO))
        self.rest.set_service_memoryQuota(service='indexMemoryQuota',
                                          memoryQuota=INDEX_QUOTA)
        self.rest.set_service_memoryQuota(service='ftsMemoryQuota',
                                          memoryQuota=FTS_QUOTA)
        self.rest.init_cluster_memoryQuota(
            self.input.membase_settings.rest_username,
            self.input.membase_settings.rest_password,
            kv_quota - INDEX_QUOTA - FTS_QUOTA - 100)
        try:
            self.log.info("Initialize node with services {0}".format(
                self.start_node_services))
            status = self.rest.init_node_services(
                hostname=self.master.ip, services=[self.start_node_services])
            self.rest.init_cluster()
        except Exception as e:
            if e:
                print(e)
        if not status:
            if self.version not in COUCHBASE_FROM_WATSON and \
                         self.start_node_services not in sherlock_services_in_ce:
                self.log.info(
                    "initial services setting enforced in Sherlock CE")
            elif self.version in COUCHBASE_FROM_WATSON and \
                         self.start_node_services not in watson_services_in_ce:
                self.log.info("initial services setting enforced in Watson CE")

        elif status:
            add_node = False
            try:
                self.log.info("node with services {0} try to add".format(
                    self.add_node_services))
                add_node = self.cluster.rebalance(
                    self.servers[:2],
                    self.servers[1:2], [],
                    services=[self.add_node_services])
            except Exception:
                pass
            if add_node:
                self.get_services_map()
                list_nodes = self.get_nodes_from_services_map(
                    get_all_nodes=True)
                map = self.get_nodes_services()
                if map[self.master.ip] == self.start_node_services and \
                    map[self.servers[1].ip] == self.add_node_services:
                    self.log.info(
                        "services set correctly when node added & rebalance")
                else:
                    self.fail("services set incorrectly when node added & rebalance. "
                        "cluster expected services: {0}; set cluster services {1} ."
                        "add node expected srv: {2}; set add node srv {3}"\
                        .format(map[self.master.ip], self.start_node_services, \
                         map[self.servers[1].ip], self.add_node_services))
            else:
                if self.version not in COUCHBASE_FROM_WATSON:
                    if self.start_node_services in ["kv", "index,kv,n1ql"] and \
                          self.add_node_services not in ["kv", "index,kv,n1ql"]:
                        self.log.info("services are enforced in CE")
                    elif self.start_node_services not in [
                            "kv", "index,kv,n1ql"
                    ]:
                        self.log.info("services are enforced in CE")
                    else:
                        self.fail("maybe bug in add node")
                elif self.version in COUCHBASE_FROM_WATSON:
                    if self.start_node_services in ["kv", "index,kv,n1ql",
                         "fts,index,kv,n1ql"] and self.add_node_services not in \
                                    ["kv", "index,kv,n1ql", "fts,index,kv,n1ql"]:
                        self.log.info("services are enforced in CE")
                    elif self.start_node_services not in [
                            "kv", "index,kv,n1ql", "fts,index,kv,n1ql"
                    ]:
                        self.log.info("services are enforced in CE")
                    else:
                        self.fail("maybe bug in add node")
        else:
            self.fail("maybe bug in node initialization")

    def check_full_backup_only(self):
        """ for windows vm, ask IT to put uniq.exe at
            /cygdrive/c/Program Files (x86)/ICW/bin directory """

        self.remote = RemoteMachineShellConnection(self.master)
        """ put params items=0 in test param so that init items = 0 """
        self.remote.execute_command("{0}cbworkloadgen -n {1}:8091 -j -i 1000 " \
                                    "-u Administrator -p password" \
                                            .format(self.bin_path, self.master.ip))
        """ delete backup location before run backup """
        self.remote.execute_command("rm -rf {0}*".format(self.backup_location))
        output, error = self.remote.execute_command("ls -lh {0}".format(
            self.backup_location))
        self.remote.log_command_output(output, error)
        """ first full backup """
        self.remote.execute_command("{0}cbbackup http://{1}:8091 {2} -m full " \
                                    "-u Administrator -p password"\
                                    .format(self.bin_path,
                                            self.master.ip,
                                            self.backup_c_location))
        output, error = self.remote.execute_command("ls -lh {0}*/".format(
            self.backup_location))
        self.remote.log_command_output(output, error)
        output, error = self.remote.execute_command("{0}cbtransfer -u Administrator "\
                                           "-p password {1}*/*-full/ " \
                                           "stdout: | grep set | uniq | wc -l"\
                                           .format(self.bin_path,
                                                   self.backup_c_location))
        self.remote.log_command_output(output, error)
        if int(output[0]) != 1000:
            self.fail("full backup did not work in CE. "
                      "Expected 1000, actual: {0}".format(output[0]))
        self.remote.execute_command("{0}cbworkloadgen -n {1}:8091 -j -i 1000 "\
                                    " -u Administrator -p password --prefix=t_"
                                    .format(self.bin_path, self.master.ip))
        """ do different backup mode """
        self.remote.execute_command("{0}cbbackup -u Administrator -p password "\
                                    "http://{1}:8091 {2} -m {3}"\
                                    .format(self.bin_path,
                                            self.master.ip,
                                            self.backup_c_location,
                                            self.backup_option))
        output, error = self.remote.execute_command("ls -lh {0}".format(
            self.backup_location))
        self.remote.log_command_output(output, error)
        output, error = self.remote.execute_command("{0}cbtransfer -u Administrator "\
                                           "-p password {1}*/*-{2}/ stdout: "\
                                           "| grep set | uniq | wc -l"\
                                           .format(self.bin_path,
                                                   self.backup_c_location,
                                                   self.backup_option))
        self.remote.log_command_output(output, error)
        if int(output[0]) == 2000:
            self.log.info("backup option 'diff' is enforced in CE")
        elif int(output[0]) == 1000:
            self.fail("backup option 'diff' is not enforced in CE. "
                      "Expected 2000, actual: {0}".format(output[0]))
        else:
            self.fail("backup failed to backup correct items")
        self.remote.disconnect()

    def check_ent_backup(self):
        """ for CE version from Watson, cbbackupmgr exe file should not in bin """
        command = "cbbackupmgr"
        self.remote = RemoteMachineShellConnection(self.master)
        self.log.info("check if {0} in {1} directory".format(
            command, self.bin_path))
        found = self.remote.file_exists(self.bin_path, command)
        if found:
            self.log.info("found {0} in {1} directory".format(
                command, self.bin_path))
            self.log.info("Ent. backup in CE is in bin!")
        elif not found:
            self.fail(
                "CE from Cheshire Cat should contain {0}".format(command))
        self.remote.disconnect()

    def check_memory_optimized_storage_mode(self):
        """ from Watson, CE should not have option 'memory_optimized' to set """
        self.rest.force_eject_node()
        self.sleep(5, "wait for node reset done")
        try:
            self.log.info("Initialize node with 'Memory Optimized' option")
            status = self.rest.set_indexer_storage_mode(
                username=self.input.membase_settings.rest_username,
                password=self.input.membase_settings.rest_password,
                storageMode='memory_optimized')
        except Exception as ex:
            if ex:
                print(ex)
        if not status:
            self.log.info("Memory Optimized setting enforced in CE "
                          "Could not set memory_optimized option")
        else:
            self.fail("Memory Optimzed setting does not enforced in CE "
                      "We could set this option in")

    def check_plasma_storage_mode(self):
        """ from Watson, CE should not have option 'memory_optimized' to set """
        self.rest.force_eject_node()
        self.sleep(5, "wait for node reset done")
        try:
            self.log.info("Initialize node with 'Memory Optimized' option")
            status = self.rest.set_indexer_storage_mode(
                username=self.input.membase_settings.rest_username,
                password=self.input.membase_settings.rest_password,
                storageMode='plasma')
        except Exception as ex:
            if ex:
                print(ex)
        if not status:
            self.log.info("Plasma setting enforced in CE "
                          "Could not set Plasma option")
        else:
            self.fail("Plasma setting does not enforced in CE "
                      "We could set this option in")

    def check_x509_cert(self):
        """ from Watson, X509 certificate only support in EE """
        api = self.rest.baseUrl + "pools/default/certificate?extended=true"
        self.log.info("request to get certificate at "
                      "'pools/default/certificate?extended=true' "
                      "should return False")
        try:
            status, content, header = self.rest._http_request(api, 'GET')
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("This X509 certificate feature only available in EE")
        elif not status:
            if b'requires enterprise edition' in content:
                self.log.info("X509 cert is enforced in CE")

    def check_roles_base_access(self):
        """ from Watson, roles base access for admin should not in in CE """
        if self.user_add is None:
            self.fail(
                "We need to pass user name (user_add) to run this test. ")
        if self.user_role is None:
            self.fail(
                "We need to pass user roles (user_role) to run this test. ")
        api = self.rest.baseUrl + "settings/rbac/users/" + self.user_add
        self.log.info("url to run this test: %s" % api)
        """ add admin user """
        param = "name=%s&roles=%s" % (self.user_add, self.user_role)
        try:
            status, content, header = self.rest._http_request(
                api, 'PUT', param)
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("CE should not allow to add admin users")
        else:
            self.log.info("roles base is enforced in CE! ")

    def check_root_certificate(self):
        """ from watson, ce should not see root certificate
            manual test:
            curl -u Administrator:password -X GET
                            http://localhost:8091/pools/default/certificate """
        api = self.rest.baseUrl + "pools/default/certificate"
        try:
            status, content, header = self.rest._http_request(api, 'GET')
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("CE should not see root certificate!")
        elif b'requires enterprise edition' in content:
            self.log.info("root certificate is enforced in CE! ")

    def check_settings_audit(self):
        """ from watson, ce should not set audit
            manual test:
            curl -u Administrator:password -X GET
                            http://localhost:8091/settings/audit """
        api = self.rest.baseUrl + "settings/audit"
        try:
            status, content, header = self.rest._http_request(api, 'GET')
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("CE should not allow to set audit !")
        elif b'requires enterprise edition' in content:
            self.log.info("settings audit is enforced in CE! ")

    def check_infer(self):
        """ from watson, ce should not see infer
            manual test:
            curl -H "Content-Type: application/json" -X POST
                 -d '{"statement":"infer `bucket_name`;"}'
                       http://localhost:8093/query/service
            test params: new_services=kv-index-n1ql,default_bucket=False """
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        self.rest.init_node()
        bucket = "default"
        self.rest.create_bucket(bucket, ramQuotaMB=200)
        api = self.rest.query_baseUrl + "query/service"
        param = urllib.parse.urlencode({"statement": "infer `%s` ;" % bucket})
        try:
            status, content, header = self.rest._http_request(
                api, 'POST', param)
            json_parsed = json.loads(content)
        except Exception as ex:
            if ex:
                print(ex)
        if json_parsed["status"] == "success":
            self.fail("CE should not allow to run INFER !")
        elif json_parsed["status"] == "fatal":
            self.log.info("INFER is enforced in CE! ")

    def check_query_monitoring(self):
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        self.rest.init_node()
        bucket = "default"
        self.rest.create_bucket(bucket, ramQuotaMB=200)
        api = self.rest.query_baseUrl + "admin/settings"
        param = {'profile': 'phases'}
        try:
            status, content, header = self.rest._http_request(
                api, 'POST', json.dumps(param))
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("CE should not be allowed to do query monitoring !")
        elif b'Profiling is an EE only feature' in content:
            self.log.info("Query monitoring is enforced in CE! ")

    def check_flex_index(self):
        """ from watson, ce should not see infer
            manual test:
            curl -H "Content-Type: application/json" -X POST
                 -d '{"statement":"infer `bucket_name`;"}'
                       http://localhost:8093/query/service
            test params: new_services=kv-index-n1ql,default_bucket=False """
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        self.rest.init_node()
        bucket = "default"
        self.rest.create_bucket(bucket, ramQuotaMB=200)
        api = self.rest.query_baseUrl + "query/service"
        param = urllib.parse.urlencode({
            "statement":
            "SELECT META(d).id FROM `%s` AS d USE INDEX (USING FTS) WHERE d.f2 = 100;"
            % bucket
        })
        try:
            status, content, header = self.rest._http_request(
                api, 'POST', param)
            json_parsed = json.loads(content)
        except Exception as ex:
            if ex:
                print(ex)
        if json_parsed["status"] == "success":
            self.fail("CE should not allow to run flex index !")
        elif json_parsed["status"] == "fatal":
            self.log.info("Flex index is enforced in CE! ")

    def check_index_partitioning(self):
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        self.rest.init_node()
        bucket = "default"
        self.rest.create_bucket(bucket, ramQuotaMB=200)
        api = self.rest.query_baseUrl + "query/service"
        param = urllib.parse.urlencode({
            "statement":
            "CREATE INDEX idx ON `%s`(id) PARTITION BY HASH(META().id)" %
            bucket
        })
        try:
            status, content, header = self.rest._http_request(
                api, 'POST', param)
            json_parsed = json.loads(content)
        except Exception as ex:
            if ex:
                print(ex)
        if json_parsed["status"] == "success":
            self.fail("CE should not be allowed to run index partitioning !")
        elif json_parsed["status"] == "fatal":
            self.log.info("Index partitioning is enforced in CE! ")

    def check_query_cost_based_optimizer(self):
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        self.rest.init_node()
        bucket = "default"
        self.rest.create_bucket(bucket, ramQuotaMB=200)
        api = self.rest.query_baseUrl + "query/service"
        param = urllib.parse.urlencode({
            "statement":
            "UPDATE STATISTICS for `hotel` (type, address, city, country, free_breakfast, id, phone);"
        })
        try:
            status, content, header = self.rest._http_request(
                api, 'POST', param)
            json_parsed = json.loads(content)
        except Exception as ex:
            if ex:
                print(ex)
        if json_parsed["status"] == "success":
            self.fail("CE should not be allowed to run CBO !")
        elif json_parsed["status"] == "fatal":
            self.log.info("CBO is enforced in CE! ")

    def check_query_window_functions(self):
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        self.rest.init_node()
        bucket = "default"
        self.rest.create_bucket(bucket, ramQuotaMB=200)
        api = self.rest.query_baseUrl + "query/service"
        param = urllib.parse.urlencode({
            "statement":
            "SELECT d.id, d.destinationairport, CUME_DIST() OVER (PARTITION BY d.destinationairport \
                            ORDER BY d.distance NULLS LAST) AS `rank` \
                            FROM `%s` AS d \
                            WHERE d.type='route' \
                            LIMIT 7;" % bucket
        })
        try:
            status, content, header = self.rest._http_request(
                api, 'POST', param)
            json_parsed = json.loads(content)
        except Exception as ex:
            if ex:
                print(ex)
        if json_parsed["status"] == "success":
            self.fail("CE should not be allowed to use window functions !")
        elif json_parsed["status"] == "fatal":
            self.log.info("Window functions is enforced in CE! ")

    def check_auto_complete(self):
        """ this feature has not complete to block in CE """

    """ Check new features from spock start here """

    def check_cbbackupmgr(self):
        """ cbbackupmgr should not available in CE from spock """
        if self.cb_version[:5] in COUCHBASE_FROM_SPOCK:
            file_name = "cbbackupmgr" + self.file_extension
            self.log.info("check if cbbackupmgr in bin dir in CE")
            result = self.remote.file_exists(self.bin_path, file_name)
            if result:
                self.fail("cbbackupmgr should not in bin dir of CE")
            else:
                self.log.info("cbbackupmgr is enforced in CE")
        self.remote.disconnect()

    def test_max_ttl_bucket(self):
        """
            From vulcan, EE buckets have an option to set --max-ttl, but CE buckets do not.
            This test makes sure CE cannot create a bucket with option --max-ttl.
            This test must pass default_bucket=False
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        cmd = 'curl -X POST -u Administrator:password \
                                    http://{0}:8091/pools/default/buckets \
                                 -d name=bucket0 \
                                 -d maxTTL=100 \
                                 -d ramQuotaMB=100 '.format(self.master.ip)
        if self.cli_test:
            cmd = "{0}couchbase-cli bucket-create -c {1}:8091 --username Administrator \
                --password password --bucket bucket0 --bucket-type couchbase \
                --bucket-ramsize 512 --bucket-replica 1 --bucket-priority high \
                --bucket-eviction-policy fullEviction --enable-flush 0 \
                --enable-index-replica 1 --max-ttl 200".format(
                self.bin_path, self.master.ip)
        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "Max TTL is supported in enterprise edition only"
        if self.cli_test:
            mesg = "Maximum TTL can only be configured on enterprise edition"
        if output and mesg not in str(output[0]):
            self.fail("max ttl feature should not in Community Edition")
        buckets = RestConnection(self.master).get_buckets()
        if buckets:
            for bucket in buckets:
                self.log.info("bucekt in cluser: {0}".format(bucket.name))
                if bucket.name == "bucket0":
                    self.fail("Failed to enforce feature max ttl in CE.")
        conn.disconnect()

    def test_setting_audit(self):
        """
           CE does not allow to set audit from vulcan 5.5.0
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        cmd = 'curl -X POST -u Administrator:password \
              http://{0}:8091/settings/audit \
              -d auditdEnabled=true '.format(self.master.ip)
        if self.cli_test:
            cmd = "{0}couchbase-cli setting-audit -c {1}:8091 -u Administrator \
                -p password --audit-enabled 1 --audit-log-rotate-interval 604800 \
                --audit-log-path /opt/couchbase/var/lib/couchbase/logs --set"\
                .format(self.bin_path, self.master.ip)

        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "This http API endpoint requires enterprise edition"
        if output and mesg not in str(output[0]):
            self.fail("setting-audit feature should not in Community Edition")
        conn.disconnect()

    def test_setting_autofailover_enterprise_only(self):
        """
           CE does not allow setting auto failover on data disk issues
           or failover of server groups from Vulcan 5.5.0
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        self.failover_disk_period = self.input.param("failover_disk_period",
                                                     False)
        self.failover_server_group = self.input.param("failover_server_group",
                                                      False)

        failover_disk_period = ""
        if self.failover_disk_period:
            if self.cli_test:
                failover_disk_period = "--failover-data-disk-period 300"
            else:
                failover_disk_period = "-d failoverOnDataDiskIssues[timePeriod]=300"
        failover_server_group = ""
        if self.failover_server_group and self.cli_test:
            failover_server_group = "--enable-failover-of-server-group 1"

        cmd = 'curl -X POST -u Administrator:password \
              http://{0}:8091/settings/autoFailover -d enabled=true -d timeout=120 \
              -d maxCount=1 \
              -d failoverOnDataDiskIssues[enabled]=true {1} \
              -d failoverServerGroup={2}'.format(self.master.ip,
                                                 failover_disk_period,
                                                 self.failover_server_group)
        if self.cli_test:
            cmd = "{0}couchbase-cli setting-autofailover -c {1}:8091 \
                   -u Administrator -p password \
                   --enable-failover-on-data-disk-issues 1 {2} {3} "\
                  .format(self.bin_path, self.master.ip,
                          failover_disk_period,
                          failover_server_group)
        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "Auto failover on Data Service disk issues can only be " + \
               "configured on enterprise edition"
        if not self.cli_test:
            if self.failover_disk_period or \
                                   self.failover_server_group:
                if output and not error:
                    self.fail("setting autofailover disk issues feature\
                               should not in Community Edition")
        else:
            if self.failover_server_group:
                mesg = "--enable-failover-of-server-groups can only be " + \
                       "configured on enterprise edition"

        if output and mesg not in str(output[0]):
            self.fail("Setting EE autofailover features \
                       should not in Community Edition")
        else:
            self.log.info("EE setting autofailover are disable in CE")
        conn.disconnect()

    def test_set_bucket_compression(self):
        """
           CE does not allow setting bucket compression on a bucket
           from Vulcan 5.5.0.  Compression modes: off, active, passive
           Note: must set defaultbucket=False for this test
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        self.compression_mode = self.input.param("compression_mode", "off")
        cmd = 'curl -X POST -u Administrator:password \
                                    http://{0}:8091/pools/default/buckets \
                                 -d name=bucket0 \
                                 -d compressionMode={1} \
                                 -d ramQuotaMB=100 '.format(
            self.master.ip, self.compression_mode)
        if self.cli_test:
            cmd = "{0}couchbase-cli bucket-create -c {1}:8091 --username Administrator \
                --password password --bucket bucket0 --bucket-type couchbase \
                --bucket-ramsize 512 --bucket-replica 1 --bucket-priority high \
                --bucket-eviction-policy fullEviction --enable-flush 0 \
                --enable-index-replica 1 --compression-mode {2}".format(
                self.bin_path, self.master.ip, self.compression_mode)
        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "Compression mode is supported in enterprise edition only"
        if self.cli_test:
            mesg = "Compression mode can only be configured on enterprise edition"
        if output and mesg not in str(output[0]):
            self.fail("Setting bucket compression should not in CE")
        conn.disconnect()

    def test_ldap_groups(self):
        """
           LDAP Groups feature is not available in CE
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_MAD_HATTER:
            self.log.info("This test is only for MH and later")
            return
        cmd = 'curl -X POST -u Administrator:password \
                                    http://{0}:8091/settings/rbac/groups/admins \
                                 -d roles=admin \
                                 -d description="Couchbase+Server+Administrators" \
                                 --data-urlencode ldap_group_ref="uid=cbadmins,ou=groups,dc=example,dc=com"'\
                                .format(self.master.ip)
        if self.cli_test:
            cmd = '{0}couchbase-cli user-manage -c {1}:8091 --username Administrator \
                --password password  \
                --set-group \
                --group-name admins \
                --roles admin \
                --group-description "Couchbase Server Administrators" \
                --ldap-ref "uid=cbadmins,ou=groups,dc=example,dc=com"'.format(
                self.bin_path, self.master.ip)
        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "Requested resource not found."
        if self.cli_test:
            mesg = "ERROR: This http API endpoint requires enterprise edition"
        if output and mesg not in str(output[0]):
            self.fail("LDAP Groups should not be in CE")
        conn.disconnect()

    def test_ldap_cert(self):
        """
           LDAP Cert feature is not available in CE
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_MAD_HATTER:
            self.log.info("This test is only for MH and later")
            return
        cmd = 'curl -X POST -u Administrator:password http://{0}:8091/settings/ldap \
                                 -d hosts={1} \
                                 -d port=389 \
                                 -d encryption=StartTLSExtension \
                                 -d serverCertValidation=true \
                                 --data-urlencode [email protected] \
                                 -d bindDN="cn=admin,dc=example,dc=com" \
                                 -d bindPass=password \
                                 -d authenticationEnabled=true \
                                 -d authorizationEnabled=true \
                                 --data-urlencode groupsQuery="ou=groups,dc=example,dc=com??one?(member=%D)"'\
                                .format(self.master.ip, self.master.ip)
        if self.cli_test:
            cmd = '{0}couchbase-cli setting-ldap -c {1}:8091 --username Administrator \
                --password password  \
                --authentication-enabled 1 \
                --authorization-enabled 1 \
                --hosts {2} \
                --encryption startTLS \
                --client-cert root.crt \
                --bind-dn "cn=admin,dc=example,dc=com" \
                --bind-password password \
                --group-query "ou=groups,dc=example,dc=com??one?(member=%D)"'.format(
                self.bin_path, self.master.ip, self.master.ip)
        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "This http API endpoint requires enterprise edition"
        if self.cli_test:
            mesg = "ERROR: Command only available in enterprise edition"
        if output and mesg not in str(output[0]):
            self.fail("LDAP Cert should not be in CE")
        conn.disconnect()

    def test_network_encryption(self):
        """
           Encrypted network access is not available in CE
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_MAD_HATTER:
            self.log.info("This test is only for MH and later")
            return
        cmd = 'curl  -u Administrator:password -v -X POST \
                    http://{0}:8091/settings/security \
                    -d disableUIOverHttp=true \
                    -d clusterEncryptionLevel=control \
                    -d tlsMinVersion=tlsv1.1 \
                    -d "cipherSuites=["TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"]"'\
                                .format(self.master.ip)
        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "not supported in community edition"
        if output and mesg not in str(output[0]):
            self.fail("Encrypted network access should not be in CE")
        conn.disconnect()

    def test_n2n_encryption(self):
        """
           Encrypted network access is not available in CE
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_MAD_HATTER:
            self.log.info("This test is only for MH and later")
            return
        cmd = '/opt/couchbase/bin/couchbase-cli node-to-node-encryption \
                -c http://{0}:8091 \
                -u Administrator \
                -p password \
                --enable'\
                .format(self.master.ip)
        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "not supported in community edition"
        if output and mesg not in str(output[0]):
            self.fail("Encrypted network access should not be in CE")
        conn.disconnect()

    def test_log_redaction(self):
        """
            Log redaction feature is not available in CE
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_MAD_HATTER:
            self.log.info("This test is only for MH and later")
            return
        cmd = 'curl -X POST -u Administrator:password \
                                            http://{0}:8091/controller/startLogsCollection \
                                         -d nodes="*" \
                                         -d logRedactionLevel=partial'.format(
            self.master.ip)
        if self.cli_test:
            cmd = '{0}couchbase-cli collect-logs-start -c {1}:8091 --username Administrator \
                        --password password  \
                        --all-nodes \
                        --redaction-level partial'.format(
                self.bin_path, self.master.ip)
        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "log redaction is an enterprise only feature"
        if output and mesg not in str(output[0]):
            self.fail("Log redaction should not be in CE")
        conn.disconnect()
Example #29
class AutoRetryFailedRebalance(RebalanceBaseTest):
    def setUp(self):
        super(AutoRetryFailedRebalance, self).setUp()
        self.rest = RestConnection(self.cluster.master)
        self.sleep_time = self.input.param("sleep_time", 15)
        self.enabled = self.input.param("enabled", True)
        self.afterTimePeriod = self.input.param("afterTimePeriod", 300)
        self.maxAttempts = self.input.param("maxAttempts", 1)
        self.log.info("Changing the retry rebalance settings ....")
        self.change_retry_rebalance_settings(
            enabled=self.enabled,
            afterTimePeriod=self.afterTimePeriod,
            maxAttempts=self.maxAttempts)
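        # change_retry_rebalance_settings is assumed to hit the retry-rebalance
        # REST endpoint; the equivalent manual call would look roughly like:
        #   curl -u Administrator:password -X POST \
        #        http://<master-ip>:8091/settings/retryRebalance \
        #        -d enabled=true -d afterTimePeriod=300 -d maxAttempts=1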
        self.rebalance_operation = self.input.param("rebalance_operation",
                                                    "rebalance_out")
        self.disable_auto_failover = self.input.param("disable_auto_failover",
                                                      True)
        self.auto_failover_timeout = self.input.param("auto_failover_timeout",
                                                      120)
        if self.disable_auto_failover:
            self.rest.update_autofailover_settings(False, 120)
        else:
            self.rest.update_autofailover_settings(True,
                                                   self.auto_failover_timeout)
        self.cb_collect_failure_nodes = dict()
        # To support data load during auto retry op
        self.data_load = self.input.param("data_load", False)
        self.rebalance_failed_msg = "Rebalance failed as expected"

    def tearDown(self):
        self.reset_retry_rebalance_settings()
        self.cbcollect_info()
        # Reset to default value
        super(AutoRetryFailedRebalance, self).tearDown()
        rest = RestConnection(self.servers[0])
        zones = rest.get_zone_names()
        for zone in zones:
            if zone != "Group 1":
                rest.delete_zone(zone)

    def __update_cbcollect_expected_node_failures(self, nodes, reason):
        for node in nodes:
            self.cb_collect_failure_nodes[node.ip] = reason

    def set_retry_exceptions(self, doc_loading_spec):
        retry_exceptions = list()
        retry_exceptions.append(SDKException.AmbiguousTimeoutException)
        retry_exceptions.append(SDKException.TimeoutException)
        retry_exceptions.append(SDKException.RequestCanceledException)
        retry_exceptions.append(SDKException.DocumentNotFoundException)
        retry_exceptions.append(SDKException.ServerOutOfMemoryException)
        if self.durability_level:
            retry_exceptions.append(SDKException.DurabilityAmbiguousException)
            retry_exceptions.append(SDKException.DurabilityImpossibleException)
        doc_loading_spec[MetaCrudParams.RETRY_EXCEPTIONS] = retry_exceptions

    def async_data_load(self):
        doc_loading_spec = self.bucket_util.get_crud_template_from_package(
            "volume_test_load")
        self.set_retry_exceptions(doc_loading_spec)
        tasks = self.bucket_util.run_scenario_from_spec(
            self.task,
            self.cluster,
            self.cluster.buckets,
            doc_loading_spec,
            mutation_num=0,
            async_load=True,
            batch_size=self.batch_size,
            process_concurrency=self.process_concurrency)
        return tasks

    def data_validation(self, tasks):
        self.task.jython_task_manager.get_task_result(tasks)
        self.bucket_util.validate_doc_loading_results(tasks)
        if tasks.result is False:
            self.fail("Doc_loading failed")

        self.cluster_util.print_cluster_stats(self.cluster)
        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets,
                                                     timeout=1200)
        self.bucket_util.validate_docs_per_collections_all_buckets(
            self.cluster)
        self.bucket_util.print_bucket_stats(self.cluster)

    def test_auto_retry_of_failed_rebalance_where_failure_happens_before_rebalance(
            self):
        tasks = None
        before_rebalance_failure = self.input.param("before_rebalance_failure",
                                                    "stop_server")
        # induce the failure before the rebalance starts
        self._induce_error(before_rebalance_failure)
        self.sleep(self.sleep_time)

        try:
            rebalance = self._rebalance_operation(self.rebalance_operation)
            self.task.jython_task_manager.get_task_result(rebalance)
            self.assertTrue(rebalance.result, self.rebalance_failed_msg)
        except Exception as e:
            self.log.info("Rebalance failed with: {0}".format(str(e)))
            # Trigger cbcollect after rebalance failure
            self.cbcollect_info(trigger=True,
                                validate=False,
                                known_failures=self.cb_collect_failure_nodes)
            # Recover from the error
            self._recover_from_error(before_rebalance_failure)
            if self.data_load:
                tasks = self.async_data_load()
            self.check_retry_rebalance_succeeded()
            # Validate cbcollect result after rebalance retry
            self.cbcollect_info(trigger=False,
                                validate=True,
                                known_failures=self.cb_collect_failure_nodes)
            if self.data_load:
                self.data_validation(tasks)
        else:
            self.fail("Rebalance did not fail as expected. "
                      "Hence could not validate auto-retry feature..")
        finally:
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self.cluster_util.start_server(self.cluster, self.servers[1])
            self.cluster_util.stop_firewall_on_node(self.cluster,
                                                    self.servers[1])

    def test_auto_retry_of_failed_rebalance_where_failure_happens_during_rebalance(
            self):
        tasks = None
        during_rebalance_failure = self.input.param("during_rebalance_failure",
                                                    "stop_server")
        try:
            rebalance = self._rebalance_operation(self.rebalance_operation)
            self.sleep(self.sleep_time)
            # induce the failure during the rebalance
            self._induce_error(during_rebalance_failure)
            self.task.jython_task_manager.get_task_result(rebalance)
            self.assertTrue(rebalance.result, self.rebalance_failed_msg)
        except Exception as e:
            self.log.info("Rebalance failed with: {0}".format(str(e)))
            # Recover from the error
            self._recover_from_error(during_rebalance_failure)
            if self.data_load:
                tasks = self.async_data_load()
            self.check_retry_rebalance_succeeded()
            if self.data_load:
                self.data_validation(tasks)
        else:
            # This is added as the failover task does not throw an exception
            if self.rebalance_operation == "graceful_failover":
                # Recover from the error
                self._recover_from_error(during_rebalance_failure)
                if self.data_load:
                    tasks = self.async_data_load()
                self.check_retry_rebalance_succeeded()
                if self.data_load:
                    self.data_validation(tasks)
            else:
                self.fail("Rebalance did not fail as expected. "
                          "Hence could not validate auto-retry feature..")
        finally:
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self.cluster_util.start_server(self.cluster, self.servers[1])
            self.cluster_util.stop_firewall_on_node(self.cluster,
                                                    self.servers[1])

    def test_auto_retry_of_failed_rebalance_does_not_get_triggered_when_rebalance_is_stopped(
            self):
        _ = self._rebalance_operation(self.rebalance_operation)
        reached = self.cluster_util.rebalance_reached(self.rest, 30)
        self.assertTrue(reached, "Rebalance failed or did not reach 30%")
        # Trigger cbcollect before interrupting the rebalance
        self.cbcollect_info(trigger=True, validate=False)
        self.rest.stop_rebalance(wait_timeout=self.sleep_time)
        result = json.loads(self.rest.get_pending_rebalance_info())
        self.log.info(result)
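        # The pending-retry info is assumed to be JSON shaped roughly like
        # {"retry_rebalance": "pending" | "not_pending", "rebalance_id": ...};
        # only the "retry_rebalance" field is checked here.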
        # Validate cbcollect results
        self.cbcollect_info(trigger=False, validate=True)
        retry_rebalance = result["retry_rebalance"]
        if retry_rebalance != "not_pending":
            self.fail(
                "Auto-retry succeeded even when Rebalance was stopped by user")

    def test_negative_auto_retry_of_failed_rebalance_where_rebalance_will_be_cancelled(
            self):
        during_rebalance_failure = self.input.param("during_rebalance_failure",
                                                    "stop_server")
        post_failure_operation = self.input.param("post_failure_operation",
                                                  "cancel_pending_rebalance")
        try:
            rebalance = self._rebalance_operation(self.rebalance_operation)
            self.sleep(self.sleep_time)
            # induce the failure during the rebalance
            self._induce_error(during_rebalance_failure)
            self.task.jython_task_manager.get_task_result(rebalance)
            self.assertTrue(rebalance.result, self.rebalance_failed_msg)
        except Exception as e:
            self.log.info("Rebalance failed with: %s" % e)
            # Recover from the error
            self._recover_from_error(during_rebalance_failure)
            # TODO : Data load at this stage fails;
            # if self.data_load:
            #     tasks = self.async_data_load()
            result = json.loads(self.rest.get_pending_rebalance_info())
            # if self.data_load:
            #     self.data_validation(tasks)
            self.log.info(result)
            retry_rebalance = result["retry_rebalance"]
            rebalance_id = result["rebalance_id"]
            if retry_rebalance != "pending":
                self.fail("Auto-retry of failed rebalance is not triggered")
            if post_failure_operation == "cancel_pending_rebalance":
                # cancel pending rebalance
                self.log.info("Cancelling rebalance-id: %s" % rebalance_id)
                self.rest.cancel_pending_rebalance(rebalance_id)
            elif post_failure_operation == "disable_auto_retry":
                # disable the auto retry of the failed rebalance
                self.log.info("Disable the auto retry of the failed rebalance")
                self.change_retry_rebalance_settings(enabled=False)
            elif post_failure_operation == "retry_failed_rebalance_manually":
                # retry failed rebalance manually
                self.log.info("Retrying failed rebalance id %s" % rebalance_id)
                self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
            else:
                self.fail("Invalid post_failure_operation option")
            # Now check and ensure retry won't happen
            result = json.loads(self.rest.get_pending_rebalance_info())
            self.log.info(result)
            retry_rebalance = result["retry_rebalance"]
            if retry_rebalance != "not_pending":
                self.fail("Auto-retry of failed rebalance is not cancelled")
        else:
            self.fail("Rebalance did not fail as expected. "
                      "Hence could not validate auto-retry feature..")
        finally:
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self.cluster_util.start_server(self.cluster, self.servers[1])
            self.cluster_util.stop_firewall_on_node(self.cluster,
                                                    self.servers[1])

    def test_negative_auto_retry_of_failed_rebalance_where_rebalance_will_not_be_cancelled(
            self):
        during_rebalance_failure = self.input.param("during_rebalance_failure",
                                                    "stop_server")
        post_failure_operation = self.input.param("post_failure_operation",
                                                  "create_delete_buckets")
        zone_name = "Group_{0}_{1}".format(random.randint(1, 1000000000),
                                           self._testMethodName)
        zone_name = zone_name[0:60]
        default_zone = "Group 1"
        moved_node = [self.servers[1].ip]
        try:
            rebalance = self._rebalance_operation(self.rebalance_operation)
            self.sleep(self.sleep_time)
            # induce the failure during the rebalance
            self._induce_error(during_rebalance_failure)
            self.task.jython_task_manager.get_task_result(rebalance)
            self.assertTrue(rebalance.result, self.rebalance_failed_msg)
        except Exception as e:
            self.log.info("Rebalance failed with : {0}".format(str(e)))
            # Recover from the error
            self._recover_from_error(during_rebalance_failure)
            result = json.loads(self.rest.get_pending_rebalance_info())
            self.log.info(result)
            retry_rebalance = result["retry_rebalance"]
            if retry_rebalance != "pending":
                self.fail("Auto-retry of failed rebalance is not triggered")
            # if post_failure_operation == "create_delete_buckets":
            #     # delete buckets and create new one
            #     BucketOperationHelper.delete_all_buckets_or_assert(servers=self.servers, test_case=self)
            #     self.sleep(self.sleep_time)
            #     BucketOperationHelper.create_bucket(self.master, test_case=self)

            # Start cbcollect only if auto-retry of rebalance is triggered
            self.cbcollect_info(trigger=True, validate=False)
            if post_failure_operation == "change_replica_count":
                # change replica count
                self.log.info("Changing replica count of buckets")
                for bucket in self.cluster.buckets:
                    self.bucket_util.update_bucket_property(
                        self.cluster.master, bucket, replica_number=2)
            elif post_failure_operation == "change_server_group":
                # change server group
                self.log.info("Creating new zone " + zone_name)
                self.rest.add_zone(zone_name)
                self.log.info("Moving {0} to new zone {1}".format(
                    moved_node, zone_name))
                _ = self.rest.shuffle_nodes_in_zones(moved_node, default_zone,
                                                     zone_name)
            else:
                self.fail("Invalid post_failure_operation option")
            # In these failure scenarios, while the retry is pending,
            # the retry will be attempted but will fail
            try:
                self.check_retry_rebalance_succeeded()
                # Validate cbcollect results
                self.cbcollect_info(trigger=False, validate=True)
            except Exception as e:
                self.log.info(e)
                # Wait for cbcollect to complete before asserting
                self.cbcollect_info(trigger=False, validate=True)
                if "Retrying of rebalance still did not help. All the retries exhausted" not in str(
                        e):
                    self.fail(
                        "Auto retry of failed rebalance succeeded when it was expected to fail"
                    )
        else:
            self.fail(
                "Rebalance did not fail as expected. Hence could not validate auto-retry feature.."
            )
        finally:
            if post_failure_operation == "change_server_group":
                status = self.rest.shuffle_nodes_in_zones(
                    moved_node, zone_name, default_zone)
                self.log.info("Shuffle the node back to default group. "
                              "Status: %s" % status)
                self.sleep(self.sleep_time)
                self.log.info("Deleting new zone " + zone_name)
                try:
                    self.rest.delete_zone(zone_name)
                except:
                    self.log.info("Errors in deleting zone")
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self.cluster_util.start_server(self.cluster, self.servers[1])
            self.cluster_util.stop_firewall_on_node(self.cluster,
                                                    self.servers[1])

    def test_auto_retry_of_failed_rebalance_with_rebalance_test_conditions(
            self):
        tasks = None
        test_failure_condition = self.input.param("test_failure_condition")
        # induce the failure before the rebalance starts
        self._induce_rebalance_test_condition(test_failure_condition)
        self.sleep(self.sleep_time)
        try:
            rebalance = self._rebalance_operation(self.rebalance_operation)
            self.task.jython_task_manager.get_task_result(rebalance)
            self.assertTrue(rebalance.result, self.rebalance_failed_msg)
        except Exception as e:
            self.log.info("Rebalance failed with: %s" % e)
            # Delete the rebalance test condition to recover from the error
            self._delete_rebalance_test_condition(test_failure_condition)
            if self.data_load:
                tasks = self.async_data_load()
            self.check_retry_rebalance_succeeded()
            if self.data_load:
                self.data_validation(tasks)
        else:
            self.fail("Rebalance did not fail as expected. "
                      "Hence could not validate auto-retry feature..")
        finally:
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self._delete_rebalance_test_condition(test_failure_condition)

    def test_auto_retry_of_failed_rebalance_with_autofailvoer_enabled(self):
        before_rebalance_failure = self.input.param("before_rebalance_failure",
                                                    "stop_server")
        # induce the failure before the rebalance starts
        self._induce_error(before_rebalance_failure)
        try:
            rebalance = self._rebalance_operation(self.rebalance_operation)
            self.task.jython_task_manager.get_task_result(rebalance)
            self.assertTrue(rebalance.result, self.rebalance_failed_msg)
        except Exception as e:
            self.log.info("Rebalance failed with: {0}".format(str(e)))
            self.cbcollect_info(trigger=True,
                                validate=False,
                                known_failures=self.cb_collect_failure_nodes)
            if self.auto_failover_timeout < self.afterTimePeriod:
                self.sleep(self.auto_failover_timeout)
                result = json.loads(self.rest.get_pending_rebalance_info())
                self.log.info(result)
                retry_rebalance = result["retry_rebalance"]
                if retry_rebalance != "not_pending":
                    # Wait for cbcollect to complete before asserting
                    self.cbcollect_info(
                        trigger=False,
                        validate=True,
                        known_failures=self.cb_collect_failure_nodes)
                    self.fail("Auto-failover did not cancel pending retry "
                              "of the failed rebalance")
            else:
                try:
                    self.check_retry_rebalance_succeeded()
                except Exception as e:
                    expected_msg = "Retrying of rebalance still did not help"
                    if expected_msg not in str(e):
                        self.fail("Retry rebalance succeeded "
                                  "even without failover")
                    self.sleep(self.auto_failover_timeout)
                    self.cluster.rebalance(self.servers[:self.nodes_init], [],
                                           [])
                finally:
                    self.cbcollect_info(
                        trigger=False,
                        validate=True,
                        known_failures=self.cb_collect_failure_nodes)
        else:
            self.fail("Rebalance did not fail as expected. "
                      "Hence could not validate auto-retry feature..")
        finally:
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self.cluster_util.start_server(self.cluster, self.servers[1])
            self.cluster_util.stop_firewall_on_node(self.cluster,
                                                    self.servers[1])

    def test_cbcollect_with_rebalance_delay_condition(self):
        test_failure_condition = self.input.param("test_failure_condition")
        vb_num = self.input.param("target_vb")
        delay_milliseconds = self.input.param("delay_time", 60) * 1000
        # induce the failure before the rebalance starts
        self._induce_rebalance_test_condition(test_failure_condition,
                                              vb_num=vb_num,
                                              delay_time=delay_milliseconds)
        self.sleep(self.sleep_time,
                   "Wait for rebalance_test_condition to take effect")
        rebalance = self._rebalance_operation(self.rebalance_operation)
        # Start and validate cbcollect with rebalance delay
        self.cbcollect_info(trigger=True, validate=True)
        self.task.jython_task_manager.get_task_result(rebalance)
        if self.disable_auto_failover:
            self.rest.update_autofailover_settings(True, 120)
        self._delete_rebalance_test_condition(test_failure_condition)
        if rebalance.result is False:
            self.fail("Rebalance failed with test_condition: %s" %
                      test_failure_condition)

    def _rebalance_operation(self, rebalance_operation):
        operation = None
        self.log.info("Starting rebalance operation of type: %s" %
                      rebalance_operation)
        if rebalance_operation == "rebalance_out":
            operation = self.task.async_rebalance(
                self.cluster, [],
                self.cluster.servers[1:],
                retry_get_process_num=self.retry_get_process_num)
            self.__update_cbcollect_expected_node_failures(
                self.cluster.servers[1:], "out_node")
        elif rebalance_operation == "rebalance_in":
            operation = self.task.async_rebalance(
                self.cluster, [self.cluster.servers[self.nodes_init]], [],
                retry_get_process_num=self.retry_get_process_num)
            self.__update_cbcollect_expected_node_failures(
                [self.cluster.servers[self.nodes_init]], "in_node")
        elif rebalance_operation == "swap_rebalance":
            self.rest.add_node(self.cluster.master.rest_username,
                               self.cluster.master.rest_password,
                               self.cluster.servers[self.nodes_init].ip,
                               self.cluster.servers[self.nodes_init].port)
            operation = self.task.async_rebalance(
                self.cluster, [], [self.cluster.servers[self.nodes_init - 1]],
                retry_get_process_num=self.retry_get_process_num)
            self.__update_cbcollect_expected_node_failures(
                [self.cluster.servers[self.nodes_init]], "in_node")
            self.__update_cbcollect_expected_node_failures(
                [self.cluster.servers[self.nodes_init - 1]], "out_node")
        elif rebalance_operation == "graceful_failover":
            # TODO : retry for graceful failover is not yet implemented
            operation = self.task.async_failover(
                [self.cluster.master],
                failover_nodes=[self.cluster.servers[1]],
                graceful=True,
                wait_for_pending=300)
        return operation

    def _induce_error(self, error_condition):
        cb_collect_err_str = None
        if error_condition == "stop_server":
            cb_collect_err_str = "failed"
            self.cluster_util.stop_server(self.cluster, self.servers[1])
        elif error_condition == "enable_firewall":
            cb_collect_err_str = "failed"
            self.cluster_util.start_firewall_on_node(self.cluster,
                                                     self.servers[1])
        elif error_condition == "kill_memcached":
            self.cluster_util.kill_memcached(self.cluster,
                                             node=self.servers[1])
        elif error_condition == "reboot_server":
            cb_collect_err_str = "failed"
            shell = RemoteMachineShellConnection(self.servers[1])
            shell.reboot_node()
            shell.disconnect()
        elif error_condition == "kill_erlang":
            cb_collect_err_str = "failed"
            shell = RemoteMachineShellConnection(self.servers[1])
            shell.kill_erlang()
            shell.disconnect()
            self.sleep(self.sleep_time * 3)
        else:
            self.fail("Invalid error induce option")

        if cb_collect_err_str:
            self.__update_cbcollect_expected_node_failures([self.servers[1]],
                                                           cb_collect_err_str)

    def _recover_from_error(self, error_condition):
        if error_condition == "stop_server" \
                or error_condition == "kill_erlang":
            self.cluster_util.start_server(self.cluster, self.servers[1])
        elif error_condition == "enable_firewall":
            self.cluster_util.stop_firewall_on_node(self.cluster,
                                                    self.servers[1])
        elif error_condition == "reboot_server":
            self.sleep(self.sleep_time * 4)
            self.cluster_util.stop_firewall_on_node(self.cluster,
                                                    self.servers[1])

    def _induce_rebalance_test_condition(self,
                                         test_failure_condition,
                                         bucket_name="default",
                                         vb_num=1,
                                         delay_time=60000):
        if test_failure_condition == "verify_replication":
            set_command = 'testconditions:set(verify_replication, ' \
                          '{fail, "%s"})' % bucket_name
        elif test_failure_condition == "backfill_done":
            set_command = 'testconditions:set(backfill_done, ' \
                          '{for_vb_move, "%s", %s , fail})' \
                          % (bucket_name, vb_num)
        elif test_failure_condition == "delay_rebalance_start":
            set_command = 'testconditions:set(rebalance_start, {delay, %s}).' \
                          % delay_time
        elif test_failure_condition == "delay_verify_replication":
            set_command = 'testconditions:set(verify_replication, ' \
                          '{delay, "%s", %s})' % (bucket_name, delay_time)
        elif test_failure_condition == "delay_backfill_done":
            set_command = 'testconditions:set(backfill_done, ' \
                          '{for_vb_move, "%s", %s, {delay, %s}})' \
                          % (bucket_name, vb_num, delay_time)
        else:
            set_command = "testconditions:set(%s, fail)" \
                          % test_failure_condition
        get_command = "testconditions:get(%s)" % test_failure_condition
        for server in self.servers:
            rest = RestConnection(server)
            shell = RemoteMachineShellConnection(server)
            shell.enable_diag_eval_on_non_local_hosts()
            _, content = rest.diag_eval(set_command)
            self.log.debug("Set Command: %s. Return: %s" %
                           (set_command, content))
            shell.disconnect()

        for server in self.servers:
            rest = RestConnection(server)
            shell = RemoteMachineShellConnection(server)
            shell.enable_diag_eval_on_non_local_hosts()
            _, content = rest.diag_eval(get_command)
            self.log.info("Command: %s, Return: %s" % (get_command, content))

    def _delete_rebalance_test_condition(self, test_failure_condition):
        if test_failure_condition.startswith("delay_"):
            test_failure_condition = test_failure_condition[6:]

        delete_command = "testconditions:delete(%s)" % test_failure_condition
        get_command = "testconditions:get(%s)" % test_failure_condition
        for server in self.servers:
            rest = RestConnection(server)
            shell = RemoteMachineShellConnection(server)
            shell.enable_diag_eval_on_non_local_hosts()
            _, content = rest.diag_eval(delete_command)
            self.log.info("Command: %s, Return: %s" %
                          (delete_command, content))

        for server in self.servers:
            rest = RestConnection(server)
            shell = RemoteMachineShellConnection(server)
            shell.enable_diag_eval_on_non_local_hosts()
            _, content = rest.diag_eval(get_command)
            self.log.info("Command: %s, Return: %s" % (get_command, content))
Example #30
                    try:
                        self.log.info("try to create zone {0} when cluster is not completely EE".format(zone_name))
                        result = rest.add_zone(zone_name)
                        if result:
                            raise Exception("Zone feature should not be available in CE version")
                    except Exception as e:
                        if "Failed" in str(e):
                            pass
        serverInfo = self.servers[1]
        rest = RestConnection(serverInfo)
        self.user = serverInfo.rest_username
        self.password = serverInfo.rest_password
        if not rest.is_enterprise_edition():
            raise Exception("Test failed to upgrade cluster from CE to EE")
        self.log.info("try to create zone {0} when cluster {1} is completely EE".format(zone_name, serverInfo.ip))
        result = rest.add_zone(zone_name)
        self.log.info("sleep  5 seconds")
        time.sleep(5)
        if result:
            self.log.info("Zone feature is available in this cluster")
        else:
            raise Exception("Could not create zone with name: %s in cluster.  It's a bug" % zone_name)
        if rest.is_zone_exist(zone_name.strip()):
            self.log.info("verified! zone '{0}' is existed".format(zone_name.strip()))
        else:
            raise Exception("There is not zone with name: %s in cluster.  It's a bug" % zone_name)

        """ re-install enterprise edition for next test if there is any """
        InstallerJob().parallel_install([self.servers[0]], params)

        """ reset master node to new node to teardown cluster """
    def test_settingsCluster(self):
        ops = self.input.param("ops", None)
        source = 'ns_server'
        user = self.master.rest_username
        password = self.master.rest_password
        rest = RestConnection(self.master)

        if (ops == 'memoryQuota'):
            expectedResults = {'memory_quota':512, 'source':source, 'user':user, 'ip':self.ipAddress, 'port':12345, 'cluster_name':'', 'index_memory_quota':512,'fts_memory_quota': 302}
            rest.init_cluster_memoryQuota(expectedResults['user'], password, expectedResults['memory_quota'])

        elif (ops == 'loadSample'):
            expectedResults = {'name':'gamesim-sample', 'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
            rest.addSamples()
            #Get a REST Command for loading sample

        elif (ops == 'enableAutoFailover'):
            expectedResults = {'max_nodes':1, "timeout":120, 'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
            rest.update_autofailover_settings(True, expectedResults['timeout'])

        elif (ops == 'disableAutoFailover'):
            expectedResults = {'max_nodes':1, "timeout":120, 'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
            rest.update_autofailover_settings(False, expectedResults['timeout'])

        elif (ops == 'resetAutoFailover'):
            expectedResults = {'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
            rest.reset_autofailover()

        elif (ops == 'enableClusterAlerts'):
            expectedResults = {"encrypt":False, "email_server:port":25, "host":"localhost", "email_server:user":"******", "alerts":["auto_failover_node", "auto_failover_maximum_reached"], \
                             "recipients":["*****@*****.**"], "sender":"*****@*****.**", "source":"ns_server", "user":"******", 'ip':self.ipAddress, 'port':1234}
            rest.set_alerts_settings('*****@*****.**', '*****@*****.**', 'ritam', 'password',)

        elif (ops == 'disableClusterAlerts'):
            rest.set_alerts_settings('*****@*****.**', '*****@*****.**', 'ritam', 'password',)
            expectedResults = {'source':source, "user":user, 'ip':self.ipAddress, 'port':1234}
            rest.disable_alerts()

        elif (ops == 'modifyCompactionSettingsPercentage'):
            expectedResults = {"parallel_db_and_view_compaction":False,
                               "database_fragmentation_threshold:percentage":50,
                               "view_fragmentation_threshold:percentage":50,
                               "purge_interval":3,
                               "source":"ns_server",
                               "user":"******",
                               'source':source,
                               "user":user,
                               'ip':self.ipAddress,
                               'port':1234}
            rest.set_auto_compaction(dbFragmentThresholdPercentage=50, viewFragmntThresholdPercentage=50)

        elif (ops == 'modifyCompactionSettingsPercentSize'):
            expectedResults = {"parallel_db_and_view_compaction":False,
                               "database_fragmentation_threshold:percentage":50,
                               "database_fragmentation_threshold:size":10,
                               "view_fragmentation_threshold:percentage":50,
                               "view_fragmentation_threshold:size":10,
                               "purge_interval":3,
                               "source":"ns_server",
                               "user":"******",
                               'source':source,
                               "user":user,
                               'ip':self.ipAddress,
                               'port':1234}
            rest.set_auto_compaction(dbFragmentThresholdPercentage=50,
                                     viewFragmntThresholdPercentage=50,
                                     dbFragmentThreshold=10,
                                     viewFragmntThreshold=10)

        elif (ops == 'modifyCompactionSettingsTime'):
            expectedResults = {"parallel_db_and_view_compaction":False,
                               "database_fragmentation_threshold:percentage":50,
                               "database_fragmentation_threshold:size":10,
                               "view_fragmentation_threshold:percentage":50,
                               "view_fragmentation_threshold:size":10,
                               "allowed_time_period:abort_outside":True,
                               "allowed_time_period:to_minute":15,
                               "allowed_time_period:from_minute":12,
                               "allowed_time_period:to_hour":1,
                               "allowed_time_period:from_hour":1,
                               "purge_interval":3,
                               "source":"ns_server",
                               "user":"******",
                               'source':source,
                               "user":user,
                               'ip':self.ipAddress,
                               'port':1234,
                               }
            rest.set_auto_compaction(dbFragmentThresholdPercentage=50,
                                     viewFragmntThresholdPercentage=50,
                                     dbFragmentThreshold=10,
                                     viewFragmntThreshold=10,
                                     allowedTimePeriodFromHour=1,
                                     allowedTimePeriodFromMin=12,
                                     allowedTimePeriodToHour=1,
                                     allowedTimePeriodToMin=15,
                                     allowedTimePeriodAbort='true')

        elif (ops == "AddGroup"):
            expectedResults = {'group_name':'add group', 'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234}
            rest.add_zone(expectedResults['group_name'])
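            # The group URI is expected to look like
            # /pools/default/serverGroups/<uuid>, so splitting on "/" and
            # taking index 4 yields the group's uuid.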
            tempStr = rest.get_zone_uri()[expectedResults['group_name']]
            tempStr = (tempStr.split("/"))[4]
            expectedResults['uuid'] = tempStr

        elif (ops == "UpdateGroup"):
            expectedResults = {'group_name':'upGroup', 'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234, 'nodes':[]}
            rest.add_zone(expectedResults['group_name'])
            rest.rename_zone(expectedResults['group_name'], 'update group')
            expectedResults['group_name'] = 'update group'
            tempStr = rest.get_zone_uri()[expectedResults['group_name']]
            tempStr = (tempStr.split("/"))[4]
            expectedResults['uuid'] = tempStr

        elif (ops == "UpdateGroupAddNodes"):
            sourceGroup = "Group 1"
            destGroup = 'destGroup'
            expectedResults = {'group_name':destGroup, 'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234, 'nodes':['ns_1@' + self.master.ip], 'port':1234}
            #rest.add_zone(sourceGroup)
            rest.add_zone(destGroup)
            self.sleep(30)
            rest.shuffle_nodes_in_zones([self.master.ip], sourceGroup, destGroup)
            tempStr = rest.get_zone_uri()[expectedResults['group_name']]
            tempStr = (tempStr.split("/"))[4]
            expectedResults['uuid'] = tempStr

        elif (ops == "DeleteGroup"):
            expectedResults = {'group_name':'delete group', 'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234}
            rest.add_zone(expectedResults['group_name'])
            tempStr = rest.get_zone_uri()[expectedResults['group_name']]
            tempStr = (tempStr.split("/"))[4]
            expectedResults['uuid'] = tempStr
            rest.delete_zone(expectedResults['group_name'])

        elif (ops == "regenCer"):
            expectedResults = {'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234}
            rest.regenerate_cluster_certificate()

        elif (ops == 'renameNode'):
            rest.rename_node(self.master.ip, user, password)
            expectedResults = {"hostname":self.master.ip, "node":"ns_1@" + self.master.ip, "source":source, "user":user, "ip":self.ipAddress, "port":56845}

        try:
            self.checkConfig(self.eventID, self.master, expectedResults)
        finally:
            if (ops == "UpdateGroupAddNodes"):
                sourceGroup = "Group 1"
                destGroup = 'destGroup'
                rest.shuffle_nodes_in_zones([self.master.ip], destGroup, sourceGroup)

            rest = RestConnection(self.master)
            zones = rest.get_zone_names()
            for zone in zones:
                if zone != "Group 1":
                    rest.delete_zone(zone)
Example #32
    def test_settingsCluster(self):
        ops = self.input.param("ops", None)
        source = 'ns_server'
        user = self.master.rest_username
        password = self.master.rest_password
        rest = RestConnection(self.master)

        if (ops == 'memoryQuota'):
            expectedResults = {'memory_quota':512, 'source':source, 'user':user, 'ip':self.ipAddress, 'port':12345, 'cluster_name':'', 'index_memory_quota':256}
            rest.init_cluster_memoryQuota(expectedResults['user'], password, expectedResults['memory_quota'])

        elif (ops == 'loadSample'):
            expectedResults = {'name':'gamesim-sample', 'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
            rest.addSamples()
            #Get a REST Command for loading sample

        elif (ops == 'enableAutoFailover'):
            expectedResults = {'max_nodes':1, "timeout":120, 'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
            rest.update_autofailover_settings(True, expectedResults['timeout'])

        elif (ops == 'disableAutoFailover'):
            expectedResults = {'max_nodes':1, "timeout":120, 'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
            rest.update_autofailover_settings(False, expectedResults['timeout'])

        elif (ops == 'resetAutoFailover'):
            expectedResults = {'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
            rest.reset_autofailover()

        elif (ops == 'enableClusterAlerts'):
            expectedResults = {"encrypt":False, "email_server:port":25, "host":"localhost", "email_server:user":"******", "alerts":["auto_failover_node", "auto_failover_maximum_reached"], \
                             "recipients":["*****@*****.**"], "sender":"*****@*****.**", "source":"ns_server", "user":"******", 'ip':self.ipAddress, 'port':1234}
            rest.set_alerts_settings('*****@*****.**', '*****@*****.**', 'ritam', 'password',)

        elif (ops == 'disableClusterAlerts'):
            rest.set_alerts_settings('*****@*****.**', '*****@*****.**', 'ritam', 'password',)
            expectedResults = {'source':source, "user":user, 'ip':self.ipAddress, 'port':1234}
            rest.disable_alerts()

        elif (ops == 'modifyCompactionSettingsPercentage'):
            expectedResults = {"parallel_db_and_view_compaction":False,
                               "database_fragmentation_threshold:percentage":50,
                               "view_fragmentation_threshold:percentage":50,
                               "purge_interval":3,
                               "source":"ns_server",
                               "user":"******",
                               'source':source,
                               "user":user,
                               'ip':self.ipAddress,
                               'port':1234}
            rest.set_auto_compaction(dbFragmentThresholdPercentage=50, viewFragmntThresholdPercentage=50)

        elif (ops == 'modifyCompactionSettingsPercentSize'):
            expectedResults = {"parallel_db_and_view_compaction":False,
                               "database_fragmentation_threshold:percentage":50,
                               "database_fragmentation_threshold:size":10,
                               "view_fragmentation_threshold:percentage":50,
                               "view_fragmentation_threshold:size":10,
                               "purge_interval":3,
                               "source":"ns_server",
                               "user":"******",
                               'source':source,
                               "user":user,
                               'ip':self.ipAddress,
                               'port':1234}
            rest.set_auto_compaction(dbFragmentThresholdPercentage=50,
                                     viewFragmntThresholdPercentage=50,
                                     dbFragmentThreshold=10,
                                     viewFragmntThreshold=10)

        elif (ops == 'modifyCompactionSettingsTime'):
            expectedResults = {"parallel_db_and_view_compaction":False,
                               "database_fragmentation_threshold:percentage":50,
                               "database_fragmentation_threshold:size":10,
                               "view_fragmentation_threshold:percentage":50,
                               "view_fragmentation_threshold:size":10,
                               "allowed_time_period:abort_outside":True,
                               "allowed_time_period:to_minute":15,
                               "allowed_time_period:from_minute":12,
                               "allowed_time_period:to_hour":1,
                               "allowed_time_period:from_hour":1,
                               "purge_interval":3,
                               "source":"ns_server",
                               "user":"******",
                               'source':source,
                               "user":user,
                               'ip':self.ipAddress,
                               'port':1234,
                               }
            rest.set_auto_compaction(dbFragmentThresholdPercentage=50,
                                     viewFragmntThresholdPercentage=50,
                                     dbFragmentThreshold=10,
                                     viewFragmntThreshold=10,
                                     allowedTimePeriodFromHour=1,
                                     allowedTimePeriodFromMin=12,
                                     allowedTimePeriodToHour=1,
                                     allowedTimePeriodToMin=15,
                                     allowedTimePeriodAbort='true')

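        # Create a server group and capture its UUID from the group URI so the
        # audit record can be matched against it.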
        elif (ops == "AddGroup"):
            expectedResults = {'group_name':'add group', 'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234}
            rest.add_zone(expectedResults['group_name'])
            tempStr = rest.get_zone_uri()[expectedResults['group_name']]
            tempStr = (tempStr.split("/"))[4]
            expectedResults['uuid'] = tempStr

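        # Create a group, then rename it; the audit entry should carry the new
        # name together with the group's UUID.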
        elif (ops == "UpdateGroup"):
            expectedResults = {'group_name':'upGroup', 'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234, 'nodes':[]}
            rest.add_zone(expectedResults['group_name'])
            rest.rename_zone(expectedResults['group_name'], 'update group')
            expectedResults['group_name'] = 'update group'
            tempStr = rest.get_zone_uri()[expectedResults['group_name']]
            tempStr = (tempStr.split("/"))[4]
            expectedResults['uuid'] = tempStr

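        # Move the master node from the default 'Group 1' into a new group; the
        # node is moved back in the finally block below.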
        elif (ops == "UpdateGroupAddNodes"):
            sourceGroup = "Group 1"
            destGroup = 'destGroup'
            expectedResults = {'group_name':destGroup, 'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234, 'nodes':['ns_1@' + self.master.ip]}
            # 'Group 1' already exists as the default zone, so only the destination group is created
            rest.add_zone(destGroup)
            self.sleep(30)
            rest.shuffle_nodes_in_zones([self.master.ip], sourceGroup, destGroup)
            tempStr = rest.get_zone_uri()[expectedResults['group_name']]
            tempStr = (tempStr.split("/"))[4]
            expectedResults['uuid'] = tempStr

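        # Create a group and delete it again; the UUID has to be captured before
        # the deletion.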
        elif (ops == "DeleteGroup"):
            expectedResults = {'group_name':'delete group', 'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234}
            rest.add_zone(expectedResults['group_name'])
            tempStr = rest.get_zone_uri()[expectedResults['group_name']]
            tempStr = (tempStr.split("/"))[4]
            expectedResults['uuid'] = tempStr
            rest.delete_zone(expectedResults['group_name'])

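        # Regenerate the cluster certificate.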
        elif (ops == "regenCer"):
            expectedResults = {'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234}
            rest.regenerate_cluster_certificate()

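        # Rename the master node's hostname to its IP address and audit the call.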
        elif (ops == 'renameNode'):
            rest.rename_node(self.master.ip, user, password)
            expectedResults = {"hostname":self.master.ip, "node":"ns_1@" + self.master.ip, "source":source, "user":user, "ip":self.ipAddress, "port":56845}

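        # Verify the audit event for self.eventID against expectedResults, then
        # undo any group changes. checkConfig() is defined elsewhere in this
        # suite; a minimal, hypothetical sketch of the field comparison it is
        # assumed to perform (names below are illustrative, not the suite's API):
        #
        #   def _audit_fields_match(audit_event, expected):
        #       # every expected key/value must appear verbatim in the audit record
        #       return all(audit_event.get(key) == value
        #                  for key, value in expected.items())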
        try:
            self.checkConfig(self.eventID, self.master, expectedResults)
        finally:
            if (ops == "UpdateGroupAddNodes"):
                sourceGroup = "Group 1"
                destGroup = 'destGroup'
                rest.shuffle_nodes_in_zones([self.master.ip], destGroup, sourceGroup)

            rest = RestConnection(self.master)
            zones = rest.get_zone_names()
            for zone in zones:
                if zone != "Group 1":
                    rest.delete_zone(zone)