Ejemplo n.º 1
0
    def common_setup(input, testcase):
        """Bring every server back to a clean state before a test and
        provision the built-in RBAC admin user on the first node."""
        log.info(
            "==============  common_setup was started for test #{0} {1}==============".format(
                testcase.case_number, testcase._testMethodName))
        nodes = input.servers
        RemoteUtilHelper.common_basic_setup(nodes)
        BucketOperationHelper.delete_all_buckets_or_assert(nodes, testcase)
        ClusterOperationHelper.cleanup_cluster(nodes)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(nodes, testcase)

        # Create the built-in user on the first node.
        admin_user = [{'id': 'cbadminbucket',
                       'name': 'cbadminbucket',
                       'password': '******'}]
        RbacBase().create_user_source(admin_user, 'builtin', nodes[0])

        # Grant that user the admin role.
        admin_roles = [{'id': 'cbadminbucket',
                        'name': 'cbadminbucket',
                        'roles': 'admin'}]
        RbacBase().add_user_role(admin_roles, RestConnection(nodes[0]), 'builtin')

        log.info(
            "==============  common_setup was finished for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName))
Ejemplo n.º 2
0
 def common_setup(input, testcase):
     """Reset all servers, drop every bucket, and clean each node out of the
     cluster one at a time, then wait for ns_server on all of them."""
     nodes = input.servers
     RemoteUtilHelper.common_basic_setup(nodes)
     BucketOperationHelper.delete_all_buckets_or_assert(nodes, testcase)
     # Eject nodes individually rather than in one bulk call.
     for node in nodes:
         ClusterOperationHelper.cleanup_cluster([node])
     ClusterHelper.wait_for_ns_servers_or_assert(nodes, testcase)
Ejemplo n.º 3
0
 def tearDown(self):
     """Tear down after a failover test: stop the cluster helper, restore
     every server, re-open firewalled ports and restart couchbase on the
     nodes the test had cut off, then wipe buckets and cluster state."""
     try:
         self._cluster_helper.shutdown()
         log = logger.Logger.get_logger()
         log.info("==============  tearDown was started for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
         RemoteUtilHelper.common_basic_setup(self._servers)
         log.info("10 seconds delay to wait for membase-server to start")
         time.sleep(10)
         # Undo the firewall isolation applied during the test and bring
         # couchbase-server back up on each previously failed node.
         for server in self._cleanup_nodes:
             shell = RemoteMachineShellConnection(server)
             # Flush all iptables rules, then re-open the service port range.
             o, r = shell.execute_command("iptables -F")
             shell.log_command_output(o, r)
             o, r = shell.execute_command(
                 "/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT"
             )
             shell.log_command_output(o, r)
             o, r = shell.execute_command(
                 "/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT"
             )
             shell.log_command_output(o, r)
             o, r = shell.execute_command(
                 "/etc/init.d/couchbase-server start")
             shell.log_command_output(o, r)
             shell.disconnect()
         BucketOperationHelper.delete_all_buckets_or_assert(
             self._servers, self)
         ClusterOperationHelper.cleanup_cluster(self._servers)
         ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
         log.info("==============  tearDown was finished for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
     finally:
         # NOTE(review): this finally block is a no-op; exceptions raised
         # above still propagate to the caller.
         pass
Ejemplo n.º 4
0
 def common_setup(input, testcase):
     """Perform the basic remote reset on every server, delete all buckets,
     and remove each node from the cluster separately before waiting for
     ns_server readiness."""
     servers = input.servers
     RemoteUtilHelper.common_basic_setup(servers)
     BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
     # Per-node cluster cleanup instead of a single bulk call.
     for srv in servers:
         ClusterOperationHelper.cleanup_cluster([srv])
     ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
Ejemplo n.º 5
0
    def common_tearDown(servers, testcase):
        """Restore every server, wait for warmup, best-effort flush all
        buckets, then delete the buckets and clean up the cluster.

        Standalone helper: it operates only on the ``servers`` list and the
        ``testcase`` passed in — there is no ``self`` in scope.
        """
        log = logger.Logger.get_logger()
        log.info(
            "==============  common_tearDown was started for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName
            )
        )
        RemoteUtilHelper.common_basic_setup(servers)

        log.info("10 seconds delay to wait for couchbase-server to start")
        time.sleep(10)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            servers, testcase, wait_time=AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME * 15, wait_if_warmup=True
        )
        try:
            # Fix: the original read `self._servers[0]`, but this helper has
            # no `self`; the resulting NameError was silently swallowed by
            # the except below, so the flush never actually ran.
            rest = RestConnection(servers[0])
            buckets = rest.get_buckets()
            for bucket in buckets:
                MemcachedClientHelper.flush_bucket(servers[0], bucket.name)
        except Exception:
            # Flushing is best-effort; buckets may already be gone.
            pass
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        log.info(
            "==============  common_tearDown was finished for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName
            )
        )
Ejemplo n.º 6
0
 def tearDown(self):
     """Post-test cleanup: shut the cluster helper down, reset all servers,
     heal the firewalled nodes, restart couchbase on them, and wipe
     buckets/cluster state."""
     try:
         self._cluster_helper.shutdown()
         log = logger.Logger.get_logger()
         log.info("==============  tearDown was started for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
         RemoteUtilHelper.common_basic_setup(self._servers)
         log.info("10 seconds delay to wait for membase-server to start")
         time.sleep(10)
         # Remove the firewall rules added during the test and restart the
         # service on every node scheduled for cleanup.
         for server in self._cleanup_nodes:
             shell = RemoteMachineShellConnection(server)
             o, r = shell.execute_command("iptables -F")
             shell.log_command_output(o, r)
             o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
             shell.log_command_output(o, r)
             o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
             shell.log_command_output(o, r)
             o, r = shell.execute_command("/etc/init.d/couchbase-server start")
             shell.log_command_output(o, r)
             shell.disconnect()
         BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
         ClusterOperationHelper.cleanup_cluster(self._servers)
         ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
         log.info("==============  tearDown was finished for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
     finally:
         # NOTE(review): no-op finally; any exception above still propagates.
         pass
Ejemplo n.º 7
0
 def tearDown(self):
     """Skip all cleanup when the test failed and 'stop-on-failure' was
     requested; otherwise heal the firewalled nodes, restart couchbase on
     them, and delegate final cleanup to the base class."""
     if hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                 and 'stop-on-failure' in TestInputSingleton.input.test_params and \
                 str(TestInputSingleton.input.test_params['stop-on-failure']).lower() == 'true':
                 # _resultForDoCleanups is available starting with python2.7
                 log.warn("CLEANUP WAS SKIPPED")
                 self.cluster.shutdown()
                 self._log_finish(self)
     else:
         try:
             self.log.info("==============  tearDown was started for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
             RemoteUtilHelper.common_basic_setup(self.servers)
             self.log.info("10 seconds delay to wait for membase-server to start")
             time.sleep(10)
             # Re-open firewalled ports and restart couchbase on every node
             # the test had failed over.
             for server in self._cleanup_nodes:
                 shell = RemoteMachineShellConnection(server)
                 o, r = shell.execute_command("iptables -F")
                 shell.log_command_output(o, r)
                 o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
                 shell.log_command_output(o, r)
                 o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
                 shell.log_command_output(o, r)
                 o, r = shell.execute_command("/etc/init.d/couchbase-server start")
                 shell.log_command_output(o, r)
                 shell.disconnect()
             self.log.info("==============  tearDown was finished for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
         finally:
             # Base-class tearDown always runs, even if cleanup above failed.
             super(FailoverBaseTest, self).tearDown()
Ejemplo n.º 8
0
 def tearDown(self):
     """When the test failed and 'stop-on-failure' is set, skip cleanup and
     just shut the cluster down; otherwise restore firewalled nodes,
     restart couchbase there, and run the base-class tearDown."""
     if hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                 and 'stop-on-failure' in TestInputSingleton.input.test_params and \
                 str(TestInputSingleton.input.test_params['stop-on-failure']).lower() == 'true':
                 # _resultForDoCleanups is available starting with python2.7
                 log.warn("CLEANUP WAS SKIPPED")
                 self.cluster.shutdown()
                 self._log_finish(self)
     else:
         try:
             self.log.info("==============  tearDown was started for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
             RemoteUtilHelper.common_basic_setup(self.servers)
             self.log.info("10 seconds delay to wait for membase-server to start")
             time.sleep(10)
             # Flush iptables and restart the service on each node the test
             # had isolated.
             for server in self._cleanup_nodes:
                 shell = RemoteMachineShellConnection(server)
                 o, r = shell.execute_command("iptables -F")
                 shell.log_command_output(o, r)
                 o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
                 shell.log_command_output(o, r)
                 o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
                 shell.log_command_output(o, r)
                 o, r = shell.execute_command("/etc/init.d/couchbase-server start")
                 shell.log_command_output(o, r)
                 shell.disconnect()
             self.log.info("==============  tearDown was finished for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
         finally:
             # Base-class tearDown always runs, regardless of cleanup errors.
             super(FailoverBaseTest, self).tearDown()
 def tearDown(self):
     """Tear down a failover test.

     When the test failed and the 'stop-on-failure' parameter is true,
     cleanup is skipped entirely and the cluster is force-shut-down.
     Otherwise the servers are reset, buckets deleted, and cluster cleanup
     is attempted with each node in turn as master.
     """
     if hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
             and 'stop-on-failure' in TestInputSingleton.input.test_params and \
             str(TestInputSingleton.input.test_params['stop-on-failure']).lower() == 'true':
         # _resultForDoCleanups is available starting with python2.7
         log.warn("CLEANUP WAS SKIPPED")
         self.cluster.shutdown(force=True)
         self._log_finish(self)
     else:
         try:
             self.log.info("==============  tearDown was started for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
             RemoteUtilHelper.common_basic_setup(self.servers)
             BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
             # Try each node as master for cluster cleanup; a failure with
             # one candidate just moves on to the next.
             for node in self.servers:
                 try:
                     ClusterOperationHelper.cleanup_cluster(self.servers,
                                                            master=node)
                 except Exception:
                     # Fix: was a bare `except:`, which would also swallow
                     # SystemExit/KeyboardInterrupt; only trap real errors.
                     continue
             self.log.info("==============  tearDown was finished for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
         finally:
             # Base-class tearDown always runs.
             super(FailoverBaseTest, self).tearDown()
Ejemplo n.º 10
0
 def tearDown(self):
     """Tear down a failover test.

     Skips cleanup (with a forced cluster shutdown) when the test failed
     and 'stop-on-failure' is requested; otherwise resets the servers,
     deletes all buckets, and attempts cluster cleanup using each node in
     turn as the master.
     """
     if hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                 and 'stop-on-failure' in TestInputSingleton.input.test_params and \
                 str(TestInputSingleton.input.test_params['stop-on-failure']).lower() == 'true':
         # _resultForDoCleanups is available starting with python2.7
         log.warn("CLEANUP WAS SKIPPED")
         self.cluster.shutdown(force=True)
         self._log_finish(self)
     else:
         try:
             self.log.info("==============  tearDown was started for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
             RemoteUtilHelper.common_basic_setup(self.servers)
             BucketOperationHelper.delete_all_buckets_or_assert(
                 self.servers, self)
             # Best-effort cluster cleanup, retried with each node as master.
             for node in self.servers:
                 try:
                     ClusterOperationHelper.cleanup_cluster(self.servers,
                                                            master=node)
                 except Exception:
                     # Fix: was a bare `except:` — that also traps
                     # SystemExit/KeyboardInterrupt; narrow to Exception.
                     continue
             self.log.info("==============  tearDown was finished for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
         finally:
             super(FailoverBaseTest, self).tearDown()
Ejemplo n.º 11
0
    def common_tearDown(servers, testcase):
        """Reset the servers, give the service time to come back, then wipe
        all buckets and cluster state."""
        RemoteUtilHelper.common_basic_setup(servers)
        logger.Logger.get_logger().info(
            "10 seconds delay to wait for membase-server to start")
        time.sleep(10)

        # Buckets first, then cluster membership, then a readiness check.
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
Ejemplo n.º 12
0
    def common_tearDown(servers, testcase):
        """Run the basic remote reset on every server, pause for the service
        to restart, then delete buckets and clean up the cluster."""
        RemoteUtilHelper.common_basic_setup(servers)
        lg = logger.Logger.get_logger()
        lg.info("10 seconds delay to wait for membase-server to start")
        time.sleep(10)

        # Tear down state in order: buckets, cluster membership, readiness.
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
Ejemplo n.º 13
0
 def common_setup(input, testcase):
     """Reset every server and clean the cluster, then drop all buckets,
     with start/finish banners around the whole sequence."""
     log.info(
         "==============  common_setup was started for test #{0} {1}==============".format(
             testcase.case_number, testcase._testMethodName))
     nodes = input.servers
     RemoteUtilHelper.common_basic_setup(nodes)
     # This variant cleans the cluster before deleting buckets.
     ClusterOperationHelper.cleanup_cluster(nodes)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(nodes, testcase)
     BucketOperationHelper.delete_all_buckets_or_assert(nodes, testcase)
     log.info(
         "==============  common_setup was finished for test #{0} {1} ==============".format(
             testcase.case_number, testcase._testMethodName))
Ejemplo n.º 14
0
 def common_setup(input, testcase):
     """Reset every server, drop all buckets, then clean the cluster and
     wait for readiness, logging banners before and after."""
     log.info(
         "==============  common_setup was started for test #{0} {1}==============".format(
             testcase.case_number, testcase._testMethodName))
     nodes = input.servers
     RemoteUtilHelper.common_basic_setup(nodes)
     # Buckets are deleted before the cluster is cleaned in this variant.
     BucketOperationHelper.delete_all_buckets_or_assert(nodes, testcase)
     ClusterOperationHelper.cleanup_cluster(nodes)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(nodes, testcase)
     log.info(
         "==============  common_setup was finished for test #{0} {1} ==============".format(
             testcase.case_number, testcase._testMethodName))
Ejemplo n.º 15
0
 def common_tearDown(servers, testcase):
     """Reset the servers, wait for couchbase to come back, best-effort
     flush the default bucket, then wipe buckets and cluster state."""
     RemoteUtilHelper.common_basic_setup(servers)
     lg = logger.Logger.get_logger()
     lg.info("10 seconds delay to wait for couchbase-server to start")
     time.sleep(10)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
     # Flushing is best-effort; the bucket may not exist yet.
     try:
         MemcachedClientHelper.flush_bucket(servers[0], 'default')
     except Exception:
         pass
     BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
     ClusterOperationHelper.cleanup_cluster(servers)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
Ejemplo n.º 16
0
 def setUp(self):
     """Read test parameters, size the buckets, reset every server, and
     build a fresh cluster with buckets for a failover test."""
     log = logger.Logger.get_logger()
     # Pull all knobs from the shared test-input singleton.
     self._input = TestInputSingleton.input
     self._keys_count = self._input.param("keys_count", DEFAULT_KEY_COUNT)
     self._num_replicas = self._input.param("replica", DEFAULT_REPLICA)
     self.bidirectional = self._input.param("bidirectional", False)
     self.case_number = self._input.param("case_number", 0)
     self._value_size = self._input.param("value_size", 256)
     self.wait_timeout = self._input.param("wait_timeout", 60)
     self._servers = self._input.servers
     self.master = self._servers[0]
     self._failed_nodes = []
     num_buckets = 0
     self.buckets = []
     self.default_bucket = self._input.param("default_bucket", True)
     if self.default_bucket:
         self.default_bucket_name = "default"
         num_buckets += 1
     self._standard_buckets = self._input.param("standard_buckets", 0)
     self._sasl_buckets = self._input.param("sasl_buckets", 0)
     num_buckets += self._standard_buckets + self._sasl_buckets
     self.dgm_run = self._input.param("dgm_run", True)
     self.log = logger.Logger().get_logger()
     self._cluster_helper = Cluster()
     self.disabled_consistent_view = self._input.param(
         "disabled_consistent_view", None)
     self._quota = self._initialize_nodes(self._cluster_helper,
                                          self._servers,
                                          self.disabled_consistent_view)
     if self.dgm_run:
         # NOTE(review): this assigns self.quota but bucket_size below reads
         # self._quota -- looks like a typo; confirm intended attribute.
         self.quota = 256
     # Two-thirds of the quota split evenly across all buckets.
     self.bucket_size = int(
         (2.0 / 3.0) / float(num_buckets) * float(self._quota))
     self.gen_create = BlobGenerator('loadOne',
                                     'loadOne_',
                                     self._value_size,
                                     end=self._keys_count)
     self.add_back_flag = False
     self._cleanup_nodes = []
     log.info("==============  setup was started for test #{0} {1}=============="\
                   .format(self.case_number, self._testMethodName))
     # Start from a clean slate: reset servers, drop buckets, eject nodes.
     RemoteUtilHelper.common_basic_setup(self._servers)
     BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
     for server in self._servers:
         ClusterOperationHelper.cleanup_cluster([server])
     ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
     self._setup_cluster()
     self._create_buckets_()
     log.info("==============  setup was finished for test #{0} {1} =============="\
                   .format(self.case_number, self._testMethodName))
Ejemplo n.º 17
0
 def tearDown(self):
     """Skip cleanup (with a forced shutdown) when the test failed and
     'stop-on-failure' is set; otherwise reset the servers, check for
     panics/minidumps, and run the base-class tearDown."""
     skip_cleanup = hasattr(self, '_resultForDoCleanups') \
         and len(self._resultForDoCleanups.failures) > 0 \
         and 'stop-on-failure' in TestInputSingleton.input.test_params \
         and str(TestInputSingleton.input.test_params['stop-on-failure']).lower() == 'true'
     if skip_cleanup:
         # _resultForDoCleanups is available starting with python2.7
         self.log.warn("CLEANUP WAS SKIPPED")
         self.cluster.shutdown(force=True)
     else:
         try:
             self.log.info("==============  tearDown was started for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
             RemoteUtilHelper.common_basic_setup(self.cluster.servers)
             self.cluster_util.check_for_panic_and_mini_dumps(self.servers)
         finally:
             # Base-class tearDown always runs.
             super(FailoverBaseTest, self).tearDown()
Ejemplo n.º 18
0
 def common_tearDown(servers, testcase):
     """Reset the servers, wait for readiness, best-effort flush every
     bucket, then delete all buckets and clean up cluster state.

     Standalone helper: only ``servers``/``testcase`` are in scope.
     """
     RemoteUtilHelper.common_basic_setup(servers)
     log = logger.Logger.get_logger()
     log.info("10 seconds delay to wait for couchbase-server to start")
     time.sleep(10)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
     try:
         # Fix: the original read `self._servers[0]`, but there is no `self`
         # in this function; the NameError was silently swallowed below and
         # the flush never ran. Use the `servers` argument instead.
         rest = RestConnection(servers[0])
         buckets = rest.get_buckets()
         for bucket in buckets:
             MemcachedClientHelper.flush_bucket(servers[0], bucket.name)
     except Exception:
         # Flushing is best-effort.
         pass
     BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
     ClusterOperationHelper.cleanup_cluster(servers)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
Ejemplo n.º 19
0
 def setUp(self):
     """Read test parameters, compute bucket sizing, reset every server,
     and stand up a fresh cluster with buckets for a failover test."""
     log = logger.Logger.get_logger()
     # All configuration comes from the shared test-input singleton.
     self._input = TestInputSingleton.input
     self._keys_count = self._input.param("keys_count", DEFAULT_KEY_COUNT)
     self._num_replicas = self._input.param("replica", DEFAULT_REPLICA)
     self.bidirectional = self._input.param("bidirectional", False)
     self.case_number = self._input.param("case_number", 0)
     self._value_size = self._input.param("value_size", 256)
     self.wait_timeout = self._input.param("wait_timeout", 60)
     self._servers = self._input.servers
     self.master = self._servers[0]
     self._failed_nodes = []
     num_buckets = 0
     self.buckets = []
     self.default_bucket = self._input.param("default_bucket", True)
     if self.default_bucket:
         self.default_bucket_name = "default"
         num_buckets += 1
     self._standard_buckets = self._input.param("standard_buckets", 0)
     self._sasl_buckets = self._input.param("sasl_buckets", 0)
     num_buckets += self._standard_buckets + self._sasl_buckets
     self.dgm_run = self._input.param("dgm_run", True)
     self.log = logger.Logger().get_logger()
     self._cluster_helper = Cluster()
     self.disabled_consistent_view = self._input.param("disabled_consistent_view", None)
     self._quota = self._initialize_nodes(self._cluster_helper, self._servers, self.disabled_consistent_view)
     if self.dgm_run:
         # NOTE(review): assigns self.quota while bucket_size below uses
         # self._quota -- looks like a typo; confirm intended attribute.
         self.quota = 256
     # Two-thirds of the node quota split evenly across all buckets.
     self.bucket_size = int((2.0 / 3.0) / float(num_buckets) * float(self._quota))
     self.gen_create = BlobGenerator('loadOne', 'loadOne_', self._value_size, end=self._keys_count)
     self.add_back_flag = False
     self._cleanup_nodes = []
     log.info("==============  setup was started for test #{0} {1}=============="\
                   .format(self.case_number, self._testMethodName))
     # Clean slate: reset servers, drop buckets, eject every node.
     RemoteUtilHelper.common_basic_setup(self._servers)
     BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
     for server in self._servers:
         ClusterOperationHelper.cleanup_cluster([server])
     ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
     self._setup_cluster()
     self._create_buckets_()
     log.info("==============  setup was finished for test #{0} {1} =============="\
                   .format(self.case_number, self._testMethodName))
Ejemplo n.º 20
0
    def common_tearDown(servers, testcase):
        """Restore the servers, wait (generously) for warmup, best-effort
        flush every bucket, then delete buckets and clean the cluster."""
        log.info(
            "==============  common_tearDown was started for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName))
        RemoteUtilHelper.common_basic_setup(servers)

        log.info("10 seconds delay to wait for couchbase-server to start")
        time.sleep(10)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            servers, testcase,
            wait_time=AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME * 10,
            wait_if_warmup=True)
        # Best-effort flush of every bucket before deleting them.
        try:
            rest = RestConnection(servers[0])
            for bucket in rest.get_buckets():
                MemcachedClientHelper.flush_bucket(servers[0], bucket.name)
        except Exception:
            pass
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        log.info(
            "==============  common_tearDown was finished for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName))
Ejemplo n.º 21
0
    def common_setup(input, testcase):
        """Clean every server and recreate the built-in RBAC admin user,
        sleeping after each RBAC call to let the change propagate."""
        log.info("==============  common_setup was started for test #{0} {1}==============" \
                 .format(testcase.case_number, testcase._testMethodName))
        nodes = input.servers
        RemoteUtilHelper.common_basic_setup(nodes)
        BucketOperationHelper.delete_all_buckets_or_assert(nodes, testcase)
        ClusterOperationHelper.cleanup_cluster(nodes)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(nodes, testcase)

        # Create the built-in user, then wait for it to propagate.
        user = [{'id': 'cbadminbucket',
                 'name': 'cbadminbucket',
                 'password': '******'}]
        RbacBase().create_user_source(user, 'builtin', nodes[0])
        time.sleep(10)

        # Grant the admin role, then wait again.
        roles = [{'id': 'cbadminbucket',
                  'name': 'cbadminbucket',
                  'roles': 'admin'}]
        RbacBase().add_user_role(roles, RestConnection(nodes[0]), 'builtin')
        time.sleep(10)

        log.info("==============  common_setup was finished for test #{0} {1} ==============" \
                 .format(testcase.case_number, testcase._testMethodName))
Ejemplo n.º 22
0
 def test_node_memcached_failure(self):
     """Pause couchbase (memcached) on one node and verify the cluster is
     auto-reprovisioned back to a healthy, balanced state with its data
     intact."""
     # NOTE(review): under Python 3 this is float division; presumably the
     # original ran on Python 2 where it truncates -- confirm.
     timeout = self.timeout / 2
     status = self.rest.update_autoreprovision_settings(True, 1)
     if not status:
         self.fail('failed to change autoreprovision_settings!')
     self.sleep(5)
     # Simulate a memcached hang on the target node.
     self._pause_couchbase(self.server_fail)
     self.sleep(5)
     AutoReprovisionBaseTest.wait_for_warmup_or_assert(self.master, 1,
                                                       timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                       self)
     # Bring the node back and wait until no node is marked failed over.
     RemoteUtilHelper.common_basic_setup([self.server_fail])
     AutoReprovisionBaseTest.wait_for_failover_or_assert(self.master, 0,
                                                         timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                         self)
     helper = RestHelper(self.rest)
     self.assertTrue(helper.is_cluster_healthy(), "cluster status is not healthy")
     self.assertTrue(helper.is_cluster_rebalanced(), "cluster is not balanced")
     buckets = self.rest.get_buckets()
     for bucket in buckets:
         self.verify_loaded_data(self.master, bucket.name, self.loaded_items[bucket.name])
Ejemplo n.º 23
0
 def test_node_memcached_failure_in_series(self):
     """Walk the server list backwards, applying a random failure (stop,
     memcached pause, restart, firewall failover, or reboot) to each node,
     then verify auto-reprovision heals the cluster and data survives
     wherever replicas make that possible. (Python 2 code: print/xrange.)"""
     timeout = self.timeout / 2
     status = self.rest.update_autoreprovision_settings(True, 1)
     if not status:
         self.fail('failed to change autoreprovision_settings!')
     self.sleep(5)
     data_lost = False
     for i in reversed(xrange(len(self.servers))):
         print self.servers[i]
         operation = random.choice(['stop', 'memcached_failure', 'restart', 'failover', 'reboot'])
         shell = RemoteMachineShellConnection(self.servers[i])
         print "operation", operation
         # When the node under test is the current master, point master at
         # the next server instead.
         if i == 0:
             self.master = self.servers[1]
         if operation == 'stop':
             self._stop_couchbase(self.servers[i])
         elif operation == 'memcached_failure':
             self._pause_couchbase(self.servers[i])
         elif operation == 'restart':
             shell.restart_couchbase()
         elif operation == 'failover':
             RemoteUtilHelper.enable_firewall(self.servers[i])
         elif operation == 'reboot':
             if shell.extract_remote_info().type.lower() == 'windows':
                 o, r = shell.execute_command("shutdown -r -f -t 0")
                 self.sleep(200)
             elif shell.extract_remote_info().type.lower() == 'linux':
                 o, r = shell.execute_command("reboot")
             # NOTE(review): o/r are unbound if the OS is neither windows
             # nor linux -- confirm only those platforms occur here.
             shell.log_command_output(o, r)
             self.sleep(60)
         self.sleep(40)
         if operation == 'memcached_failure':
             AutoReprovisionBaseTest.wait_for_warmup_or_assert(self.master, 1,
                                                               timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                               self)
         if operation != 'restart' and operation != 'memcached_failure' and operation != 'reboot':
             AutoReprovisionBaseTest.wait_for_failover_or_assert(self.master, 1,
                                                                 timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                                 self)
         # Bring the node back (restart already recovers by itself).
         if operation != 'restart':
             RemoteUtilHelper.common_basic_setup([self.servers[i]])
         AutoReprovisionBaseTest.wait_for_failover_or_assert(self.master, 0,
                                                             timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                             self)
         helper = RestHelper(RestConnection(self.master))
         self.assertTrue(helper.is_cluster_healthy(), "cluster status is not healthy")
         self.sleep(40)
         if operation == 'memcached_failure' or operation == 'failover':
             self.assertTrue(helper.is_cluster_rebalanced(), "cluster is not balanced")
         else:
             # A kv node with replicas needs an explicit rebalance here.
             if 'kv' in self.servers[i].services and self.replicas > 0:
                 self.assertFalse(helper.is_cluster_rebalanced(), "cluster is balanced")
                 self.rest.rebalance(otpNodes=[node.id for node in self.rest.node_statuses()], ejectedNodes=[])
                 self.assertTrue(self.rest.monitorRebalance())
             else:
                 self.assertTrue(helper.is_cluster_rebalanced(), "cluster is not balanced")
         buckets = self.rest.get_buckets()
         # Without replicas, restart/reboot lose the node's data.
         if self.replicas == 0 and (operation == 'restart' or operation == 'reboot'):
             data_lost = True
         for bucket in buckets:
             if not data_lost:
                 self.verify_loaded_data(self.master, bucket.name, self.loaded_items[bucket.name])
Ejemplo n.º 24
0
 def test_node_memcached_failure_in_series(self):
     """Apply a randomly chosen failure (stop, memcached pause, restart,
     firewall failover, reboot) to every node in reverse order and verify
     auto-reprovision restores a healthy, balanced cluster; data integrity
     is checked unless replica count makes loss expected. (Python 2 code.)"""
     timeout = self.timeout / 2
     status = self.rest.update_autoreprovision_settings(True, 1)
     if not status:
         self.fail('failed to change autoreprovision_settings!')
     self.sleep(5)
     data_lost = False
     for i in reversed(xrange(len(self.servers))):
         print self.servers[i]
         operation = random.choice(
             ['stop', 'memcached_failure', 'restart', 'failover', 'reboot'])
         shell = RemoteMachineShellConnection(self.servers[i])
         print "operation", operation
         # If the node under test is the master, move master to the next one.
         if i == 0:
             self.master = self.servers[1]
         if operation == 'stop':
             self._stop_couchbase(self.servers[i])
         elif operation == 'memcached_failure':
             self._pause_couchbase(self.servers[i])
         elif operation == 'restart':
             shell.restart_couchbase()
         elif operation == 'failover':
             RemoteUtilHelper.enable_firewall(self.servers[i])
         elif operation == 'reboot':
             if shell.extract_remote_info().type.lower() == 'windows':
                 o, r = shell.execute_command("shutdown -r -f -t 0")
                 self.sleep(200)
             elif shell.extract_remote_info().type.lower() == 'linux':
                 o, r = shell.execute_command("reboot")
             # NOTE(review): o/r are unbound for any other OS type --
             # confirm only windows/linux occur here.
             shell.log_command_output(o, r)
             self.sleep(60)
         self.sleep(40)
         if operation == 'memcached_failure':
             AutoReprovisionBaseTest.wait_for_warmup_or_assert(
                 self.master, 1,
                 timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                 self)
         if operation != 'restart' and operation != 'memcached_failure' and operation != 'reboot':
             AutoReprovisionBaseTest.wait_for_failover_or_assert(
                 self.master, 1,
                 timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                 self)
         # Recover the node (restart recovers on its own).
         if operation != 'restart':
             RemoteUtilHelper.common_basic_setup([self.servers[i]])
         AutoReprovisionBaseTest.wait_for_failover_or_assert(
             self.master, 0,
             timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME, self)
         helper = RestHelper(RestConnection(self.master))
         self.assertTrue(helper.is_cluster_healthy(),
                         "cluster status is not healthy")
         self.sleep(40)
         if operation == 'memcached_failure' or operation == 'failover':
             self.assertTrue(helper.is_cluster_rebalanced(),
                             "cluster is not balanced")
         else:
             # kv nodes with replicas need an explicit rebalance here.
             if 'kv' in self.servers[i].services and self.replicas > 0:
                 self.assertFalse(helper.is_cluster_rebalanced(),
                                  "cluster is balanced")
                 self.rest.rebalance(otpNodes=[
                     node.id for node in self.rest.node_statuses()
                 ],
                                     ejectedNodes=[])
                 self.assertTrue(self.rest.monitorRebalance())
             else:
                 self.assertTrue(helper.is_cluster_rebalanced(),
                                 "cluster is not balanced")
         buckets = self.rest.get_buckets()
         # Without replicas, restart/reboot are expected to lose data.
         if self.replicas == 0 and (operation == 'restart'
                                    or operation == 'reboot'):
             data_lost = True
         for bucket in buckets:
             if not data_lost:
                 self.verify_loaded_data(self.master, bucket.name,
                                         self.loaded_items[bucket.name])