Example #1
    def setUp_bucket(self, unittest):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        unittest.assertTrue(self.input, msg="input parameters missing...")
        self.test = unittest
        self.master = self.input.servers[0]
        rest = RestConnection(self.master)
        rest.init_cluster(username=self.master.rest_username, password=self.master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self.test)

        serverInfo = self.master
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.memoryQuota)

        # Add built-in user
        testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******'}]
        RbacBase().create_user_source(testuser, 'builtin', self.master)
        time.sleep(10)

        # Assign user to role
        role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
        RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
        time.sleep(10)
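Most of the setups in this collection repeat the sequence shown above: initialize the node over REST, wipe leftover buckets and cluster state, then recreate the built-in RBAC user. A minimal standalone sketch of that reset, assuming the usual testrunner import paths (the helper name `reset_node` is hypothetical):

    # Sketch only: import paths assume the standard testrunner layout.
    from membase.api.rest_client import RestConnection
    from membase.helper.bucket_helper import BucketOperationHelper
    from membase.helper.cluster_helper import ClusterOperationHelper

    def reset_node(master, testcase):
        rest = RestConnection(master)
        # re-initialize the node with its REST credentials and memory quota
        rest.init_cluster(username=master.rest_username,
                          password=master.rest_password)
        rest.init_cluster_memoryQuota(
            memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
        # drop whatever state an earlier test left behind
        ClusterOperationHelper.cleanup_cluster([master])
        BucketOperationHelper.delete_all_buckets_or_assert([master], testcase)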
Example #2
 def cleanup(self):
     rest = RestConnection(self.master)
     rest.stop_rebalance()
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     for server in self.servers:
         ClusterOperationHelper.cleanup_cluster([server])
     ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
Example #3
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.num_of_docs = self.input.param("num_of_docs", 1000)

        rest = RestConnection(self.master)
        for server in self.servers:
            rest.init_cluster(server.rest_username, server.rest_password)

        info = rest.get_nodes_self()

        for server in self.servers:
            rest.init_cluster_memoryQuota(
                server.rest_username, server.rest_password, memoryQuota=info.mcdMemoryReserved
            )

        ClusterOperationHelper.cleanup_cluster(self.servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        self._create_default_bucket()

        # Rebalance the nodes
        ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
        ClusterOperationHelper.end_rebalance(self.master)
        self._log_start()
Example #4
    def setUp(self):
        self.log = logger.Logger().get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.master = self.servers[0]
        self.ip = self.master.ip
        self.finished = False
        self.keys = []
        self.keycount = 0
        self.failure_string = ""

        self.cleanup()

        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        self.port = info.moxi+1

        rest.init_cluster(username=self.master.rest_username,
                          password=self.master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        created = BucketOperationHelper.create_multiple_buckets(self.master,
                                                                replica=1,
                                                                bucket_ram_ratio=(2.0 / 3.0),
                                                                howmany=10,
                                                                sasl=False)
        self.assertTrue(created, "bucket creation failed")

        ready = BucketOperationHelper.wait_for_memcached(self.master, "bucket-0")
        self.assertTrue(ready, "wait_for_memcached failed")
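Note the create-then-poll shape: `create_multiple_buckets` returns before memcached is actually serving, so readiness is polled per bucket with `wait_for_memcached`. Condensed into a hypothetical helper, assuming the `bucket-<n>` naming implied by the example:

    # Hedged sketch; the "bucket-0", "bucket-1", ... names are an assumption
    # based on the wait_for_memcached call in the example above.
    def create_and_await_buckets(master, howmany=10):
        created = BucketOperationHelper.create_multiple_buckets(
            master, replica=1, bucket_ram_ratio=(2.0 / 3.0),
            howmany=howmany, sasl=False)
        assert created, "bucket creation failed"
        for i in range(howmany):
            ready = BucketOperationHelper.wait_for_memcached(
                master, "bucket-{0}".format(i))
            assert ready, "bucket-{0} never became ready".format(i)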
Example #5
 def tearDown(self):
     try:
         test_failed = len(self._resultForDoCleanups.errors)
         if self.driver and test_failed:
             BaseHelper(self).create_screenshot()
         if self.driver:
             self.driver.close()
         if test_failed and TestInputSingleton.input.param("stop-on-failure", False):
             print("test fails, teardown will be skipped!!!")
             return
         rest = RestConnection(self.servers[0])
         try:
             reb_status = rest._rebalance_progress_status()
         except ValueError as e:
             # an uninitialized cluster returns a non-JSON rebalance status
             if str(e) == 'No JSON object could be decoded':
                 print("cluster not initialized!!!")
                 return
             raise
         if reb_status == 'running':
             stopped = rest.stop_rebalance()
             self.assertTrue(stopped, msg="unable to stop rebalance")
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
         for server in self.servers:
             ClusterOperationHelper.cleanup_cluster([server])
         ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
     finally:
         if self.driver:
             self.shell.disconnect()
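Stripped of the WebDriver handling, the core of this teardown is a guard: never delete buckets while a rebalance is in flight. Roughly, as a hypothetical standalone helper using only the calls from the example:

    # Core teardown guard, minus the Selenium-specific steps.
    def safe_cluster_teardown(servers, testcase):
        rest = RestConnection(servers[0])
        if rest._rebalance_progress_status() == 'running':
            assert rest.stop_rebalance(), "unable to stop rebalance"
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        for server in servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)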
Example #6
 def _create_plasma_buckets(self):
     for bucket in self.buckets:
         if bucket.name.startswith("standard"):
             BucketOperationHelper.delete_bucket_or_assert(
                 serverInfo=self.dgmServer, bucket=bucket.name)
     self.buckets = [bu for bu in self.buckets if not bu.name.startswith("standard")]
     buckets = []
     for i in range(self.num_plasma_buckets):
         name = "plasma_dgm_" + str(i)
         buckets.append(name)
     bucket_size = self._get_bucket_size(self.quota,
                                         len(self.buckets)+len(buckets))
     self._create_buckets(server=self.master, bucket_list=buckets,
                          bucket_size=bucket_size)
     testuser = []
     rolelist = []
     for bucket in buckets:
         testuser.append({'id': bucket, 'name': bucket, 'password': '******'})
         rolelist.append({'id': bucket, 'name': bucket, 'roles': 'admin'})
     self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
     buckets = []
     for bucket in self.buckets:
         if bucket.name.startswith("plasma_dgm"):
             buckets.append(bucket)
     return buckets
Example #7
 def tearDown(self):
     if not self.input.param("skip_cleanup", True):
         if self.times_teardown_called > 1:
             self.shell.disconnect()
     if self.input.param("skip_cleanup", True):
         if self.case_number > 1 or self.times_teardown_called > 1:
             self.shell.disconnect()
     self.times_teardown_called += 1
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     zones = rest.get_zone_names()
     for zone in zones:
         if zone != "Group 1":
             rest.delete_zone(zone)
     self.clusters_dic = self.input.clusters
     if self.clusters_dic:
         if len(self.clusters_dic) > 1:
             self.dest_nodes = self.clusters_dic[1]
             self.dest_master = self.dest_nodes[0]
             if self.dest_nodes and len(self.dest_nodes) > 1:
                 self.log.info("======== clean up destination cluster =======")
                 rest = RestConnection(self.dest_nodes[0])
                 rest.remove_all_remote_clusters()
                 rest.remove_all_replications()
                 BucketOperationHelper.delete_all_buckets_or_assert(self.dest_nodes, self)
                 ClusterOperationHelper.cleanup_cluster(self.dest_nodes)
         elif len(self.clusters_dic) == 1:
             self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
     else:
         self.log.info("**** If run xdcr test, need cluster config is setup in ini file. ****")
     super(CliBaseTest, self).tearDown()
Example #8
 def test_oom_delete_bucket(self):
     """
     1. Get OOM
     2. Delete a bucket
     3. Verify if state of indexes is changed
     :return:
     """
     self.assertTrue(self._push_indexer_off_the_cliff(), "OOM Can't be achieved")
     for i in range(len(self.buckets)):
         log.info("Deleting bucket {0}...".format(self.buckets[i].name))
         BucketOperationHelper.delete_bucket_or_assert(serverInfo=self.oomServer, bucket=self.buckets[i].name)
         self.sleep(120)
         check = self._validate_indexer_status_oom()
         if not check:
             if i < len(self.buckets):
                 self.buckets = self.buckets[i+1:]
             else:
                 #TODO: Pras: Need better solution here
                 self.buckets = []
             break
         log.info("Indexer Still in OOM...")
     self.sleep(120)
     self.assertFalse(self._validate_indexer_status_oom(), "Indexer still in OOM")
     self._verify_bucket_count_with_index_count(self.load_query_definitions)
     self.multi_query_using_index(buckets=self.buckets,
                     query_definitions=self.load_query_definitions)
Example #9
 def tearDown(self):
     try:
         self._cluster_helper.shutdown()
         log = logger.Logger.get_logger()
         log.info("==============  tearDown was started for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
         RemoteUtilHelper.common_basic_setup(self._servers)
         log.info("10 seconds delay to wait for membase-server to start")
         time.sleep(10)
         for server in self._cleanup_nodes:
             shell = RemoteMachineShellConnection(server)
             o, r = shell.execute_command("iptables -F")
             shell.log_command_output(o, r)
             o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
             shell.log_command_output(o, r)
             o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
             shell.log_command_output(o, r)
             o, r = shell.execute_command("/etc/init.d/couchbase-server start")
             shell.log_command_output(o, r)
             shell.disconnect()
         BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
         ClusterOperationHelper.cleanup_cluster(self._servers)
         ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
         log.info("==============  tearDown was finished for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
     finally:
         pass
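Every remote call above follows the same shell protocol: execute, log stdout and stderr, disconnect when done. Distilled into a hypothetical helper (commands as in the example):

    # Sketch of the remote-shell protocol used in the example above.
    def restart_couchbase_and_open_firewall(server):
        shell = RemoteMachineShellConnection(server)
        try:
            o, r = shell.execute_command("iptables -F")  # flush firewall rules
            shell.log_command_output(o, r)
            o, r = shell.execute_command("/etc/init.d/couchbase-server start")
            shell.log_command_output(o, r)
        finally:
            shell.disconnect()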
Example #10
    def test_backup_with_spatial_data(self):
        num_docs = self.helper.input.param("num-docs", 5000)
        self.log.info("description : Make limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))
        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

        if not self.command_options:
            self.command_options = []
        options = self.command_options + [' -m full']

        self.total_backups = 1
        self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
        time.sleep(2)

        self.buckets = RestConnection(self.master).get_buckets()
        bucket_names = [bucket.name for bucket in self.buckets]
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        gc.collect()

        self.helper._create_default_bucket()
        self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)

        SimpleDataSet(self.helper, num_docs)._create_views()
        self._query_test_init(data_set)
Example #11
 def common_setup(input, testcase):
     servers = input.servers
     RemoteUtilHelper.common_basic_setup(servers)
     BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
     for server in servers:
         ClusterOperationHelper.cleanup_cluster([server])
     ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
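Since this helper takes the input and test case explicitly rather than reading them from `self`, a typical call site would be a test's own setUp. This is a hypothetical usage sketch, not taken from the source:

    # Hypothetical call site for the helper above.
    def setUp(self):
        common_setup(TestInputSingleton.input, self)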
Example #12
    def common_tearDown(servers, testcase):
        log = logger.Logger.get_logger()
        log.info(
            "==============  common_tearDown was started for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName
            )
        )
        RemoteUtilHelper.common_basic_setup(servers)

        log.info("10 seconds delay to wait for couchbase-server to start")
        time.sleep(10)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            servers, testcase, wait_time=AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME * 15, wait_if_warmup=True
        )
        try:
            rest = RestConnection(servers[0])
            buckets = rest.get_buckets()
            for bucket in buckets:
                MemcachedClientHelper.flush_bucket(servers[0], bucket.name)
        except Exception:
            pass
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        log.info(
            "==============  common_tearDown was finished for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName
            )
        )
Example #13
 def cleanup_cluster(self):
     if not "skip_cleanup" in TestInputSingleton.input.test_params:
         BucketOperationHelper.delete_all_buckets_or_assert(
             self.servers, self.testcase)
         ClusterOperationHelper.cleanup_cluster(self.servers)
         ClusterOperationHelper.wait_for_ns_servers_or_assert(
             self.servers, self.testcase)
Example #14
    def _cluster_setup(self):
        replicas = self.input.param("replicas", 1)
        keys_count = self.input.param("keys-count", 0)
        num_buckets = self.input.param("num-buckets", 1)

        bucket_name = "default"
        master = self.servers[0]
        credentials = self.input.membase_settings
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=self.master.rest_username,
                          password=self.master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        rest.reset_autofailover()
        ClusterOperationHelper.add_and_rebalance(self.servers, True)

        if num_buckets == 1:
            bucket_ram = info.memoryQuota * 2 / 3
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               replicaNumber=replicas,
                               proxyPort=info.moxi)
        else:
            created = BucketOperationHelper.create_multiple_buckets(self.master, replicas, howmany=num_buckets)
            self.assertTrue(created, "unable to create multiple buckets")

        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(self.master, bucket.name)
            self.assertTrue(ready, msg="wait_for_memcached failed")

        for bucket in buckets:
            inserted_keys_cnt = self.load_data(self.master, bucket.name, keys_count)
            log.info('inserted {0} keys'.format(inserted_keys_cnt))
Example #15
 def setUp(self):
     super(SGConfigTests, self).setUp()
     for server in self.servers:
         if self.case_number == 1:
             with open('pytests/sg/resources/gateway_config_walrus_template.json', 'r') as file:
                 filedata = file.read()
                 filedata = filedata.replace('LOCAL_IP', server.ip)
             with open('pytests/sg/resources/gateway_config_walrus.json', 'w') as file:
                 file.write(filedata)
             shell = RemoteMachineShellConnection(server)
             shell.execute_command("rm -rf {0}/tmp/*".format(self.folder_prefix))
             shell.copy_files_local_to_remote('pytests/sg/resources', '{0}/tmp'.format(self.folder_prefix))
             # will install sg only the first time
             self.install(shell)
             pid = self.is_sync_gateway_process_running(shell)
             self.assertNotEqual(pid, 0)
             exist = shell.file_exists('{0}/tmp/'.format(self.folder_prefix), 'gateway.log')
             self.assertTrue(exist)
             shell.disconnect()
     if self.case_number == 1:
         shutil.copy2('pytests/sg/resources/gateway_config_backup.json', 'pytests/sg/resources/gateway_config.json')
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
         self.cluster = Cluster()
         self.cluster.create_default_bucket(self.master, 150)
         task = self.cluster.async_create_sasl_bucket(self.master, 'test_%E-.5', 'password', 150, 1)
         task.result()
         task = self.cluster.async_create_standard_bucket(self.master, 'db', 11219, 150, 1)
         task.result()
Example #16
 def setUp(self):
     super(XDCRTests, self).setUp()
     self.bucket = Bucket()
     self._initialize_nodes()
     self.master = self.servers[0]
     for server in self.servers:
         rest = RestConnection(server)
         cluster_status = rest.cluster_status()
         self.log.info("Initial status of {0} cluster is {1}".format(server.ip,
                                                                     cluster_status['nodes'][0]['status']))
         while cluster_status['nodes'][0]['status'] == 'warmup':
             self.log.info("Waiting for cluster to become healthy")
             self.sleep(5)
             cluster_status = rest.cluster_status()
         self.log.info("current status of {0}  is {1}".format(server.ip,
                                                              cluster_status['nodes'][0]['status']))
     # Delete all buckets before creating new buckets
     self.log.info("Deleting all existing buckets")
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     self.log.info("Creating new buckets")
     src_bucket = self.input.param('src_bucket', self.bucket)
     dest_bucket = self.input.param('dest_bucket', self.bucket)
     if src_bucket:
         RestConnection(self.servers[0]).create_bucket(bucket='default', ramQuotaMB=500)
     if dest_bucket:
         RestConnection(self.servers[1]).create_bucket(bucket='default', ramQuotaMB=500)
     helper = BaseHelper(self)
     helper.login()
Example #17
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'

        serverInfo = self.master
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        self._bucket_port = info.moxi
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.memoryQuota * 2 / 3

        # Add built-in user
        testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******'}]
        RbacBase().create_user_source(testuser, 'builtin', self.master)
        time.sleep(10)

        # Assign user to role
        role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
        RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
        time.sleep(10)

        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=info.memcached)
        msg = 'create_bucket succeeded but bucket "default" does not exist'
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self._bucket_name, rest), msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
Example #18
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.cluster = Cluster()
        self.servers = self.input.servers
        self.buckets = {}

        self.default_bucket = self.input.param("default_bucket", True)
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)

        if not self.input.param("skip_cleanup", False):
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
            ClusterOperationHelper.wait_for_ns_servers_or_assert([self.servers[0]], self)

        self.quota = self._initialize_nodes(self.cluster, self.servers)
        if self.dgm_run:
            self.quota = 256
        self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
        if self.default_bucket:
            self.cluster.create_default_bucket(self.servers[0], self.bucket_size, self.num_replicas)
            self.buckets['default'] = {1 : KVStore()}
        self._create_sasl_buckets(self.servers[0], self.sasl_buckets)
Example #19
 def reset(self):
     self.log.info(
         "==============  SwapRebalanceBase cleanup was started for test #{0} {1} ==============".format(
             self.case_number, self._testMethodName
         )
     )
     self.log.info("Stopping load in Teardown")
     SwapRebalanceBase.stop_load(self.loaders)
     for server in self.servers:
         rest = RestConnection(server)
         if rest._rebalance_progress_status() == "running":
             self.log.warning("rebalancing is still running, test should be verified")
             stopped = rest.stop_rebalance()
             self.assertTrue(stopped, msg="unable to stop rebalance")
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     for server in self.servers:
         ClusterOperationHelper.cleanup_cluster([server])
         if server.data_path:
             rest = RestConnection(server)
             rest.set_data_path(data_path=server.data_path)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
     self.log.info(
         "==============  SwapRebalanceBase cleanup was finished for test #{0} {1} ==============".format(
             self.case_number, self._testMethodName
         )
     )
Example #20
 def tearDown(self):
     if hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
             and 'stop-on-failure' in TestInputSingleton.input.test_params and \
             str(TestInputSingleton.input.test_params['stop-on-failure']).lower() == 'true':
         # supported starting with python2.7
         log.warn("CLEANUP WAS SKIPPED")
         self.cluster.shutdown(force=True)
         self._log_finish(self)
     else:
         try:
             self.log.info("==============  tearDown was started for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
             RemoteUtilHelper.common_basic_setup(self.servers)
             BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
             for node in self.servers:
                 master = node
                 try:
                     ClusterOperationHelper.cleanup_cluster(self.servers,
                                                            master=master)
                 except:
                     continue
             self.log.info("==============  tearDown was finished for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
         finally:
             super(FailoverBaseTest, self).tearDown()
Example #21
 def tearDown(self):
     try:
         if self.driver:
             path_screen = self.input.ui_conf['screenshots'] or 'logs/screens'
             full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen)
             self.log.info('screenshot is available: %s' % full_path)
             if not os.path.exists(path_screen):
                 os.mkdir(path_screen)
             self.driver.get_screenshot_as_file(os.path.abspath(full_path))
         rest = RestConnection(self.servers[0])
         if rest._rebalance_progress_status() == 'running':
             stopped = rest.stop_rebalance()
             self.assertTrue(stopped, msg="unable to stop rebalance")
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
         for server in self.servers:
             ClusterOperationHelper.cleanup_cluster([server])
         ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
         if self.driver:
             self.driver.close()
     finally:
         if self.driver:
             self.shell.disconnect()
         self.cluster.shutdown()
Example #22
 def test_valid_bucket_name(self, password='******'):
         tasks = []
         if self.bucket_type == 'sasl':
             self.cluster.create_sasl_bucket(self.server, self.bucket_name, password, self.num_replicas, self.bucket_size)
             self.buckets.append(Bucket(name=self.bucket_name, authType="sasl", saslPassword=password, num_replicas=self.num_replicas,
                                        bucket_size=self.bucket_size, master_id=self.server))
         elif self.bucket_type == 'standard':
             self.cluster.create_standard_bucket(self.server, self.bucket_name, STANDARD_BUCKET_PORT + 1, self.bucket_size, self.num_replicas)
             self.buckets.append(Bucket(name=self.bucket_name, authType=None, saslPassword=None, num_replicas=self.num_replicas,
                                        bucket_size=self.bucket_size, port=STANDARD_BUCKET_PORT + 1, master_id=self.server))
         elif self.bucket_type == "memcached":
             tasks.append(self.cluster.async_create_memcached_bucket(self.server, self.bucket_name, STANDARD_BUCKET_PORT + 1,
                                                                     self.bucket_size, self.num_replicas))
             self.buckets.append(Bucket(name=self.bucket_name, authType=None, saslPassword=None, num_replicas=self.num_replicas,
                                        bucket_size=self.bucket_size, port=STANDARD_BUCKET_PORT + 1 , master_id=self.server, type='memcached'))
             for task in tasks:
                 task.result()
         else:
             self.log.error('Bucket type not specified')
             return
         self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self.bucket_name, self.rest),
                          msg='failed to start up bucket with name "{0}"'.format(self.bucket_name))
         gen_load = BlobGenerator('buckettest', 'buckettest-', self.value_size, start=0, end=self.num_items)
         self._load_all_buckets(self.server, gen_load, "create", 0)
         self.cluster.bucket_delete(self.server, self.bucket_name)
         self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(self.bucket_name, self.rest, timeout_in_seconds=60),
                         msg='bucket "{0}" was not deleted even after waiting for 30 seconds'.format(self.bucket_name))
Example #23
    def tearDown(self):
#        super(Rebalance, self).tearDown()
        try:
            self.log.info("==============  XDCRbasetests stats for test #{0} {1} =============="\
                        .format(self.case_number, self._testMethodName))
            self._end_replication_flag = 1
            if hasattr(self, '_stats_thread1'): self._stats_thread1.join()
            if hasattr(self, '_stats_thread2'): self._stats_thread2.join()
            if hasattr(self, '_stats_thread3'): self._stats_thread3.join()
            if self._replication_direction_str in "bidirection":
                if hasattr(self, '_stats_thread4'): self._stats_thread4.join()
                if hasattr(self, '_stats_thread5'): self._stats_thread5.join()
                if hasattr(self, '_stats_thread6'): self._stats_thread6.join()
            if self._replication_direction_str in "bidirection":
                self.log.info("Type of run: BIDIRECTIONAL XDCR")
            else:
                self.log.info("Type of run: UNIDIRECTIONAL XDCR")
            self._print_stats(self.src_master)
            if self._replication_direction_str in "bidirection":
                self._print_stats(self.dest_master)
            self.log.info("============== = = = = = = = = END = = = = = = = = = = ==============")
            self.log.info("==============  rebalanceXDCR cleanup was started for test #{0} {1} =============="\
                    .format(self.case_number, self._testMethodName))
            for nodes in [self.src_nodes, self.dest_nodes]:
                for node in nodes:
                    BucketOperationHelper.delete_all_buckets_or_assert([node], self)
                    ClusterOperationHelper.cleanup_cluster([node], self)
                    ClusterOperationHelper.wait_for_ns_servers_or_assert([node], self)
            self.log.info("==============  rebalanceXDCR cleanup was finished for test #{0} {1} =============="\
                    .format(self.case_number, self._testMethodName))
        finally:
            self.cluster.shutdown(force=True)
            self._log_finish(self)
Example #24
 def test_backup_restore(self):
     self._load_all_buckets()
     self.shell.execute_command("rm -rf /tmp/backups")
     output, error = self.shell.execute_command("/opt/couchbase/bin/cbbackupmgr config "
                                                "--archive /tmp/backups --repo example")
     self.log.info(output)
     self.assertEquals('Backup repository `example` created successfully in archive `/tmp/backups`', output[0])
     output, error = self.shell.execute_command(
         "/opt/couchbase/bin/cbbackupmgr backup --archive /tmp/backups --repo example "
         "--cluster couchbase://127.0.0.1 --username Administrator --password password")
     self.log.info(output)
     self.assertEquals('Backup successfully completed', output[1])
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     imp_rest = RestConnection(self.master)
     info = imp_rest.get_nodes_self()
     if info.memoryQuota and int(info.memoryQuota) > 0:
         self.quota = info.memoryQuota
     bucket_params = self._create_bucket_params(server=self.master, size=250, bucket_type='ephemeral',
                                                replicas=self.num_replicas,
                                                enable_replica_index=self.enable_replica_index,
                                                eviction_policy=self.eviction_policy)
     self.cluster.create_default_bucket(bucket_params)
     output, error = self.shell.execute_command('ls /tmp/backups/example')
     output, error = self.shell.execute_command("/opt/couchbase/bin/cbbackupmgr restore --archive /tmp/backups"
                                                " --repo example --cluster couchbase://127.0.0.1 "
                                                "--username Administrator --password password --start %s" % output[
                                                    0])
     self.log.info(output)
     self.assertEquals('Restore completed successfully', output[1])
     self._verify_all_buckets(self.master)
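The whole round trip is three cbbackupmgr invocations driven through the remote shell; condensed into a hypothetical helper with exactly the flags used above (archive path, cluster address, and credentials are the example's own placeholders):

    # Condensed cbbackupmgr round trip from the example above.
    def backup_restore_roundtrip(shell):
        cmd = "/opt/couchbase/bin/cbbackupmgr"
        shell.execute_command("{0} config --archive /tmp/backups --repo example".format(cmd))
        shell.execute_command("{0} backup --archive /tmp/backups --repo example "
                              "--cluster couchbase://127.0.0.1 "
                              "--username Administrator --password password".format(cmd))
        # the restore needs the name of the backup directory just created
        backups, _ = shell.execute_command("ls /tmp/backups/example")
        shell.execute_command("{0} restore --archive /tmp/backups --repo example "
                              "--cluster couchbase://127.0.0.1 "
                              "--username Administrator --password password "
                              "--start {1}".format(cmd, backups[0]))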
Example #25
    def common_setup(self, replica):
        self._input = TestInputSingleton.input
        self._servers = self._input.servers
        first = self._servers[0]
        self.log = logger.Logger().get_logger()
        self.log.info(self._input)
        rest = RestConnection(first)
        for server in self._servers:
            RestHelper(RestConnection(server)).is_ns_server_running()

        ClusterOperationHelper.cleanup_cluster(self._servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        ClusterOperationHelper.add_all_nodes_or_assert(self._servers[0], self._servers, self._input.membase_settings, self)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        info = rest.get_nodes_self()
        bucket_ram = info.mcdMemoryReserved * 3 / 4
        rest.create_bucket(bucket="default",
                           ramQuotaMB=int(bucket_ram),
                           replicaNumber=replica,
                           proxyPort=rest.get_nodes_self().moxi)
        msg = "wait_for_memcached fails"
        ready = BucketOperationHelper.wait_for_memcached(first, "default")
        self.assertTrue(ready, msg)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(rebalanceStarted,
                        "unable to start rebalance on master node {0}".format(first.ip))
        self.log.info('started rebalance operation on master node {0}'.format(first.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        # without a bucket this seems to fail
        self.assertTrue(rebalanceSucceeded,
                        "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
        self.awareness = VBucketAwareMemcached(rest, "default")
Example #26
 def tearDown(self):
     BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
     rest = RestConnection(self.master)
     # Remove rbac user in teardown
     role_del = ['cbadminbucket']
     temp = RbacBase().remove_user_role(role_del, rest)
     self._log_finish()
Example #27
 def setUp(self):
     self.log = logger.Logger.get_logger()
     self.input = TestInputSingleton.input
     self.assertTrue(self.input, msg="input parameters missing...")
     self.servers = self.input.servers
     self.master = self.servers[0]
     rest = RestConnection(self.master)
     rest.init_cluster(username=self.master.rest_username,
                       password=self.master.rest_password)
     info = rest.get_nodes_self()
     node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
     rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
     BucketOperationHelper.delete_all_buckets_or_assert(servers=self.servers, test_case=self)
     ClusterOperationHelper.cleanup_cluster(servers=self.servers)
     credentials = self.input.membase_settings
     ClusterOperationHelper.add_all_nodes_or_assert(master=self.master, all_servers=self.servers, rest_settings=credentials, test_case=self)
     rest = RestConnection(self.master)
     nodes = rest.node_statuses()
     otpNodeIds = []
     for node in nodes:
         otpNodeIds.append(node.id)
     rebalanceStarted = rest.rebalance(otpNodeIds, [])
     self.assertTrue(rebalanceStarted,
                     "unable to start rebalance on master node {0}".format(self.master.ip))
     self.log.info('started rebalance operation on master node {0}'.format(self.master.ip))
     rebalanceSucceeded = rest.monitorRebalance()
Example #28
 def setUp(self):
     self.log = logger.Logger.get_logger()
     self.input = TestInput.TestInputSingleton.input
     self.assertTrue(self.input, msg="input parameters missing...")
     self.servers = self.input.servers
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, test_case=self)
     self._log_start()
Example #29
 def tearDown(self):
         try:
             if (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                 and TestInputSingleton.input.param("stop-on-failure", False))\
                     or self.input.param("skip_cleanup", False):
                 self.log.warn("CLEANUP WAS SKIPPED")
             else:
                 self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                       .format(self.case_number, self._testMethodName))
                 rest = RestConnection(self.master)
                 alerts = rest.get_alerts()
                 if alerts is not None and len(alerts) != 0:
                     self.log.warn("Alerts were found: {0}".format(alerts))
                 if rest._rebalance_progress_status() == 'running':
                     self.log.warning("rebalancing is still running, test should be verified")
                     stopped = rest.stop_rebalance()
                     self.assertTrue(stopped, msg="unable to stop rebalance")
                 BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
                 ClusterOperationHelper.cleanup_cluster(self.servers)
                 self.sleep(10)
                 ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
                 self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                       .format(self.case_number, self._testMethodName))
         except BaseException:
             # increase case_number to retry tearDown in setup for the next test
             self.case_number += 1000
         finally:
             # stop all existing task manager threads
             self.cluster.shutdown()
             self._log_finish(self)
Example #30
 def common_setUp(self, with_buckets):
     ClusterOperationHelper.cleanup_cluster(self.servers)
     server = self.servers[0]
     if with_buckets:
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, test_case=self)
         ok = BucketOperationHelper.create_multiple_buckets(server, 1)
         if not ok:
             self.fail("unable to create multiple buckets on this node : {0}".format(server))
Example #31
    def common_setup(input, testcase, bucket_ram_ratio=(2.8 / 3.0), replica=0):
        log = logger.Logger.get_logger()
        servers = input.servers
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
        serverInfo = servers[0]

        log.info('picking server : {0} as the master'.format(serverInfo))
        #if all nodes are on the same machine let's have the bucket_ram_ratio as bucket_ram_ratio * 1/len(servers)
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved *
                                                      node_ram_ratio))
        if "ascii" in TestInputSingleton.input.test_params\
        and TestInputSingleton.input.test_params["ascii"].lower() == "true":
            BucketOperationHelper.create_multiple_buckets(serverInfo,
                                                          replica,
                                                          node_ram_ratio *
                                                          bucket_ram_ratio,
                                                          howmany=1,
                                                          sasl=False)
        else:
            BucketOperationHelper.create_multiple_buckets(serverInfo,
                                                          replica,
                                                          node_ram_ratio *
                                                          bucket_ram_ratio,
                                                          howmany=1,
                                                          sasl=True)
        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(
                serverInfo, bucket.name)
            testcase.assertTrue(ready, "wait_for_memcached failed")
Example #32
    def _test_backup_add_restore_bucket_with_expiration_key(self, replica):
        bucket = "default"
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi, replicaNumber=replica)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        expiry = 60
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(5000)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, key)
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        client.close()
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
        ready = RebalanceHelper.wait_for_persistence(self.master, bucket, bucket_type=self.bucket_type)
        self.assertTrue(ready, "not all items persisted. see logs")
        node = RestConnection(self.master).get_nodes_self()

        output, error = self.shell.execute_command(self.perm_command)
        self.shell.log_command_output(output, error)
        backupHelper = BackupHelper(self.master, self)
        backupHelper.backup(bucket, node, self.remote_tmp_folder)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        backupHelper.restore(self.remote_tmp_folder)
        time.sleep(60)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        self.log.info('verifying that all those keys have expired...')
        for key in keys:
            try:
                client.get(key=key)
                msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
                self.fail(msg.format(expiry, key, expiry))
            except mc_bin_client.MemcachedError as error:
                self.assertEqual(error.status, 1,
                                  msg="expected error code {0} but saw error code {1}".format(1, error.status))
        client.close()
        self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
Example #33
    def test_non_default_case_sensitive_same_port(self):
        postfix = uuid.uuid4()
        name = 'uppercase_{0}'.format(postfix)
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi + 100
            rest.create_bucket(bucket=name,
                               ramQuotaMB=200,
                               proxyPort=proxyPort)
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)

            self.log.info("user should not be able to create a new bucket on a an already used port")
            name = 'UPPERCASE{0}'.format(postfix)
            try:
                rest.create_bucket(bucket=name,
                                   ramQuotaMB=200,
                                   proxyPort=proxyPort)
                self.fail('create-bucket did not throw exception while creating a new bucket on an already used port')
            #make sure it raises bucketcreateexception
            except BucketCreationException as ex:
                self.log.error(ex)
Example #34
    def test_max_buckets(self):
        log = logger.Logger.get_logger()
        serverInfo = self.servers[0]
        log.info('picking server : {0} as the master'.format(serverInfo))
        rest = RestConnection(serverInfo)
        proxyPort = rest.get_nodes_self().moxi
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_num = rest.get_internalSettings("maxBucketCount")
        bucket_ram = 100

        for i in range(bucket_num):
            bucket_name = 'max_buckets-{0}'.format(uuid.uuid4())
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               authType='sasl', proxyPort=proxyPort)
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
            self.assertTrue(ready, "wait_for_memcached failed")

        buckets = rest.get_buckets()
        if len(buckets) != bucket_num:
            msg = 'tried to create {0} buckets, only created {1}'.format(bucket_num, len(buckets))
            self.fail(msg)
        try:
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               authType='sasl', proxyPort=proxyPort)
            msg = 'bucket creation did not fail even though system was overcommited'
            self.fail(msg)
        except BucketCreationException as ex:
            self.log.info('BucketCreationException was thrown as expected when we try to create {0} buckets'.
                          format(bucket_num + 1))
        buckets = rest.get_buckets()
        if len(buckets) != bucket_num:
            msg = 'tried to create {0} buckets, only created {1}'.format(bucket_num + 1, len(buckets))
            self.fail(msg)
Example #35
    def setUp(self):
        super(BucketConfig, self).setUp()
        self.testcase = '2'
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        #self.time_synchronization = self.input.param("time_sync", "enabledWithoutDrift")
        self.lww = self.input.param("lww", True)
        self.drift = self.input.param("drift", False)
        self.bucket='bucket-1'
        self.master = self.servers[0]
        self.rest = RestConnection(self.master)
        self.cluster = Cluster()
        self.skip_rebalance = self.input.param("skip_rebalance", False)

        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        mem_quota = int(self.rest.get_nodes_self().mcdMemoryReserved *
                        node_ram_ratio)

        if not self.skip_rebalance:
            self.rest.init_cluster(self.master.rest_username,
                self.master.rest_password)
            self.rest.init_cluster_memoryQuota(self.master.rest_username,
                self.master.rest_password,
                memoryQuota=mem_quota)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
                ClusterOperationHelper.wait_for_ns_servers_or_assert(
                    [self.master], self.testcase)
            try:
                rebalanced = ClusterOperationHelper.add_and_rebalance(
                    self.servers)

            except Exception as e:
                self.fail('cluster is not rebalanced: {0}'.format(e))

        self._create_bucket(self.lww, self.drift)
Example #36
    def test_non_default_case_sensitive_different_port(self):
        postfix = uuid.uuid4()
        lowercase_name = 'uppercase_{0}'.format(postfix)
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi + 500
            rest.create_bucket(bucket=lowercase_name,
                               ramQuotaMB=200,
                               proxyPort=proxyPort)
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(lowercase_name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(lowercase_name, rest), msg=msg)

            uppercase_name = 'UPPERCASE_{0}'.format(postfix)
            try:
                rest.create_bucket(bucket=uppercase_name,
                                   ramQuotaMB=200,
                                   proxyPort=proxyPort + 1000)
                msg = "create_bucket created two buckets in different case : {0},{1}".format(lowercase_name,
                                                                                             uppercase_name)
                self.fail(msg)
            except BucketCreationException as ex:
                #check if 'default' and 'Default' buckets exist
                self.log.info('BucketCreationException was thrown as expected')
                self.log.info(ex._message)
Example #37
 def test_bucket_edit_password(self,
                               bucket_name='secretsbucket',
                               num_replicas=1,
                               bucket_size=100):
     updated_pass = "******"
     rest = RestConnection(self.master)
     for servers in self.servers:
         self.secretmgmt_base_obj.setup_pass_node(servers, self.password)
     bucket_type = self.input.param("bucket_type", 'standard')
     tasks = []
     if bucket_type == 'sasl':
         self.cluster.create_sasl_bucket(self.master, bucket_name,
                                         self.password, num_replicas,
                                         bucket_size)
         self.sleep(10)
         rest.change_bucket_props(bucket_name, saslPassword=updated_pass)
     else:
         self.log.error('Bucket type not specified')
         return
     self.assertTrue(
         BucketOperationHelper.wait_for_bucket_creation(
             bucket_name, RestConnection(self.master)),
          msg='failed to start up bucket with name "{0}"'.format(bucket_name))
     gen_load = BlobGenerator('buckettest',
                              'buckettest-',
                              self.value_size,
                              start=0,
                              end=self.num_items)
     self._load_all_buckets(self.master, gen_load, "create", 0)
     install_path = self.secretmgmt_base_obj._get_install_path(self.master)
     temp_result = self.secretmgmt_base_obj.check_config_files(
         self.master, install_path, '/config/config.dat', updated_pass)
     self.assertTrue(temp_result, "Password found in config.dat")
     temp_result = self.secretmgmt_base_obj.check_config_files(
         self.master, install_path, 'isasl.pw', updated_pass)
     self.assertTrue(temp_result, "Password found in isasl.pw")
Example #38
 def test_valid_length(self):
     max_len = 100
     name_len = self.input.param('name_length', 100)
     name = 'a' * name_len
     master = self.servers[0]
     rest = RestConnection(master)
     try:
         rest.create_bucket(bucket=name,
                            ramQuotaMB=256,
                            storageBackend=self.bucket_storage)
         if name_len <= max_len:
             msg = 'failed to start up bucket with valid length'
             self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                 name, rest),
                             msg=msg)
         else:
             self.fail('Bucket with invalid length created')
     except BucketCreationException as ex:
         self.log.error(ex)
         if name_len <= max_len:
             self.fail('could not create bucket with valid length')
         else:
             self.log.info(
                 'bucket with invalid length not created as expected')
Example #39
    def _test_backup_and_restore_bucket_overwriting_body(
            self, overwrite_flag=True):
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master,
                                            test_case=self)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.add_nodes_and_rebalance()

        client = MemcachedClientHelper.direct_client(self.master, "default")
        expiry = 2400
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, "1")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("inserted {0} keys with expiry set to {1}".format(
            len(keys), expiry))

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")

        for server in self.servers:
            shell = RemoteMachineShellConnection(server)

            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node,
                                              self.remote_tmp_folder)
            shell.disconnect()

        for key in keys:
            try:
                client.replace(key, expiry, 0, "2")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to replace key : {0} in bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("replaced {0} keys with expiry set to {1}".format(
            len(keys), expiry))

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder,
                                               overwrite_flag)
            time.sleep(10)

        self.log.info('verifying that all those keys...')
        for key in keys:
            if overwrite_flag:
                self.assertEqual("2", client.get(key=key),
                                 key + " should have value = 2")
            else:
                self.assertNotEqual("2", client.get(key=key),
                                    key + " should not have value = 2")
        self.log.info(
            "verified restored values for {0} keys (original expiry was {1})".format(
                len(keys), expiry))
Example #40
 def _create_default_bucket(self, master):
     BucketOperationHelper.create_default_buckets(servers=[master],
                                                  assert_on_test=self)
     ready = BucketOperationHelper.wait_for_memcached(master, "default")
     self.assertTrue(ready, "wait_for_memcached failed")
Example #41
    def _test_backup_add_restore_bucket_body(self, bucket,
                                             delay_after_data_load,
                                             startup_flag, single_node):
        server = self.master
        rest = RestConnection(server)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        if bucket == "default":
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        else:
            proxyPort = info.moxi + 500
            rest.create_bucket(bucket,
                               ramQuotaMB=size,
                               proxyPort=proxyPort,
                               authType="sasl",
                               saslPassword="******")

        ready = BucketOperationHelper.wait_for_memcached(server, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        if not single_node:
            self.add_nodes_and_rebalance()
        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
            servers=[self.master],
            name=bucket,
            ram_load_ratio=1,
            value_size_distribution=distribution,
            moxi=True,
            write_only=True,
            number_of_threads=2)

        if not single_node:
            rest = RestConnection(self.master)
            self.assertTrue(RebalanceHelper.wait_for_replication(
                rest.get_nodes(), timeout=180),
                            msg="replication did not complete")

        self.log.info(
            "Sleep {0} seconds after data load".format(delay_after_data_load))
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        node = RestConnection(self.master).get_nodes_self()
        if not startup_flag:
            for server in self.servers:
                shell = RemoteMachineShellConnection(server)
                shell.stop_membase()
                shell.stop_couchbase()
                shell.disconnect()

        output, error = self.shell.execute_command(self.perm_command)
        self.shell.log_command_output(output, error)

        #now let's back up
        BackupHelper(self.master, self).backup(bucket, node,
                                               self.remote_tmp_folder)

        if not startup_flag:
            for server in self.servers:
                shell = RemoteMachineShellConnection(server)
                shell.start_membase()
                shell.start_couchbase()
                RestHelper(RestConnection(server)).is_ns_server_running()
                shell.disconnect()

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket,
                                                      self)

        if bucket == "default":
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        else:
            proxyPort = info.moxi + 500
            rest.create_bucket(bucket,
                               ramQuotaMB=size,
                               proxyPort=proxyPort,
                               authType="sasl",
                               saslPassword="******")
        BucketOperationHelper.wait_for_memcached(self.master, bucket)

        if bucket == "default":
            BackupHelper(self.master,
                         self).restore(backup_location=self.remote_tmp_folder,
                                       moxi_port=info.moxi)
        else:
            BackupHelper(self.master,
                         self).restore(backup_location=self.remote_tmp_folder,
                                       moxi_port=info.moxi,
                                       username=bucket,
                                       password='******')

        keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(
            inserted_keys, self.master, bucket, self, concurrency=4)
        self.assertTrue(keys_exist, msg="unable to verify keys after restore")
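
A minimal driver sketch for the body above, assuming it lives in the same test class; the test name, bucket name, and parameter values are illustrative only:

def test_backup_add_restore_sasl_bucket(self):
    # hypothetical concrete test: exercises the body with a sasl bucket,
    # no post-load delay, servers left running, single-node cluster
    self._test_backup_add_restore_bucket_body(bucket="bucket-0",
                                              delay_after_data_load=0,
                                              startup_flag=True,
                                              single_node=True)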
Example #42
 def test_negative_auto_retry_of_failed_rebalance_where_rebalance_will_not_be_cancelled(
         self):
     during_rebalance_failure = self.input.param("during_rebalance_failure",
                                                 "stop_server")
     post_failure_operation = self.input.param("post_failure_operation",
                                               "create_delete_buckets")
     zone_name = "Group_{0}_{1}".format(random.randint(1, 1000000000),
                                        self._testMethodName)
     zone_name = zone_name[0:60]
     default_zone = "Group 1"
     moved_node = []
     moved_node.append(self.servers[1].ip)
     try:
         operation = self._rebalance_operation(self.rebalance_operation)
         self.sleep(self.sleep_time)
         # induce the failure during the rebalance
         self._induce_error(during_rebalance_failure)
         operation.result()
     except Exception as e:
         self.log.info("Rebalance failed with : {0}".format(str(e)))
         # Recover from the error
         self._recover_from_error(during_rebalance_failure)
         result = json.loads(self.rest.get_pending_rebalance_info())
         self.log.info(result)
         retry_rebalance = result["retry_rebalance"]
         if retry_rebalance != "pending":
             self.fail("Auto-retry of failed rebalance is not triggered")
         if post_failure_operation == "create_delete_buckets":
             # delete buckets and create new one
             BucketOperationHelper.delete_all_buckets_or_assert(
                 servers=self.servers, test_case=self)
             self.sleep(self.sleep_time)
             BucketOperationHelper.create_bucket(self.master,
                                                 test_case=self)
         elif post_failure_operation == "change_replica_count":
             # change replica count
             self.log.info("Changing replica count of buckets")
             for bucket in self.buckets:
                 self.rest.change_bucket_props(bucket, replicaNumber=2)
         elif post_failure_operation == "change_server_group":
             # change server group
             self.log.info("Creating new zone " + zone_name)
             self.rest.add_zone(zone_name)
             self.log.info("Moving {0} to new zone {1}".format(
                 moved_node, zone_name))
             status = self.rest.shuffle_nodes_in_zones(
                 moved_node, default_zone, zone_name)
         else:
             self.fail("Invalid post_failure_operation option")
          # With these changes made while the retry is pending, the retry will be attempted but is expected to fail
         try:
             self.check_retry_rebalance_succeeded()
         except Exception as e:
             self.log.info(e)
             if "Retrying of rebalance still did not help. All the retries exhausted" not in str(
                     e):
                 self.fail(
                     "Auto retry of failed rebalance succeeded when it was expected to fail"
                 )
     else:
         self.fail(
             "Rebalance did not fail as expected. Hence could not validate auto-retry feature.."
         )
     finally:
         if post_failure_operation == "change_server_group":
             status = self.rest.shuffle_nodes_in_zones(
                 moved_node, zone_name, default_zone)
             self.log.info(
                 "Shuffle the node back to default group . Status : {0}".
                 format(status))
             self.sleep(self.sleep_time)
             self.log.info("Deleting new zone " + zone_name)
             try:
                 self.rest.delete_zone(zone_name)
              except Exception:
                  self.log.info("Errors in deleting zone")
         if self.disable_auto_failover:
             self.rest.update_autofailover_settings(True, 120)
         self.start_server(self.servers[1])
         self.stop_firewall_on_node(self.servers[1])
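
The pending-retry check above boils down to one REST call; a minimal helper sketch of the same pattern (the helper name is hypothetical, rest is a RestConnection as in the test):

import json

def is_retry_pending(rest):
    # The pending-rebalance info is a JSON document whose "retry_rebalance"
    # field reads "pending" while an auto-retry is queued, as checked above.
    info = json.loads(rest.get_pending_rebalance_info())
    return info.get("retry_rebalance") == "pending"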
Example #43
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             initialize_cluster=False,
                             create_buckets=False,
                             insert_data=False):
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        save_upgrade_config = False
        if initial_version.startswith(
                "1.7") and input.test_params['version'].startswith("1.8"):
            save_upgrade_config = True
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        if initial_version.startswith("1.6") or initial_version.startswith(
                "1.7"):
            product = 'membase-server-enterprise'
        else:
            product = 'couchbase-server-enterprise'
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        # check to see if we are installing from latestbuilds or releases
        # note: for newer releases (1.8.0) even release versions can have the
        #  form 1.8.0r-55
        if re.search('r', initial_version):
            older_build = BuildQuery().find_membase_build(
                builds,
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon)
        else:
            older_build = BuildQuery().find_membase_release_build(
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon)
        remote.stop_membase()
        remote.stop_couchbase()
        remote.download_build(older_build)
        # now install the older build
        remote.membase_install(older_build)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster_port(rest_settings.rest_username,
                               rest_settings.rest_password)
        bucket_data = {}
        if initialize_cluster:
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if create_buckets:
                _create_load_multiple_bucket(self,
                                             server,
                                             bucket_data,
                                             howmany=2)
        version = input.test_params['version']

        appropriate_build = _get_build(servers[0],
                                       version,
                                       is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url,
                        msg="unable to find build {0}".format(version))

        remote.download_build(appropriate_build)
        remote.membase_upgrade(appropriate_build,
                               save_upgrade_config=save_upgrade_config)
        remote.disconnect()
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

        pools_info = rest.get_pools_info()

        rest.init_cluster_port(rest_settings.rest_username,
                               rest_settings.rest_password)
        time.sleep(TIMEOUT_SECS)
        #verify admin_creds still set

        self.assertEqual(pools_info['implementationVersion'],
                         appropriate_build.product_version)
        if initialize_cluster:
            #TODO: how can i verify that the cluster init config is preserved
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                    'bucket-0', rest),
                                msg="bucket 'bucket-0' does not exist..")
            if insert_data:
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], server,
                        bucket.name, self)
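
The release-vs-latestbuilds branch above hinges on a bare substring check; a minimal sketch of that predicate (the helper name is hypothetical):

import re

def is_latestbuilds_version(version):
    # Mirrors the branch above: versions such as "1.8.0r-55" (containing 'r')
    # are fetched from latestbuilds; plain releases such as "1.6.5.3" are not.
    return re.search('r', version) is not None

assert is_latestbuilds_version("1.8.0r-55")
assert not is_latestbuilds_version("1.6.5.3")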
Example #44
    def _test_backup_and_restore_from_to_different_buckets(self):
        bucket_before_backup = "bucket_before_backup"
        bucket_after_backup = "bucket_after_backup"
        BucketOperationHelper.create_bucket(serverInfo=self.master,
                                            name=bucket_before_backup,
                                            port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(
            self.master, bucket_before_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        self.add_nodes_and_rebalance()

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
            servers=[self.master],
            name=bucket_before_backup,
            ram_load_ratio=20,
            value_size_distribution=distribution,
            write_only=True,
            moxi=True,
            number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master,
                                                      bucket_before_backup,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master,
                                                      bucket_before_backup,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")

        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket_before_backup, node,
                                              self.remote_tmp_folder)
            shell.disconnect()

        BucketOperationHelper.delete_bucket_or_assert(self.master,
                                                      bucket_before_backup,
                                                      self)
        BucketOperationHelper.create_bucket(serverInfo=self.master,
                                            name=bucket_after_backup,
                                            port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(
            self.master, bucket_after_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder,
                                               moxi_port=11212)
            time.sleep(10)

        ready = RebalanceHelper.wait_for_stats_on_all(self.master,
                                                      bucket_after_backup,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master,
                                                      bucket_after_backup,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        self.assertTrue(
            BucketOperationHelper.verify_data(self.master,
                                              inserted_keys,
                                              False,
                                              False,
                                              11212,
                                              debug=False,
                                              bucket=bucket_after_backup),
            "Missing keys")
Example #45
    def common_test_body(self, keys_count, replica, load_ratio,
                         failover_reason):
        log = logger.Logger.get_logger()
        log.info("keys_count : {0}".format(keys_count))
        log.info("replica : {0}".format(replica))
        log.info("load_ratio : {0}".format(load_ratio))
        log.info("failover_reason : {0}".format(failover_reason))
        master = self._servers[0]
        log.info('picking server : {0} as the master'.format(master))
        rest = RestConnection(master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=master.rest_username,
                          password=master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.memoryQuota * 2 / 3
        bucket = 'default'
        rest.create_bucket(bucket=bucket,
                           ramQuotaMB=bucket_ram,
                           replicaNumber=replica,
                           proxyPort=info.moxi)
        ready = BucketOperationHelper.wait_for_memcached(master, bucket)
        self.assertTrue(ready, "wait_for_memcached_failed")
        credentials = self._input.membase_settings

        ClusterOperationHelper.add_all_nodes_or_assert(master, self._servers,
                                                       credentials, self)
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
        msg = "rebalance failed after adding these nodes {0}".format(nodes)
        self.assertTrue(rest.monitorRebalance(), msg=msg)

        inserted_keys = FailoverBaseTest.load_data(master, bucket, keys_count,
                                                   load_ratio)
        inserted_count = len(inserted_keys)
        log.info('inserted {0} keys'.format(inserted_count))

        nodes = rest.node_statuses()
        while (len(nodes) - replica) > 1:
            final_replication_state = RestHelper(rest).wait_for_replication(
                900)
            msg = "replication state after waiting for up to 15 minutes : {0}"
            self.log.info(msg.format(final_replication_state))
            chosen = RebalanceHelper.pick_nodes(master, howmany=replica)
            for node in chosen:
                # induce the chosen failure on this node
                if failover_reason == 'stop_server':
                    self.stop_server(node)
                    log.info(
                        "waiting for membase-server to shut down")
                    #wait for 5 minutes until node is down
                    self.assertTrue(
                        RestHelper(rest).wait_for_node_status(
                            node, "unhealthy", 300),
                        msg=
                        "node status is not unhealthy even after waiting for 5 minutes"
                    )
                elif failover_reason == "firewall":
                    RemoteUtilHelper.enable_firewall(
                        self._servers, node, bidirectional=self.bidirectional)
                    self.assertTrue(
                        RestHelper(rest).wait_for_node_status(
                            node, "unhealthy", 300),
                        msg=
                        "node status is not unhealthy even after waiting for 5 minutes"
                    )

                failed_over = rest.fail_over(node.id)
                if not failed_over:
                    self.log.info(
                        "unable to failover the node the first time. try again in 75 seconds.."
                    )
                    # try again after 75 seconds
                    time.sleep(75)
                    failed_over = rest.fail_over(node.id)
                self.assertTrue(
                    failed_over, "unable to failover node after {0}".format(
                        failover_reason))
                log.info("failed over node : {0}".format(node.id))
                self._failed_nodes.append(node.ip)

            log.info(
                "10 seconds sleep after failover before invoking rebalance...")
            time.sleep(10)
            rest.rebalance(otpNodes=[node.id for node in nodes],
                           ejectedNodes=[node.id for node in chosen])
            msg = "rebalance failed while removing failover nodes {0}".format(
                chosen)
            self.assertTrue(rest.monitorRebalance(stop_if_loop=True), msg=msg)
            FailoverBaseTest.replication_verification(master, bucket, replica,
                                                      inserted_count, self)

            nodes = rest.node_statuses()
        FailoverBaseTest.verify_data(master, inserted_keys, bucket, self)
Example #46
    def common_setup(self):
        self.cluster_helper = Cluster()
        self.log = logger.Logger.get_logger()
        self.cluster_run = False
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        if len(set([server.ip for server in self.servers])) == 1:
            ip = rest.get_nodes_self().ip
            for server in self.servers:
                server.ip = ip
            self.cluster_run = True
        self.case_number = self.input.param("case_number", 0)
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 1000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param(
            "swap-orchestrator", False)
        self.do_access = self.input.param("do-access", True)
        self.load_started = False
        self.loaders = []
        try:
            # Clear the state from Previous invalid run
            if rest._rebalance_progress_status() == 'running':
                self.log.warning(
                    "rebalancing is still running, previous test should be verified"
                )
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            self.log.info("==============  SwapRebalanceBase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
            SwapRebalanceBase.reset(self)

            # Make sure the test is setup correctly
            min_servers = int(self.num_initial_servers) + int(self.num_swap)
            msg = "minimum {0} nodes required for running swap rebalance"
            self.assertTrue(len(self.servers) >= min_servers,
                            msg=msg.format(min_servers))

            self.log.info(
                'picking server : {0} as the master'.format(serverInfo))
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
                self.servers)
            info = rest.get_nodes_self()
            rest.init_cluster(username=serverInfo.rest_username,
                              password=serverInfo.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
            SwapRebalanceBase.enable_diag_eval_on_non_local_hosts(
                self, serverInfo)
            # Add built-in user
            testuser = [{
                'id': 'cbadminbucket',
                'name': 'cbadminbucket',
                'password': '******'
            }]
            RbacBase().create_user_source(testuser, 'builtin', self.servers[0])

            # Assign user to role
            role_list = [{
                'id': 'cbadminbucket',
                'name': 'cbadminbucket',
                'roles': 'admin'
            }]
            RbacBase().add_user_role(role_list,
                                     RestConnection(self.servers[0]),
                                     'builtin')

            if self.num_buckets > 10:
                BaseTestCase.change_max_buckets(self, self.num_buckets)
            self.log.info(
                "==============  SwapRebalanceBase setup was finished for test #{0} {1} =============="
                .format(self.case_number, self._testMethodName))
            SwapRebalanceBase._log_start(self)
        except Exception as e:
            self.cluster_helper.shutdown()
            self.fail(e)
Example #47
 def setUp(self):
     self.log = logger.Logger.get_logger()
     self.input = TestInputSingleton.input
     self.servers = self.input.servers
     self.autocompaction_value = TestInputSingleton.input.param("autocompaction_value", 0)
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
Example #48
 def _create_bucket(self, bucketname):
     self.rest.create_bucket(bucket=bucketname, ramQuotaMB=100, authType="sasl",
                             saslPassword="******")
     ready = BucketOperationHelper.wait_for_memcached(self.master, bucketname)
     self.assertTrue(ready, msg="wait_for_memcached failed")
Example #49
        if "bucket_quota" in params:
            bucket_quota = params["bucket_quota"]
        if "moxi" in params:
            if params["moxi"].lower() == "true":
                moxi = True
        if "get" in params:
            if params["get"].lower() == "true":
                get = True
        if "prefix" in params:
            prefix = params["prefix"]
        if "delete" in params:
            if params["delete"].lower() == "true":
                delete = True

        if delete:
            BucketOperationHelper.delete_all_buckets_or_assert([server], None)

        create_buckets(server, count, prefix, bucket_quota)
        if run_load:
            rest = RestConnection(server)
            buckets = rest.get_buckets()
            threads = []
            for bucket in buckets:
                t = Thread(target=load_buckets,
                           args=(server, bucket.name, get, no_threads, moxi))
                t.start()
                threads.append(t)

            for t in threads:
                t.join()
    except Exception as ex:
Example #50
 def backup_reset_clusters(self, servers):
     BucketOperationHelper.delete_all_buckets_or_assert(servers, self)
     ClusterOperationHelper.cleanup_cluster(servers, master=servers[0])
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, self)
Example #51
 def verify_loaded_data(self, master, bucket, inserted_keys):
     keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(
         inserted_keys, master, bucket, self, concurrency=4)
     self.assertTrue(keys_exist, msg="unable to verify keys after restore")
Example #52
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'

        serverInfo = self.master

        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        self._bucket_port = info.moxi
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.memoryQuota * 2 // 3

        # Add built-in user
        testuser = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'password': '******'
        }]
        RbacBase().create_user_source(testuser, 'builtin', self.master)

        # Assign user to role
        role_list = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'roles': 'admin'
        }]
        RbacBase().add_user_role(role_list, RestConnection(self.master),
                                 'builtin')

        self.log.info("-->create_bucket: {},{},{}".format(
            self._bucket_name, bucket_ram, info.memcached))
        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=info.memcached)

        msg = 'create_bucket succeeded but bucket "default" does not exist'

        if (testconstants.TESTRUNNER_CLIENT in list(os.environ.keys())
            ) and os.environ[
                testconstants.TESTRUNNER_CLIENT] == testconstants.PYTHON_SDK:
            self.client = SDKSmartClient(
                serverInfo,
                self._bucket_name,
                compression=TestInputSingleton.input.param(
                    "sdk_compression", True))
        else:
            self.client = MemcachedClientHelper.direct_client(
                serverInfo, self._bucket_name)

        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
            self._bucket_name, rest),
                        msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(
            serverInfo, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
Example #53
    def _test_cluster_topology_change_body(self):
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        self.add_nodes_and_rebalance()

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}

        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
            servers=[self.master],
            ram_load_ratio=1,
            value_size_distribution=distribution,
            moxi=True,
            write_only=True,
            number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")

        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node,
                                              self.remote_tmp_folder)
            shell.disconnect()

        ClusterOperationHelper.cleanup_cluster(self.servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)

        servers = []
        for i in range(0, len(self.servers) - 1):
            servers.append(self.servers[i])

        self.add_node_and_rebalance(servers[0], servers)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket,
                                                      self)
        BucketOperationHelper.create_bucket(serverInfo=self.master,
                                            test_case=self)

        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        BucketOperationHelper.verify_data(self.master, inserted_keys, False,
                                          False, 11210, self)
Example #54
 def repetitive_create_delete(self):
     self.repetitions = self.input.param("repetition_count", 1)
     self.bufferspace = self.input.param("bufferspace", 6000000)
     # the first front end load
     self._load_all_buckets(self.master,
                            self.gen_create,
                            "create",
                            0,
                            batch_size=10000,
                            pause_secs=5,
                            timeout_secs=100)
     self._wait_for_stats_all_buckets(self.servers)
     rest = RestConnection(self.servers[0])
     max_data_sizes = {}
     initial_memory_usage = {}
     self.sleep(30)
     for bucket in self.buckets:
         max_data_sizes[bucket.name] = rest.fetch_bucket_stats(
             bucket=bucket.name)["op"]["samples"]["ep_max_size"][-1]
         self.log.info("Initial max_data_size of bucket '{0}': {1}".format(
             bucket.name, max_data_sizes[bucket.name]))
         initial_memory_usage[bucket.name] = \
             rest.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["mem_used"][-1]
         self.log.info(
             "initial memory consumption of bucket '{0}' with load: {1}".
             format(bucket.name, initial_memory_usage[bucket.name]))
     mem_usage = {}
     self.sleep(10)
     # the repetitions
     for i in range(0, self.repetitions):
         BucketOperationHelper.delete_all_buckets_or_assert(
             self.servers, self)
         del self.buckets[:]
         self.log.info('About to create the buckets')
         self._bucket_creation()
         self.log.info('Done bucket creation, about to load them')
         self._load_all_buckets(self.master,
                                self.gen_create,
                                "create",
                                0,
                                batch_size=10000,
                                pause_secs=5,
                                timeout_secs=100)
         self.log.info('Buckets are loaded, waiting for stats')
         self._wait_for_stats_all_buckets(self.servers)
          self.log.info('Have the stats, sleeping for 60 seconds')
          self.sleep(60)
         for bucket in self.buckets:
             mem_usage[bucket.name] = rest.fetch_bucket_stats(
                 bucket.name)["op"]["samples"]["mem_used"][-1]
             self.log.info("Memory used after attempt {0} = {1}, Difference from initial snapshot: {2}" \
                           .format(i + 1, mem_usage[bucket.name],
                                   (mem_usage[bucket.name] - initial_memory_usage[bucket.name])))
         self.sleep(10)
     if (self.repetitions > 0):
         self.log.info(
             "After {0} repetitive deletion-creation-load of the buckets, the memory consumption difference is .." \
                 .format(self.repetitions))
         for bucket in self.buckets:
             self.log.info("{0} :: Initial: {1} :: Now: {2} :: Difference: {3}" \
                           .format(bucket.name, initial_memory_usage[bucket.name], mem_usage[bucket.name],
                                   (mem_usage[bucket.name] - initial_memory_usage[bucket.name])))
             msg = "Memory used now, much greater than initial usage!"
             assert mem_usage[bucket.name] <= initial_memory_usage[
                 bucket.name] + self.bufferspace, msg
     else:
         self.log.info(
             "Verification skipped, as there weren't any repetitions..")
Example #55
    def _test_delete_key_and_backup_and_restore_body(self):
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master,
                                            name=bucket,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        self.add_nodes_and_rebalance()

        client = MemcachedClientHelper.direct_client(self.master, "default")
        expiry = 2400
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, "1")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("inserted {0} keys with expiry set to {1}".format(
            len(keys), expiry))

        client.delete(keys[0])

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")

        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node,
                                              self.remote_tmp_folder)
            shell.disconnect()

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        self.log.info('verifying that all those keys...')
        missing_keys = []
        verify_keys = []
        for key in keys:
            vBucketId = crc32.crc32_hash(key) & 1023  # or & 0x3FF
            client.vbucketId = vBucketId
            if key == keys[0]:
                missing_keys.append(key)
            else:
                verify_keys.append(key)

        self.assertTrue(
            BucketOperationHelper.keys_dont_exist(self.master, missing_keys,
                                                  self),
            "deleted keys still exist")
        self.assertTrue(
            BucketOperationHelper.verify_data(self.master, verify_keys, False,
                                              False, 11210, self),
            "Missing keys")
Example #56
 def tearDown(self):
     BucketOperationHelper.delete_all_buckets_or_assert(
         servers=self.servers, test_case=self)
     self._log_finish()
Example #57
    def test_backup_upgrade_restore_default(self):
        if len(self.servers) < 2:
            self.log.error("At least 2 servers required for this test ..")
            return
        original_set = copy.copy(self.servers)
        worker = self.servers[len(self.servers) - 1]
        self.servers = self.servers[:len(self.servers) - 1]
        shell = RemoteMachineShellConnection(self.master)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        fin = o[0]
        shell.disconnect()
        initial_version = self.input.param("initial_version", fin)
        final_version = self.input.param("final_version", fin)
        if initial_version == final_version:
            self.log.error("Same initial and final versions ..")
            return
        if not final_version.startswith('2.0'):
            self.log.error("Upgrade test not set to run from 1.8.1 -> 2.0 ..")
            return
        builds, changes = BuildQuery().get_all_builds()
        product = 'couchbase-server-enterprise'
        # Case: the worker isn't running 2.0+ yet
        worker_flag = 0
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        temp = o[0]
        if not temp.startswith('2.0'):
            worker_flag = 1
        if worker_flag == 1:
            self.log.info(
                "Loading version {0} on worker.. ".format(final_version))
            remote = RemoteMachineShellConnection(worker)
            info = remote.extract_remote_info()
            older_build = BuildQuery().find_build(builds, product,
                                                  info.deliverable_type,
                                                  info.architecture_type,
                                                  final_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(older_build)
            remote.install_server(older_build)
            remote.disconnect()

        remote_tmp = "{1}/{0}".format("backup", "/root")
        perm_comm = "mkdir -p {0}".format(remote_tmp)
        if not initial_version == fin:
            for server in self.servers:
                remote = RemoteMachineShellConnection(server)
                info = remote.extract_remote_info()
                self.log.info(
                    "Loading version ..  {0}".format(initial_version))
                older_build = BuildQuery().find_build(builds, product,
                                                      info.deliverable_type,
                                                      info.architecture_type,
                                                      initial_version)
                remote.stop_couchbase()
                remote.couchbase_uninstall()
                remote.download_build(older_build)
                remote.install_server(older_build)
                rest = RestConnection(server)
                RestHelper(rest).is_ns_server_running(
                    testconstants.NS_SERVER_TIMEOUT)
                rest.init_cluster(server.rest_username, server.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

        self.common_setUp()
        bucket = "default"
        if len(self.servers) > 1:
            self.add_nodes_and_rebalance()
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached_failed")
        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
            servers=[self.master],
            name=bucket,
            ram_load_ratio=0.5,
            value_size_distribution=distribution,
            moxi=True,
            write_only=True,
            delete_ratio=0.1,
            number_of_threads=2)
        if len(self.servers) > 1:
            rest = RestConnection(self.master)
            self.assertTrue(RebalanceHelper.wait_for_replication(
                rest.get_nodes(), timeout=180),
                            msg="replication did not complete")

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        node = RestConnection(self.master).get_nodes_self()
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command(perm_comm)
        shell.log_command_output(o, r)
        shell.disconnect()

        #Backup
        #BackupHelper(self.master, self).backup(bucket, node, remote_tmp)
        shell = RemoteMachineShellConnection(worker)
        shell.execute_command(
            "/opt/couchbase/bin/cbbackup http://{0}:{1} {2}".format(
                self.master.ip, self.master.port, remote_tmp))
        shell.disconnect()
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket,
                                                      self)
        time.sleep(30)

        #Upgrade
        for server in self.servers:
            self.log.info(
                "Upgrading to current version {0}".format(final_version))
            remote = RemoteMachineShellConnection(server)
            info = remote.extract_remote_info()
            new_build = BuildQuery().find_build(builds, product,
                                                info.deliverable_type,
                                                info.architecture_type,
                                                final_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(new_build)
            remote.install_server(new_build)
            rest = RestConnection(server)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster(server.rest_username, server.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()
        time.sleep(30)

        #Restore
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        #BackupHelper(self.master, self).restore(backup_location=remote_tmp, moxi_port=info.moxi)
        shell = RemoteMachineShellConnection(worker)
        shell.execute_command(
            "/opt/couchbase/bin/cbrestore {2} http://{0}:{1} -b {3}".format(
                self.master.ip, self.master.port, remote_tmp, bucket))
        shell.disconnect()
        time.sleep(60)
        keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(
            inserted_keys, self.master, bucket, self, concurrency=4)
        self.assertTrue(keys_exist, msg="unable to verify keys after restore")
        time.sleep(30)
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket,
                                                      self)
        rest = RestConnection(self.master)
        helper = RestHelper(rest)
        nodes = rest.node_statuses()
        master_id = rest.get_nodes_self().id
        if len(self.servers) > 1:
            removed = helper.remove_nodes(
                knownNodes=[node.id for node in nodes],
                ejectedNodes=[
                    node.id for node in nodes if node.id != master_id
                ],
                wait_for_rebalance=True)

        shell = RemoteMachineShellConnection(worker)
        shell.remove_directory(remote_tmp)
        shell.disconnect()

        self.servers = copy.copy(original_set)
        if initial_version == fin:
            builds, changes = BuildQuery().get_all_builds()
            for server in self.servers:
                remote = RemoteMachineShellConnection(server)
                info = remote.extract_remote_info()
                self.log.info(
                    "Loading version ..  {0}".format(initial_version))
                older_build = BuildQuery().find_build(builds, product,
                                                      info.deliverable_type,
                                                      info.architecture_type,
                                                      initial_version)
                remote.stop_couchbase()
                remote.couchbase_uninstall()
                remote.download_build(older_build)
                remote.install_server(older_build)
                rest = RestConnection(server)
                RestHelper(rest).is_ns_server_running(
                    testconstants.NS_SERVER_TIMEOUT)
                rest.init_cluster(server.rest_username, server.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()
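
The raw cbbackup/cbrestore invocations above can be factored into small builders; a sketch using the exact command lines from the test (the helper names are hypothetical):

def cbbackup_cmd(host, port, backup_dir):
    # Same command line the test runs on the worker node.
    return "/opt/couchbase/bin/cbbackup http://{0}:{1} {2}".format(
        host, port, backup_dir)

def cbrestore_cmd(host, port, backup_dir, bucket):
    # Same restore invocation, targeting a single bucket with -b.
    return "/opt/couchbase/bin/cbrestore {2} http://{0}:{1} -b {3}".format(
        host, port, backup_dir, bucket)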
Example #58
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             create_buckets=False,
                             insert_data=False,
                             start_upgraded_first=True,
                             load_ratio=-1,
                             roll_upgrade=False,
                             upgrade_path=[],
                             do_new_rest=False):
        node_upgrade_path = []
        node_upgrade_path.extend(upgrade_path)
        #then start them in whatever order you want
        inserted_keys = []
        log = logger.Logger.get_logger()
        if roll_upgrade:
            log.info("performing an online upgrade")
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        save_upgrade_config = False
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        if initial_version.startswith("1.6") or initial_version.startswith(
                "1.7"):
            product = 'membase-server-enterprise'
        else:
            product = 'couchbase-server-enterprise'
        # install older build on all nodes
        for server in servers:
            remote = RemoteMachineShellConnection(server)
            rest = RestConnection(server)
            info = remote.extract_remote_info()
            # check to see if we are installing from latestbuilds or releases
            # note: for newer releases (1.8.0) even release versions can have the
            #  form 1.8.0r-55
            if re.search('r', initial_version):
                builds, changes = BuildQuery().get_all_builds()
                older_build = BuildQuery().find_membase_build(
                    builds,
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=initial_version,
                    product=product,
                    is_amazon=is_amazon)

            else:
                older_build = BuildQuery().find_membase_release_build(
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=initial_version,
                    product=product,
                    is_amazon=is_amazon)

            remote.membase_uninstall()
            remote.couchbase_uninstall()
            remote.stop_membase()
            remote.stop_couchbase()
            remote.download_build(older_build)
            # now install the older build
            remote.membase_install(older_build)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster_port(rest_settings.rest_username,
                                   rest_settings.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()

        bucket_data = {}
        master = servers[0]
        if create_buckets:
            # create buckets and wait for them to come up
            # TODO: make the bucket port configurable as a test parameter

            self._create_default_bucket(master)
            inserted_keys = self._load_data(master, load_ratio)
            _create_load_multiple_bucket(self, master, bucket_data, howmany=2)

        # cluster all the nodes together
        ClusterOperationHelper.add_all_nodes_or_assert(master, servers,
                                                       rest_settings, self)
        rest = RestConnection(master)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(
            rebalanceStarted,
            "unable to start rebalance on master node {0}".format(master.ip))
        log.info('started rebalance operation on master node {0}'.format(
            master.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        self.assertTrue(
            rebalanceSucceeded,
            "rebalance operation for nodes: {0} was not successful".format(
                otpNodeIds))

        if initial_version == "1.7.0" or initial_version == "1.7.1":
            self._save_config(rest_settings, master)

        input_version = input.test_params['version']
        node_upgrade_path.append(input_version)
        current_version = initial_version
        previous_version = current_version
        # offline (non-rolling) upgrade path
        log.info("Upgrade path: {0} -> {1}".format(initial_version,
                                                   node_upgrade_path))
        log.info("List of servers {0}".format(servers))
        if not roll_upgrade:
            for version in node_upgrade_path:
                previous_version = current_version
                current_version = version
                if version != initial_version:
                    log.info("Upgrading to version {0}".format(version))
                    self._stop_membase_servers(servers)
                    if previous_version.startswith(
                            "1.7") and current_version.startswith("1.8"):
                        save_upgrade_config = True
                    # No need to save the upgrade config from 1.8.0 to 1.8.1
                    if previous_version.startswith(
                            "1.8.0") and current_version.startswith("1.8.1"):
                        save_upgrade_config = False
                    appropriate_build = _get_build(servers[0],
                                                   version,
                                                   is_amazon=is_amazon)
                    self.assertTrue(
                        appropriate_build.url,
                        msg="unable to find build {0}".format(version))
                    for server in servers:
                        remote = RemoteMachineShellConnection(server)
                        remote.download_build(appropriate_build)
                        remote.membase_upgrade(
                            appropriate_build,
                            save_upgrade_config=save_upgrade_config)
                        RestHelper(
                            RestConnection(server)).is_ns_server_running(
                                testconstants.NS_SERVER_TIMEOUT)

                        #verify admin_creds still set
                        pools_info = RestConnection(server).get_pools_info()
                        self.assertEqual(pools_info['implementationVersion'],
                                         appropriate_build.product_version)

                        if start_upgraded_first:
                            log.info("Starting server {0} post upgrade".format(
                                server))
                            remote.start_membase()
                        else:
                            remote.stop_membase()

                        remote.disconnect()
                    if not start_upgraded_first:
                        log.info("Starting all servers together")
                        self._start_membase_servers(servers)
                    time.sleep(TIMEOUT_SECS)
                    if version == "1.7.0" or version == "1.7.1":
                        self._save_config(rest_settings, master)

                    if create_buckets:
                        self.assertTrue(
                            BucketOperationHelper.wait_for_bucket_creation(
                                'default', RestConnection(master)),
                            msg="bucket 'default' does not exist..")
                    if insert_data:
                        self._verify_data(master, rest, inserted_keys)

        # rolling upgrade
        else:
            version = input.test_params['version']
            appropriate_build = _get_build(servers[0],
                                           version,
                                           is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url,
                            msg="unable to find build {0}".format(version))
            # rebalance node out
            # remove membase from node
            # install destination version onto node
            # rebalance it back into the cluster
            for server_index in range(len(servers)):
                server = servers[server_index]
                master = servers[server_index - 1]
                log.info("current master is {0}, rolling node is {1}".format(
                    master, server))

                rest = RestConnection(master)
                nodes = rest.node_statuses()
                allNodes = [node.id for node in nodes]
                toBeEjectedNodes = [
                    node.id for node in nodes
                    if "{0}:{1}".format(node.ip, node.port) ==
                    "{0}:{1}".format(server.ip, server.port)
                ]
                helper = RestHelper(rest)
                removed = helper.remove_nodes(knownNodes=allNodes,
                                              ejectedNodes=toBeEjectedNodes)
                self.assertTrue(
                    removed,
                    msg="Unable to remove nodes {0}".format(toBeEjectedNodes))
                remote = RemoteMachineShellConnection(server)
                remote.download_build(appropriate_build)
                # a node already on 1.8.0 can be upgraded in place; anything
                # older is uninstalled and the destination version installed
                if not initial_version.startswith('1.8.0'):
                    remote.membase_uninstall()
                    remote.couchbase_uninstall()
                    remote.membase_install(appropriate_build)
                else:
                    remote.membase_upgrade(appropriate_build)

                self.assertTrue(
                    RestHelper(rest).is_ns_server_running(
                        testconstants.NS_SERVER_TIMEOUT),
                    msg="ns_server is not running after the upgrade")
                log.info("sleeping {0} seconds to wait for membase-server "
                         "to start...".format(TIMEOUT_SECS))
                time.sleep(TIMEOUT_SECS)
                rest.init_cluster_port(rest_settings.rest_username,
                                       rest_settings.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

                # re-add this node to the cluster
                ClusterOperationHelper.add_all_nodes_or_assert(
                    master, [server], rest_settings, self)
                otpNodeIds = [node.id for node in rest.node_statuses()]
                # issue the rebalance REST calls through the newly added
                # node (MB-5108)
                    master = server
                    rest = RestConnection(master)
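                    # (subsequent rebalance calls now go through the freshly
                    # added node's REST API rather than the old master's)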
                rebalanceStarted = rest.rebalance(otpNodeIds, [])
                self.assertTrue(
                    rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(
                        master.ip))
                log.info(
                    'started rebalance operation on master node {0}'.format(
                        master.ip))
                rebalanceSucceeded = rest.monitorRebalance()
                self.assertTrue(
                    rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".
                    format(otpNodeIds))

            # TODO: how can I verify that the cluster init config is preserved?
            # verify data on upgraded nodes
            if create_buckets:
                self.assertTrue(
                    BucketOperationHelper.wait_for_bucket_creation(
                        'default', RestConnection(master)),
                    msg="bucket 'default' does not exist..")
            if insert_data:
                self._verify_data(master, rest, inserted_keys)
                rest = RestConnection(master)
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], master,
                        bucket.name, self)
Example #59
    def _do_cleanup(self):
        for key in self._clusters_keys_olst:
            nodes = self._clusters_dic[key]
            BucketOperationHelper.delete_all_buckets_or_assert(nodes, self)
            ClusterOperationHelper.cleanup_cluster(nodes)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(nodes, self)
Example #60
    def tearDown(self):
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
        self._log_finish()