Example #1
 def tearDown(self):
     BucketOperationHelper.delete_all_buckets_or_assert(
         servers=[self.master], test_case=self)
     # Remove rbac user in teardown
     role_del = ['cbadminbucket']
     RbacBase().remove_user_role(role_del, RestConnection(self.master))
     self._log_finish()
Example #2
 def setUp(self):
     self.log = logger.Logger.get_logger()
     self.input = TestInputSingleton.input
     self.assertTrue(self.input, msg="input parameters missing...")
     self.servers = self.input.servers
     BucketOperationHelper.delete_all_buckets_or_assert(servers=self.servers, test_case=self)
     self._log_start()
Example #3
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'

        serverInfo = self.master

        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        self._bucket_port = info.moxi
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
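        # Reserve two thirds of the node memory quota for the default bucket created below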
        bucket_ram = info.memoryQuota * 2 / 3

        # Add built-in user
        testuser = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'password': '******'
        }]
        RbacBase().create_user_source(testuser, 'builtin', self.master)

        # Assign user to role
        role_list = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'roles': 'admin'
        }]
        RbacBase().add_user_role(role_list, RestConnection(self.master),
                                 'builtin')

        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=info.memcached)

        msg = 'create_bucket succeeded but bucket "default" does not exist'

        if os.environ.get(testconstants.TESTRUNNER_CLIENT) == \
                testconstants.PYTHON_SDK:
            self.client = SDKSmartClient(
                serverInfo,
                self._bucket_name,
                compression=TestInputSingleton.input.param(
                    "sdk_compression", True))
        else:
            self.client = MemcachedClientHelper.direct_client(
                serverInfo, self._bucket_name)

        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
            self._bucket_name, rest),
                        msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(
            serverInfo, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
Example #4
 def tearDown(self):
     try:
         self._cluster_helper.shutdown()
         log = logger.Logger.get_logger()
         log.info("==============  tearDown was started for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
         RemoteUtilHelper.common_basic_setup(self._servers)
         log.info("10 seconds delay to wait for membase-server to start")
         time.sleep(10)
         for server in self._cleanup_nodes:
             shell = RemoteMachineShellConnection(server)
             o, r = shell.execute_command("iptables -F")
             shell.log_command_output(o, r)
             o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
             shell.log_command_output(o, r)
             o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
             shell.log_command_output(o, r)
             o, r = shell.execute_command("/etc/init.d/couchbase-server start")
             shell.log_command_output(o, r)
             shell.disconnect()
         BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
         ClusterOperationHelper.cleanup_cluster(self._servers)
         ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
         log.info("==============  tearDown was finished for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
     finally:
         pass
Example #5
    def common_setup(self, replica):
        self._input = TestInputSingleton.input
        self._servers = self._input.servers
        first = self._servers[0]
        self.log = logger.Logger().get_logger()
        self.log.info(self._input)
        rest = RestConnection(first)
        for server in self._servers:
            RestHelper(RestConnection(server)).is_ns_server_running()

        ClusterOperationHelper.cleanup_cluster(self._servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        ClusterOperationHelper.add_all_nodes_or_assert(self._servers[0], self._servers, self._input.membase_settings, self)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        info = rest.get_nodes_self()
        bucket_ram = info.mcdMemoryReserved * 3 / 4
        rest.create_bucket(bucket="default",
                           ramQuotaMB=int(bucket_ram),
                           replicaNumber=replica,
                           proxyPort=rest.get_nodes_self().moxi)
        msg = "wait_for_memcached fails"
        ready = BucketOperationHelper.wait_for_memcached(first, "default")
        self.assertTrue(ready, msg)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(rebalanceStarted,
                        "unable to start rebalance on master node {0}".format(first.ip))
        self.log.info('started rebalance operation on master node {0}'.format(first.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        # without a bucket this seems to fail
        self.assertTrue(rebalanceSucceeded,
                        "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
        self.awareness = VBucketAwareMemcached(rest, "default")
Example #6
 def tearDown(self):
     if hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                 and 'stop-on-failure' in TestInputSingleton.input.test_params and \
                 str(TestInputSingleton.input.test_params['stop-on-failure']).lower() == 'true':
                 # supported starting with python2.7
                  self.log.warn("CLEANUP WAS SKIPPED")
                 self.cluster.shutdown(force=True)
                 self._log_finish(self)
     else:
         try:
             self.log.info("==============  tearDown was started for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
             RemoteUtilHelper.common_basic_setup(self.servers)
             BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
             for node in self.servers:
                 master = node
                 try:
                     ClusterOperationHelper.cleanup_cluster(self.servers,
                                                            master=master)
                 except:
                     continue
             self.log.info("==============  tearDown was finished for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
         finally:
             super(FailoverBaseTest, self).tearDown()
Example #7
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.num_of_docs = self.input.param("num_of_docs", 1000)

        rest = RestConnection(self.master)
        for server in self.servers:
            rest.init_cluster(server.rest_username, server.rest_password)

        info = rest.get_nodes_self()

        for server in self.servers:
            rest.init_cluster_memoryQuota(
                server.rest_username, server.rest_password, memoryQuota=info.mcdMemoryReserved
            )

        ClusterOperationHelper.cleanup_cluster(self.servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        self._create_default_bucket()

        # Rebalance the nodes
        ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
        ClusterOperationHelper.end_rebalance(self.master)
        self._log_start()
Example #8
 def cleanup(self):
     rest = RestConnection(self.master)
     rest.stop_rebalance()
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     for server in self.servers:
         ClusterOperationHelper.cleanup_cluster([server])
     ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
Example #9
 def tearDown(self):
     try:
         test_failed = len(self._resultForDoCleanups.errors)
         if self.driver and test_failed:
             BaseHelper(self).create_screenshot()
         if self.driver:
             self.driver.close()
         if test_failed and TestInputSingleton.input.param("stop-on-failure", False):
             print "test fails, teardown will be skipped!!!"
             return
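          # Query the rebalance state; an uninitialized cluster returns a non-JSON response, in which case teardown is skipped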
         rest = RestConnection(self.servers[0])
         try:
             reb_status = rest._rebalance_progress_status()
         except ValueError as e:
             if e.message == 'No JSON object could be decoded':
                 print "cluster not initialized!!!"
                 return
         if reb_status == 'running':
             stopped = rest.stop_rebalance()
             self.assertTrue(stopped, msg="unable to stop rebalance")
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
         for server in self.servers:
             ClusterOperationHelper.cleanup_cluster([server])
         ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
     except Exception as e:
         raise e
     finally:
         if self.driver:
             self.shell.disconnect()
Example #10
 def setUp(self):
     super(XDCRTests, self).setUp()
     self.bucket = Bucket()
     self._initialize_nodes()
     self.master = self.servers[0]
     for server in self.servers:
          rest = RestConnection(server)
         cluster_status = rest.cluster_status()
         self.log.info("Initial status of {0} cluster is {1}".format(server.ip,
                                                                     cluster_status['nodes'][0]['status']))
         while cluster_status['nodes'][0]['status'] == 'warmup':
             self.log.info("Waiting for cluster to become healthy")
             self.sleep(5)
             cluster_status = rest.cluster_status()
         self.log.info("current status of {0}  is {1}".format(server.ip,
                                                              cluster_status['nodes'][0]['status']))
     # Delete all buckets before creating new buckets
     self.log.info("Deleting all existing buckets")
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     self.log.info("Creating new buckets")
     src_bucket = self.input.param('src_bucket', self.bucket)
     dest_bucket = self.input.param('dest_bucket', self.bucket)
     if src_bucket:
         RestConnection(self.servers[0]).create_bucket(bucket='default', ramQuotaMB=500)
     if dest_bucket:
         RestConnection(self.servers[1]).create_bucket(bucket='default', ramQuotaMB=500)
     helper = BaseHelper(self)
     helper.login()
Example #11
 def tearDown(self):
         try:
             if (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                 and TestInputSingleton.input.param("stop-on-failure", False))\
                     or self.input.param("skip_cleanup", False):
                 self.log.warn("CLEANUP WAS SKIPPED")
             else:
                 self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                       .format(self.case_number, self._testMethodName))
                 rest = RestConnection(self.master)
                 alerts = rest.get_alerts()
                 if alerts is not None and len(alerts) != 0:
                     self.log.warn("Alerts were found: {0}".format(alerts))
                 if rest._rebalance_progress_status() == 'running':
                     self.log.warning("rebalancing is still running, test should be verified")
                     stopped = rest.stop_rebalance()
                     self.assertTrue(stopped, msg="unable to stop rebalance")
                 BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
                 ClusterOperationHelper.cleanup_cluster(self.servers)
                 self.sleep(10)
                 ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
                 self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                       .format(self.case_number, self._testMethodName))
         except BaseException:
             # increase case_number to retry tearDown in setup for the next test
             self.case_number += 1000
         finally:
             # stop all existing task manager threads
             self.cluster.shutdown()
             self._log_finish(self)
Example #12
 def reset(self):
     self.log.info(
         "==============  SwapRebalanceBase cleanup was started for test #{0} {1} ==============".format(
             self.case_number, self._testMethodName
         )
     )
     self.log.info("Stopping load in Teardown")
     SwapRebalanceBase.stop_load(self.loaders)
     for server in self.servers:
         rest = RestConnection(server)
         if rest._rebalance_progress_status() == "running":
             self.log.warning("rebalancing is still running, test should be verified")
             stopped = rest.stop_rebalance()
             self.assertTrue(stopped, msg="unable to stop rebalance")
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     for server in self.servers:
         ClusterOperationHelper.cleanup_cluster([server])
         if server.data_path:
             rest = RestConnection(server)
             rest.set_data_path(data_path=server.data_path)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
     self.log.info(
         "==============  SwapRebalanceBase cleanup was finished for test #{0} {1} ==============".format(
             self.case_number, self._testMethodName
         )
     )
Example #13
 def setUp(self):
     super(XDCRTests, self).setUp()
     self.bucket = Bucket()
     self._initialize_nodes()
     self.master = self.servers[0]
     for server in self.servers:
         rest = RestConnection(server)
         cluster_status = rest.cluster_status()
         self.log.info("Initial status of {0} cluster is {1}".format(
             server.ip, cluster_status['nodes'][0]['status']))
         while cluster_status['nodes'][0]['status'] == 'warmup':
             self.log.info("Waiting for cluster to become healthy")
             self.sleep(5)
             cluster_status = rest.cluster_status()
         self.log.info("current status of {0}  is {1}".format(
             server.ip, cluster_status['nodes'][0]['status']))
     # Delete all buckets before creating new buckets
     self.log.info("Deleting all existing buckets")
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     self.log.info("Creating new buckets")
     src_bucket = self.input.param('src_bucket', self.bucket)
     dest_bucket = self.input.param('dest_bucket', self.bucket)
     if src_bucket:
         RestConnection(self.servers[0]).create_bucket(bucket='default',
                                                       ramQuotaMB=500)
     if dest_bucket:
         RestConnection(self.servers[1]).create_bucket(bucket='default',
                                                       ramQuotaMB=500)
     helper = BaseHelper(self)
     helper.login()
Example #14
 def setUp(self):
     self.log = logger.Logger.get_logger()
     self.input = TestInput.TestInputSingleton.input
     self.assertTrue(self.input, msg="input parameters missing...")
     self.servers = self.input.servers
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, test_case=self)
     self._log_start()
Example #15
 def tearDown(self):
     if not self.input.param("skip_cleanup", True):
         if self.times_teardown_called > 1 :
             self.shell.disconnect()
     if self.input.param("skip_cleanup", True):
         if self.case_number > 1 or self.times_teardown_called > 1:
             self.shell.disconnect()
     self.times_teardown_called += 1
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     zones = rest.get_zone_names()
     for zone in zones:
         if zone != "Group 1":
             rest.delete_zone(zone)
     self.clusters_dic = self.input.clusters
     if self.clusters_dic:
         if len(self.clusters_dic) > 1:
             self.dest_nodes = self.clusters_dic[1]
             self.dest_master = self.dest_nodes[0]
             if self.dest_nodes and len(self.dest_nodes) > 1:
                 self.log.info("======== clean up destination cluster =======")
                 rest = RestConnection(self.dest_nodes[0])
                 rest.remove_all_remote_clusters()
                 rest.remove_all_replications()
                 BucketOperationHelper.delete_all_buckets_or_assert(self.dest_nodes, self)
                 ClusterOperationHelper.cleanup_cluster(self.dest_nodes)
         elif len(self.clusters_dic) == 1:
              self.log.error("=== need 2 clusters to set up xdcr in the ini file ===")
     else:
         self.log.info("**** If run xdcr test, need cluster config is setup in ini file. ****")
     super(CliBaseTest, self).tearDown()
Example #16
 def tearDown(self):
     try:
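          # Capture a UI screenshot before cleanup when a Selenium driver is available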
         if self.driver:
             path_screen = self.input.ui_conf['screenshots'] or 'logs/screens'
             full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen)
             self.log.info('screenshot is available: %s' % full_path)
             if not os.path.exists(path_screen):
                 os.mkdir(path_screen)
             self.driver.get_screenshot_as_file(os.path.abspath(full_path))
         rest = RestConnection(self.servers[0])
         if rest._rebalance_progress_status() == 'running':
             stopped = rest.stop_rebalance()
             self.assertTrue(stopped, msg="unable to stop rebalance")
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
         for server in self.servers:
             ClusterOperationHelper.cleanup_cluster([server])
         ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
         if self.driver:
             self.driver.close()
     except Exception as e:
         raise e
     finally:
         if self.driver:
             self.shell.disconnect()
         self.cluster.shutdown()
Example #17
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.cluster = Cluster()
        self.servers = self.input.servers
        self.buckets = {}

        self.default_bucket = self.input.param("default_bucket", True)
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)

        if not self.input.param("skip_cleanup", False):
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
            ClusterOperationHelper.wait_for_ns_servers_or_assert([self.servers[0]], self)

        self.quota = self._initialize_nodes(self.cluster, self.servers)
        if self.dgm_run:
            self.quota = 256
        self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
        if self.default_bucket:
            self.cluster.create_default_bucket(self.servers[0], self.bucket_size, self.num_replicas)
            self.buckets['default'] = {1 : KVStore()}
        self._create_sasl_buckets(self.servers[0], self.sasl_buckets)
Example #18
    def common_tearDown(servers, testcase):
        log = logger.Logger.get_logger()
        log.info(
            "==============  common_tearDown was started for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName
            )
        )
        RemoteUtilHelper.common_basic_setup(servers)

        log.info("10 seconds delay to wait for couchbase-server to start")
        time.sleep(10)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            servers, testcase, wait_time=AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME * 15, wait_if_warmup=True
        )
        try:
            rest = RestConnection(servers[0])
            buckets = rest.get_buckets()
            for bucket in buckets:
                MemcachedClientHelper.flush_bucket(servers[0], bucket.name)
        except Exception:
            pass
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        log.info(
            "==============  common_tearDown was finished for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName
            )
        )
Example #19
 def tearDown(self):
     BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
     rest = RestConnection(self.master)
     # Remove rbac user in teardown
     role_del = ['cbadminbucket']
     temp = RbacBase().remove_user_role(role_del, rest)
     self._log_finish()
Example #20
 def test_backup_restore(self):
     self._load_all_buckets()
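      # Remove any archive left over from a previous run before creating a fresh backup repository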
     self.shell.execute_command("rm -rf /tmp/backups")
     output, error = self.shell.execute_command("/opt/couchbase/bin/cbbackupmgr config "
                                                "--archive /tmp/backups --repo example")
     self.log.info(output)
     self.assertEquals('Backup repository `example` created successfully in archive `/tmp/backups`', output[0])
     output, error = self.shell.execute_command(
         "/opt/couchbase/bin/cbbackupmgr backup --archive /tmp/backups --repo example "
         "--cluster couchbase://127.0.0.1 --username Administrator --password password")
     self.log.info(output)
     self.assertEquals('Backup successfully completed', output[1])
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     imp_rest = RestConnection(self.master)
     info = imp_rest.get_nodes_self()
     if info.memoryQuota and int(info.memoryQuota) > 0:
         self.quota = info.memoryQuota
     bucket_params = self._create_bucket_params(server=self.master, size=250, bucket_type='ephemeral',
                                                replicas=self.num_replicas,
                                                enable_replica_index=self.enable_replica_index,
                                                eviction_policy=self.eviction_policy)
     self.cluster.create_default_bucket(bucket_params)
     output, error = self.shell.execute_command('ls /tmp/backups/example')
     output, error = self.shell.execute_command("/opt/couchbase/bin/cbbackupmgr restore --archive /tmp/backups"
                                                " --repo example --cluster couchbase://127.0.0.1 "
                                                "--username Administrator --password password --start %s" % output[
                                                    0])
     self.log.info(output)
     self.assertEquals('Restore completed successfully', output[1])
     self._verify_all_buckets(self.master)
Example #21
 def setUp(self):
     self.log = logger.Logger.get_logger()
     self.input = TestInputSingleton.input
     self.assertTrue(self.input, msg="input parameters missing...")
     self.servers = self.input.servers
     self.master = self.servers[0]
     rest = RestConnection(self.master)
     rest.init_cluster(username=self.master.rest_username,
                       password=self.master.rest_password)
     info = rest.get_nodes_self()
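      # Scale the memory quota by the per-node bucket ratio (accounts for several test servers sharing one machine)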
     node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
     rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
     BucketOperationHelper.delete_all_buckets_or_assert(servers=self.servers, test_case=self)
     ClusterOperationHelper.cleanup_cluster(servers=self.servers)
     credentials = self.input.membase_settings
     ClusterOperationHelper.add_all_nodes_or_assert(master=self.master, all_servers=self.servers, rest_settings=credentials, test_case=self)
     rest = RestConnection(self.master)
     nodes = rest.node_statuses()
     otpNodeIds = []
     for node in nodes:
         otpNodeIds.append(node.id)
     rebalanceStarted = rest.rebalance(otpNodeIds, [])
     self.assertTrue(rebalanceStarted,
                     "unable to start rebalance on master node {0}".format(self.master.ip))
     self.log.info('started rebalance operation on master node {0}'.format(self.master.ip))
     rebalanceSucceeded = rest.monitorRebalance()
Example #22
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'

        serverInfo = self.master
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        self._bucket_port = info.moxi
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.memoryQuota * 2 / 3

        # Add built-in user
        testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******'}]
        RbacBase().create_user_source(testuser, 'builtin', self.master)
        time.sleep(10)

        # Assign user to role
        role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
        RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
        time.sleep(10)

        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=info.memcached)
        msg = 'create_bucket succeeded but bucket "default" does not exist'
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self._bucket_name, rest), msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
Example #23
 def setUp(self):
     super(SGConfigTests, self).setUp()
     for server in self.servers:
         if self.case_number == 1:
             with open('pytests/sg/resources/gateway_config_walrus_template.json', 'r') as file:
                 filedata = file.read()
                 filedata = filedata.replace('LOCAL_IP', server.ip)
             with open('pytests/sg/resources/gateway_config_walrus.json', 'w') as file:
                 file.write(filedata)
             shell = RemoteMachineShellConnection(server)
             shell.execute_command("rm -rf {0}/tmp/*".format(self.folder_prefix))
             shell.copy_files_local_to_remote('pytests/sg/resources', '{0}/tmp'.format(self.folder_prefix))
             # will install sg only the first time
             self.install(shell)
             pid = self.is_sync_gateway_process_running(shell)
             self.assertNotEqual(pid, 0)
             exist = shell.file_exists('{0}/tmp/'.format(self.folder_prefix), 'gateway.log')
             self.assertTrue(exist)
             shell.disconnect()
     if self.case_number == 1:
         shutil.copy2('pytests/sg/resources/gateway_config_backup.json', 'pytests/sg/resources/gateway_config.json')
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
         self.cluster = Cluster()
         self.cluster.create_default_bucket(self.master, 150)
         task = self.cluster.async_create_sasl_bucket(self.master, 'test_%E-.5', 'password', 150, 1)
         task.result()
         task = self.cluster.async_create_standard_bucket(self.master, 'db', 11219, 150, 1)
         task.result()
Example #24
    def common_setup(input, testcase):
        log.info("==============  common_setup was started for test #{0} {1}=============="\
                      .format(testcase.case_number, testcase._testMethodName))
        servers = input.servers
        RemoteUtilHelper.common_basic_setup(servers)
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)

        # Add built-in user
        testuser = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'password': '******'
        }]
        RbacBase().create_user_source(testuser, 'builtin', servers[0])

        # Assign user to role
        role_list = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'roles': 'admin'
        }]
        RbacBase().add_user_role(role_list, RestConnection(servers[0]),
                                 'builtin')

        log.info("==============  common_setup was finished for test #{0} {1} =============="\
                      .format(testcase.case_number, testcase._testMethodName))
Example #25
    def test_backup_with_spatial_data(self):
        num_docs = self.helper.input.param("num-docs", 5000)
        self.log.info("description : Make limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))
        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

        if not self.command_options:
            self.command_options = []
        options = self.command_options + [' -m full']

        self.total_backups = 1
        self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
        time.sleep(2)

        self.buckets = RestConnection(self.master).get_buckets()
        bucket_names = [bucket.name for bucket in self.buckets]
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        gc.collect()

        self.helper._create_default_bucket()
        self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)

        SimpleDataSet(self.helper, num_docs)._create_views()
        self._query_test_init(data_set)
Example #26
 def common_setup(input, testcase):
     servers = input.servers
     RemoteUtilHelper.common_basic_setup(servers)
     BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
     for server in servers:
         ClusterOperationHelper.cleanup_cluster([server])
     ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
Example #27
    def common_setup(input, testcase, bucket_ram_ratio=(2.8 / 3.0), replica=0):
        log = logger.Logger.get_logger()
        servers = input.servers
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
        serverInfo = servers[0]

        log.info('picking server : {0} as the master'.format(serverInfo))
        #if all nodes are on the same machine let's have the bucket_ram_ratio as bucket_ram_ratio * 1/len(servers)
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
        if "ascii" in TestInputSingleton.input.test_params\
        and TestInputSingleton.input.test_params["ascii"].lower() == "true":
            BucketOperationHelper.create_multiple_buckets(serverInfo, replica, node_ram_ratio * bucket_ram_ratio,
                                                          howmany=1, sasl=False)
        else:
            BucketOperationHelper.create_multiple_buckets(serverInfo, replica, node_ram_ratio * bucket_ram_ratio,
                                                          howmany=1, sasl=True)
        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
            testcase.assertTrue(ready, "wait_for_memcached failed")
Example #28
    def setUp_bucket(self, unittest):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        unittest.assertTrue(self.input, msg="input parameters missing...")
        self.test = unittest
        self.master = self.input.servers[0]
        rest = RestConnection(self.master)
        rest.init_cluster(username=self.master.rest_username, password=self.master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self.test)

        serverInfo = self.master
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.memoryQuota)

        # Add built-in user
        testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******'}]
        RbacBase().create_user_source(testuser, 'builtin', self.master)
        time.sleep(10)

        # Assign user to role
        role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
        RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
        time.sleep(10)
Example #29
    def tearDown(self):
#        super(Rebalance, self).tearDown()
        try:
            self.log.info("==============  XDCRbasetests stats for test #{0} {1} =============="\
                        .format(self.case_number, self._testMethodName))
            self._end_replication_flag = 1
            if hasattr(self, '_stats_thread1'): self._stats_thread1.join()
            if hasattr(self, '_stats_thread2'): self._stats_thread2.join()
            if hasattr(self, '_stats_thread3'): self._stats_thread3.join()
            if self._replication_direction_str in "bidirection":
                if hasattr(self, '_stats_thread4'): self._stats_thread4.join()
                if hasattr(self, '_stats_thread5'): self._stats_thread5.join()
                if hasattr(self, '_stats_thread6'): self._stats_thread6.join()
            if self._replication_direction_str in "bidirection":
                self.log.info("Type of run: BIDIRECTIONAL XDCR")
            else:
                self.log.info("Type of run: UNIDIRECTIONAL XDCR")
            self._print_stats(self.src_master)
            if self._replication_direction_str in "bidirection":
                self._print_stats(self.dest_master)
            self.log.info("============== = = = = = = = = END = = = = = = = = = = ==============")
            self.log.info("==============  rebalanceXDCR cleanup was started for test #{0} {1} =============="\
                    .format(self.case_number, self._testMethodName))
            for nodes in [self.src_nodes, self.dest_nodes]:
                for node in nodes:
                    BucketOperationHelper.delete_all_buckets_or_assert([node], self)
                    ClusterOperationHelper.cleanup_cluster([node], self)
                    ClusterOperationHelper.wait_for_ns_servers_or_assert([node], self)
            self.log.info("==============  rebalanceXDCR cleanup was finished for test #{0} {1} =============="\
                    .format(self.case_number, self._testMethodName))
        finally:
            self.cluster.shutdown(force=True)
            self._log_finish(self)
Example #30
 def cleanup_cluster(self):
     if not "skip_cleanup" in TestInputSingleton.input.test_params:
         BucketOperationHelper.delete_all_buckets_or_assert(
             self.servers, self.testcase)
         ClusterOperationHelper.cleanup_cluster(self.servers)
         ClusterOperationHelper.wait_for_ns_servers_or_assert(
             self.servers, self.testcase)
Example #31
    def _test_cluster_topology_change_body(self):
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        self.add_nodes_and_rebalance()

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}

        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
            servers=[self.master],
            ram_load_ratio=1,
            value_size_distribution=distribution,
            moxi=True,
            write_only=True,
            number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")

        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node,
                                              self.remote_tmp_folder)
            shell.disconnect()

        ClusterOperationHelper.cleanup_cluster(self.servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)

        servers = []
        for i in range(0, len(self.servers) - 1):
            servers.append(self.servers[i])

        self.add_node_and_rebalance(servers[0], servers)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket,
                                                      self)
        BucketOperationHelper.create_bucket(serverInfo=self.master,
                                            test_case=self)

        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        BucketOperationHelper.verify_data(self.master, inserted_keys, False,
                                          False, 11210, self)
Example #32
    def tearDown(self):
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, test_case=self)
        rest = RestConnection(self.servers[0])
        # Remove rbac user in teardown
        role_del = ['cbadminbucket']
        temp = RbacBase().remove_user_role(role_del, rest)

        self._log_finish()
Example #33
    def tearDown_bucket(self):
        BucketOperationHelper.delete_all_buckets_or_assert([self.master],
                                                           self.test)

        # Remove rbac user in teardown
        role_del = ['cbadminbucket']
        temp = RbacBase().remove_user_role(role_del,
                                           RestConnection(self.master))
Example #34
 def tearDown(self):
     BucketOperationHelper.delete_all_buckets_or_assert(servers=self.servers, test_case=self)
     #wait for all ns_servers
     for server in self.servers:
         self.assertTrue(RestHelper(RestConnection(server)).is_ns_server_running(timeout_in_seconds=480),
                         msg="ns server is not running even after waiting for 6 minutes")
     self.log.info("sleep for 10 seconds to give enough time for other nodes to restart")
     time.sleep(10)
Example #35
 def common_setUp(self, with_buckets):
     ClusterOperationHelper.cleanup_cluster(self.servers)
     server = self.servers[0]
     if with_buckets:
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, test_case=self)
         ok = BucketOperationHelper.create_multiple_buckets(server, 1)
         if not ok:
             self.fail("unable to create multiple buckets on this node : {0}".format(server))
Example #36
 def _common_clenup(self):
     rest = RestConnection(self.servers[0])
     if rest._rebalance_progress_status() == 'running':
         stopped = rest.stop_rebalance()
         self.assertTrue(stopped, msg="unable to stop rebalance")
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     ClusterOperationHelper.cleanup_cluster(self.servers)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
Example #37
 def tearDown(self):
      BucketOperationHelper.delete_all_buckets_or_assert(servers=self.servers, test_case=self)
     #wait for all ns_servers
     for server in self.servers:
         self.assertTrue(RestHelper(RestConnection(server)).is_ns_server_running(timeout_in_seconds=480),
                         msg="ns server is not running even after waiting for 6 minutes")
     self.log.info("sleep for 10 seconds to give enough time for other nodes to restart")
     time.sleep(10)
Example #38
 def common_tearDown(self):
     if self.load_started:
         self.log.info("Stopping load in Teardown")
         SwapRebalanceBase.stop_load(self.loaders)
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     for server in self.servers:
         ClusterOperationHelper.cleanup_cluster([server])
     ClusterHelper.wait_for_ns_servers_or_assert(self.servers, self)
Example #39
 def cleanup(self):
     rest = RestConnection(self.master)
     rest.stop_rebalance()
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     for server in self.servers:
         ClusterOperationHelper.cleanup_cluster([server])
     ClusterOperationHelper.wait_for_ns_servers_or_assert(
         self.servers, self)
Example #40
 def common_setUp(self, with_buckets):
     ClusterOperationHelper.cleanup_cluster(self.servers)
     server = self.servers[0]
     if with_buckets:
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, test_case=self)
         ok = BucketOperationHelper.create_multiple_buckets(server, 1)
         if not ok:
             self.fail("unable to create multiple buckets on this node : {0}".format(server))
Example #41
 def tearDown(self):
     self.log.info(
         "==============  QueryCollectionsEnd2EndTests tearDown has started =============="
     )
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     super(QueryCollectionsEnd2EndTests, self).tearDown()
     self.log.info(
         "==============  QueryCollectionsEnd2EndTests tearDown has completed =============="
     )
Example #42
 def tearDown(self):
     #super(Upgrade_EpTests, self).tearDown()
     self.testcase = '2'
     if not "skip_cleanup" in TestInputSingleton.input.test_params:
         BucketOperationHelper.delete_all_buckets_or_assert(
             self.servers, self.testcase)
         ClusterOperationHelper.cleanup_cluster(self.servers)
         ClusterOperationHelper.wait_for_ns_servers_or_assert(
             self.servers, self.testcase)
Example #43
    def common_tearDown(servers, testcase):
        RemoteUtilHelper.common_basic_setup(servers)
        log = logger.Logger.get_logger()
        log.info("10 seconds delay to wait for membase-server to start")
        time.sleep(10)

        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
Example #44
 def tearDown(self):
     #super(Upgrade_EpTests, self).tearDown()
     self.testcase = '2'
     if not "skip_cleanup" in TestInputSingleton.input.test_params:
         BucketOperationHelper.delete_all_buckets_or_assert(
             self.servers, self.testcase)
         ClusterOperationHelper.cleanup_cluster(self.servers)
         ClusterOperationHelper.wait_for_ns_servers_or_assert(
             self.servers, self.testcase)
Example #45
 def tearDown(self):
     super(BucketConfig, self).tearDown()
     return
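      # NOTE: the early return above makes the manual cleanup below unreachable; cleanup is handled by the parent tearDown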
     if not "skip_cleanup" in TestInputSingleton.input.test_params:
         BucketOperationHelper.delete_all_buckets_or_assert(
             self.servers, self.testcase)
         ClusterOperationHelper.cleanup_cluster(self.servers)
         ClusterOperationHelper.wait_for_ns_servers_or_assert(
             self.servers, self.testcase)
Example #46
 def common_setup(input, testcase):
     log.info("==============  common_setup was started for test #{0} {1}=============="\
                   .format(testcase.case_number, testcase._testMethodName))
     servers = input.servers
     RemoteUtilHelper.common_basic_setup(servers)
     BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
     ClusterOperationHelper.cleanup_cluster(servers)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
     log.info("==============  common_setup was finished for test #{0} {1} =============="\
                   .format(testcase.case_number, testcase._testMethodName))
Example #47
 def setUp_bucket(self, bucket_name, port, bucket_type, unittest):
     self.log = logger.Logger.get_logger()
     self.input = TestInputSingleton.input
     unittest.assertTrue(self.input, msg="input parameters missing...")
     self.test = unittest
     self.master = self.input.servers[0]
     self.bucket_port = port
     self.bucket_name = bucket_name
     ClusterOperationHelper.cleanup_cluster([self.master])
     BucketOperationHelper.delete_all_buckets_or_assert([self.master], self.test)
     self._create_default_bucket(unittest)
Example #48
 def common_setUp(self):
     ClusterOperationHelper.cleanup_cluster(self.servers)
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
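      # Restart the membase/couchbase services on every node and wait for ns_server to come back up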
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         shell.stop_membase()
         shell.stop_couchbase()
         shell.start_membase()
         shell.start_couchbase()
         RestHelper(RestConnection(server)).is_ns_server_running(timeout_in_seconds=120)
         shell.disconnect()
Example #49
 def reset(self):
     rest = RestConnection(self.servers[0])
     if rest._rebalance_progress_status() == 'running':
         self.log.warning("rebalancing is still running, test should be verified")
         stopped = rest.stop_rebalance()
         self.assertTrue(stopped, msg="unable to stop rebalance")
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     for server in self.servers:
         ClusterOperationHelper.cleanup_cluster([server])
     self.log.info("Stopping load in Teardown")
     ClusterHelper.wait_for_ns_servers_or_assert(self.servers, self)
Example #50
 def cleanup_cluster(self):
     if not "skip_cleanup" in TestInputSingleton.input.test_params:
         # Cleanup all indexes that were create with this helper class
         for name in self._indexes:
             self.rest.delete_spatial(self.bucket, name)
             self.log.info("deleted spatial {0} from bucket {1}".format(
                 name, self.bucket))
         BucketOperationHelper.delete_all_buckets_or_assert(
             self.servers, self.testcase)
         ClusterOperationHelper.cleanup_cluster(self.servers)
         ClusterOperationHelper.wait_for_ns_servers_or_assert(
             self.servers, self.testcase)
Example #51
 def repetitive_create_delete(self):
     self.repetitions = self.input.param("repetition_count", 1)
     self.bufferspace = self.input.param("bufferspace", 6000000)
     # the first front end load
     self._load_all_buckets(self.master, self.gen_create, "create", 0,
                            batch_size=10000, pause_secs=5, timeout_secs=100)
     self._wait_for_stats_all_buckets(self.servers)
     rest = RestConnection(self.servers[0])
     max_data_sizes = {}
     initial_memory_usage = {}
     self.sleep(30)
     for bucket in self.buckets:
         max_data_sizes[bucket.name] = rest.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["ep_max_size"][
             -1]
         self.log.info("Initial max_data_size of bucket '{0}': {1}".format(bucket.name, max_data_sizes[bucket.name]))
         initial_memory_usage[bucket.name] = \
             rest.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["mem_used"][-1]
         self.log.info("initial memory consumption of bucket '{0}' with load: {1}".format(bucket.name,
                                                                                          initial_memory_usage[
                                                                                              bucket.name]))
     mem_usage = {}
     self.sleep(10)
     # the repetitions
     for i in range(0, self.repetitions):
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
         del self.buckets[:]
         self.log.info('About to create the buckets')
         self._bucket_creation()
         self.log.info('Done bucket creation, about to load them')
         self._load_all_buckets(self.master, self.gen_create, "create", 0,
                                batch_size=10000, pause_secs=5, timeout_secs=100)
         self.log.info('Buckets are loaded, waiting for stats')
         self._wait_for_stats_all_buckets(self.servers)
         self.log.info('Have the stats, sleeping for 30 seconds')
         self.sleep(60)
         for bucket in self.buckets:
             mem_usage[bucket.name] = rest.fetch_bucket_stats(bucket.name)["op"]["samples"]["mem_used"][-1]
             self.log.info("Memory used after attempt {0} = {1}, Difference from initial snapshot: {2}" \
                           .format(i + 1, mem_usage[bucket.name],
                                   (mem_usage[bucket.name] - initial_memory_usage[bucket.name])))
         self.sleep(10)
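      # Verify memory usage has not grown past the initial snapshot by more than the configured bufferspace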
     if (self.repetitions > 0):
         self.log.info(
             "After {0} repetitive deletion-creation-load of the buckets, the memory consumption difference is .." \
                  .format(self.repetitions))
         for bucket in self.buckets:
             self.log.info("{0} :: Initial: {1} :: Now: {2} :: Difference: {3}" \
                           .format(bucket.name, initial_memory_usage[bucket.name], mem_usage[bucket.name],
                                   (mem_usage[bucket.name] - initial_memory_usage[bucket.name])))
             msg = "Memory used now, much greater than initial usage!"
             assert mem_usage[bucket.name] <= initial_memory_usage[bucket.name] + self.bufferspace, msg
     else:
         self.log.info("Verification skipped, as there weren't any repetitions..");
Example #52
 def _cleanup_cluster(self):
     """
      Clean up the cluster. Delete all the buckets on the nodes and remove
     the nodes from any cluster that has been formed.
     :return:
     """
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     for node in self.servers:
         master = node
         try:
             ClusterOperationHelper.cleanup_cluster(self.servers,
                                                    master=master)
         except:
             continue
Example #53
 def setUp(self):
     log = logger.Logger.get_logger()
     self._input = TestInputSingleton.input
     self._keys_count = self._input.param("keys_count", DEFAULT_KEY_COUNT)
     self._num_replicas = self._input.param("replica", DEFAULT_REPLICA)
     self.bidirectional = self._input.param("bidirectional", False)
     self.case_number = self._input.param("case_number", 0)
     self._value_size = self._input.param("value_size", 256)
     self.wait_timeout = self._input.param("wait_timeout", 60)
     self._servers = self._input.servers
     self.master = self._servers[0]
     self._failed_nodes = []
     num_buckets = 0
     self.buckets = []
     self.default_bucket = self._input.param("default_bucket", True)
     if self.default_bucket:
         self.default_bucket_name = "default"
         num_buckets += 1
     self._standard_buckets = self._input.param("standard_buckets", 0)
     self._sasl_buckets = self._input.param("sasl_buckets", 0)
     num_buckets += self._standard_buckets + self._sasl_buckets
     self.dgm_run = self._input.param("dgm_run", True)
     self.log = logger.Logger().get_logger()
     self._cluster_helper = Cluster()
     self.disabled_consistent_view = self._input.param(
         "disabled_consistent_view", None)
     self._quota = self._initialize_nodes(self._cluster_helper,
                                          self._servers,
                                          self.disabled_consistent_view)
     if self.dgm_run:
          self._quota = 256
     self.bucket_size = int(
         (2.0 / 3.0) / float(num_buckets) * float(self._quota))
     self.gen_create = BlobGenerator('loadOne',
                                     'loadOne_',
                                     self._value_size,
                                     end=self._keys_count)
     self.add_back_flag = False
     self._cleanup_nodes = []
     log.info("==============  setup was started for test #{0} {1}=============="\
                   .format(self.case_number, self._testMethodName))
     RemoteUtilHelper.common_basic_setup(self._servers)
     BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
     for server in self._servers:
         ClusterOperationHelper.cleanup_cluster([server])
     ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
     self._setup_cluster()
     self._create_buckets_()
     log.info("==============  setup was finished for test #{0} {1} =============="\
                   .format(self.case_number, self._testMethodName))
Example #54
 def common_tearDown(servers, testcase):
     for server in servers:
         shell = RemoteMachineShellConnection(server)
         shell.start_membase()
     log = logger.Logger.get_logger()
     log.info("10 seconds delay to wait for membase-server to start")
     time.sleep(10)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
     try:
         MemcachedClientHelper.flush_bucket(servers[0], 'default', 11211)
     except Exception:
         pass
     ClusterOperationHelper.cleanup_cluster(servers)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
     BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
Example #55
    def tearDown(self):
        try:
            test_failed = (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures or self._resultForDoCleanups.errors)) \
                or (hasattr(self, '_exc_info') and self._exc_info()[1] is not None)
            if test_failed and TestInputSingleton.input.param("stop-on-failure", False)\
                    or self.input.param("skip_cleanup", False):
                self.log.warn("CLEANUP WAS SKIPPED")
            else:
                if test_failed and self.input.param('BUGS', False):
                    self.log.warn(
                        "Test failed. Possible reason is: {0}".format(
                            self.input.param('BUGS', False)))

                self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
                rest = RestConnection(self.master)
                alerts = rest.get_alerts()
                if alerts is not None and len(alerts) != 0:
                    self.log.warn("Alerts were found: {0}".format(alerts))
                if rest._rebalance_progress_status() == 'running':
                    self.log.warning(
                        "rebalancing is still running, test should be verified"
                    )
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                BucketOperationHelper.delete_all_buckets_or_assert(
                    self.servers, self)
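                # when forceEject is set, hard-eject every node except the first so the next test starts from a clean cluster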
                if self.input.param("forceEject", False):
                    for server in self.servers:
                        if server != self.servers[0]:
                            try:
                                rest = RestConnection(server)
                                rest.force_eject_node()
                            except BaseException as e:
                                self.log.error(e)
                ClusterOperationHelper.cleanup_cluster(self.servers)
                self.sleep(10)
                ClusterOperationHelper.wait_for_ns_servers_or_assert(
                    self.servers, self)
                self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        except BaseException:
            # increase case_number to retry tearDown in setup for the next test
            self.case_number += 1000
        finally:
            # stop all existing task manager threads
            self.cluster.shutdown()
            self._log_finish(self)
Пример #56
0
 def test_merge_backup_from_old_and_new_bucket_bwc(self):
     """
         1. Create a bucket A
         2. Load docs with key 1
         3. Do backup
         4. Delete bucket A
         5. Re-create bucket A
         6. Load docs with key 2
         7. Do backup
         8. Do merge backup.  Verify the backup only contains docs with key 2
     """
     gen = BlobGenerator("ent-backup1_", "ent-backup-", self.value_size, end=self.num_items)
     self._load_all_buckets(self.master, gen, "create", 0)
     self.log.info("Start doing backup")
     self.backup_create()
     self.backup_cluster()
     if self.bucket_delete:
         self.log.info("Start to delete bucket")
         BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
         BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
     elif self.bucket_flush:
         self.log.info("Start to flush bucket")
         self._all_buckets_flush()
     gen = BlobGenerator("ent-backup2_", "ent-backup-", self.value_size, end=self.num_items)
     self.log.info("Start to load bucket again with different key")
     self._load_all_buckets(self.master, gen, "create", 0)
     self.backup_cluster()
     self.backupset.number_of_backups += 1
     status, output, message = self.backup_list()
     if not status:
         self.fail(message)
     self.log.info("Start to merge backup")
     self.backupset.start = randrange(1, self.backupset.number_of_backups)
     self.backupset.end = randrange(self.backupset.start,
                                    self.backupset.number_of_backups + 1)
     self.merged = True
     result, output, _ = self.backup_merge()
     self.backupset.end -= 1
     status, output, message = self.backup_list()
     if not status:
         self.fail(message)
     current_vseqno = self.get_vbucket_seqnos(self.cluster_to_backup, self.buckets,
                                              self.skip_consistency, self.per_node)
     self.log.info("*** Start to validate data in merge backup ")
     self.validate_backup_data(self.backupset.backup_host, [self.master],
                               "ent-backup", False, False, "memory",
                               self.num_items, "ent-backup1")
     self.backup_cluster_validate(skip_backup=True)
Пример #57
0
 def common_tearDown(servers, testcase):
     RemoteUtilHelper.common_basic_setup(servers)
     log = logger.Logger.get_logger()
     log.info("10 seconds delay to wait for couchbase-server to start")
     time.sleep(10)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
     try:
         rest = RestConnection(servers[0])
         buckets = rest.get_buckets()
         for bucket in buckets:
             MemcachedClientHelper.flush_bucket(servers[0], bucket.name)
     except Exception:
         pass
     BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
     ClusterOperationHelper.cleanup_cluster(servers)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
Пример #58
0
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'

        serverInfo = self.master
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        self._bucket_port = info.moxi
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.memoryQuota * 2 / 3

        # Add built-in user
        testuser = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'password': '******'
        }]
        RbacBase().create_user_source(testuser, 'builtin', self.master)
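        # brief pause, presumably to let the new built-in user propagate before the role assignment below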
        time.sleep(10)

        # Assign user to role
        role_list = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'roles': 'admin'
        }]
        RbacBase().add_user_role(role_list, RestConnection(self.master),
                                 'builtin')
        time.sleep(10)

        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=info.memcached)
        msg = 'create_bucket succeeded but bucket "default" does not exist'
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
            self._bucket_name, rest),
                        msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(
            serverInfo, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
Пример #59
0
    def setUp(self):
        super(SGConfigTests, self).setUp()
        for server in self.servers:
            if self.case_number == 1:
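                # fill in this node's IP in the walrus config template and write it out as the active config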
                with open(
                        'pytests/sg/resources/gateway_config_walrus_template.json',
                        'r') as file:
                    filedata = file.read()
                    filedata = filedata.replace('LOCAL_IP', server.ip)
                with open('pytests/sg/resources/gateway_config_walrus.json',
                          'w') as file:
                    file.write(filedata)
                shell = RemoteMachineShellConnection(server)
                shell.execute_command("rm -rf {0}/tmp/*".format(
                    self.folder_prefix))
                shell.copy_files_local_to_remote(
                    'pytests/sg/resources',
                    '{0}/tmp'.format(self.folder_prefix))
                # will install sg only the first time
                self.install(shell)
                pid = self.is_sync_gateway_process_running(shell)
                self.assertNotEqual(pid, 0)
                exist = shell.file_exists(
                    '{0}/tmp/'.format(self.folder_prefix), 'gateway.log')
                self.assertTrue(exist)
                shell.disconnect()
        if self.case_number == 1:
            shutil.copy2('pytests/sg/resources/gateway_config_backup.json',
                         'pytests/sg/resources/gateway_config.json')
            BucketOperationHelper.delete_all_buckets_or_assert(
                self.servers, self)
            self.cluster = Cluster()
            shared_params = self._create_bucket_params(server=self.master,
                                                       size=150)
            self.cluster.create_default_bucket(shared_params)
            task = self.cluster.async_create_sasl_bucket(
                name='test_%E-.5',
                password='******',
                bucket_params=shared_params)
            task.result()
            task = self.cluster.async_create_standard_bucket(
                name='db', port=11219, bucket_params=shared_params)

            task.result()