Example #1
 def setUp(self):
     super(NodeServiceTests, self).setUp()
     self.helper = ServerHelper(self)
     num_buckets = self.input.param("num_buckets", 1)
     compression = self.input.param("sdk_compression", True)
     for i in range(num_buckets):
         RestConnection(self.servers[0]).create_bucket(
             bucket='bucket%s' % i,
             ramQuotaMB=100,
             proxyPort=STANDARD_BUCKET_PORT + i + 1)
         gen_load = BlobGenerator('ui', 'ui-', 256, start=0, end=10)
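         # Cluster() provides the async task manager used for the doc load; it is shut down in the finally block below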
         cluster = Cluster()
         try:
             gen = copy.deepcopy(gen_load)
             task = cluster.async_load_gen_docs(self.servers[0],
                                                'bucket%s' % i,
                                                gen,
                                                Bucket().kvs[1],
                                                'create',
                                                0,
                                                0,
                                                True,
                                                1,
                                                1,
                                                30,
                                                compression=compression)
             task.result()
         finally:
             cluster.shutdown()
     BaseHelper(self).login()
Example #2
 def setUp(self):
     super(compression, self).setUp()
     self.src_cluster = self.get_cb_cluster_by_name('C1')
     self.src_master = self.src_cluster.get_master_node()
     self.dest_cluster = self.get_cb_cluster_by_name('C2')
     self.dest_master = self.dest_cluster.get_master_node()
     self.cluster = Cluster()
Example #3
    def setUp(self):
        self.helper = SpatialHelper(self, "default")
        super(SpatialQueryTests, self).setUp()
        self.log = logger.Logger.get_logger()

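        # Bring up the cluster through the spatial helper and keep a Cluster() handle for async tasks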
        self.helper.setup_cluster()
        self.cluster = Cluster()
        self.servers = self.helper.servers
Example #4
 def setUp(self):
     super(BaseSecondaryIndexingTests, self).setUp()
     self.index_lost_during_move_out = []
     self.verify_using_index_status = self.input.param(
         "verify_using_index_status", False)
     self.use_replica_when_active_down = self.input.param(
         "use_replica_when_active_down", True)
     self.use_where_clause_in_index = self.input.param(
         "use_where_clause_in_index", False)
     self.scan_consistency = self.input.param("scan_consistency",
                                              "request_plus")
     self.scan_vector_per_values = self.input.param(
         "scan_vector_per_values", None)
     self.timeout_for_index_online = self.input.param(
         "timeout_for_index_online", 600)
     self.verify_query_result = self.input.param("verify_query_result",
                                                 True)
     self.verify_explain_result = self.input.param("verify_explain_result",
                                                   True)
     self.defer_build = self.input.param("defer_build", True)
     self.run_query_with_explain = self.input.param(
         "run_query_with_explain", True)
     self.run_query = self.input.param("run_query", True)
     self.graceful = self.input.param("graceful", False)
     self.groups = self.input.param("groups", "all").split(":")
     self.use_rest = self.input.param("use_rest", False)
     if not self.use_rest:
         query_definition_generator = SQLDefinitionGenerator()
         if self.dataset == "default" or self.dataset == "employee":
             self.query_definitions = query_definition_generator.generate_employee_data_query_definitions(
             )
         if self.dataset == "simple":
             self.query_definitions = query_definition_generator.generate_simple_data_query_definitions(
             )
         if self.dataset == "sabre":
             self.query_definitions = query_definition_generator.generate_sabre_data_query_definitions(
             )
         if self.dataset == "bigdata":
             self.query_definitions = query_definition_generator.generate_big_data_query_definitions(
             )
         if self.dataset == "array":
             self.query_definitions = query_definition_generator.generate_airlines_data_query_definitions(
             )
         self.query_definitions = query_definition_generator.filter_by_group(
             self.groups, self.query_definitions)
     self.ops_map = self._create_operation_map()
     self.find_nodes_in_list()
     self.generate_map_nodes_out_dist()
     self.memory_create_list = []
     self.memory_drop_list = []
     self.skip_cleanup = self.input.param("skip_cleanup", False)
     self.index_loglevel = self.input.param("index_loglevel", None)
     if self.index_loglevel:
         self.set_indexer_logLevel(self.index_loglevel)
     if self.dgm_run:
         self._load_doc_data_all_buckets(gen_load=self.gens_load)
     self.gsi_thread = Cluster()
     self.defer_build = self.defer_build and self.use_gsi_for_secondary
Example #5
 def replace_services(self, servers, server, services):
     """ Changes the services of a server
     """
     # Remove server and rebalance
     Cluster().rebalance(servers, [], [server])
     # Add back with new services and rebalance
     Cluster().rebalance(servers, [server], [], services=services)
     # A little sleep for services to warmup
     self.sleep(15)
Example #6
 def setUp(self):
     super(nwusage, self).setUp()
     self.src_cluster = self.get_cb_cluster_by_name('C1')
     self.src_master = self.src_cluster.get_master_node()
     self.dest_cluster = self.get_cb_cluster_by_name('C2')
     self.dest_master = self.dest_cluster.get_master_node()
     self.cluster = Cluster()
     self.num_src_nodes = len(self.src_cluster.get_nodes())
     self.num_dest_nodes = len(self.dest_cluster.get_nodes())
Example #7
class SwapRebalanceBase(unittest.TestCase):

    @staticmethod
    def common_setup(self):
        self.log = logger.Logger.get_logger()
        self.cluster_run = False
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
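        # A single IP shared by every server entry means this is a local cluster_run deployment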
        if len(set([server.ip for server in self.servers])) == 1:
            ip = rest.get_nodes_self().ip
            for server in self.servers:
                server.ip = ip
            self.cluster_run = True
        self.case_number = self.input.param("case_number", 0)
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 1000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
        self.do_access = self.input.param("do-access", True)
        self.load_started = False
        self.loaders = []
        try:
            # Clear the state from Previous invalid run
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, previous test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            self.log.info("==============  SwapRebalanceBase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
            SwapRebalanceBase.reset(self)
            self.cluster_helper = Cluster()

            # Make sure the test is setup correctly
            min_servers = int(self.num_initial_servers) + int(self.num_swap)
            msg = "minimum {0} nodes required for running swap rebalance"
            self.assertTrue(len(self.servers) >= min_servers, msg=msg.format(min_servers))

            self.log.info('picking server : {0} as the master'.format(serverInfo))
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
            info = rest.get_nodes_self()
            rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
            if self.num_buckets > 10:
                BaseTestCase.change_max_buckets(self, self.num_buckets)
            self.log.info("==============  SwapRebalanceBase setup was finished for test #{0} {1} =============="
                      .format(self.case_number, self._testMethodName))
            SwapRebalanceBase._log_start(self)
        except Exception as e:
            self.cluster_helper.shutdown()
            self.fail(e)
Example #8
 def setUp(self):
     super(compression, self).setUp()
     self.src_cluster = self.get_cb_cluster_by_name('C1')
     self.src_master = self.src_cluster.get_master_node()
     self.dest_cluster = self.get_cb_cluster_by_name('C2')
     self.dest_master = self.dest_cluster.get_master_node()
     self.chain_length = self._input.param("chain_length", 2)
     self.topology = self._input.param("ctopology", "chain")
     if self.chain_length > 2:
         self.c3_cluster = self.get_cb_cluster_by_name('C3')
         self.c3_master = self.c3_cluster.get_master_node()
     self.cluster = Cluster()
Example #9
def main():
    try:
        (opts, args) = getopt.getopt(sys.argv[1:], 'hi:p', [])
        for o, a in opts:
            if o == "-h":
                usage()

        input = TestInput.TestInputParser.get_test_input(sys.argv)
        if not input.servers:
            usage("ERROR: no servers specified. Please use the -i parameter.")
    except IndexError:
        usage()
    except getopt.GetoptError as error:
        usage("ERROR: " + str(error))

    docs_per_day = input.param("doc_per_day", 49)
    years = input.param("years", 2)
    bucket_name = input.param("bucket_name", "default")
    bucket_port = input.param("bucket_port", None)
    bucket_sasl_pass = input.param("bucket_sasl_pass", None)
    flag = input.param("flags", 0)
    to_directory = input.param("to_dir", '')
    loader_type = input.param("loader_type", 'default')

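    # Either dump the generated docs to a local directory or load them into the cluster with the selected loader type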
    if to_directory:
        loader = DocLoaderDirectory(input.servers[0], to_directory,
                                    bucket_name)
        generators_load = loader.generate_docs(docs_per_day, years)
        loader.load(generators_load)
    else:
        cluster = Cluster()
        try:
            bucket = initialize_bucket(bucket_name, bucket_port,
                                       bucket_sasl_pass)
            if loader_type == 'default':
                loader = DocLoaderCouchbase(input.servers, cluster)
            elif loader_type == 'join':
                loader = JoinDocLoader(input.servers, cluster)
            elif loader_type == 'sabre':
                loader = SabreDocLoader(input.servers, cluster)
            elif loader_type == 'base64':
                loader = Base64DocLoader(input.servers, cluster)
            elif loader_type == 'nondoc':
                loader = NonDocLoader(input.servers, cluster)
            generators_load = loader.generate_docs(docs_per_day, years)
            loader.load(generators_load, bucket, flag=flag)
        finally:
            cluster.shutdown()
Example #10
 def setUp(self):
     super(nwusage, self).setUp()
     self.src_cluster = self.get_cb_cluster_by_name('C1')
     self.src_master = self.src_cluster.get_master_node()
     self.dest_cluster = self.get_cb_cluster_by_name('C2')
     self.dest_master = self.dest_cluster.get_master_node()
     self.cluster = Cluster()
Example #11
 def setUp(self):
     super(SGConfigTests, self).setUp()
     for server in self.servers:
         if self.case_number == 1:
             with open('pytests/sg/resources/gateway_config_walrus_template.json', 'r') as file:
                 filedata = file.read()
                 filedata = filedata.replace('LOCAL_IP', server.ip)
             with open('pytests/sg/resources/gateway_config_walrus.json', 'w') as file:
                 file.write(filedata)
             shell = RemoteMachineShellConnection(server)
             shell.execute_command("rm -rf {0}/tmp/*".format(self.folder_prefix))
             shell.copy_files_local_to_remote('pytests/sg/resources', '{0}/tmp'.format(self.folder_prefix))
             # will install sg only the first time
             self.install(shell)
             pid = self.is_sync_gateway_process_running(shell)
             self.assertNotEqual(pid, 0)
             exist = shell.file_exists('{0}/tmp/'.format(self.folder_prefix), 'gateway.log')
             self.assertTrue(exist)
             shell.disconnect()
     if self.case_number == 1:
         shutil.copy2('pytests/sg/resources/gateway_config_backup.json', 'pytests/sg/resources/gateway_config.json')
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
         self.cluster = Cluster()
         self.cluster.create_default_bucket(self.master, 150)
         task = self.cluster.async_create_sasl_bucket(self.master, 'test_%E-.5', 'password', 150, 1)
         task.result()
         task = self.cluster.async_create_standard_bucket(self.master, 'db', 11219, 150, 1)
         task.result()
Example #12
    def test_load_collections_in_bucket(self):
        import time
        start = time.time()
        self.scope_num = self.input.param("num_scopes", 2)
        self.collection_num = self.input.param("num_collections", 2)
        self.bucket_name = self.input.param("bucket", self.default_bucket_name)
        try:
            self.rest.async_create_scope_collection(self.scope_num,
                                                    self.collection_num,
                                                    self.bucket_name)
        except Exception:
            pass
        create = time.time()
        self.log.info(
            "{} scopes with {} collections each created in {} s".format(
                self.scope_num, self.collection_num, round(create - start)))
        time.sleep(5)

        self.enable_bloom_filter = self.input.param("enable_bloom_filter",
                                                    False)
        self.buckets = self.conn.get_buckets()
        self.cluster = Cluster()
        self.active_resident_threshold = 100

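        # Load all buckets through the SDK data loader (80% creates, 20% updates, 20% deletes)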
        self.gen_create = SDKDataLoader(num_ops=self.num_items,
                                        percent_create=80,
                                        percent_update=20,
                                        percent_delete=20)
        self._load_all_buckets(self.master, self.gen_create)
        load = time.time()
        self.log.info("Done loading {} collections in bucket {} in {}s".format(
            self.collection_num, self.bucket_name, round(load - create)))
        for bkt in self.buckets:
            print(self.stat.get_collection_stats(bkt))
Example #13
 def test_multiple_backups_merge_with_tombstoning(self):
     self.log.info("*** start to load items to all buckets")
     self.expected_error = self.input.param("expected_error", None)
     if int(self.active_resident_threshold) > 0:
         self.log.info("Disable compaction to speed up dgm")
         RestConnection(self.master).disable_auto_compaction()
     if self.expires:
         for bucket in self.buckets:
             cb = self._get_python_sdk_client(self.master.ip, bucket)
             for i in range(1, self.num_items + 1):
                 cb.upsert("doc" + str(i), {"key": "value"})
     else:
         self._load_all_buckets(self.master, self.initial_load_gen,
                                "create", self.expires)
     self.log.info("*** done to load items to all buckets")
     self.backup_create_validate()
     self.backup()
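     # Lower the metadata purge interval so tombstones from the deletes below are purged (after the 6-minute wait) before the next backup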
     self.set_meta_purge_interval()
     self._load_all_buckets(self.master, self.delete_gen, "delete",
                            self.expires)
     self.sleep(
         360, "Sleep for 6 minutes for the meta-data purge "
         "interval to be completed")
     self.compact_buckets()
     self.backup()
     self.backupset.start = 1
     self.backupset.end = len(self.backups)
     self.merge()
     start = self.number_of_backups_taken
     end = self.number_of_backups_taken
     if self.reset_restore_cluster:
         self.log.info("*** start to reset cluster")
         self.backup_reset_clusters(self.cluster_to_restore)
         if self.same_cluster:
             self._initialize_nodes(Cluster(),
                                    self.servers[:self.nodes_init])
         else:
             self._initialize_nodes(
                 Cluster(), self.input.clusters[0][:self.nodes_init])
         self.log.info("Done reset cluster")
     self.sleep(10)
     self.backupset.start = start
     self.backupset.end = end
     self.log.info("*** start restore validation")
     self.backup_restore_validate(compare_uuid=False,
                                  seqno_compare_function=">=",
                                  expected_error=self.expected_error)
Example #14
 def setUp(self):
     super(NodeServiceTests, self).setUp()
     self.helper = ServerHelper(self)
     num_buckets = self.input.param("num_buckets", 1)
     for i in range(num_buckets):
         RestConnection(self.servers[0]).create_bucket(bucket='bucket%s' % i, ramQuotaMB=100, proxyPort=STANDARD_BUCKET_PORT + i + 1)
         gen_load = BlobGenerator('ui', 'ui-', 256, start=0, end=10)
         cluster = Cluster()
         try:
             gen = copy.deepcopy(gen_load)
             task = cluster.async_load_gen_docs(self.servers[0], 'bucket%s' % i, gen,
                                                Bucket().kvs[1], 'create',
                                                0, 0, True, 1, 1, 30)
             task.result()
         finally:
             cluster.shutdown()
     BaseHelper(self).login()
Example #15
    def setUp(self):
        self.helper = SpatialHelper(self, "default")
        super(SpatialQueryTests, self).setUp()
        self.log = logger.Logger.get_logger()

        self.helper.setup_cluster()
        self.cluster = Cluster()
        self.servers = self.helper.servers
Example #16
    def test_multiple_backups_merges(self):
        self.log.info("*** start to load items to all buckets")
        self.expected_error = self.input.param("expected_error", None)
        if int(self.active_resident_threshold) > 0:
            self.log.info("Disable compaction to speed up dgm")
            RestConnection(self.master).disable_auto_compaction()
        if self.expires:
            for bucket in self.buckets:
                cb = self._get_python_sdk_client(self.master.ip, bucket,
                                                 self.backupset.cluster_host)
                for i in range(1, self.num_items + 1):
                    cb.upsert("doc" + str(i), {"key": "value"})
        else:
            self._load_all_buckets(self.master, self.initial_load_gen,
                                   "create", self.expires)
        self.log.info("*** done to load items to all buckets")
        self.backup_create_validate()
        for i in range(1, self.number_of_repeats + 1):
            self.do_backup_merge_actions()
        start = self.number_of_backups_taken
        end = self.number_of_backups_taken
        if self.backup_corrupted:
            self.log.info(
                "Stop restore due to backup files corrupted as intended")
            return
        if self.reset_restore_cluster:
            self.log.info("*** start to reset cluster")
            self.backup_reset_clusters(self.cluster_to_restore)
            if self.same_cluster:
                self._initialize_nodes(Cluster(),
                                       self.servers[:self.nodes_init])
            else:
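                # Manually eject and re-initialise the first node of the restore cluster, preserving its memory quota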
                shell = RemoteMachineShellConnection(self.input.clusters[0][0])
                shell.enable_diag_eval_on_non_local_hosts()
                shell.disconnect()
                rest = RestConnection(self.input.clusters[0][0])
                rest.force_eject_node()
                master_services = self.get_services(
                    [self.backupset.cluster_host],
                    self.services_init,
                    start_node=0)
                info = rest.get_nodes_self()
                if info.memoryQuota and int(info.memoryQuota) > 0:
                    self.quota = info.memoryQuota
                rest.init_node()
            self.log.info("Done reset cluster")
        self.sleep(10)
        """ Add built-in user cbadminbucket to second cluster """
        self.add_built_in_server_user(
            node=self.input.clusters[0][:self.nodes_init][0])

        self.backupset.start = start
        self.backupset.end = end
        self.log.info("*** start restore validation")
        self.backup_restore_validate(compare_uuid=False,
                                     seqno_compare_function=">=",
                                     expected_error=self.expected_error)
Example #17
 def setUp(self):
     super(QueryMagmaTests, self).setUp()
     self.log.info(
         "==============  QueryMagmaTests setup has started ==============")
     self.bucket_name = self.input.param("bucket", self.default_bucket_name)
     self.active_resident_threshold = self.input.param(
         "resident_ratio", 100)
     self.num_items = self.input.param("num_items", 10000)
     self.expiry = self.input.param("expiry", 0)
     self.rollback = self.input.param("rollback", False)
     self.conn = RestConnection(self.master)
     self.stat = CollectionsStats(self.master)
     self.cbqpath = '{0}cbq -quiet -u {1} -p {2} -e=localhost:8093 '.format(
         self.path, self.username, self.password)
     self.cluster = Cluster()
     self.log.info(
         "==============  QueryMagmaTests setup has completed =============="
     )
Example #18
 def setUp(self):
     super(Capi, self).setUp()
     self.cluster = Cluster()
     self.src_cluster = self.get_cb_cluster_by_name('C1')
     self.src_master = self.src_cluster.get_master_node()
     self.dest_cluster = self.get_cb_cluster_by_name('C2')
     self.dest_master = self.dest_cluster.get_master_node()
     self.use_hostnames = self._input.param("use_hostnames", False)
     self.src_init = self._input.param('src_init', 2)
     self.dest_init = self._input.param('dest_init', 1)
     self.product = self._input.param('product', 'couchbase-server')
     self.initial_version = self._input.param('initial_version', '2.5.1-1083')
     self.initial_vbuckets = self._input.param('initial_vbuckets', 1024)
     self.init_nodes = self._input.param('init_nodes', True)
     self.initial_build_type = self._input.param('initial_build_type', None)
     self.upgrade_build_type = self._input.param('upgrade_build_type', self.initial_build_type)
     self.master = self.src_master
     self.rest = RestConnection(self.src_master)
Example #19
    def setUp(self):
        super(SGConfigTests, self).setUp()
        for server in self.servers:
            if self.case_number == 1:
                with open(
                        'pytests/sg/resources/gateway_config_walrus_template.json',
                        'r') as file:
                    filedata = file.read()
                    filedata = filedata.replace('LOCAL_IP', server.ip)
                with open('pytests/sg/resources/gateway_config_walrus.json',
                          'w') as file:
                    file.write(filedata)
                shell = RemoteMachineShellConnection(server)
                shell.execute_command("rm -rf {0}/tmp/*".format(
                    self.folder_prefix))
                shell.copy_files_local_to_remote(
                    'pytests/sg/resources',
                    '{0}/tmp'.format(self.folder_prefix))
                # will install sg only the first time
                self.install(shell)
                pid = self.is_sync_gateway_process_running(shell)
                self.assertNotEqual(pid, 0)
                exist = shell.file_exists(
                    '{0}/tmp/'.format(self.folder_prefix), 'gateway.log')
                self.assertTrue(exist)
                shell.disconnect()
        if self.case_number == 1:
            shutil.copy2('pytests/sg/resources/gateway_config_backup.json',
                         'pytests/sg/resources/gateway_config.json')
            BucketOperationHelper.delete_all_buckets_or_assert(
                self.servers, self)
            self.cluster = Cluster()
            shared_params = self._create_bucket_params(server=self.master,
                                                       size=150)
            self.cluster.create_default_bucket(shared_params)
            task = self.cluster.async_create_sasl_bucket(
                name='test_%E-.5',
                password='******',
                bucket_params=shared_params)
            task.result()
            task = self.cluster.async_create_standard_bucket(
                name='db', port=11219, bucket_params=shared_params)

            task.result()
Example #20
    def test_multiple_backups_merges(self):
        if self.data_type == "binary":
            gen = BlobGenerator("ent-backup",
                                "ent-backup-",
                                self.value_size,
                                end=self.num_items)
        elif self.data_type == "json":
            gen = DocumentGenerator("ent-backup",
                                    '{{"key":"value"}}',
                                    range(100),
                                    start=0,
                                    end=self.num_items)
        self.log.info("*** start to load items to all buckets")
        self.expected_error = self.input.param("expected_error", None)
        self._load_all_buckets(self.master, gen, "create", self.expires)
        self.log.info("*** done to load items to all buckets")
        self.backup_create_validate()
        for i in range(1, self.number_of_repeats + 1):
            self.do_backup_merge_actions()
        start = self.number_of_backups_taken
        end = self.number_of_backups_taken
        if self.reset_restore_cluster:
            self.log.info("*** start to reset cluster")
            self.backup_reset_clusters(self.cluster_to_restore)
            if self.same_cluster:
                self._initialize_nodes(Cluster(),
                                       self.servers[:self.nodes_init])
            else:
                self._initialize_nodes(
                    Cluster(), self.input.clusters[0][:self.nodes_init])
            self.log.info("Done reset cluster")
        self.sleep(10)
        """ Add built-in user cbadminbucket to second cluster """
        self.add_built_in_server_user(
            node=self.input.clusters[0][:self.nodes_init][0])

        self.backupset.start = start
        self.backupset.end = end
        self.log.info("*** start restore validation")
        self.backup_restore_validate(compare_uuid=False,
                                     seqno_compare_function=">=",
                                     expected_error=self.expected_error)
Example #21
 def setUp(self):
     super(BaseSecondaryIndexingTests, self).setUp()
     self.initial_stats = None
     self.final_stats = None
     self.index_lost_during_move_out = []
     self.verify_using_index_status = self.input.param("verify_using_index_status", False)
     self.use_replica_when_active_down = self.input.param("use_replica_when_active_down", True)
     self.use_where_clause_in_index = self.input.param("use_where_clause_in_index", False)
     self.check_stats = self.input.param("check_stats", True)
     self.create_index_usage = self.input.param("create_index_usage", "no_usage")
     self.scan_consistency = self.input.param("scan_consistency", "request_plus")
     self.scan_vector_per_values = self.input.param("scan_vector_per_values", None)
     self.timeout_for_index_online = self.input.param("timeout_for_index_online", 600)
     self.max_attempts_check_index = self.input.param("max_attempts_check_index", 10)
     self.max_attempts_query_and_validate = self.input.param("max_attempts_query_and_validate", 10)
     self.index_present = self.input.param("index_present", True)
     self.run_create_index = self.input.param("run_create_index", True)
     self.verify_query_result = self.input.param("verify_query_result", True)
     self.verify_explain_result = self.input.param("verify_explain_result", True)
     self.defer_build = self.input.param("defer_build", True)
     self.deploy_on_particular_node = self.input.param("deploy_on_particular_node", None)
     self.run_drop_index = self.input.param("run_drop_index", True)
     self.run_query_with_explain = self.input.param("run_query_with_explain", True)
     self.run_query = self.input.param("run_query", True)
     self.graceful = self.input.param("graceful", False)
     self.groups = self.input.param("groups", "all").split(":")
     self.use_rest = self.input.param("use_rest", False)
     if not self.use_rest:
         query_definition_generator = SQLDefinitionGenerator()
         if self.dataset == "default" or self.dataset == "employee":
             self.query_definitions = query_definition_generator.generate_employee_data_query_definitions()
         if self.dataset == "simple":
             self.query_definitions = query_definition_generator.generate_simple_data_query_definitions()
         if self.dataset == "sabre":
             self.query_definitions = query_definition_generator.generate_sabre_data_query_definitions()
         if self.dataset == "bigdata":
             self.query_definitions = query_definition_generator.generate_big_data_query_definitions()
         self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
     self.ops_map = self._create_operation_map()
     #self.log.info(self.ops_map)
     self.find_nodes_in_list()
     self.generate_map_nodes_out_dist()
     self.memory_create_list = []
     self.memory_drop_list = []
     self.n1ql_node = self.get_nodes_from_services_map(service_type = "n1ql")
     self.skip_cleanup = self.input.param("skip_cleanup", False)
     self.index_loglevel = self.input.param("index_loglevel", None)
     if self.index_loglevel:
         self.set_indexer_logLevel(self.index_loglevel)
     if self.dgm_run:
         self._load_doc_data_all_buckets(gen_load=self.gens_load)
     self.gsi_thread = Cluster()
     self.index_op = self.input.param("index_op", None)
     self.defer_build = self.defer_build and self.use_gsi_for_secondary
Example #22
 def setUp(self):
     super(compression, self).setUp()
     self.src_cluster = self.get_cb_cluster_by_name('C1')
     self.src_master = self.src_cluster.get_master_node()
     self.dest_cluster = self.get_cb_cluster_by_name('C2')
     self.dest_master = self.dest_cluster.get_master_node()
     self.chain_length = self._input.param("chain_length", 2)
     self.topology = self._input.param("ctopology", "chain")
     if self.chain_length > 2:
         self.c3_cluster = self.get_cb_cluster_by_name('C3')
         self.c3_master = self.c3_cluster.get_master_node()
     self.cluster = Cluster()
Example #23
 def wait_for_replication(servers, cluster_helper=None, timeout=600):
     if cluster_helper is None:
         cluster = Cluster()
     else:
         cluster = cluster_helper
     tasks = []
     rest = RestConnection(servers[0])
     buckets = rest.get_buckets()
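     # For every bucket, wait until each node reports its TAP replication queue to every peer as idle and backfill-complete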
     for server in servers:
         for bucket in buckets:
             for server_repl in list(set(servers) - {server}):
                 tasks.append(
                     cluster.async_wait_for_stats(
                         [server], bucket, 'tap',
                         'eq_tapq:replication_ns_1@' + server_repl.ip +
                         ':idle', '==', 'true'))
                 tasks.append(
                     cluster.async_wait_for_stats(
                         [server], bucket, 'tap',
                         'eq_tapq:replication_ns_1@' + server_repl.ip +
                         ':backfill_completed', '==', 'true'))
     try:
         for task in tasks:
             task.result(timeout)
     finally:
         if cluster_helper is None:
             # stop all newly created task manager threads
             cluster.shutdown()
     # return outside the finally block so a timeout from task.result() still propagates
     return True
Example #24
    def test_multiple_backups_merges(self):
        self.log.info("*** start to load items to all buckets")
        self.expected_error = self.input.param("expected_error", None)
        if self.expires:
            for bucket in self.buckets:
                cb = self._get_python_sdk_client(self.master.ip, bucket,
                                                 self.backupset.cluster_host)
                for i in range(1, self.num_items + 1):
                    cb.upsert("doc" + str(i), {"key": "value"})
        else:
            self._load_all_buckets(self.master, self.initial_load_gen,
                                   "create", self.expires)
        self.log.info("*** done to load items to all buckets")
        self.backup_create_validate()
        for i in range(1, self.number_of_repeats + 1):
            self.do_backup_merge_actions()
        start = self.number_of_backups_taken
        end = self.number_of_backups_taken
        if self.reset_restore_cluster:
            self.log.info("*** start to reset cluster")
            self.backup_reset_clusters(self.cluster_to_restore)
            if self.same_cluster:
                self._initialize_nodes(Cluster(),
                                       self.servers[:self.nodes_init])
            else:
                self._initialize_nodes(
                    Cluster(), self.input.clusters[0][:self.nodes_init])
            self.log.info("Done reset cluster")
        self.sleep(10)
        """ Add built-in user cbadminbucket to second cluster """
        self.add_built_in_server_user(
            node=self.input.clusters[0][:self.nodes_init][0])

        self.backupset.start = start
        self.backupset.end = end
        self.log.info("*** start restore validation")
        self.backup_restore_validate(compare_uuid=False,
                                     seqno_compare_function=">=",
                                     expected_error=self.expected_error)
Example #25
 def wait_for_replication(servers, cluster_helper=None, timeout=600):
     if cluster_helper is None:
         cluster = Cluster()
     else:
         cluster = cluster_helper
     tasks = []
     rest = RestConnection(servers[0])
     buckets = rest.get_buckets()
     for server in servers:
         for bucket in buckets:
             for server_repl in list(set(servers) - set([server])):
                 tasks.append(cluster.async_wait_for_stats([server], bucket, 'tap',
                                'eq_tapq:replication_ns_1@' + server_repl.ip + ':idle', '==', 'true'))
                 tasks.append(cluster.async_wait_for_stats([server], bucket, 'tap',
                                'eq_tapq:replication_ns_1@' + server_repl.ip + ':backfill_completed', '==', 'true'))
     try:
         for task in tasks:
             task.result(timeout)
     finally:
         if cluster_helper is None:
             # stop all newly created task manager threads
             cluster.shutdown()
     # return outside the finally block so a timeout from task.result() still propagates
     return True
Example #26
    def setUp(self):
        super(BucketConfig, self).setUp()
        self.testcase = '2'
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        #self.time_synchronization = self.input.param("time_sync", "enabledWithoutDrift")
        self.lww = self.input.param("lww", True)
        self.drift = self.input.param("drift", False)
        self.bucket='bucket-1'
        self.master = self.servers[0]
        self.rest = RestConnection(self.master)
        self.cluster = Cluster()
        self.skip_rebalance = self.input.param("skip_rebalance", False)

        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        mem_quota = int(self.rest.get_nodes_self().mcdMemoryReserved *
                        node_ram_ratio)

        if not self.skip_rebalance:
            self.rest.init_cluster(self.master.rest_username,
                self.master.rest_password)
            self.rest.init_cluster_memoryQuota(self.master.rest_username,
                self.master.rest_password,
                memoryQuota=mem_quota)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
                ClusterOperationHelper.wait_for_ns_servers_or_assert(
                    [self.master], self.testcase)
            try:
                rebalanced = ClusterOperationHelper.add_and_rebalance(
                    self.servers)

            except Exception as e:
                self.fail('cluster is not rebalanced: {0}'.format(e))

        self._create_bucket(self.lww, self.drift)
Example #27
    def setUp(self):
        super(ObserveXdcrTest, self).setUp()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()

        # Variables defined for _run_observe() in observetest.
        self.observe_with = self._input.param("observe_with", "")
        self.default_map_func = 'function (doc) { emit(doc.age, doc.first_name);}'
        self.default_design_doc = "Doc1"
        map_func = 'function (doc) { emit(null, doc);}'
        self.default_view = View("default_view", map_func, None)
        self.mutate_by = self._input.param("mutate_by", "set")
        self.cluster = Cluster()
        self.wait_timeout = self._wait_timeout
        self.num_items = self._num_items
Example #28
 def setUp(self):
     try:
         self.log = logger.Logger.get_logger()
         self.input = TestInputSingleton.input
         self.servers = self.input.servers
         self.browser = self.input.ui_conf['browser']
         self.replica  = self.input.param("replica", 1)
         self.case_number = self.input.param("case_number", 0)
         self.cluster = Cluster()
         self.machine = self.input.ui_conf['server']
         self.driver = None
         self.shell = RemoteMachineShellConnection(self.machine)
         #avoid clean up if the previous test has been tear down
         if not self.input.param("skip_cleanup", True) \
                                         or self.case_number == 1:
             self.tearDown()
         self._log_start(self)
         self._kill_old_drivers()
         #thread for selenium server
         if not self._is_selenium_running():
             self.log.info('start selenium')
             self._start_selenium_thread()
             self._wait_for_selenium_is_started()
         self.log.info('start selenium session')
         if self.browser == 'ff':
             self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                            .format(self.machine.ip,
                                                    self.machine.port),
                                            desired_capabilities=DesiredCapabilities.FIREFOX)
         elif self.browser == 'chrome':
             self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                            .format(self.machine.ip,
                                                    self.machine.port),
                                            desired_capabilities=DesiredCapabilities.CHROME)
         self.log.info('start selenium started')
         self.driver.get("http://{0}:{1}".format(self.servers[0].ip,
                                                 self.servers[0].port))
         self.driver.maximize_window()
     except Exception as ex:
         self.input.test_params["stop-on-failure"] = True
         self.log.error("SETUP WAS FAILED. ALL TESTS WILL BE SKIPPED")
         self.fail(ex)
Example #29
 def setUp(self):
     self.input = TestInputSingleton.input
     self.servers = self.input.servers
     self.master = self.servers[0]
     self.log = logger.Logger.get_logger()
     self.helper = SpatialHelper(self, "default")
     self.helper.setup_cluster()
     self.cluster = Cluster()
     self.default_bucket = self.input.param("default_bucket", True)
     self.sasl_buckets = self.input.param("sasl_buckets", 0)
     self.standard_buckets = self.input.param("standard_buckets", 0)
     self.memcached_buckets = self.input.param("memcached_buckets", 0)
     self.servers = self.helper.servers
     self.shell = RemoteMachineShellConnection(self.master)
     info = self.shell.extract_remote_info()
     self.os = info.type.lower()
     self.couchbase_login_info = "%s:%s" % (
         self.input.membase_settings.rest_username,
         self.input.membase_settings.rest_password)
     self.backup_location = self.input.param("backup_location",
                                             "/tmp/backup")
     self.command_options = self.input.param("command_options", '')
Example #30
 def setUp(self):
     for server in TestInputSingleton.input.servers:
         remote = RemoteMachineShellConnection(server)
         remote.enable_diag_eval_on_non_local_hosts()
         remote.disconnect()
     super(AlternateAddressTests, self).setUp()
     self.remove_all_alternate_address_settings()
     self.cluster_helper = Cluster()
     self.ex_path = self.tmp_path + "export{0}/".format(self.master.ip)
     self.num_items = self.input.param("items", 1000)
     self.client_os = self.input.param("client_os", "linux")
     self.localhost = self.input.param("localhost", False)
     self.json_create_gen = JsonDocGenerator("altaddr",
                                             op_type="create",
                                             encoding="utf-8",
                                             start=0,
                                             end=self.num_items)
     self.json_delete_gen = JsonDocGenerator("imex",
                                             op_type="delete",
                                             encoding="utf-8",
                                             start=0,
                                             end=self.num_items)
Example #31
class nwusage(XDCRNewBaseTest):
    def setUp(self):
        super(nwusage, self).setUp()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()
        self.cluster = Cluster()

    def tearDown(self):
        super(nwusage, self).tearDown()

    def _set_nwusage_limit(self, cluster, nw_limit=0):
        repl_id = cluster.get_remote_clusters()[0].get_replications()[0].get_repl_id()
        shell = RemoteMachineShellConnection(cluster.get_master_node())
        repl_id = str(repl_id).replace('/','%2F')
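        # Apply networkUsageLimit to this replication through the XDCR settings REST endpoint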
        base_url = "http://" + cluster.get_master_node().ip + ":8091/settings/replications/" + repl_id
        command = "curl -X POST -u Administrator:password " + base_url + " -d networkUsageLimit=" + str(nw_limit)
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)

    def _verify_bandwidth_usage(self, node, nw_limit=1, no_of_nodes=2, event_time=None,
                                nw_usage="[1-9][0-9]*", end_time=None):
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(node) + '/goxdcr.log'
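        # Convert the MB limit to bytes and split it across the nodes to get the per-node cap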
        nw_max = (nw_limit * 1024 * 1024) // no_of_nodes

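        # Start of the verification window: either the supplied event time or the moment the replication spec was added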
        if event_time:
            time_to_compare = time.strptime(event_time, '%Y-%m-%dT%H:%M:%S')
        else:
            matches, _ = NodeHelper.check_goxdcr_log(node, "Success adding replication specification",
                                                 goxdcr_log, print_matches=True)
            time_to_compare_str = matches[-1].split(' ')[0].split('.')[0]
            time_to_compare = time.strptime(time_to_compare_str, '%Y-%m-%dT%H:%M:%S')

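        # Scan goxdcr.log for bandwidth_limit/bandwidth_usage entries and validate every one that falls inside the window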
        matches, count = NodeHelper.check_goxdcr_log(node, "bandwidth_limit=" + str(nw_max) +
                                            ", bandwidth_usage=" + nw_usage, goxdcr_log, print_matches=True)
        match_count = 0
        skip_count = 0
        for item in matches:
            items = item.split(' ')
            item_time = items[0].split('.')[0]
            item_datetime = time.strptime(item_time, '%Y-%m-%dT%H:%M:%S')
            if item_datetime < time_to_compare:
                skip_count += 1
                continue
            if end_time:
                end_datetime = time.strptime(end_time, '%Y-%m-%dT%H:%M:%S')
                if item_datetime > end_datetime:
                    skip_count += 1
                    continue
            bandwidth_usage = items[-1].split('=')[-1]
            if int(bandwidth_usage) <= nw_max:
                match_count += 1
                continue
            else:
                self.fail("Bandwidth usage higher than Bandwidth limit in {0}".format(item))

        if match_count + skip_count == count:
            self.log.info("{0} stale entries skipped".format(skip_count))
            if match_count > 0:
                self.log.info("{0} entries checked - Bandwidth usage always lower than Bandwidth limit as expected".
                          format(match_count))
            else:
                if self._input.param("replication_type") == "capi":
                    self.log.info("Bandwidth Throttler not enabled on replication as expected")
                else:
                    self.fail("Bandwidth Throttler not enabled on replication")

    def _get_current_time(self, server):
        shell = RemoteMachineShellConnection(server)
        command = "date +'%Y-%m-%dT%H:%M:%S'"
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)
        curr_time = output[0].strip()
        return curr_time

    def test_nwusage_with_unidirection(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_bidirection(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        self._set_nwusage_limit(self.dest_cluster, nw_limit)

        gen_create1 = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create1)
        gen_create2 = BlobGenerator('nwTwo', 'nwTwo', self._value_size, end=self._num_items)
        self.dest_cluster.load_all_buckets_from_generator(kv_gen=gen_create2)

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_unidirection_pause_resume(self):
        self.setup_xdcr()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.pause_all_replications()

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        self.src_cluster.resume_all_replications()

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_bidirection_pause_resume(self):
        self.setup_xdcr()

        gen_create1 = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create1)
        gen_create2 = BlobGenerator('nwTwo', 'nwTwo', self._value_size, end=self._num_items)
        tasks.extend(self.dest_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create2))

        self.src_cluster.pause_all_replications()
        self.dest_cluster.pause_all_replications()

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        self._set_nwusage_limit(self.dest_cluster, nw_limit)

        self.src_cluster.resume_all_replications()
        self.dest_cluster.resume_all_replications()

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_unidirection_in_parallel(self):
        self.setup_xdcr()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create)

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_bidirection_in_parallel(self):
        self.setup_xdcr()

        gen_create1 = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create1)
        gen_create2 = BlobGenerator('nwTwo', 'nwTwo', self._value_size, end=self._num_items)
        tasks.extend(self.dest_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create2))

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        self._set_nwusage_limit(self.dest_cluster, nw_limit)

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_rebalance_in(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.rebalance_in()

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit, no_of_nodes=3)

    def test_nwusage_with_rebalance_out(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.rebalance_out()

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit, no_of_nodes=1)

    def test_nwusage_reset_to_zero(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create)

        self.sleep(30)
        self._set_nwusage_limit(self.src_cluster, 0)
        event_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Network limit reset to 0 at {0}".format(event_time))

        for task in tasks:
            task.result()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit, end_time=event_time)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=0, no_of_nodes=2, event_time=event_time, nw_usage="0")

    def test_nwusage_with_hard_failover_and_bwthrottle_enabled(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        self.src_cluster.failover_and_rebalance_nodes()
        failover_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node failed over at {0}".format(failover_time))

        self.sleep(15)

        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), end_time=failover_time)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=failover_time, end_time=node_back_time, no_of_nodes=1)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=node_back_time)

    def test_nwusage_with_hard_failover_and_bwthrottle_enabled_later(self):
        self.setup_xdcr()

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        self.src_cluster.failover_and_rebalance_nodes()

        self.sleep(15)

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        bw_enable_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Bandwidth throttler enabled at {0}".format(bw_enable_time))

        self.sleep(60)

        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=bw_enable_time, end_time=node_back_time, no_of_nodes=1)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=node_back_time)

    def test_nwusage_with_auto_failover_and_bwthrottle_enabled(self):
        self.setup_xdcr()

        self.src_cluster.rebalance_in()

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        src_conn = RestConnection(self.src_cluster.get_master_node())
        src_conn.update_autofailover_settings(enabled=True, timeout=30)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

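        # Stop Couchbase on the second node so auto-failover (30s timeout) kicks in, then rebalance the failed node out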
        shell = RemoteMachineShellConnection(self._input.servers[1])
        shell.stop_couchbase()
        self.sleep(30)
        task = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [], [])
        task.result()
        failover_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node auto failed over at {0}".format(failover_time))
        FloatingServers._serverlist.append(self._input.servers[1])

        self.sleep(15)

        shell.start_couchbase()
        shell.disable_firewall()
        self.sleep(45)
        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
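        # Explanatory note (not part of the original test): bandwidth usage is
        # verified in three windows - before the auto-failover (no_of_nodes=3),
        # between the failover and the node being added back (no_of_nodes=2),
        # and after the node is added back (no_of_nodes=3).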
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), end_time=failover_time, no_of_nodes=3)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=failover_time, end_time=node_back_time, no_of_nodes=2)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=node_back_time, no_of_nodes=3)

    def test_nwusage_with_auto_failover_and_bwthrottle_enabled_later(self):
        self.setup_xdcr()

        self.src_cluster.rebalance_in()

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        shell = RemoteMachineShellConnection(self._input.servers[1])
        shell.stop_couchbase()
        self.sleep(45)
        task = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [], [])
        task.result()
        FloatingServers._serverlist.append(self._input.servers[1])

        self.sleep(15)

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        bw_enable_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Bandwidth throttler enabled at {0}".format(bw_enable_time))

        self.sleep(60)

        shell.start_couchbase()
        shell.disable_firewall()
        self.sleep(30)
        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=bw_enable_time, end_time=node_back_time, no_of_nodes=2)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=node_back_time, no_of_nodes=3)
Exemple #32
0
    def test_backup_restore_collection_sanity(self):
        """
        1. Create default bucket on the cluster and loads it with given number of items
        2. Perform updates and create backups for specified number of times (test param number_of_backups)
        3. Perform restores for the same number of times with random start and end values
        """
        self.log.info("*** create collection in all buckets")
        self.log.info("*** start to load items to all buckets")
        self.active_resident_threshold = 100
        self.load_all_buckets(self.backupset.cluster_host)
        self.log.info("*** done to load items to all buckets")
        self.ops_type = self.input.param("ops-type", "update")
        self.expected_error = self.input.param("expected_error", None)
        self.create_scope_cluster_host()
        self.create_collection_cluster_host(self.backupset.col_per_scope)
        backup_scopes = self.get_bucket_scope_cluster_host()
        backup_collections = self.get_bucket_collection_cluster_host()
        col_stats = self.get_collection_stats_cluster_host()
        for backup_scope in backup_scopes:
            bk_scope_id = self.get_scopes_id_cluster_host(backup_scope)
        if self.auto_failover:
            self.log.info("Enabling auto failover on " +
                          str(self.backupset.cluster_host))
            rest_conn = RestConnection(self.backupset.cluster_host)
            rest_conn.update_autofailover_settings(self.auto_failover,
                                                   self.auto_failover_timeout)
        self.backup_create_validate()
        for i in range(1, self.backupset.number_of_backups + 1):
            if self.ops_type == "update":
                self.log.info("*** start to update items in all buckets")
                self.load_all_buckets(self.backupset.cluster_host, ratio=0.1)
                self.log.info("*** done update items in all buckets")
            self.sleep(10)
            self.log.info("*** start to validate backup cluster")
            self.backup_cluster_validate()
        self.targetMaster = True
        start = randrange(1, self.backupset.number_of_backups + 1)
        if start == self.backupset.number_of_backups:
            end = start
        else:
            end = randrange(start, self.backupset.number_of_backups + 1)
        self.log.info("*** start to restore cluster")
        restored = {"{0}/{1}".format(start, end): ""}
        for i in range(1, self.backupset.number_of_backups + 1):
            if self.reset_restore_cluster:
                self.log.info("*** start to reset cluster")
                self.backup_reset_clusters(self.cluster_to_restore)
                if self.same_cluster:
                    self._initialize_nodes(Cluster(),
                                           self.servers[:self.nodes_init])
                else:
                    shell = RemoteMachineShellConnection(
                        self.backupset.restore_cluster_host)
                    shell.enable_diag_eval_on_non_local_hosts()
                    shell.disconnect()
                    rest = RestConnection(self.backupset.restore_cluster_host)
                    rest.force_eject_node()
                    rest.init_node()
                self.log.info("Done reset cluster")
            self.sleep(10)
            """ Add built-in user cbadminbucket to second cluster """
            self.add_built_in_server_user(
                node=self.input.clusters[0][:self.nodes_init][0])

            self.backupset.start = start
            self.backupset.end = end
            self.log.info("*** start restore validation")
            data_map_collection = []
            for scope in backup_scopes:
                if "default" in scope:
                    continue
                data_map_collection.append(self.buckets[0].name + "." + scope + "=" + \
                                           self.buckets[0].name + "." + scope)
            self.bucket_map_collection = ",".join(data_map_collection)
            self.backup_restore_validate(compare_uuid=False,
                                         seqno_compare_function=">=",
                                         expected_error=self.expected_error)
            if self.backupset.number_of_backups == 1:
                continue
            while "{0}/{1}".format(start, end) in restored:
                start = randrange(1, self.backupset.number_of_backups + 1)
                if start == self.backupset.number_of_backups:
                    end = start
                else:
                    end = randrange(start,
                                    self.backupset.number_of_backups + 1)
            restored["{0}/{1}".format(start, end)] = ""
        restore_scopes = self.get_bucket_scope_restore_cluster_host()
        restore_collections = self.get_bucket_collection_restore_cluster_host()
        self.verify_collections_in_restore_cluster_host()
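
A minimal standalone sketch (helper and variable names are illustrative, not from the original test) of the restore-range selection performed above: draw a random start backup, pick an end at or after it, and redraw until the pair has not been restored yet.

from random import randrange

def pick_backup_range(number_of_backups, restored):
    """Return a (start, end) pair of backup indices not yet recorded in `restored`."""
    def draw():
        start = randrange(1, number_of_backups + 1)
        end = (start if start == number_of_backups
               else randrange(start, number_of_backups + 1))
        return start, end

    start, end = draw()
    # keep drawing until this start/end combination has not been restored before
    while "{0}/{1}".format(start, end) in restored:
        start, end = draw()
    restored["{0}/{1}".format(start, end)] = ""
    return start, end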
Exemple #33
0
    except IndexError:
        usage()
    except getopt.GetoptError as error:
        usage("ERROR: " + str(error))

    docs_per_day = input.param("doc_per_day", 49)
    years = input.param("years", 2)
    bucket_name = input.param("bucket_name", "default")
    bucket_port = input.param("bucket_port", None)
    bucket_sasl_pass = input.param("bucket_sasl_pass", None)
    flag = input.param("flags", 0)
    to_directory = input.param("to_dir", "")

    if to_directory:
        loader = DocLoaderDirectory(input.servers[0], to_directory, bucket_name)
        generators_load = loader.generate_docs(docs_per_day, years)
        loader.load(generators_load)
    else:
        cluster = Cluster()
        try:
            bucket = initialize_bucket(bucket_name, bucket_port, bucket_sasl_pass)
            loader = DocLoaderCouchbase(input.servers, cluster)
            generators_load = loader.generate_docs(docs_per_day, years)
            loader.load(generators_load, bucket, flag=flag)
        finally:
            cluster.shutdown()


if __name__ == "__main__":
    main()
Exemple #34
0
    def __init__(self, nodes, num_clusters=2):
        self.log = logger.Logger.get_logger()
        self.cluster_list = []
        self.__clusterop = Cluster()
        self.setup_xdcr(nodes, num_clusters)
Exemple #35
0
class BaseSecondaryIndexingTests(QueryTests):
    def setUp(self):
        super(BaseSecondaryIndexingTests, self).setUp()
        self.index_lost_during_move_out = []
        self.verify_using_index_status = self.input.param("verify_using_index_status", False)
        self.use_replica_when_active_down = self.input.param("use_replica_when_active_down", True)
        self.use_where_clause_in_index = self.input.param("use_where_clause_in_index", False)
        self.scan_consistency = self.input.param("scan_consistency", "request_plus")
        self.scan_vector_per_values = self.input.param("scan_vector_per_values", None)
        self.timeout_for_index_online = self.input.param("timeout_for_index_online", 600)
        self.verify_query_result = self.input.param("verify_query_result", True)
        self.verify_explain_result = self.input.param("verify_explain_result", True)
        self.defer_build = self.input.param("defer_build", True)
        self.run_query_with_explain = self.input.param("run_query_with_explain", True)
        self.run_query = self.input.param("run_query", True)
        self.graceful = self.input.param("graceful", False)
        self.groups = self.input.param("groups", "all").split(":")
        self.use_rest = self.input.param("use_rest", False)
        if not self.use_rest:
            query_definition_generator = SQLDefinitionGenerator()
            if self.dataset == "default" or self.dataset == "employee":
                self.query_definitions = query_definition_generator.generate_employee_data_query_definitions()
            if self.dataset == "simple":
                self.query_definitions = query_definition_generator.generate_simple_data_query_definitions()
            if self.dataset == "sabre":
                self.query_definitions = query_definition_generator.generate_sabre_data_query_definitions()
            if self.dataset == "bigdata":
                self.query_definitions = query_definition_generator.generate_big_data_query_definitions()
            if self.dataset == "array":
                self.query_definitions = query_definition_generator.generate_airlines_data_query_definitions()
            self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
        self.ops_map = self._create_operation_map()
        self.find_nodes_in_list()
        self.generate_map_nodes_out_dist()
        self.memory_create_list = []
        self.memory_drop_list = []
        self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
        self.skip_cleanup = self.input.param("skip_cleanup", False)
        self.index_loglevel = self.input.param("index_loglevel", None)
        if self.index_loglevel:
            self.set_indexer_logLevel(self.index_loglevel)
        if self.dgm_run:
            self._load_doc_data_all_buckets(gen_load=self.gens_load)
        self.gsi_thread = Cluster()
        self.defer_build = self.defer_build and self.use_gsi_for_secondary

    def tearDown(self):
        super(BaseSecondaryIndexingTests, self).tearDown()

    def create_index(self, bucket, query_definition, deploy_node_info=None):
        create_task = self.async_create_index(bucket, query_definition, deploy_node_info)
        create_task.result()
        if self.defer_build:
            build_index_task = self.async_build_index(bucket, [query_definition.index_name])
            build_index_task.result()
        check = self.n1ql_helper.is_index_ready_and_in_list(bucket, query_definition.index_name, server=self.n1ql_node)
        self.assertTrue(check, "index {0} failed to be created".format(query_definition.index_name))

    def async_create_index(self, bucket, query_definition, deploy_node_info=None):
        index_where_clause = None
        if self.use_where_clause_in_index:
            index_where_clause = query_definition.index_where_clause
        self.query = query_definition.generate_index_create_query(
            bucket=bucket,
            use_gsi_for_secondary=self.use_gsi_for_secondary,
            deploy_node_info=deploy_node_info,
            defer_build=self.defer_build,
            index_where_clause=index_where_clause,
        )
        create_index_task = self.gsi_thread.async_create_index(
            server=self.n1ql_node,
            bucket=bucket,
            query=self.query,
            n1ql_helper=self.n1ql_helper,
            index_name=query_definition.index_name,
            defer_build=self.defer_build,
        )
        return create_index_task

    def create_index_using_rest(self, bucket, query_definition, exprType="N1QL", deploy_node_info=None):
        ind_content = query_definition.generate_gsi_index_create_query_using_rest(
            bucket=bucket,
            deploy_node_info=deploy_node_info,
            defer_build=None,
            index_where_clause=None,
            gsi_type=self.gsi_type,
        )

        log.info("Creating index {0}...".format(query_definition.index_name))
        return self.rest.create_index_with_rest(ind_content)

    def async_build_index(self, bucket, index_list=None):
        if not index_list:
            index_list = []
        self.query = self.n1ql_helper.gen_build_index_query(bucket=bucket, index_list=index_list)
        self.log.info(self.query)
        build_index_task = self.gsi_thread.async_build_index(
            server=self.n1ql_node, bucket=bucket, query=self.query, n1ql_helper=self.n1ql_helper
        )
        return build_index_task

    def async_monitor_index(self, bucket, index_name=None):
        monitor_index_task = self.gsi_thread.async_monitor_index(
            server=self.n1ql_node,
            bucket=bucket,
            n1ql_helper=self.n1ql_helper,
            index_name=index_name,
            timeout=self.timeout_for_index_online,
        )
        return monitor_index_task

    def multi_create_index(self, buckets=None, query_definitions=None, deploy_node_info=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        for bucket in buckets:
            for query_definition in query_definitions:
                index_info = "{0}:{1}".format(bucket.name, query_definition.index_name)
                if index_info not in self.memory_create_list:
                    self.memory_create_list.append(index_info)
                    self.create_index(bucket.name, query_definition, deploy_node_info)

    def multi_create_index_using_rest(self, buckets=None, query_definitions=None, deploy_node_info=None):
        self.index_id_map = {}
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        for bucket in buckets:
            if bucket not in self.index_id_map.keys():
                self.index_id_map[bucket] = {}
            for query_definition in query_definitions:
                id_map = self.create_index_using_rest(
                    bucket=bucket, query_definition=query_definition, deploy_node_info=deploy_node_info
                )
                self.index_id_map[bucket][query_definition] = id_map["id"]

    def async_multi_create_index(self, buckets=None, query_definitions=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        create_index_tasks = []
        self.index_lost_during_move_out = []
        self.log.info(self.index_nodes_out)
        index_node_count = 0
        for query_definition in query_definitions:
            index_info = "{0}".format(query_definition.index_name)
            if index_info not in self.memory_create_list:
                self.memory_create_list.append(index_info)
                self.deploy_node_info = None
                if index_node_count < len(self.index_nodes_out):
                    node_index = index_node_count
                    self.deploy_node_info = [
                        "{0}:{1}".format(
                            self.index_nodes_out[index_node_count].ip, self.index_nodes_out[index_node_count].port
                        )
                    ]
                    if query_definition.index_name not in self.index_lost_during_move_out:
                        self.index_lost_during_move_out.append(query_definition.index_name)
                    index_node_count += 1
                for bucket in buckets:
                    create_index_tasks.append(
                        self.async_create_index(bucket.name, query_definition, deploy_node_info=self.deploy_node_info)
                    )
                self.sleep(3)
        if self.defer_build:
            index_list = []
            for task in create_index_tasks:
                task.result()
            for query_definition in query_definitions:
                if query_definition.index_name not in index_list:
                    index_list.append(query_definition.index_name)
            for bucket in self.buckets:
                build_index_task = self.async_build_index(bucket, index_list)
                build_index_task.result()
            monitor_index_tasks = []
            for index_name in index_list:
                for bucket in self.buckets:
                    monitor_index_tasks.append(self.async_monitor_index(bucket.name, index_name))
            return monitor_index_tasks
        else:
            return create_index_tasks

    def multi_drop_index_using_rest(self, buckets=None, query_definitions=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        for bucket in buckets:
            for query_definition in query_definitions:
                self.drop_index_using_rest(bucket, query_definition)

    def multi_drop_index(self, buckets=None, query_definitions=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        for bucket in buckets:
            for query_definition in query_definitions:
                index_info = query_definition.generate_index_drop_query(bucket=bucket.name)
                if index_info not in self.memory_drop_list:
                    self.memory_drop_list.append(index_info)
                    self.drop_index(bucket.name, query_definition)

    def async_multi_drop_index(self, buckets=None, query_definitions=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        drop_index_tasks = []
        for bucket in buckets:
            for query_definition in query_definitions:
                index_info = query_definition.generate_index_drop_query(bucket=bucket.name)
                if index_info not in self.memory_drop_list:
                    self.memory_drop_list.append(index_info)
                    drop_index_tasks.append(self.async_drop_index(bucket.name, query_definition))
        return drop_index_tasks

    def drop_index(self, bucket, query_definition, verify_drop=True):
        try:
            self.query = query_definition.generate_index_drop_query(
                bucket=bucket,
                use_gsi_for_secondary=self.use_gsi_for_secondary,
                use_gsi_for_primary=self.use_gsi_for_primary,
            )
            actual_result = self.n1ql_helper.run_cbq_query(query=self.query, server=self.n1ql_node)
            if verify_drop:
                check = self.n1ql_helper._is_index_in_list(bucket, query_definition.index_name, server=self.n1ql_node)
                self.assertFalse(check, "index {0} failed to be deleted".format(query_definition.index_name))
        except Exception as ex:
            self.log.info(ex)
            query = "select * from system:indexes"
            actual_result = self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
            self.log.info(actual_result)
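
A minimal sketch (index, bucket, and field names are assumed, not taken from the original) of the shape of N1QL statements the deferred-build path above ends up issuing through gsi_thread: create the index with defer_build, build it explicitly, then poll system:indexes until its state is "online".

CREATE_STMT = ('CREATE INDEX `idx_name` ON `default`(`field`) '
               'USING GSI WITH {"defer_build": true}')
BUILD_STMT = 'BUILD INDEX ON `default`(`idx_name`) USING GSI'
STATUS_STMT = ("SELECT state FROM system:indexes "
               "WHERE keyspace_id = 'default' AND name = 'idx_name'")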
Exemple #36
0
class BaseSecondaryIndexingTests(QueryTests):
    def setUp(self):
        super(BaseSecondaryIndexingTests, self).setUp()
        self.index_lost_during_move_out = []
        self.verify_using_index_status = self.input.param(
            "verify_using_index_status", False)
        self.use_replica_when_active_down = self.input.param(
            "use_replica_when_active_down", True)
        self.use_where_clause_in_index = self.input.param(
            "use_where_clause_in_index", False)
        self.scan_consistency = self.input.param("scan_consistency",
                                                 "request_plus")
        self.scan_vector_per_values = self.input.param(
            "scan_vector_per_values", None)
        self.timeout_for_index_online = self.input.param(
            "timeout_for_index_online", 600)
        self.verify_query_result = self.input.param("verify_query_result",
                                                    True)
        self.verify_explain_result = self.input.param("verify_explain_result",
                                                      True)
        self.defer_build = self.input.param("defer_build", True)
        self.run_query_with_explain = self.input.param(
            "run_query_with_explain", True)
        self.run_query = self.input.param("run_query", True)
        self.graceful = self.input.param("graceful", False)
        self.groups = self.input.param("groups", "all").split(":")
        self.use_rest = self.input.param("use_rest", False)
        if not self.use_rest:
            query_definition_generator = SQLDefinitionGenerator()
            if self.dataset == "default" or self.dataset == "employee":
                self.query_definitions = query_definition_generator.generate_employee_data_query_definitions(
                )
            if self.dataset == "simple":
                self.query_definitions = query_definition_generator.generate_simple_data_query_definitions(
                )
            if self.dataset == "sabre":
                self.query_definitions = query_definition_generator.generate_sabre_data_query_definitions(
                )
            if self.dataset == "bigdata":
                self.query_definitions = query_definition_generator.generate_big_data_query_definitions(
                )
            if self.dataset == "array":
                self.query_definitions = query_definition_generator.generate_airlines_data_query_definitions(
                )
            self.query_definitions = query_definition_generator.filter_by_group(
                self.groups, self.query_definitions)
        self.ops_map = self._create_operation_map()
        self.find_nodes_in_list()
        self.generate_map_nodes_out_dist()
        self.memory_create_list = []
        self.memory_drop_list = []
        self.skip_cleanup = self.input.param("skip_cleanup", False)
        self.index_loglevel = self.input.param("index_loglevel", None)
        if self.index_loglevel:
            self.set_indexer_logLevel(self.index_loglevel)
        if self.dgm_run:
            self._load_doc_data_all_buckets(gen_load=self.gens_load)
        self.gsi_thread = Cluster()
        self.defer_build = self.defer_build and self.use_gsi_for_secondary

    def tearDown(self):
        super(BaseSecondaryIndexingTests, self).tearDown()

    def create_index(self, bucket, query_definition, deploy_node_info=None):
        create_task = self.async_create_index(bucket, query_definition,
                                              deploy_node_info)
        create_task.result()
        if self.defer_build:
            build_index_task = self.async_build_index(
                bucket, [query_definition.index_name])
            build_index_task.result()
        check = self.n1ql_helper.is_index_ready_and_in_list(
            bucket, query_definition.index_name, server=self.n1ql_node)
        self.assertTrue(
            check, "index {0} failed to be created".format(
                query_definition.index_name))

    def async_create_index(self,
                           bucket,
                           query_definition,
                           deploy_node_info=None):
        index_where_clause = None
        if self.use_where_clause_in_index:
            index_where_clause = query_definition.index_where_clause
        self.query = query_definition.generate_index_create_query(
            bucket=bucket,
            use_gsi_for_secondary=self.use_gsi_for_secondary,
            deploy_node_info=deploy_node_info,
            defer_build=self.defer_build,
            index_where_clause=index_where_clause)
        create_index_task = self.gsi_thread.async_create_index(
            server=self.n1ql_node,
            bucket=bucket,
            query=self.query,
            n1ql_helper=self.n1ql_helper,
            index_name=query_definition.index_name,
            defer_build=self.defer_build)
        return create_index_task

    def create_index_using_rest(self,
                                bucket,
                                query_definition,
                                exprType='N1QL',
                                deploy_node_info=None):
        ind_content = query_definition.generate_gsi_index_create_query_using_rest(
            bucket=bucket,
            deploy_node_info=deploy_node_info,
            defer_build=None,
            index_where_clause=None,
            gsi_type=self.gsi_type)

        log.info("Creating index {0}...".format(query_definition.index_name))
        return self.rest.create_index_with_rest(ind_content)

    def async_build_index(self, bucket, index_list=None):
        if not index_list:
            index_list = []
        self.query = self.n1ql_helper.gen_build_index_query(
            bucket=bucket, index_list=index_list)
        self.log.info(self.query)
        build_index_task = self.gsi_thread.async_build_index(
            server=self.n1ql_node,
            bucket=bucket,
            query=self.query,
            n1ql_helper=self.n1ql_helper)
        return build_index_task

    def async_monitor_index(self, bucket, index_name=None):
        monitor_index_task = self.gsi_thread.async_monitor_index(
            server=self.n1ql_node,
            bucket=bucket,
            n1ql_helper=self.n1ql_helper,
            index_name=index_name,
            timeout=self.timeout_for_index_online)
        return monitor_index_task

    def multi_create_index(self,
                           buckets=None,
                           query_definitions=None,
                           deploy_node_info=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        for bucket in buckets:
            for query_definition in query_definitions:
                index_info = "{0}:{1}".format(bucket.name,
                                              query_definition.index_name)
                if index_info not in self.memory_create_list:
                    self.memory_create_list.append(index_info)
                    self.create_index(bucket.name, query_definition,
                                      deploy_node_info)

    def multi_create_index_using_rest(self,
                                      buckets=None,
                                      query_definitions=None,
                                      deploy_node_info=None):
        self.index_id_map = {}
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        for bucket in buckets:
            if bucket not in self.index_id_map.keys():
                self.index_id_map[bucket] = {}
            for query_definition in query_definitions:
                id_map = self.create_index_using_rest(
                    bucket=bucket,
                    query_definition=query_definition,
                    deploy_node_info=deploy_node_info)
                self.index_id_map[bucket][query_definition] = id_map["id"]

    def async_multi_create_index(self, buckets=None, query_definitions=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        create_index_tasks = []
        self.index_lost_during_move_out = []
        self.log.info(self.index_nodes_out)
        index_node_count = 0
        for query_definition in query_definitions:
            index_info = "{0}".format(query_definition.index_name)
            if index_info not in self.memory_create_list:
                self.memory_create_list.append(index_info)
                self.deploy_node_info = None
                if index_node_count < len(self.index_nodes_out):
                    node_index = index_node_count
                    self.deploy_node_info = [
                        "{0}:{1}".format(
                            self.index_nodes_out[index_node_count].ip,
                            self.index_nodes_out[index_node_count].port)
                    ]
                    if query_definition.index_name not in self.index_lost_during_move_out:
                        self.index_lost_during_move_out.append(
                            query_definition.index_name)
                    index_node_count += 1
                for bucket in buckets:
                    create_index_tasks.append(
                        self.async_create_index(
                            bucket.name,
                            query_definition,
                            deploy_node_info=self.deploy_node_info))
                self.sleep(3)
        if self.defer_build:
            index_list = []
            for task in create_index_tasks:
                task.result()
            for query_definition in query_definitions:
                if query_definition.index_name not in index_list:
                    index_list.append(query_definition.index_name)
            for bucket in self.buckets:
                build_index_task = self.async_build_index(bucket, index_list)
                build_index_task.result()
            monitor_index_tasks = []
            for index_name in index_list:
                for bucket in self.buckets:
                    monitor_index_tasks.append(
                        self.async_monitor_index(bucket.name, index_name))
            return monitor_index_tasks
        else:
            return create_index_tasks

    def multi_drop_index_using_rest(self,
                                    buckets=None,
                                    query_definitions=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        for bucket in buckets:
            for query_definition in query_definitions:
                self.drop_index_using_rest(bucket, query_definition)

    def multi_drop_index(self, buckets=None, query_definitions=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        for bucket in buckets:
            for query_definition in query_definitions:
                index_info = query_definition.generate_index_drop_query(
                    bucket=bucket.name)
                if index_info not in self.memory_drop_list:
                    self.memory_drop_list.append(index_info)
                    self.drop_index(bucket.name, query_definition)

    def async_multi_drop_index(self, buckets=None, query_definitions=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        drop_index_tasks = []
        for bucket in buckets:
            for query_definition in query_definitions:
                index_info = query_definition.generate_index_drop_query(
                    bucket=bucket.name)
                if index_info not in self.memory_drop_list:
                    self.memory_drop_list.append(index_info)
                    drop_index_tasks.append(
                        self.async_drop_index(bucket.name, query_definition))
        return drop_index_tasks

    def drop_index(self, bucket, query_definition, verify_drop=True):
        try:
            self.query = query_definition.generate_index_drop_query(
                bucket=bucket,
                use_gsi_for_secondary=self.use_gsi_for_secondary,
                use_gsi_for_primary=self.use_gsi_for_primary)
            actual_result = self.n1ql_helper.run_cbq_query(
                query=self.query, server=self.n1ql_node)
            if verify_drop:
                check = self.n1ql_helper._is_index_in_list(
                    bucket, query_definition.index_name, server=self.n1ql_node)
                self.assertFalse(
                    check, "index {0} failed to be deleted".format(
                        query_definition.index_name))
        except Exception as ex:
            self.log.info(ex)
            query = "select * from system:indexes"
            actual_result = self.n1ql_helper.run_cbq_query(
                query=query, server=self.n1ql_node)
            self.log.info(actual_result)
Exemple #37
0
class CliBaseTest(BaseTestCase):
    vbucketId = 0

    def setUp(self):
        self.times_teardown_called = 1
        super(CliBaseTest, self).setUp()
        self.r = random.Random()
        self.vbucket_count = 1024
        self.cluster = Cluster()
        self.clusters_dic = self.input.clusters
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
            elif len(self.clusters_dic) == 1:
                self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
        else:
            self.log.error("**** Cluster config is setup in ini file. ****")
        self.shell = RemoteMachineShellConnection(self.master)
        if not self.skip_init_check_cbserver:
            self.rest = RestConnection(self.master)
            self.cb_version = self.rest.get_nodes_version()
            """ cli output message """
            self.cli_bucket_create_msg = "SUCCESS: Bucket created"
            self.cli_rebalance_msg = "SUCCESS: Rebalance complete"
            if self.cb_version[:3] == "4.6":
                self.cli_bucket_create_msg = "SUCCESS: bucket-create"
                self.cli_rebalance_msg = "SUCCESS: rebalanced cluster"
        self.import_back = self.input.param("import_back", False)
        if self.import_back:
            if len(self.servers) < 3:
                self.fail("This test needs minimum of 3 vms to run ")
        self.test_type = self.input.param("test_type", "import")
        self.import_file = self.input.param("import_file", None)
        self.imex_type = self.input.param("imex_type", "json")
        self.format_type = self.input.param("format_type", "lines")
        self.import_method = self.input.param("import_method", "file://")
        self.force_failover = self.input.param("force_failover", False)
        self.json_invalid_errors = self.input.param("json-invalid-errors", None)
        self.field_separator = self.input.param("field-separator", "comma")
        self.key_gen = self.input.param("key-gen", True)
        self.skip_docs = self.input.param("skip-docs", None)
        self.limit_docs = self.input.param("limit-docs", None)
        self.limit_rows = self.input.param("limit-rows", None)
        self.skip_rows = self.input.param("skip-rows", None)
        self.omit_empty = self.input.param("omit-empty", None)
        self.infer_types = self.input.param("infer-types", None)
        self.fx_generator = self.input.param("fx-generator", None)
        self.fx_gen_start = self.input.param("fx-gen-start", None)
        self.secure_conn = self.input.param("secure-conn", False)
        self.no_cacert = self.input.param("no-cacert", False)
        self.no_ssl_verify = self.input.param("no-ssl-verify", False)
        self.verify_data = self.input.param("verify-data", False)
        self.field_substitutions = self.input.param("field-substitutions", None)
        self.check_preload_keys = self.input.param("check-preload-keys", True)
        self.debug_logs = self.input.param("debug-logs", False)
        self.should_fail = self.input.param("should-fail", False)
        info = self.shell.extract_remote_info()
        self.os_version = info.distribution_version.lower()
        self.deliverable_type = info.deliverable_type.lower()
        type = info.type.lower()
        self.excluded_commands = self.input.param("excluded_commands", None)
        self.os = 'linux'
        self.full_v = None
        self.short_v = None
        self.build_number = None
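        # Ask the server (via the /diag/eval endpoint) for its install bin
        # directory so the CLI commands below can be run with an absolute path.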
        cmd =  'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(self.master.ip,
                                                              self.master.rest_username,
                                                              self.master.rest_password)
        cmd += '-d "path_config:component_path(bin)."'
        bin_path  = subprocess.check_output(cmd, shell=True)
        if "bin" not in bin_path:
            self.fail("Check if cb server install on %s" % self.master.ip)
        else:
            self.cli_command_path = bin_path.replace('"','') + "/"
        self.root_path = LINUX_ROOT_PATH
        self.tmp_path = "/tmp/"
        self.tmp_path_raw = "/tmp/"
        self.cmd_backup_path = LINUX_BACKUP_PATH
        self.backup_path = LINUX_BACKUP_PATH
        self.cmd_ext = ""
        self.src_file = ""
        self.des_file = ""
        self.sample_files_path = LINUX_COUCHBASE_SAMPLE_PATH
        self.log_path = LINUX_COUCHBASE_LOGS_PATH
        self.base_cb_path = LINUX_CB_PATH
        """ non root path """
        if self.nonroot:
            self.sample_files_path = "/home/%s%s" % (self.master.ssh_username,
                                                     LINUX_COUCHBASE_SAMPLE_PATH)
            self.log_path = "/home/%s%s" % (self.master.ssh_username,
                                            LINUX_COUCHBASE_LOGS_PATH)
            self.base_cb_path = "/home/%s%s" % (self.master.ssh_username,
                                                LINUX_CB_PATH)
            self.root_path = "/home/%s/" % self.master.ssh_username
        if type == 'windows':
            self.os = 'windows'
            self.cmd_ext = ".exe"
            self.root_path = WIN_ROOT_PATH
            self.tmp_path = WIN_TMP_PATH
            self.tmp_path_raw = WIN_TMP_PATH_RAW
            self.cmd_backup_path = WIN_BACKUP_C_PATH
            self.backup_path = WIN_BACKUP_PATH
            self.sample_files_path = WIN_COUCHBASE_SAMPLE_PATH_C
            self.log_path = WIN_COUCHBASE_LOGS_PATH
            win_format = "C:/Program Files"
            cygwin_format = "/cygdrive/c/Program\ Files"
            if win_format in self.cli_command_path:
                self.cli_command_path = self.cli_command_path.replace(win_format,
                                                                      cygwin_format)
            self.base_cb_path = WIN_CB_PATH
        if info.distribution_type.lower() == 'mac':
            self.os = 'mac'
        self.full_v, self.short_v, self.build_number = self.shell.get_cbversion(type)
        self.couchbase_usrname = "%s" % (self.input.membase_settings.rest_username)
        self.couchbase_password = "******" % (self.input.membase_settings.rest_password)
        self.cb_login_info = "%s:%s" % (self.couchbase_usrname,
                                        self.couchbase_password)
        self.path_type = self.input.param("path_type", None)
        if self.path_type is None:
            self.log.info("Test command with absolute path ")
        elif self.path_type == "local":
            self.log.info("Test command at %s dir " % self.cli_command_path)
            self.cli_command_path = "cd %s; ./" % self.cli_command_path
        self.cli_command = self.input.param("cli_command", None)
        self.command_options = self.input.param("command_options", None)
        if self.command_options is not None:
            self.command_options = self.command_options.split(";")
        if str(self.__class__).find('couchbase_clitest.CouchbaseCliTest') == -1:
            if len(self.servers) > 1 and int(self.nodes_init) == 1:
                servers_in = [self.servers[i + 1] for i in range(self.num_servers - 1)]
                self.cluster.rebalance(self.servers[:1], servers_in, [])
        for bucket in self.buckets:
            testuser = [{'id': bucket.name, 'name': bucket.name, 'password': '******'}]
            rolelist = [{'id': bucket.name, 'name': bucket.name, 'roles': 'admin'}]
            self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)


    def tearDown(self):
        if not self.input.param("skip_cleanup", True):
            if self.times_teardown_called > 1 :
                self.shell.disconnect()
        if self.input.param("skip_cleanup", True):
            if self.case_number > 1 or self.times_teardown_called > 1:
                self.shell.disconnect()
        self.times_teardown_called += 1
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        zones = rest.get_zone_names()
        for zone in zones:
            if zone != "Group 1":
                rest.delete_zone(zone)
        self.clusters_dic = self.input.clusters
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
                if self.dest_nodes and len(self.dest_nodes) > 1:
                    self.log.info("======== clean up destination cluster =======")
                    rest = RestConnection(self.dest_nodes[0])
                    rest.remove_all_remote_clusters()
                    rest.remove_all_replications()
                    BucketOperationHelper.delete_all_buckets_or_assert(self.dest_nodes, self)
                    ClusterOperationHelper.cleanup_cluster(self.dest_nodes)
            elif len(self.clusters_dic) == 1:
                self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
        else:
            self.log.info("**** If run xdcr test, need cluster config is setup in ini file. ****")
        super(CliBaseTest, self).tearDown()


    """ in sherlock, there is an extra value called runCmd in the 1st element """
    def del_runCmd_value(self, output):
        if "runCmd" in output[0]:
            output = output[1:]
        return output
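
    # Hypothetical illustration (not in the original): callers strip the echoed
    # "runCmd" element before matching CLI output, e.g.
    #     output = self.del_runCmd_value(output)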

    def verifyCommandOutput(self, output, expect_error, message):
        """Inspects each line of the output and checks to see if the expected error was found

        Options:
        output - A list of output lines
        expect_error - Whether the command is expected to fail
        message - The success or error message

        Returns a boolean indicating whether or not the error/success message was found in the output
        """
        if expect_error:
            for line in output:
                if line == "ERROR: " + message:
                    return True
            log.info("Did not receive expected error message `ERROR: %s`", message)
            return False
        else:
            for line in output:
                if line == "SUCCESS: " + message:
                    return True
            log.info("Did not receive expected success message `SUCCESS: %s`", message)
            return False
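
    # Hypothetical usage (names assumed, not from the original test):
    #     self.assertTrue(self.verifyCommandOutput(output, expect_error=False,
    #                                              message=self.cli_rebalance_msg))
    # With expect_error=True, the same call looks for "ERROR: <message>" lines instead.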

    def verifyWarningOutput(self, output, message):
        for line in output:
            if line == "WARNING: " + message:
                return True
        log.info("Did not receive expected error message `WARNING: %s`", message)
        return False

    def verifyServices(self, server, expected_services):
        """Verifies that the services on a given node match the expected service

            Options:
            server - A TestInputServer object of the server to connect to
            expected_services - A comma separated list of services

            Returns a boolean corresponding to whether or not the expected services
            are available on the server.
        """
        rest = RestConnection(server)
        hostname = "%s:%s" % (server.ip, server.port)
        expected_services = expected_services.replace("data", "kv")
        expected_services = expected_services.replace("query", "n1ql")
        expected_services = expected_services.split(",")

        nodes_services = rest.get_nodes_services()
        for node, services in nodes_services.iteritems():
            if node.encode('ascii') == hostname:
                if len(services) != len(expected_services):
                    log.info("Services on %s do not match expected services (%s vs. %s)",
                             hostname, services, expected_services)
                    return False
                for service in services:
                    if service.encode("ascii") not in expected_services:
                        log.info("Services on %s do not match expected services (%s vs. %s)",
                                 hostname, services, expected_services)
                        return False
                return True

        log.info("Services on %s not found, the server may not exist", hostname)
        return False
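
    # Note added for clarity: service names are normalised before comparison, so
    # an expected_services value like "data,index,query" is matched against the
    # node's reported services ["kv", "index", "n1ql"].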

    def verifyRamQuotas(self, server, data, index, fts):
        """Verifies that the RAM quotas for each service are set properly

        Options:
        server - A TestInputServer object of the server to connect to
        data - An int containing the data service RAM quota, None will skip the check
        index - An int containing the index service RAM quota, None will skip the check
        fts - An int containing the FTS service RAM quota, None will skip the check

        Returns a boolean corresponding to whether or not the RAM quotas were set properly
        """
        rest = RestConnection(server)
        settings = rest.get_pools_default()
        if data:
            if "memoryQuota" not in settings:
                log.info("Unable to get data service ram quota")
                return False
            if int(settings["memoryQuota"]) != int(data):
                log.info("Data service memory quota does not match (%d vs %d)",
                         int(settings["memoryQuota"]), int(data))
                return False

        if index:
            if "indexMemoryQuota" not in settings:
                log.info("Unable to get index service ram quota")
                return False
            if int(settings["indexMemoryQuota"]) != int(index):
                log.info(
                    "Index service memory quota does not match (%d vs %d)",
                    int(settings["indexMemoryQuota"]), int(index))
                return False

        if fts:
            if "ftsMemoryQuota" not in settings:
                log.info("Unable to get fts service ram quota")
                return False
            if int(settings["ftsMemoryQuota"]) != int(fts):
                log.info("FTS service memory quota does not match (%d vs %d)",
                         int(settings["ftsMemoryQuota"]), int(fts))
                return False

        return True

    def verifyBucketSettings(self, server, bucket_name, bucket_type, memory_quota,
                             eviction_policy, replica_count, enable_index_replica,
                             priority, enable_flush):
        rest = RestConnection(server)
        result = rest.get_bucket_json(bucket_name)

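        # The REST payload reports couchbase buckets under the legacy type name
        # "membase", so normalise the expected value before comparing.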
        if bucket_type == "couchbase":
            bucket_type = "membase"

        if bucket_type is not None and bucket_type != result["bucketType"]:
            log.info("Memory quota does not match (%s vs %s)", bucket_type,
                     result["bucketType"])
            return False

        quota = result["quota"]["rawRAM"] / 1024 / 1024
        if memory_quota is not None and memory_quota != quota:
            log.info("Bucket quota does not match (%s vs %s)", memory_quota,
                     quota)
            return False

        if eviction_policy is not None and eviction_policy != result[
            "evictionPolicy"]:
            log.info("Eviction policy does not match (%s vs %s)",
                     eviction_policy, result["evictionPolicy"])
            return False

        if replica_count is not None and replica_count != result[
            "replicaNumber"]:
            log.info("Replica count does not match (%s vs %s)", replica_count,
                     result["replicaNumber"])
            return False

        if enable_index_replica == 1:
            enable_index_replica = True
        elif enable_index_replica == 0:
            enable_index_replica = False

        if enable_index_replica is not None and enable_index_replica != result[
            "replicaIndex"]:
            log.info("Replica index enabled does not match (%s vs %s)",
                     enable_index_replica, result["replicaIndex"])
            return False

        if priority == "high":
            priority = 8
        elif priority == "low":
            priority = 3

        if priority is not None and priority != result["threadsNumber"]:
            log.info("Bucket priority does not match (%s vs %s)", priority,
                     result["threadsNumber"])
            return False

        if enable_flush is not None:
            if enable_flush == 1 and "flush" not in result["controllers"]:
                log.info("Bucket flush is not enabled, but it should be")
                return False
            elif enable_flush == 0 and "flush" in result["controllers"]:
                log.info("Bucket flush is not enabled, but it should be")
                return False

        return True

    def verifyContainsBucket(self, server, name):
        rest = RestConnection(server)
        buckets = rest.get_buckets()

        for bucket in buckets:
            if bucket.name == name:
                return True
        return False

    def verifyClusterName(self, server, name):
        rest = RestConnection(server)
        settings = rest.get_pools_default("waitChange=0")

        if name is None:
            name = ""
        if name == "empty":
            name = " "

        if "clusterName" not in settings:
            log.info("Unable to get cluster name from server")
            return False
        if settings["clusterName"] != name:
            log.info("Cluster name does not match (%s vs %s)",
                     settings["clusterName"], name)
            return False

        return True

    def isClusterInitialized(self, server):
        """Checks whether or not the server is initialized

        Options:
        server - A TestInputServer object of the server to connect to

        Checks to see whether or not the default pool was created in order to
        determine whether or not the server was initialized. Returns a boolean value
        to indicate initialization.
        """
        rest = RestConnection(server)
        settings = rest.get_pools_info()
        if "pools" in settings and len(settings["pools"]) > 0:
            return True

        return False

    def verifyNotificationsEnabled(self, server):
        rest = RestConnection(server)
        enabled = rest.get_notifications()
        if enabled:
            return True
        return False

    def verifyIndexSettings(self, server, max_rollbacks, stable_snap_interval,
                            mem_snap_interval,
                            storage_mode, threads, log_level):
        rest = RestConnection(server)
        settings = rest.get_global_index_settings()

        if storage_mode == "default":
            storage_mode = "plasma"
        elif storage_mode == "memopt":
            storage_mode = "memory_optimized"

        if max_rollbacks and str(settings["maxRollbackPoints"]) != str(
                max_rollbacks):
            log.info("Max rollbacks does not match (%s vs. %s)",
                     str(settings["maxRollbackPoints"]), str(max_rollbacks))
            return False
        if stable_snap_interval and str(
                settings["stableSnapshotInterval"]) != str(
                stable_snap_interval):
            log.info("Stable snapshot interval does not match (%s vs. %s)",
                     str(settings["stableSnapshotInterval"]),
                     str(stable_snap_interval))
            return False
        if mem_snap_interval and str(
                settings["memorySnapshotInterval"]) != str(mem_snap_interval):
            log.info("Memory snapshot interval does not match (%s vs. %s)",
                     str(settings["memorySnapshotInterval"]),
                     str(mem_snap_interval))
            return False
        if storage_mode and str(settings["storageMode"]) != str(storage_mode):
            log.info("Storage mode does not match (%s vs. %s)",
                     str(settings["storageMode"]), str(storage_mode))
            return False
        if threads and str(settings["indexerThreads"]) != str(threads):
            log.info("Threads does not match (%s vs. %s)",
                     str(settings["indexerThreads"]), str(threads))
            return False
        if log_level and str(settings["logLevel"]) != str(log_level):
            log.info("Log level does not match (%s vs. %s)",
                     str(settings["logLevel"]), str(log_level))
            return False

        return True

    def verifyAutofailoverSettings(self, server, enabled, timeout):
        rest = RestConnection(server)
        settings = rest.get_autofailover_settings()

        if enabled and not ((str(enabled) == "1" and settings.enabled) or (
                str(enabled) == "0" and not settings.enabled)):
            log.info("Enabled does not match (%s vs. %s)", str(enabled),
                     str(settings.enabled))
            return False
        if timeout and str(settings.timeout) != str(timeout):
            log.info("Timeout does not match (%s vs. %s)", str(timeout),
                     str(settings.timeout))
            return False

        return True

    def verifyAutoreprovisionSettings(self, server, enabled, max_nodes):
        rest = RestConnection(server)
        settings = rest.get_autoreprovision_settings()

        if enabled and not ((str(enabled) == "1" and settings.enabled) or (
                str(enabled) == "0" and not settings.enabled)):
            log.info("Enabled does not match (%s vs. %s)", str(max_nodes),
                     str(settings.enabled))
            return False
        if max_nodes and str(settings.max_nodes) != str(max_nodes):
            log.info("max_nodes does not match (%s vs. %s)", str(max_nodes),
                     str(settings.max_nodes))
            return False

        return True

    def verifyAuditSettings(self, server, enabled, log_path, rotate_interval):
        rest = RestConnection(server)
        settings = rest.getAuditSettings()

        if enabled and not (
            (str(enabled) == "1" and settings["auditdEnabled"]) or (
                str(enabled) == "0" and not settings["auditdEnabled"])):
            log.info("Enabled does not match (%s vs. %s)", str(enabled),
                     str(settings["auditdEnabled"]))
            return False
        if log_path and str(str(settings["logPath"])) != str(log_path):
            log.info("Log path does not match (%s vs. %s)", str(log_path),
                     str(settings["logPath"]))
            return False

        if rotate_interval and str(str(settings["rotateInterval"])) != str(
                rotate_interval):
            log.info("Rotate interval does not match (%s vs. %s)",
                     str(rotate_interval), str(settings["rotateInterval"]))
            return False

        return True

    def verifyPendingServer(self, server, server_to_add, group_name, services):
        rest = RestConnection(server)
        settings = rest.get_all_zones_info()
        if not settings or "groups" not in settings:
            log.info("Group settings payload appears to be invalid")
            return False

        expected_services = services.replace("data", "kv")
        expected_services = expected_services.replace("query", "n1ql")
        expected_services = expected_services.split(",")

        for group in settings["groups"]:
            for node in group["nodes"]:
                if node["hostname"] == server_to_add:
                    if node["clusterMembership"] != "inactiveAdded":
                        log.info("Node `%s` not in pending status",
                                 server_to_add)
                        return False

                    if group["name"] != group_name:
                        log.info("Node `%s` not in correct group (%s vs %s)",
                                 node["hostname"], group_name,
                                 group["name"])
                        return False

                    if len(node["services"]) != len(expected_services):
                        log.info("Services do not match on %s (%s vs %s) ",
                                 node["hostname"], services,
                                 ",".join(node["services"]))
                        return False

                    for service in node["services"]:
                        if service not in expected_services:
                            log.info("Services do not match on %s (%s vs %s) ",
                                     node["hostname"], services,
                                     ",".join(node["services"]))
                            return False
                    return True

        log.info("Node `%s` not found in nodes list", server_to_add)
        return False

    def verifyPendingServerDoesNotExist(self, server, server_to_add):
        rest = RestConnection(server)
        settings = rest.get_all_zones_info()
        if not settings or "groups" not in settings:
            log.info("Group settings payload appears to be invalid")
            return False

        for group in settings["groups"]:
            for node in group["nodes"]:
                if node["hostname"] == server_to_add:
                    return False

        log.info("Node `%s` not found in nodes list", server_to_add)
        return True

    def verifyActiveServers(self, server, expected_num_servers):
        return self._verifyServersByStatus(server, expected_num_servers,
                                           "active")

    def verifyFailedServers(self, server, expected_num_servers):
        return self._verifyServersByStatus(server, expected_num_servers,
                                           "inactiveFailed")

    def _verifyServersByStatus(self, server, expected_num_servers, status):
        rest = RestConnection(server)
        settings = rest.get_pools_default()

        count = 0
        for node in settings["nodes"]:
            if node["clusterMembership"] == status:
                count += 1

        return count == expected_num_servers

    def verifyRecoveryType(self, server, recovery_servers, recovery_type):
        rest = RestConnection(server)
        settings = rest.get_all_zones_info()
        if not settings or "groups" not in settings:
            log.info("Group settings payload appears to be invalid")
            return False

        if not recovery_servers:
            return True

        num_found = 0
        recovery_servers = recovery_servers.split(",")
        for group in settings["groups"]:
            for node in group["nodes"]:
                for rs in recovery_servers:
                    if node["hostname"] == rs:
                        if node["recoveryType"] != recovery_type:
                            log.info(
                                "Node %s doesn't contain recovery type %s ",
                                rs, recovery_type)
                            return False
                        else:
                            num_found = num_found + 1

        if num_found == len(recovery_servers):
            return True

        log.info("Node `%s` not found in nodes list",
                 ",".join(recovery_servers))
        return False

    def verifyUserRoles(self, server, username, roles):
        status, content, header = rbacmain(server)._retrieve_user_roles()
        content = json.loads(content)
        return rbacmain()._parse_get_user_response(content, username, username,
                                                   roles)

    def verifyLdapSettings(self, server, admins, ro_admins, default, enabled):
        rest = RestConnection(server)
        settings = rest.ldapRestOperationGetResponse()

        if admins is None:
            admins = []
        else:
            admins = admins.split(",")

        if ro_admins is None:
            ro_admins = []
        else:
            ro_admins = ro_admins.split(",")

        if str(enabled) == "0":
            admins = []
            ro_admins = []

        if default == "admins" and str(enabled) == "1":
            if settings["admins"] != "asterisk":
                log.info("Admins don't match (%s vs asterisk)",
                         settings["admins"])
                return False
        elif not self._list_compare(settings["admins"], admins):
            log.info("Admins don't match (%s vs %s)", settings["admins"],
                     admins)
            return False

        if default == "roadmins" and str(enabled) == "1":
            if settings["roAdmins"] != "asterisk":
                log.info("Read only admins don't match (%s vs asterisk)",
                         settings["roAdmins"])
                return False
        elif not self._list_compare(settings["roAdmins"], ro_admins):
            log.info("Read only admins don't match (%s vs %s)",
                     settings["roAdmins"], ro_admins)
            return False

        return True

    def verifyAlertSettings(self, server, enabled, email_recipients,
                            email_sender, email_username, email_password,
                            email_host,
                            email_port, encrypted, alert_af_node,
                            alert_af_max_reached, alert_af_node_down,
                            alert_af_small,
                            alert_af_disable, alert_ip_changed,
                            alert_disk_space, alert_meta_overhead,
                            alert_meta_oom,
                            alert_write_failed, alert_audit_dropped):
        rest = RestConnection(server)
        settings = rest.get_alerts_settings()
        log.info(settings)

        if not enabled:
            if not settings["enabled"]:
                return True
            else:
                log.info("Alerts should be disabled")
                return False

        if encrypted is None or encrypted == "0":
            encrypted = False
        else:
            encrypted = True

        if email_recipients is not None and not self._list_compare(
                email_recipients.split(","), settings["recipients"]):
            log.info("Email recipients don't match (%s vs %s)",
                     email_recipients.split(","), settings["recipients"])
            return False

        if email_sender is not None and email_sender != settings["sender"]:
            log.info("Email sender does not match (%s vs %s)", email_sender,
                     settings["sender"])
            return False

        if email_username is not None and email_username != \
                settings["emailServer"]["user"]:
            log.info("Email username does not match (%s vs %s)",
                     email_username, settings["emailServer"]["user"])
            return False

        if email_host is not None and email_host != settings["emailServer"][
            "host"]:
            log.info("Email host does not match (%s vs %s)", email_host,
                     settings["emailServer"]["host"])
            return False

        if email_port is not None and email_port != settings["emailServer"][
            "port"]:
            log.info("Email port does not match (%s vs %s)", email_port,
                     settings["emailServer"]["port"])
            return False

        if encrypted is not None and encrypted != settings["emailServer"][
            "encrypt"]:
            log.info("Email encryption does not match (%s vs %s)", encrypted,
                     settings["emailServer"]["encrypt"])
            return False

        alerts = list()
        if alert_af_node:
            alerts.append('auto_failover_node')
        if alert_af_max_reached:
            alerts.append('auto_failover_maximum_reached')
        if alert_af_node_down:
            alerts.append('auto_failover_other_nodes_down')
        if alert_af_small:
            alerts.append('auto_failover_cluster_too_small')
        if alert_af_disable:
            alerts.append('auto_failover_disabled')
        if alert_ip_changed:
            alerts.append('ip')
        if alert_disk_space:
            alerts.append('disk')
        if alert_meta_overhead:
            alerts.append('overhead')
        if alert_meta_oom:
            alerts.append('ep_oom_errors')
        if alert_write_failed:
            alerts.append('ep_item_commit_failed')
        if alert_audit_dropped:
            alerts.append('audit_dropped_events')

        if not self._list_compare(alerts, settings["alerts"]):
            log.info("Alerts don't match (%s vs %s)", alerts,
                     settings["alerts"])
            return False

        return True

    def verify_node_settings(self, server, data_path, index_path, hostname):
        rest = RestConnection(server)
        node_settings = rest.get_nodes_self()

        if data_path != node_settings.storage[0].path:
            log.info("Data path does not match (%s vs %s)", data_path,
                     node_settings.storage[0].path)
            return False
        if index_path != node_settings.storage[0].index_path:
            log.info("Index path does not match (%s vs %s)", index_path,
                     node_settings.storage[0].index_path)
            return False
        if hostname is not None:
            if hostname != node_settings.hostname:
                log.info("Hostname does not match (%s vs %s)", hostname,
                         node_settings.hostname)
                return False
        return True

    def verifyCompactionSettings(self, server, db_frag_perc, db_frag_size,
                                 view_frag_perc, view_frag_size, from_period,
                                 to_period, abort_outside, parallel_compact,
                                 purgeInt):
        rest = RestConnection(server)
        settings = rest.get_auto_compaction_settings()
        ac = settings["autoCompactionSettings"]

        if db_frag_perc is not None and str(db_frag_perc) != str(
                ac["databaseFragmentationThreshold"]["percentage"]):
            log.info("DB frag perc does not match (%s vs %s)",
                     str(db_frag_perc),
                     str(ac["databaseFragmentationThreshold"]["percentage"]))
            return False

        if db_frag_size is not None and str(db_frag_size * 1024 ** 2) != str(
                ac["databaseFragmentationThreshold"]["size"]):
            log.info("DB frag size does not match (%s vs %s)",
                     str(db_frag_size * 1024 ** 2),
                     str(ac["databaseFragmentationThreshold"]["size"]))
            return False

        if view_frag_perc is not None and str(view_frag_perc) != str(
                ac["viewFragmentationThreshold"]["percentage"]):
            log.info("View frag perc does not match (%s vs %s)",
                     str(view_frag_perc),
                     str(ac["viewFragmentationThreshold"]["percentage"]))
            return False

        if view_frag_size is not None and str(
                        view_frag_size * 1024 ** 2) != str(
                ac["viewFragmentationThreshold"]["size"]):
            log.info("View frag size does not match (%s vs %s)",
                     str(view_frag_size * 1024 ** 2),
                     str(ac["viewFragmentationThreshold"]["size"]))
            return False

        log.info("Checking allowed time period %s - %s", from_period, to_period)
        if from_period is not None:
            fromHour, fromMin = from_period.split(":", 1)
            if int(fromHour) != int(ac["allowedTimePeriod"]["fromHour"]):
                log.info("From hour does not match (%s vs %s)", str(fromHour),
                         str(ac["allowedTimePeriod"]["fromHour"]))
                return False
            if int(fromMin) != int(ac["allowedTimePeriod"]["fromMinute"]):
                log.info("From minute does not match (%s vs %s)", str(fromMin),
                         str(ac["allowedTimePeriod"]["fromMinute"]))
                return False

        if to_period is not None:
            toHour, toMin = to_period.split(":", 1)
            if int(toHour) != int(ac["allowedTimePeriod"]["toHour"]):
                log.info("To hour does not match (%s vs %s)", str(toHour),
                         str(ac["allowedTimePeriod"]["toHour"]))
                return False
            if int(toMin) != int(ac["allowedTimePeriod"]["toMinute"]):
                log.info("To minute does not match (%s vs %s)", str(toMin),
                         str(ac["allowedTimePeriod"]["toMinute"]))
                return False

        if str(abort_outside) == "1":
            abort_outside = True
        elif str(abort_outside) == "0":
            abort_outside = False

        if abort_outside is not None and abort_outside != \
                ac["allowedTimePeriod"]["abortOutside"]:
            log.info("Abort outside does not match (%s vs %s)", abort_outside,
                     ac["allowedTimePeriod"]["abortOutside"])
            return False

        if str(parallel_compact) == "1":
            parallel_compact = True
        elif str(parallel_compact) == "0":
            parallel_compact = False

        if parallel_compact is not None and parallel_compact != ac[
            "parallelDBAndViewCompaction"]:
            log.info("Parallel compact does not match (%s vs %s)",
                     str(parallel_compact),
                     str(ac["parallelDBAndViewCompaction"]))
            return False

        if purgeInt is not None and str(purgeInt) != str(
                settings["purgeInterval"]):
            log.info("Purge interval does not match (%s vs %s)", str(purgeInt),
                     str(settings["purgeInterval"]))
            return False

        return True
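
    # For reference, a sketch of the payload rest.get_auto_compaction_settings()
    # returns, using only the keys read above (values are illustrative, not real
    # defaults):
    #
    # {
    #     "purgeInterval": 3,
    #     "autoCompactionSettings": {
    #         "databaseFragmentationThreshold": {"percentage": 30, "size": 1073741824},
    #         "viewFragmentationThreshold": {"percentage": 30, "size": 1073741824},
    #         "allowedTimePeriod": {"fromHour": 0, "fromMinute": 0,
    #                               "toHour": 6, "toMinute": 0,
    #                               "abortOutside": True},
    #         "parallelDBAndViewCompaction": False
    #     }
    # }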

    def verify_gsi_compact_settings(self, compact_mode, compact_percent,
                                    compact_interval,
                                    from_period, to_period, enable_abort):
        rest = RestConnection(self.master)
        settings = rest.get_auto_compaction_settings()
        ac = settings["autoCompactionSettings"]["indexFragmentationThreshold"]
        cc = settings["autoCompactionSettings"]["indexCircularCompaction"]
        if compact_mode is not None:
            if compact_mode == "append":
                self.log.info("append compactino settings %s " % ac)
                if compact_percent is not None and \
                                compact_percent != ac["percentage"]:
                    raise Exception(
                        "setting percent does not match.  Set: %s vs %s :Actual"
                        % (compact_percent, ac["percentage"]))
            if compact_mode == "circular":
                self.log.info("circular compaction settings %s " % cc)
                if enable_abort and not cc["interval"]["abortOutside"]:
                    raise Exception("setting enable abort failed")
                if compact_interval is not None:
                    if compact_interval != cc["daysOfWeek"]:
                        raise Exception(
                            "Failed to set compaction on %s " % compact_interval)
                    elif from_period is None and \
                            int(cc["interval"]["fromHour"]) != 0 and \
                            int(cc["interval"]["fromMinute"]) != 0:
                        raise Exception(
                            "fromHour and fromMinute should be zero")
                if compact_interval is None:
                    actual_from = "%s:%s" % (cc["interval"]["fromHour"],
                                             cc["interval"]["fromMinute"])
                    actual_to = "%s:%s" % (cc["interval"]["toHour"],
                                           cc["interval"]["toMinute"])
                    if from_period != actual_from and to_period != actual_to:
                        raise Exception(
                            "from_period and to_period are not set correctly")
        return True

    def verifyGroupExists(self, server, name):
        rest = RestConnection(server)
        groups = rest.get_zone_names()
        log.info(groups)

        return name in groups

    def _list_compare(self, list1, list2):
        if len(list1) != len(list2):
            return False
        for elem1 in list1:
            found = False
            for elem2 in list2:
                if elem1 == elem2:
                    found = True
                    break
            if not found:
                return False
        return True
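
    # A hedged alternative sketch (not part of the original class and not used by
    # anything above): _list_compare treats lists as order-insensitive but ignores
    # duplicate counts; a stricter multiset comparison, assuming hashable elements,
    # could look like this.
    def _multiset_compare(self, list1, list2):
        from collections import Counter
        return Counter(list1) == Counter(list2)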

    def waitForItemCount(self, server, bucket_name, count, timeout=30):
        rest = RestConnection(server)
        for sec in range(timeout):
            items = int(
                rest.get_bucket_json(bucket_name)["basicStats"]["itemCount"])
            if items != count:
                time.sleep(1)
            else:
                return True
        log.info("Waiting for item count to be %d timed out", count)
        return False
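
    # A hedged usage sketch (the server/bucket names here are assumptions, not taken
    # from the code above): a caller could block until a bucket settles at the
    # expected item count, e.g.
    #     if not self.waitForItemCount(self.servers[0], "bucket0", 10, timeout=60):
    #         self.fail("item count never reached 10")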
Exemple #38
0
class BaseUITestCase(unittest.TestCase):
    # selenium thread

    def _start_selenium(self):
        host = self.machine.ip
        if host in ['localhost', '127.0.0.1']:
            os.system("java -jar %sselenium-server-standalone*.jar -Dwebdriver.chrome.driver=%s > selenium.log 2>&1"
                      % (self.input.ui_conf['selenium_path'], self.input.ui_conf['chrome_path']))
        else:
            self.shell.execute_command('{0}start-selenium.bat > {0}selenium.log 2>&1 &'.format(self.input.ui_conf['selenium_path']))

    def _kill_old_drivers(self):
        if self.shell.extract_remote_info().type.lower() == 'windows':
            self.shell.execute_command('taskkill /F /IM chromedriver.exe')
            self.shell.execute_command('taskkill /F /IM chrome.exe')

    def _wait_for_selenium_is_started(self, timeout=10):
        if self.machine.ip in ['localhost', '127.0.0.1']:
            start_time = time.time()
            while (time.time() - start_time) < timeout:
                log = open("/tmp/selenium.log")
                if log.read().find('Started org.openqa.jetty.jetty.Server') > -1:
                    log.close()
                    if self._is_selenium_running():
                        time.sleep(1)
                        return
                time.sleep(1)
        else:
            time.sleep(timeout)

    def _start_selenium_thread(self):
        self.t = Thread(target=self._start_selenium,
                       name="selenium",
                       args=())
        self.t.start()

    def _is_selenium_running(self):
        host = self.machine.ip
        if host in ['localhost', '127.0.0.1']:
            cmd = 'ps -ef|grep selenium-server'
            output = commands.getstatusoutput(cmd)
            if str(output).find('selenium-server-standalone') > -1:
                return True
        else:
            #cmd = "ssh {0}@{1} 'bash -s' < 'tasklist |grep selenium-server'".format(self.input.servers[0].ssh_username,
            #                                                                        host)
            cmd = 'tasklist |grep java'
            o, r = self.shell.execute_command(cmd)
            #cmd = "ssh {0}@{1} 'bash -s' < 'ps -ef|grep selenium-server'"
            if str(o).find('java') > -1:
                return True
        return False

    def setUp(self):
        try:
            self.log = logger.Logger.get_logger()
            self.input = TestInputSingleton.input
            self.servers = self.input.servers
            self.browser = self.input.ui_conf['browser']
            self.replica  = self.input.param("replica", 1)
            self.case_number = self.input.param("case_number", 0)
            self.cluster = Cluster()
            self.machine = self.input.ui_conf['server']
            self.driver = None
            self.shell = RemoteMachineShellConnection(self.machine)
            # avoid cleanup if the previous test has already been torn down
            if not self.input.param("skip_cleanup", True) \
                                            or self.case_number == 1:
                self.tearDown()
            self._log_start(self)
            self._kill_old_drivers()
            #thread for selenium server
            if not self._is_selenium_running():
                self.log.info('start selenium')
                self._start_selenium_thread()
                self._wait_for_selenium_is_started()
            self.log.info('start selenium session')
            if self.browser == 'ff':
                self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                               .format(self.machine.ip,
                                                       self.machine.port),
                                               desired_capabilities=DesiredCapabilities.FIREFOX)
            elif self.browser == 'chrome':
                self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                               .format(self.machine.ip,
                                                       self.machine.port),
                                               desired_capabilities=DesiredCapabilities.CHROME)
            self.log.info('selenium session started')
            self.driver.get("http://{0}:{1}".format(self.servers[0].ip,
                                                    self.servers[0].port))
            self.driver.maximize_window()
        except Exception as ex:
            self.input.test_params["stop-on-failure"] = True
            self.log.error("SETUP WAS FAILED. ALL TESTS WILL BE SKIPPED")
            self.fail(ex)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(),
                                              self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(),
                                               self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    def tearDown(self):
        try:
            if self.driver:
                path_screen = self.input.ui_conf['screenshots'] or 'logs/screens'
                full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen)
                self.log.info('screenshot is available: %s' % full_path)
                if not os.path.exists(path_screen):
                    os.mkdir(path_screen)
                self.driver.get_screenshot_as_file(os.path.abspath(full_path))
            rest = RestConnection(self.servers[0])
            if rest._rebalance_progress_status() == 'running':
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
            if self.driver:
                self.driver.close()
        except Exception:
            raise
        finally:
            if self.driver:
                self.shell.disconnect()
            self.cluster.shutdown()
class BucketConfig(BaseTestCase):

    def setUp(self):
        super(BucketConfig, self).setUp()
        self.testcase = '2'
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        #self.time_synchronization = self.input.param("time_sync", "enabledWithoutDrift")
        self.lww = self.input.param("lww", True)
        self.drift = self.input.param("drift", False)
        self.bucket='bucket-1'
        self.master = self.servers[0]
        self.rest = RestConnection(self.master)
        self.cluster = Cluster()
        self.skip_rebalance = self.input.param("skip_rebalance", False)

        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        mem_quota = int(self.rest.get_nodes_self().mcdMemoryReserved *
                        node_ram_ratio)

        if not self.skip_rebalance:
            self.rest.init_cluster(self.master.rest_username,
                self.master.rest_password)
            self.rest.init_cluster_memoryQuota(self.master.rest_username,
                self.master.rest_password,
                memoryQuota=mem_quota)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
                ClusterOperationHelper.wait_for_ns_servers_or_assert(
                    [self.master], self.testcase)
            try:
                rebalanced = ClusterOperationHelper.add_and_rebalance(
                    self.servers)

            except Exception as e:
                self.fail('cluster is not rebalanced: {0}'.format(e))

        self._create_bucket(self.lww, self.drift)

    def tearDown(self):
        super(BucketConfig, self).tearDown()
        # NOTE: the cleanup below is unreachable because of this early return
        return
        if "skip_cleanup" not in TestInputSingleton.input.test_params:
            BucketOperationHelper.delete_all_buckets_or_assert(
                self.servers, self.testcase)
            ClusterOperationHelper.cleanup_cluster(self.servers)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(
                self.servers, self.testcase)

    def test_modify_bucket_params(self):
        try:
            self.log.info("Modifying timeSynchronization value after bucket creation .....")
            self._modify_bucket()
        except Exception as e:
            traceback.print_exc()
            self.fail('[ERROR] Modify testcase failed .., {0}'.format(e))

    def test_restart(self):
        try:
            self.log.info("Restarting the servers ..")
            self._restart_server(self.servers[:])
            self.log.info("Verifying bucket settings after restart ..")
            self._check_config()
        except Exception as e:
            traceback.print_exc()
            self.fail("[ERROR] Check data after restart failed with exception {0}".format(e))

    def test_failover(self):
        num_nodes = 1
        self.cluster.failover(self.servers, self.servers[1:num_nodes + 1])
        try:
            self.log.info("Failing over 1 of the servers ..")
            self.cluster.rebalance(self.servers, [], self.servers[1:num_nodes + 1])
            self.log.info("Verifying bucket settings after failover ..")
            self._check_config()
        except Exception as e:
            traceback.print_exc()
            self.fail('[ERROR]Failed to failover .. , {0}'.format(e))

    def test_rebalance_in(self):
        try:
            self.log.info("Rebalancing 1 of the servers ..")
            ClusterOperationHelper.add_and_rebalance(
                self.servers)
            self.log.info("Verifying bucket settings after rebalance ..")
            self._check_config()
        except Exception as e:
            self.fail('[ERROR]Rebalance failed .. , {0}'.format(e))

    def test_backup_same_cluster(self):
        self.shell = RemoteMachineShellConnection(self.master)
        self.buckets = RestConnection(self.master).get_buckets()
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = "/tmp/backup"
        self.command_options = self.input.param("command_options", '')
        try:
            shell = RemoteMachineShellConnection(self.master)
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)

            time.sleep(5)
            shell.restore_backupFile(self.couchbase_login_info, self.backup_location, [bucket.name for bucket in self.buckets])

        finally:
            self._check_config()

    def test_backup_diff_bucket(self):
        self.shell = RemoteMachineShellConnection(self.master)
        self.buckets = RestConnection(self.master).get_buckets()
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = "/tmp/backup"
        self.command_options = self.input.param("command_options", '')
        try:
            shell = RemoteMachineShellConnection(self.master)
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)

            time.sleep(5)
            self._create_bucket(lww=False, name="new_bucket")
            self.buckets = RestConnection(self.master).get_buckets()
            shell.restore_backupFile(self.couchbase_login_info, self.backup_location, ["new_bucket"])

        finally:
            self._check_config()

    ''' Helper functions for above testcases
    '''
    #create a bucket if it doesn't exist. The drift parameter is currently unused
    def _create_bucket(self, lww=True, drift=False, name=None):

        if lww:
            self.lww=lww

        if  name:
            self.bucket=name

        helper = RestHelper(self.rest)
        if not helper.bucket_exists(self.bucket):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
                self.servers)
            info = self.rest.get_nodes_self()
            self.rest.create_bucket(bucket=self.bucket,
                ramQuotaMB=512, authType='sasl', lww=self.lww)
            try:
                ready = BucketOperationHelper.wait_for_memcached(self.master,
                    self.bucket)
            except Exception as e:
                self.fail('unable to create bucket: {0}'.format(e))

    # KETAKI to change this
    def _modify_bucket(self):
        helper = RestHelper(self.rest)
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
            self.servers)
        info = self.rest.get_nodes_self()

        status, content = self.rest.change_bucket_props(bucket=self.bucket,
            ramQuotaMB=512, authType='sasl', timeSynchronization='enabledWithOutDrift')
        if re.search('TimeSyncronization not allowed in update bucket', content):
            self.log.info('[PASS]Expected modify bucket to disallow Time Synchronization.')
        else:
            self.fail('[ERROR] Not expected to allow modify bucket for Time Synchronization')

    def _restart_server(self, servers):
        for server in servers:
            shell = RemoteMachineShellConnection(server)
            shell.stop_couchbase()
            time.sleep(10)
            shell.start_couchbase()
            shell.disconnect()
        ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, self, wait_if_warmup=True)

    # REBOOT
    def _reboot_server(self):
        try:
            for server in self.servers[:]:
                shell = RemoteMachineShellConnection(server)
                if shell.extract_remote_info().type.lower() == 'windows':
                    o, r = shell.execute_command("shutdown -r -f -t 0")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))
                elif shell.extract_remote_info().type.lower() == 'linux':
                    o, r = shell.execute_command("reboot")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))

                    ClusterOperationHelper.wait_for_ns_servers_or_assert([server], self, wait_if_warmup=True)
                    shell = RemoteMachineShellConnection(server)
                    command = "/sbin/iptables -F"
                    o, r = shell.execute_command(command)
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} backup".format(server.ip))
        finally:
            self.log.info("Warming-up servers ..")
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self, wait_if_warmup=True)



    def _check_config(self):
        rc = self.rest.get_bucket_json(self.bucket)
        if 'conflictResolutionType' in rc:
            conflictResolution = rc['conflictResolutionType']
            self.assertTrue(conflictResolution == 'lww',
                            'Expected conflict resolution of lww but got {0}'.format(conflictResolution))


        """ drift is disabled in 4.6, commenting out for now as it may come back later
Exemple #40
0
 def setUp(self):
     self.times_teardown_called = 1
     super(CliBaseTest, self).setUp()
     self.r = random.Random()
     self.vbucket_count = 1024
     self.cluster = Cluster()
     self.clusters_dic = self.input.clusters
     if self.clusters_dic:
         if len(self.clusters_dic) > 1:
             self.dest_nodes = self.clusters_dic[1]
             self.dest_master = self.dest_nodes[0]
         elif len(self.clusters_dic) == 1:
             self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
     else:
         self.log.error("**** Cluster config is setup in ini file. ****")
     self.shell = RemoteMachineShellConnection(self.master)
     if not self.skip_init_check_cbserver:
         self.rest = RestConnection(self.master)
         self.cb_version = self.rest.get_nodes_version()
         """ cli output message """
         self.cli_bucket_create_msg = "SUCCESS: Bucket created"
         self.cli_rebalance_msg = "SUCCESS: Rebalance complete"
         if self.cb_version[:3] == "4.6":
             self.cli_bucket_create_msg = "SUCCESS: bucket-create"
             self.cli_rebalance_msg = "SUCCESS: rebalanced cluster"
     self.import_back = self.input.param("import_back", False)
     if self.import_back:
         if len(self.servers) < 3:
             self.fail("This test needs minimum of 3 vms to run ")
     self.test_type = self.input.param("test_type", "import")
     self.import_file = self.input.param("import_file", None)
     self.imex_type = self.input.param("imex_type", "json")
     self.format_type = self.input.param("format_type", "lines")
     self.import_method = self.input.param("import_method", "file://")
     self.force_failover = self.input.param("force_failover", False)
     self.json_invalid_errors = self.input.param("json-invalid-errors", None)
     self.field_separator = self.input.param("field-separator", "comma")
     self.key_gen = self.input.param("key-gen", True)
     self.skip_docs = self.input.param("skip-docs", None)
     self.limit_docs = self.input.param("limit-docs", None)
     self.limit_rows = self.input.param("limit-rows", None)
     self.skip_rows = self.input.param("skip-rows", None)
     self.omit_empty = self.input.param("omit-empty", None)
     self.infer_types = self.input.param("infer-types", None)
     self.fx_generator = self.input.param("fx-generator", None)
     self.fx_gen_start = self.input.param("fx-gen-start", None)
     self.secure_conn = self.input.param("secure-conn", False)
     self.no_cacert = self.input.param("no-cacert", False)
     self.no_ssl_verify = self.input.param("no-ssl-verify", False)
     self.verify_data = self.input.param("verify-data", False)
     self.field_substitutions = self.input.param("field-substitutions", None)
     self.check_preload_keys = self.input.param("check-preload-keys", True)
     self.debug_logs = self.input.param("debug-logs", False)
     self.should_fail = self.input.param("should-fail", False)
     info = self.shell.extract_remote_info()
     self.os_version = info.distribution_version.lower()
     os_type = info.type.lower()
     self.excluded_commands = self.input.param("excluded_commands", None)
     self.os = 'linux'
     self.full_v = None
     self.short_v = None
     self.build_number = None
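     # ask ns_server (via /diag/eval) for the install's bin directory so the CLI
     # tools below can be invoked by absolute path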
     cmd =  'curl -g %s:8091/diag/eval -u Administrator:password ' % self.master.ip
     cmd += '-d "path_config:component_path(bin)."'
     bin_path  = subprocess.check_output(cmd, shell=True)
     if "bin" not in bin_path:
         self.fail("Check if cb server install on %s" % self.master.ip)
     else:
         self.cli_command_path = bin_path.replace('"','') + "/"
     self.root_path = LINUX_ROOT_PATH
     self.tmp_path = "/tmp/"
     self.tmp_path_raw = "/tmp/"
     self.cmd_backup_path = LINUX_BACKUP_PATH
     self.backup_path = LINUX_BACKUP_PATH
     self.cmd_ext = ""
     self.src_file = ""
     self.des_file = ""
     self.sample_files_path = LINUX_COUCHBASE_SAMPLE_PATH
     self.log_path = LINUX_COUCHBASE_LOGS_PATH
     self.base_cb_path = LINUX_CB_PATH
     """ non root path """
     if self.nonroot:
         self.sample_files_path = "/home/%s%s" % (self.master.ssh_username,
                                                  LINUX_COUCHBASE_SAMPLE_PATH)
         self.log_path = "/home/%s%s" % (self.master.ssh_username,
                                         LINUX_COUCHBASE_LOGS_PATH)
         self.base_cb_path = "/home/%s%s" % (self.master.ssh_username,
                                             LINUX_CB_PATH)
         self.root_path = "/home/%s/" % self.master.ssh_username
     if os_type == 'windows':
         self.os = 'windows'
         self.cmd_ext = ".exe"
         self.root_path = WIN_ROOT_PATH
         self.tmp_path = WIN_TMP_PATH
         self.tmp_path_raw = WIN_TMP_PATH_RAW
         self.cmd_backup_path = WIN_BACKUP_C_PATH
         self.backup_path = WIN_BACKUP_PATH
         self.sample_files_path = WIN_COUCHBASE_SAMPLE_PATH_C
         self.log_path = WIN_COUCHBASE_LOGS_PATH
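         # rewrite the Windows install path into its cygwin form (space escaped);
         # commands on Windows test hosts appear to be run through a cygwin-style shell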
         win_format = "C:/Program Files"
         cygwin_format = "/cygdrive/c/Program\ Files"
         if win_format in self.cli_command_path:
             self.cli_command_path = self.cli_command_path.replace(win_format,
                                                                   cygwin_format)
     if info.distribution_type.lower() == 'mac':
         self.os = 'mac'
     self.full_v, self.short_v, self.build_number = self.shell.get_cbversion(os_type)
     self.couchbase_usrname = "%s" % (self.input.membase_settings.rest_username)
     self.couchbase_password = "******" % (self.input.membase_settings.rest_password)
     self.cb_login_info = "%s:%s" % (self.couchbase_usrname,
                                     self.couchbase_password)
     self.path_type = self.input.param("path_type", None)
     if self.path_type is None:
         self.log.info("Test command with absolute path ")
     elif self.path_type == "local":
         self.log.info("Test command at %s dir " % self.cli_command_path)
         self.cli_command_path = "cd %s; ./" % self.cli_command_path
     self.cli_command = self.input.param("cli_command", None)
     self.command_options = self.input.param("command_options", None)
     if self.command_options is not None:
         self.command_options = self.command_options.split(";")
     if str(self.__class__).find('couchbase_clitest.CouchbaseCliTest') == -1:
         if len(self.servers) > 1 and int(self.nodes_init) == 1:
             servers_in = [self.servers[i + 1] for i in range(self.num_servers - 1)]
             self.cluster.rebalance(self.servers[:1], servers_in, [])
     for bucket in self.buckets:
         testuser = [{'id': bucket.name, 'name': bucket.name, 'password': '******'}]
         rolelist = [{'id': bucket.name, 'name': bucket.name, 'roles': 'admin'}]
         self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
class compression(XDCRNewBaseTest):
    def setUp(self):
        super(compression, self).setUp()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()
        self.chain_length = self._input.param("chain_length", 2)
        self.topology = self._input.param("ctopology", "chain")
        if self.chain_length > 2:
            self.c3_cluster = self.get_cb_cluster_by_name('C3')
            self.c3_master = self.c3_cluster.get_master_node()
        self.cluster = Cluster()

    def tearDown(self):
        super(compression, self).tearDown()

    def _set_compression_type(self, cluster, bucket_name, compression_type="None"):
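        # Assumes a replication whose id contains bucket_name already exists on this
        # cluster (otherwise repl_id is never assigned); the compressionType setting
        # is POSTed to the XDCR replication-settings REST endpoint via curl on the
        # cluster's master node.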
        repls = cluster.get_remote_clusters()[0].get_replications()
        for repl in repls:
            if bucket_name in str(repl):
                repl_id = repl.get_repl_id()
        shell = RemoteMachineShellConnection(cluster.get_master_node())
        repl_id = str(repl_id).replace('/','%2F')
        base_url = "http://" + cluster.get_master_node().ip + ":8091/settings/replications/" + repl_id
        command = "curl -X POST -u Administrator:password " + base_url + " -d compressionType=" + str(compression_type)
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)
        shell.disconnect()
        return output, error

    def _verify_compression(self, cluster, compr_bucket_name="", uncompr_bucket_name="",
                            compression_type="None", repl_time=0):
        repls = cluster.get_remote_clusters()[0].get_replications()
        for repl in repls:
            if compr_bucket_name in str(repl):
                compr_repl_id = repl.get_repl_id()
            elif uncompr_bucket_name in str(repl):
                uncompr_repl_id = repl.get_repl_id()

        compr_repl_id = str(compr_repl_id).replace('/', '%2F')
        uncompr_repl_id = str(uncompr_repl_id).replace('/', '%2F')

        base_url = "http://" + cluster.get_master_node().ip + ":8091/settings/replications/" + compr_repl_id
        shell = RemoteMachineShellConnection(cluster.get_master_node())
        command = "curl -u Administrator:password " + base_url
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)
        self.assertTrue('"compressionType":"Snappy"' in output[0],
                        "Compression Type for replication " + compr_repl_id + " is not Snappy")
        self.log.info("Compression Type for replication " + compr_repl_id + " is Snappy")

        base_url = "http://" + cluster.get_master_node().ip + ":8091/pools/default/buckets/" + compr_bucket_name + \
                   "/stats/replications%2F" + compr_repl_id + "%2Fdata_replicated?haveTStamp=" + str(repl_time)
        command = "curl -u Administrator:password " + base_url
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)
        output = json.loads(output[0])
        compressed_data_replicated = 0
        for node in cluster.get_nodes():
            items = output["nodeStats"]["{0}:8091".format(node.ip)]
            for item in items:
                compressed_data_replicated += item
        self.log.info("Compressed data for replication {0} is {1}".format(compr_repl_id, compressed_data_replicated))

        base_url = "http://" + cluster.get_master_node().ip + ":8091/pools/default/buckets/" + uncompr_bucket_name + \
                   "/stats/replications%2F" + uncompr_repl_id + "%2Fdata_replicated?haveTStamp=" + str(repl_time)
        command = "curl -u Administrator:password " + base_url
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)
        output = json.loads(output[0])
        uncompressed_data_replicated = 0
        for node in cluster.get_nodes():
            items = output["nodeStats"]["{0}:8091".format(node.ip)]
            for item in items:
                uncompressed_data_replicated += item
        self.log.info("Uncompressed data for replication {0} is {1}".format(uncompr_repl_id, uncompressed_data_replicated))

        self.assertTrue(uncompressed_data_replicated > compressed_data_replicated,
                        "Compression did not work as expected")
        self.log.info("Compression worked as expected")

        shell.disconnect()

    def test_compression_with_unixdcr_incr_load(self):
        bucket_prefix = self._input.param("bucket_prefix", "standard_bucket_")
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, bucket_prefix + "1", compression_type)
        self._set_compression_type(self.src_cluster, bucket_prefix + "2")
        if self.chain_length > 2 and self.topology == TOPOLOGY.CHAIN:
            self._set_compression_type(self.dest_cluster, bucket_prefix + "1", compression_type)
            self._set_compression_type(self.dest_cluster, bucket_prefix + "2")
        if self.chain_length > 2 and self.topology == TOPOLOGY.RING:
            self._set_compression_type(self.dest_cluster, bucket_prefix + "1", compression_type)
            self._set_compression_type(self.dest_cluster, bucket_prefix + "2")
            self._set_compression_type(self.c3_cluster, bucket_prefix + "1", compression_type)
            self._set_compression_type(self.c3_cluster, bucket_prefix + "2")

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.perform_update_delete()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name=bucket_prefix + "1",
                                 uncompr_bucket_name=bucket_prefix + "2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        if self.chain_length > 2 and self.topology == TOPOLOGY.CHAIN:
            self._verify_compression(cluster=self.dest_cluster,
                                     compr_bucket_name=bucket_prefix + "1",
                                     uncompr_bucket_name=bucket_prefix + "2",
                                     compression_type=compression_type,
                                     repl_time=repl_time)
        if self.chain_length > 2 and self.topology == TOPOLOGY.RING:
            self._verify_compression(cluster=self.dest_cluster,
                                     compr_bucket_name=bucket_prefix + "1",
                                     uncompr_bucket_name=bucket_prefix + "2",
                                     compression_type=compression_type,
                                     repl_time=repl_time)
            self._verify_compression(cluster=self.c3_cluster,
                                     compr_bucket_name=bucket_prefix + "1",
                                     uncompr_bucket_name=bucket_prefix + "2",
                                     compression_type=compression_type,
                                     repl_time=repl_time)
        self.verify_results()

    def test_compression_with_unixdcr_backfill_load(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.perform_update_delete()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_bixdcr_incr_load(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")
        self._set_compression_type(self.dest_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.dest_cluster, "standard_bucket_2")

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)
        gen_create = BlobGenerator('comprTwo-', 'comprTwo-', self._value_size, end=self._num_items)
        self.dest_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.perform_update_delete()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self._verify_compression(cluster=self.dest_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_bixdcr_backfill_load(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")
        self._set_compression_type(self.dest_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.dest_cluster, "standard_bucket_2")

        self.src_cluster.pause_all_replications()
        self.dest_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)
        gen_create = BlobGenerator('comprTwo-', 'comprTwo-', self._value_size, end=self._num_items)
        self.dest_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()
        self.dest_cluster.resume_all_replications()

        self.perform_update_delete()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self._verify_compression(cluster=self.dest_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_pause_resume(self):
        repeat = self._input.param("repeat", 5)
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.async_perform_update_delete()

        for i in range(0, repeat):
            self.src_cluster.pause_all_replications()
            self.sleep(30)
            self.src_cluster.resume_all_replications()

        self._wait_for_replication_to_catchup()

        self.verify_results()

    def test_compression_with_optimistic_threshold_change(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        src_conn = RestConnection(self.src_cluster.get_master_node())
        src_conn.set_xdcr_param('standard_bucket_1', 'standard_bucket_1', 'optimisticReplicationThreshold',
                                self._optimistic_threshold)
        src_conn.set_xdcr_param('standard_bucket_2', 'standard_bucket_2', 'optimisticReplicationThreshold',
                                self._optimistic_threshold)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_advanced_settings(self):
        batch_count = self._input.param("batch_count", 10)
        batch_size = self._input.param("batch_size", 2048)
        source_nozzle = self._input.param("source_nozzle", 2)
        target_nozzle = self._input.param("target_nozzle", 2)

        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        src_conn = RestConnection(self.src_cluster.get_master_node())
        src_conn.set_xdcr_param('standard_bucket_1', 'standard_bucket_1', 'workerBatchSize', batch_count)
        src_conn.set_xdcr_param('standard_bucket_1', 'standard_bucket_1', 'docBatchSizeKb', batch_size)
        src_conn.set_xdcr_param('standard_bucket_1', 'standard_bucket_1', 'sourceNozzlePerNode', source_nozzle)
        src_conn.set_xdcr_param('standard_bucket_1', 'standard_bucket_1', 'targetNozzlePerNode', target_nozzle)
        src_conn.set_xdcr_param('standard_bucket_2', 'standard_bucket_2', 'workerBatchSize', batch_count)
        src_conn.set_xdcr_param('standard_bucket_2', 'standard_bucket_2', 'docBatchSizeKb', batch_size)
        src_conn.set_xdcr_param('standard_bucket_2', 'standard_bucket_2', 'sourceNozzlePerNode', source_nozzle)
        src_conn.set_xdcr_param('standard_bucket_2', 'standard_bucket_2', 'targetNozzlePerNode', target_nozzle)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_capi(self):
        self.setup_xdcr()
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        output, error = self._set_compression_type(self.src_cluster, "default", compression_type)
        self.assertTrue("The value can not be specified for CAPI replication" in output[0], "Compression enabled for CAPI")
        self.log.info("Compression not enabled for CAPI as expected")

    def test_compression_with_rebalance_in(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        self.src_cluster.rebalance_in()

        self._wait_for_replication_to_catchup()

        self.verify_results()

    def test_compression_with_rebalance_out(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        self.src_cluster.rebalance_out()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_swap_rebalance(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        self.src_cluster.swap_rebalance()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_failover(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        src_conn = RestConnection(self.src_cluster.get_master_node())
        graceful = self._input.param("graceful", False)
        self.recoveryType = self._input.param("recoveryType", None)
        self.src_cluster.failover(graceful=graceful)

        self.sleep(30)

        if self.recoveryType:
            server_nodes = src_conn.node_statuses()
            for node in server_nodes:
                if node.ip == self._input.servers[1].ip:
                    src_conn.set_recovery_type(otpNode=node.id, recoveryType=self.recoveryType)
                    self.sleep(30)
                    src_conn.add_back_node(otpNode=node.id)
            rebalance = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [], [])
            rebalance.result()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_replication_delete_and_create(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.async_perform_update_delete()

        rest_conn = RestConnection(self.src_master)
        rest_conn.remove_all_replications()
        rest_conn.remove_all_remote_clusters()

        self.src_cluster.get_remote_clusters()[0].clear_all_replications()
        self.src_cluster.clear_all_remote_clusters()

        self.setup_xdcr()

        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        self._wait_for_replication_to_catchup()

        self.verify_results()

    def test_compression_with_bixdcr_and_compression_one_way(self):
        self.setup_xdcr()
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "default", compression_type)

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)
        gen_create = BlobGenerator('comprTwo-', 'comprTwo-', self._value_size, end=self._num_items)
        self.dest_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.perform_update_delete()

        self._wait_for_replication_to_catchup()

        self.verify_results()

    def test_compression_with_enabling_later(self):
        self.setup_xdcr()
        self.sleep(60)

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.async_perform_update_delete()
        self.sleep(10)

        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "default", compression_type)

        self._wait_for_replication_to_catchup()

        self.verify_results()

    def test_compression_with_disabling_later(self):
        self.setup_xdcr()
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "default", compression_type)

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.async_perform_update_delete()
        self.sleep(10)

        self._set_compression_type(self.src_cluster, "default", "None")

        self._wait_for_replication_to_catchup()

        self.verify_results()

    def test_compression_with_rebalance_out_target_and_disabling(self):
        self.setup_xdcr()
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "default", compression_type)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        self.dest_cluster.rebalance_out()

        self._set_compression_type(self.src_cluster, "default", "None")
        self.sleep(5)
        self._set_compression_type(self.src_cluster, "default", compression_type)

        self._wait_for_replication_to_catchup()

        self.verify_results()

    def test_compression_with_rebalance_out_src_and_disabling(self):
        self.setup_xdcr()
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "default", compression_type)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        self.src_cluster.rebalance_out()

        self._set_compression_type(self.src_cluster, "default", "None")
        self.sleep(5)
        self._set_compression_type(self.src_cluster, "default", compression_type)

        self._wait_for_replication_to_catchup()

        self.verify_results()
 def setUp(self):
     self.log = logger.Logger.get_logger()
     self.helper = SubdocHelper(self, "default")
     self.helper.setup_cluster()
     self.cluster = Cluster()
     self.servers = self.helper.servers
Exemple #43
0
class Capi(XDCRNewBaseTest, NewUpgradeBaseTest):

    def setUp(self):
        super(Capi, self).setUp()
        self.cluster = Cluster()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()
        self.use_hostnames = self._input.param("use_hostnames", False)
        self.src_init = self._input.param('src_init', 2)
        self.dest_init = self._input.param('dest_init', 1)
        self.product = self._input.param('product', 'couchbase-server')
        self.initial_version = self._input.param('initial_version', '2.5.1-1083')
        self.initial_vbuckets = self._input.param('initial_vbuckets', 1024)
        self.init_nodes = self._input.param('init_nodes', True)
        self.initial_build_type = self._input.param('initial_build_type', None)
        self.upgrade_build_type = self._input.param('upgrade_build_type', self.initial_build_type)
        self.master = self.src_master
        self.rest = RestConnection(self.src_master)

    def tearDown(self):
        super(Capi, self).tearDown()

    def _start_es_replication(self, bucket='default', xdcr_params={}):
        rest_conn = RestConnection(self.src_cluster.get_master_node())
        if bucket == 'default':
            self.log.info("Creating default bucket")
            rest_conn.create_bucket(bucket='default', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1,
                                proxyPort=11211, bucketType='membase', replica_index=1, threadsNumber=3,
                                flushEnabled=1, lww=False)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='default', authType='none',
                                   saslPassword='', replicaNumber=1, proxyPort=11211, bucketType='membase',
                                   evictionPolicy='valueOnly')
        elif bucket == 'sasl':
            self.log.info("Creating sasl bucket")
            rest_conn.create_bucket(bucket='sasl', ramQuotaMB=100, authType='sasl', saslPassword='******', replicaNumber=1,
                                proxyPort=11211, bucketType='membase', replica_index=1, threadsNumber=3,
                                flushEnabled=1, lww=False)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='sasl', authType='sasl',
                                   saslPassword='******', replicaNumber=1, proxyPort=11211, bucketType='membase',
                                   evictionPolicy='valueOnly')
        elif bucket == 'standard':
            self.log.info("Creating standard bucket")
            rest_conn.create_bucket(bucket='standard', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1,
                                proxyPort=STANDARD_BUCKET_PORT, bucketType='membase', replica_index=1, threadsNumber=3,
                                flushEnabled=1, lww=False)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='standard', authType='none',
                                   saslPassword='', replicaNumber=1, proxyPort=STANDARD_BUCKET_PORT, bucketType='membase',
                                   evictionPolicy='valueOnly')
        elif bucket == 'lww':
            self.log.info("Creating lww bucket")
            rest_conn.create_bucket(bucket='lww', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1,
                                proxyPort=11211, bucketType='membase', replica_index=1, threadsNumber=3,
                                flushEnabled=1, lww=True)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='lww', authType='none',
                                   saslPassword='', replicaNumber=1, proxyPort=11211, bucketType='membase',
                                   evictionPolicy='valueOnly')
        esrest_conn = EsRestConnection(self.dest_cluster.get_master_node())
        esrest_conn.create_index(bucket)
        rest_conn.add_remote_cluster(remoteIp=self.dest_master.ip, remotePort=9091, username='******',
                                     password='******', name='es')
        self.src_cluster.get_remote_clusters().append(XDCRRemoteClusterRef(self.src_cluster, self.dest_cluster,
                                                                       Utility.get_rc_name(self.src_cluster.get_name(),
                                                                                        self.dest_cluster.get_name())))
        repl_id = rest_conn.start_replication(replicationType='continuous', fromBucket=bucket, toCluster='es',
                                              rep_type='capi', toBucket=bucket, xdcr_params=xdcr_params)
        return repl_id
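    # Illustrative pairing (an assumption, not code from the original suite): the tests
    # below combine the helper above with _verify_es_results, roughly:
    #   repl_id = self._start_es_replication(bucket='default',
    #                                        xdcr_params={'checkpointInterval': '60'})
    #   # ...load/mutate docs, then wait for ES replication to catch up...
    #   self._verify_es_results(bucket='default')
    # The remote cluster name 'es' and port 9091 are registered by the helper above.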

    def _verify_es_results(self, bucket='default'):
        esrest_conn = EsRestConnection(self.dest_master)
        es_docs = esrest_conn.all_docs()
        self.log.info("Retrieved ES Docs")
        rest_conn = RestConnection(self.src_master)
        memcached_conn = VBucketAwareMemcached(rest_conn, bucket)
        self.log.info("Comparing CB and ES data")
        for doc in es_docs:
            es_data = doc['doc']
            mc_active = memcached_conn.memcached(str(es_data['_id']))
            cb_flags, cb_cas, cb_data = mc_active.get(str(es_data['_id']))
            self.assertDictEqual(es_data, json.loads(cb_data), "Data mismatch found - es data: {0} cb data: {1}".
                                 format(str(es_data), str(cb_data)))
        self.log.info("Data verified")

    def test_crud_ops_from_cb_to_es(self):
        bucket = self._input.param("bucket", 'default')
        repl_id = self._start_es_replication(bucket=bucket)

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results(bucket=bucket)

    def test_incr_crud_ops_from_cb_to_es(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.async_perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_pause_resume(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.async_load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        self.sleep(30)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_checkpointing(self):
        repl_id = self._start_es_replication(xdcr_params={"checkpointInterval":"60"})

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.sleep(120)

        vb0_node = None
        nodes = self.src_cluster.get_nodes()
        ip = VBucketAwareMemcached(rest_conn,'default').vBucketMap[0].split(':')[0]
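        # vBucketMap[0] above holds the "ip:port" of the node owning active vbucket 0;
        # split(':')[0] extracts that node's IP to match against the cluster nodes below.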
        for node in nodes:
            if ip == node.ip:
                vb0_node = node
        if not vb0_node:
            raise XDCRCheckpointException("Error determining the node containing active vb0")
        vb0_conn = RestConnection(vb0_node)
        try:
            checkpoint_record = vb0_conn.get_recent_xdcr_vb_ckpt(repl_id)
            self.log.info("Checkpoint record : {0}".format(checkpoint_record))
        except Exception as e:
            raise XDCRCheckpointException("Error retrieving last checkpoint document - {0}".format(e))

        self._verify_es_results()

    def test_capi_with_optimistic_replication(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)

        rest_conn.set_xdcr_param('default', 'default', 'optimisticReplicationThreshold', self._optimistic_threshold)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_filter(self):
        repl_id = self._start_es_replication(xdcr_params={'filterExpression':'es-5*'})

        rest_conn = RestConnection(self.src_master)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_advanced_settings(self):
        batch_count = self._input.param("batch_count", 10)
        batch_size = self._input.param("batch_size", 2048)
        source_nozzle = self._input.param("source_nozzle", 2)
        target_nozzle = self._input.param("target_nozzle", 2)
        enable_firewall = self._input.param("enable_firewall", False)

        capi_data_chan_size_multi = self._input.param("capi_data_chan_size_multi", None)
        if capi_data_chan_size_multi:
            shell = RemoteMachineShellConnection(self.src_master)
            command = "curl -X POST -u Administrator:password http://127.0.0.1:9998/xdcr/internalSettings " + \
                      "-d CapiDataChanSizeMultiplier=" + str(capi_data_chan_size_multi)
            output, error = shell.execute_command(command)
            shell.log_command_output(output, error)

        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)

        rest_conn.set_xdcr_param('default', 'default', 'workerBatchSize', batch_count)
        rest_conn.set_xdcr_param('default', 'default', 'docBatchSizeKb', batch_size)
        rest_conn.set_xdcr_param('default', 'default', 'sourceNozzlePerNode', source_nozzle)
        rest_conn.set_xdcr_param('default', 'default', 'targetNozzlePerNode', target_nozzle)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        if enable_firewall:
            NodeHelper.enable_firewall(self.dest_cluster.get_master_node())
            self.sleep(120)
            NodeHelper.disable_firewall(self.dest_cluster.get_master_node())

        self._verify_es_results()

    def test_capi_with_rebalance_in(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.src_cluster.rebalance_in()

        self._wait_for_es_replication_to_catchup(timeout=900)

        self._verify_es_results()

    def test_capi_with_rebalance_out(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.src_cluster.rebalance_out()

        self._wait_for_es_replication_to_catchup(timeout=900)

        self._verify_es_results()

    def test_capi_with_swap_rebalance(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.src_cluster.swap_rebalance()

        self._wait_for_es_replication_to_catchup(timeout=600)

        self._verify_es_results()

    def test_capi_with_failover(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        graceful = self._input.param("graceful", False)
        self.recoveryType = self._input.param("recoveryType", None)
        self.src_cluster.failover(graceful=graceful)

        self.sleep(30)

        if self.recoveryType:
            server_nodes = rest_conn.node_statuses()
            for node in server_nodes:
                if node.ip == self._input.servers[1].ip:
                    rest_conn.set_recovery_type(otpNode=node.id, recoveryType=self.recoveryType)
                    self.sleep(30)
                    rest_conn.add_back_node(otpNode=node.id)
            rebalance = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [], [])
            rebalance.result()

        self._verify_es_results()

    def test_capi_with_malformed_http_resp(self):
        repl_id = self._start_es_replication(xdcr_params={'workerBatchSize':'2000',
                                                          'docBatchSizeKb':'8096',
                                                          'targetNozzlePerNode':'64'})

        rest_conn = RestConnection(self.src_master)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self.src_master)\
                     + '/goxdcr.log*'
        for node in self.src_cluster.get_nodes():
            count = NodeHelper.check_goxdcr_log(
                            node,
                            "malformed HTTP response",
                            goxdcr_log)
            self.assertEqual(count, 0, "malformed HTTP response error message found in " + str(node.ip))
            self.log.info("malformed HTTP response error message not found in " + str(node.ip))

        self._verify_es_results()

    def test_capi_with_offline_upgrade(self):
        self._install(self._input.servers[:self.src_init + self.dest_init])
        upgrade_nodes = self.src_cluster.get_nodes()
        upgrade_version = self._input.param("upgrade_version", "5.0.0-1797")

        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        self._install(servers=upgrade_nodes, version=upgrade_version)

        self.log.info("######### Upgrade of CB cluster completed ##########")

        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value"}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_online_upgrade(self):
        self._install(self._input.servers[:self.src_init + self.dest_init])
        upgrade_version = self._input.param("upgrade_version", "5.0.0-1797")
        upgrade_nodes = self.src_cluster.get_nodes()
        extra_nodes = self._input.servers[self.src_init + self.dest_init:]

        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        added_versions = RestConnection(extra_nodes[0]).get_nodes_versions()
        self.cluster.rebalance(upgrade_nodes + extra_nodes, extra_nodes, [])
        self.log.info("Rebalance in all {0} nodes completed".format(added_versions[0]))
        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        self.sleep(15)
        status, content = ClusterOperationHelper.find_orchestrator(upgrade_nodes[0])
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        self.log.info("after rebalance in the master is {0}".format(content))
        find_master = False
        for new_server in extra_nodes:
            if content.find(new_server.ip) >= 0:
                find_master = True
                self.log.info("{0} Node {1} becomes the master".format(added_versions[0], new_server.ip))
                break
        if not find_master:
            raise Exception("After rebalancing in {0} nodes, none of them became the master".
                            format(added_versions[0]))
        self.log.info("Rebalancing out all old version nodes")
        self.cluster.rebalance(upgrade_nodes + extra_nodes, [], upgrade_nodes)
        self.src_master = self._input.servers[self.src_init + self.dest_init]

        self._install(self.src_cluster.get_nodes(), version=upgrade_version)
        upgrade_nodes = self._input.servers[self.src_init + self.dest_init:]
        extra_nodes = self.src_cluster.get_nodes()

        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        added_versions = RestConnection(extra_nodes[0]).get_nodes_versions()
        self.cluster.rebalance(upgrade_nodes + extra_nodes, extra_nodes, [])
        self.log.info("Rebalance in all {0} nodes completed".format(added_versions[0]))
        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        self.sleep(15)
        status, content = ClusterOperationHelper.find_orchestrator(upgrade_nodes[0])
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        self.log.info("after rebalance in the master is {0}".format(content))
        self.log.info("Rebalancing out all old version nodes")
        self.cluster.rebalance(upgrade_nodes + extra_nodes, [], upgrade_nodes)
        self.src_master = self._input.servers[0]

        self.log.info("######### Upgrade of CB cluster completed ##########")

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value"}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_cb_stop_and_start(self):
        bucket = self._input.param("bucket", 'default')
        repl_id = self._start_es_replication(bucket=bucket)

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.async_perform_update_delete()

        conn = RemoteMachineShellConnection(self.src_master)
        conn.stop_couchbase()
        conn.start_couchbase()

        self.sleep(30)

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results(bucket=bucket)

    def test_capi_with_erlang_crash(self):
        bucket = self._input.param("bucket", 'default')
        repl_id = self._start_es_replication(bucket=bucket)

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.async_perform_update_delete()

        conn = RemoteMachineShellConnection(self.src_master)
        conn.kill_erlang()
        conn.start_couchbase()

        self.sleep(30)

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results(bucket=bucket)

    def test_capi_with_memcached_crash(self):
        bucket = self._input.param("bucket", 'default')
        repl_id = self._start_es_replication(bucket=bucket)

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.async_perform_update_delete()

        conn = RemoteMachineShellConnection(self.src_master)
        conn.pause_memcached()
        conn.unpause_memcached()

        self.sleep(30)

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results(bucket=bucket)
class SGConfigTests(GatewayConfigBaseTest):
    def setUp(self):
        super(SGConfigTests, self).setUp()
        for server in self.servers:
            if self.case_number == 1:
                with open('pytests/sg/resources/gateway_config_walrus_template.json', 'r') as file:
                    filedata = file.read()
                    filedata = filedata.replace('LOCAL_IP', server.ip)
                with open('pytests/sg/resources/gateway_config_walrus.json', 'w') as file:
                    file.write(filedata)
                shell = RemoteMachineShellConnection(server)
                shell.execute_command("rm -rf {0}/tmp/*".format(self.folder_prefix))
                shell.copy_files_local_to_remote('pytests/sg/resources', '{0}/tmp'.format(self.folder_prefix))
                # will install sg only the first time
                self.install(shell)
                pid = self.is_sync_gateway_process_running(shell)
                self.assertNotEqual(pid, 0)
                exist = shell.file_exists('{0}/tmp/'.format(self.folder_prefix), 'gateway.log')
                self.assertTrue(exist)
                shell.disconnect()
        if self.case_number == 1:
            shutil.copy2('pytests/sg/resources/gateway_config_backup.json', 'pytests/sg/resources/gateway_config.json')
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            self.cluster = Cluster()
            self.cluster.create_default_bucket(self.master, 150)
            task = self.cluster.async_create_sasl_bucket(self.master, 'test_%E-.5', 'password', 150, 1)
            task.result()
            task = self.cluster.async_create_standard_bucket(self.master, 'db', 11219, 150, 1)
            task.result()

    def tearDown(self):
        super(SGConfigTests, self).tearDown()
        if self.case_number == 1:
            self.cluster.shutdown(force=True)

    def configHelp(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command_raw('/opt/couchbase-sync-gateway/bin/sync_gateway -help')
            for index, expected in enumerate(help_string):
                if error[index] != expected:
                    self.log.info('configHelp found unmatched help text. error({0}), help({1})'.format(error[index],
                                                                                                        expected))
                self.assertEqual(error[index], expected)
            shell.disconnect()

    def configCreateUser(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.config = 'gateway_config_walrus.json'
            self.assertTrue(self.start_sync_gateway(shell))
            self.assertTrue(self.create_user(shell))
            if not self.expected_stdout:
                self.assertTrue(self.get_user(shell))
                self.delete_user(shell)
            shell.disconnect()

    def configGuestUser(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.config = 'gateway_config_walrus.json'
            self.assertTrue(self.start_sync_gateway(shell))
            self.assertTrue(self.get_user(shell))
            self.assertFalse(self.delete_user(shell))
            shell.disconnect()

    def configCreateRole(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.config = 'gateway_config_walrus.json'
            self.assertTrue(self.start_sync_gateway(shell))
            self.assertTrue(self.create_role(shell, self.role_name, self.admin_channels))
            if not self.expected_stdout:
                self.assertTrue(self.get_role(shell))
                self.delete_role(shell)
            shell.disconnect()

    def configUserRolesChannels(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.config = 'gateway_config_walrus.json'
            self.assertTrue(self.start_sync_gateway(shell))
            self.assertTrue(self.parse_input_create_roles(shell))
            self.assertTrue(self.create_user(shell))
            if not self.expected_stdout:
                self.assertTrue(self.get_user(shell))
                self.delete_user(shell)
            shell.disconnect()

    def configUserRolesNotExist(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.config = 'gateway_config_walrus.json'
            self.assertTrue(self.start_sync_gateway(shell))
            self.assertTrue(self.create_user(shell))
            if not self.expected_stdout:
                self.assertTrue(self.get_user(shell))
                self.delete_user(shell)
            shell.disconnect()

    def configInspectDocChannel(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.config = 'gateway_config_walrus.json'
            self.assertTrue(self.start_sync_gateway(shell))
            self.assertTrue(self.parse_input_create_roles(shell))
            if self.doc_channels:
                success, revision = self.create_doc(shell)
                self.assertTrue(success)
                self.assertTrue(self.get_all_docs(shell))
                self.assertTrue(self.delete_doc(shell, revision))
            shell.disconnect()

    def configCBS(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            shutil.copy2('pytests/sg/resources/gateway_config_backup.json', 'pytests/sg/resources/gateway_config.json')
            self.assertTrue(self.start_sync_gateway_template(shell, self.template))
            if not self.expected_error:
                time.sleep(5)
                success, revision = self.create_doc(shell)
                self.assertTrue(success)
                self.assertTrue(self.delete_doc(shell, revision))
            self.assertTrue(self.check_message_in_gatewaylog(shell, self.expected_log))
            shell.disconnect()

    def configStartSgw(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            shutil.copy2('pytests/sg/resources/gateway_config_backup.json', 'pytests/sg/resources/gateway_config.json')
            shell.copy_files_local_to_remote('pytests/sg/resources', '/tmp')
            self.assertTrue(self.start_sync_gateway(shell))
            self.assertTrue(self.check_message_in_gatewaylog(shell, self.expected_log))
            if not self.expected_error:
                if self.admin_port:
                    self.assertTrue(self.get_users(shell))
                if self.sync_port:
                    success, revision = self.create_doc(shell)
                    self.assertTrue(success)
                    self.assertTrue(self.delete_doc(shell, revision))
            shell.disconnect()
Exemple #45
0
class CliBaseTest(BaseTestCase):
    vbucketId = 0

    def setUp(self):
        self.times_teardown_called = 1
        super(CliBaseTest, self).setUp()
        self.r = random.Random()
        self.vbucket_count = 1024
        self.cluster = Cluster()
        self.clusters_dic = self.input.clusters
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
            elif len(self.clusters_dic) == 1:
                self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
        else:
            self.log.error("**** Cluster config is setup in ini file. ****")
        self.shell = RemoteMachineShellConnection(self.master)
        if not self.skip_init_check_cbserver:
            self.rest = RestConnection(self.master)
            self.cb_version = self.rest.get_nodes_version()
            """ cli output message """
            self.cli_bucket_create_msg = "SUCCESS: Bucket created"
            self.cli_rebalance_msg = "SUCCESS: Rebalance complete"
            if self.cb_version[:3] == "4.6":
                self.cli_bucket_create_msg = "SUCCESS: bucket-create"
                self.cli_rebalance_msg = "SUCCESS: rebalanced cluster"
        self.import_back = self.input.param("import_back", False)
        if self.import_back:
            if len(self.servers) < 3:
                self.fail("This test needs minimum of 3 vms to run ")
        self.test_type = self.input.param("test_type", "import")
        self.import_file = self.input.param("import_file", None)
        self.imex_type = self.input.param("imex_type", "json")
        self.format_type = self.input.param("format_type", "lines")
        self.import_method = self.input.param("import_method", "file://")
        self.force_failover = self.input.param("force_failover", False)
        self.json_invalid_errors = self.input.param("json-invalid-errors", None)
        self.field_separator = self.input.param("field-separator", "comma")
        self.key_gen = self.input.param("key-gen", True)
        self.skip_docs = self.input.param("skip-docs", None)
        self.limit_docs = self.input.param("limit-docs", None)
        self.limit_rows = self.input.param("limit-rows", None)
        self.skip_rows = self.input.param("skip-rows", None)
        self.omit_empty = self.input.param("omit-empty", None)
        self.infer_types = self.input.param("infer-types", None)
        self.fx_generator = self.input.param("fx-generator", None)
        self.fx_gen_start = self.input.param("fx-gen-start", None)
        self.secure_conn = self.input.param("secure-conn", False)
        self.no_cacert = self.input.param("no-cacert", False)
        self.no_ssl_verify = self.input.param("no-ssl-verify", False)
        self.verify_data = self.input.param("verify-data", False)
        self.field_substitutions = self.input.param("field-substitutions", None)
        self.check_preload_keys = self.input.param("check-preload-keys", True)
        self.debug_logs = self.input.param("debug-logs", False)
        self.should_fail = self.input.param("should-fail", False)
        info = self.shell.extract_remote_info()
        self.os_version = info.distribution_version.lower()
        type = info.type.lower()
        self.excluded_commands = self.input.param("excluded_commands", None)
        self.os = 'linux'
        self.full_v = None
        self.short_v = None
        self.build_number = None
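        # Ask ns_server via /diag/eval for the install's bin directory; the result is
        # used to build absolute paths to the couchbase-cli tools below.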
        cmd =  'curl -g %s:8091/diag/eval -u Administrator:password ' % self.master.ip
        cmd += '-d "path_config:component_path(bin)."'
        bin_path  = subprocess.check_output(cmd, shell=True)
        if "bin" not in bin_path:
            self.fail("Check if cb server install on %s" % self.master.ip)
        else:
            self.cli_command_path = bin_path.replace('"','') + "/"
        self.root_path = LINUX_ROOT_PATH
        self.tmp_path = "/tmp/"
        self.tmp_path_raw = "/tmp/"
        self.cmd_backup_path = LINUX_BACKUP_PATH
        self.backup_path = LINUX_BACKUP_PATH
        self.cmd_ext = ""
        self.src_file = ""
        self.des_file = ""
        self.sample_files_path = LINUX_COUCHBASE_SAMPLE_PATH
        self.log_path = LINUX_COUCHBASE_LOGS_PATH
        self.base_cb_path = LINUX_CB_PATH
        """ non root path """
        if self.nonroot:
            self.sample_files_path = "/home/%s%s" % (self.master.ssh_username,
                                                     LINUX_COUCHBASE_SAMPLE_PATH)
            self.log_path = "/home/%s%s" % (self.master.ssh_username,
                                            LINUX_COUCHBASE_LOGS_PATH)
            self.base_cb_path = "/home/%s%s" % (self.master.ssh_username,
                                                LINUX_CB_PATH)
            self.root_path = "/home/%s/" % self.master.ssh_username
        if type == 'windows':
            self.os = 'windows'
            self.cmd_ext = ".exe"
            self.root_path = WIN_ROOT_PATH
            self.tmp_path = WIN_TMP_PATH
            self.tmp_path_raw = WIN_TMP_PATH_RAW
            self.cmd_backup_path = WIN_BACKUP_C_PATH
            self.backup_path = WIN_BACKUP_PATH
            self.sample_files_path = WIN_COUCHBASE_SAMPLE_PATH_C
            self.log_path = WIN_COUCHBASE_LOGS_PATH
            win_format = "C:/Program Files"
            cygwin_format = "/cygdrive/c/Program\ Files"
            if win_format in self.cli_command_path:
                self.cli_command_path = self.cli_command_path.replace(win_format,
                                                                      cygwin_format)
        if info.distribution_type.lower() == 'mac':
            self.os = 'mac'
        self.full_v, self.short_v, self.build_number = self.shell.get_cbversion(type)
        self.couchbase_usrname = "%s" % (self.input.membase_settings.rest_username)
        self.couchbase_password = "******" % (self.input.membase_settings.rest_password)
        self.cb_login_info = "%s:%s" % (self.couchbase_usrname,
                                        self.couchbase_password)
        self.path_type = self.input.param("path_type", None)
        if self.path_type is None:
            self.log.info("Test command with absolute path ")
        elif self.path_type == "local":
            self.log.info("Test command at %s dir " % self.cli_command_path)
            self.cli_command_path = "cd %s; ./" % self.cli_command_path
        self.cli_command = self.input.param("cli_command", None)
        self.command_options = self.input.param("command_options", None)
        if self.command_options is not None:
            self.command_options = self.command_options.split(";")
        if str(self.__class__).find('couchbase_clitest.CouchbaseCliTest') == -1:
            if len(self.servers) > 1 and int(self.nodes_init) == 1:
                servers_in = [self.servers[i + 1] for i in range(self.num_servers - 1)]
                self.cluster.rebalance(self.servers[:1], servers_in, [])
        for bucket in self.buckets:
            testuser = [{'id': bucket.name, 'name': bucket.name, 'password': '******'}]
            rolelist = [{'id': bucket.name, 'name': bucket.name, 'roles': 'admin'}]
            self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)


    def tearDown(self):
        if not self.input.param("skip_cleanup", True):
            if self.times_teardown_called > 1 :
                self.shell.disconnect()
        if self.input.param("skip_cleanup", True):
            if self.case_number > 1 or self.times_teardown_called > 1:
                self.shell.disconnect()
        self.times_teardown_called += 1
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        zones = rest.get_zone_names()
        for zone in zones:
            if zone != "Group 1":
                rest.delete_zone(zone)
        self.clusters_dic = self.input.clusters
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
                if self.dest_nodes and len(self.dest_nodes) > 1:
                    self.log.info("======== clean up destination cluster =======")
                    rest = RestConnection(self.dest_nodes[0])
                    rest.remove_all_remote_clusters()
                    rest.remove_all_replications()
                    BucketOperationHelper.delete_all_buckets_or_assert(self.dest_nodes, self)
                    ClusterOperationHelper.cleanup_cluster(self.dest_nodes)
            elif len(self.clusters_dic) == 1:
                self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
        else:
            self.log.info("**** If run xdcr test, need cluster config is setup in ini file. ****")
        super(CliBaseTest, self).tearDown()


    """ in sherlock, there is an extra value called runCmd in the 1st element """
    def del_runCmd_value(self, output):
        if "runCmd" in output[0]:
            output = output[1:]
        return output
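    # Hypothetical usage sketch (not in the original source): callers strip the extra
    # element before parsing CLI output, e.g.
    #   output, error = remote_client.execute_couchbase_cli(cli_command="bucket-list", ...)
    #   output = self.del_runCmd_value(output)
    # remote_client.execute_couchbase_cli above is assumed for illustration only.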

    def verifyCommandOutput(self, output, expect_error, message):
        """Inspects each line of the output and checks to see if the expected error was found

        Options:
        output - A list of output lines
        expect_error - Whether or not the command should have succeeded or failed
        message - The success or error message

        Returns a boolean indicating whether or not the error/success message was found in the output
        """
        if expect_error:
            for line in output:
                if line == "ERROR: " + message:
                    return True
            log.info("Did not receive expected error message `ERROR: %s`", message)
            return False
        else:
            for line in output:
                if line == "SUCCESS: " + message:
                    return True
            log.info("Did not receive expected success message `SUCCESS: %s`", message)
            return False
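    # Minimal usage sketch (illustrative only, not part of the original class):
    #   output, _ = self.shell.execute_command("%scouchbase-cli bucket-create ..."
    #                                          % self.cli_command_path)
    #   self.assertTrue(self.verifyCommandOutput(output, False, "Bucket created"))
    # The match is against a whole "SUCCESS: <message>" / "ERROR: <message>" line,
    # not a substring.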

    def verifyWarningOutput(self, output, message):
        for line in output:
            if line == "WARNING: " + message:
                return True
        log.info("Did not receive expected error message `WARNING: %s`", message)
        return False

    def verifyServices(self, server, expected_services):
        """Verifies that the services on a given node match the expected service

            Options:
            server - A TestInputServer object of the server to connect to
            expected_services - A comma separated list of services

            Returns a boolean corresponding to whether or not the expected services
            are available on the server.
        """
        rest = RestConnection(server)
        hostname = "%s:%s" % (server.ip, server.port)
        expected_services = expected_services.replace("data", "kv")
        expected_services = expected_services.replace("query", "n1ql")
        expected_services = expected_services.split(",")

        nodes_services = rest.get_nodes_services()
        for node, services in nodes_services.iteritems():
            if node.encode('ascii') == hostname:
                if len(services) != len(expected_services):
                    log.info("Services on %s do not match expected services (%s vs. %s)",
                             hostname, services, expected_services)
                    return False
                for service in services:
                    if service.encode("ascii") not in expected_services:
                        log.info("Services on %s do not match expected services (%s vs. %s)",
                                 hostname, services, expected_services)
                        return False
                return True

        log.info("Services on %s not found, the server may not exist", hostname)
        return False
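    # Illustrative call (an assumption, not from the original suite):
    #   self.assertTrue(self.verifyServices(self.master, "data,index,query"))
    # "data" and "query" are normalized to "kv" and "n1ql" before being compared
    # with the services the node actually reports.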

    def verifyRamQuotas(self, server, data, index, fts):
        """Verifies that the RAM quotas for each service are set properly

        Options:
        server - A TestInputServer object of the server to connect to
        data - An int containing the data service RAM quota, None will skip the check
        index - An int containing the index service RAM quota, None will skip the check
        fts - An int containing the FTS service RAM quota, None will skip the check

        Returns a boolean corresponding to whether or not the RAM quotas were set properly
        """
        rest = RestConnection(server)
        settings = rest.get_pools_default()
        if data:
            if "memoryQuota" not in settings:
                log.info("Unable to get data service ram quota")
                return False
            if int(settings["memoryQuota"]) != int(data):
                log.info("Data service memory quota does not match (%d vs %d)",
                         int(settings["memoryQuota"]), int(data))
                return False

        if index:
            if "indexMemoryQuota" not in settings:
                log.info("Unable to get index service ram quota")
                return False
            if int(settings["indexMemoryQuota"]) != int(index):
                log.info(
                    "Index service memory quota does not match (%d vs %d)",
                    int(settings["indexMemoryQuota"]), int(index))
                return False

        if fts:
            if "ftsMemoryQuota" not in settings:
                log.info("Unable to get fts service ram quota")
                return False
            if int(settings["ftsMemoryQuota"]) != int(fts):
                log.info("FTS service memory quota does not match (%d vs %d)",
                         int(settings["ftsMemoryQuota"]), int(fts))
                return False

        return True
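
    # Illustrative sketch, not part of the original helper: verifyRamQuotas
    # skips any quota passed as None, so a caller can check only the services
    # it actually configured (here, a hypothetical 256 MB data quota).
    def _example_check_data_quota(self, server):
        if not self.verifyRamQuotas(server, 256, None, None):
            raise Exception("Data service quota was not set to 256 MB")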

    def verifyBucketSettings(self, server, bucket_name, bucket_type, memory_quota,
                             eviction_policy, replica_count, enable_index_replica,
                             priority, enable_flush):
        rest = RestConnection(server)
        result = rest.get_bucket_json(bucket_name)
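        # The REST API reports buckets created with type "couchbase" as the
        # legacy type "membase", so normalize the expected value first.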

        if bucket_type == "couchbase":
            bucket_type = "membase"

        if bucket_type is not None and bucket_type != result["bucketType"]:
            log.info("Memory quota does not match (%s vs %s)", bucket_type,
                     result["bucketType"])
            return False

        quota = result["quota"]["rawRAM"] / 1024 / 1024
        if memory_quota is not None and memory_quota != quota:
            log.info("Bucket quota does not match (%s vs %s)", memory_quota,
                     quota)
            return False

        if eviction_policy is not None and eviction_policy != result[
            "evictionPolicy"]:
            log.info("Eviction policy does not match (%s vs %s)",
                     eviction_policy, result["evictionPolicy"])
            return False

        if replica_count is not None and replica_count != result[
            "replicaNumber"]:
            log.info("Replica count does not match (%s vs %s)", replica_count,
                     result["replicaNumber"])
            return False

        if enable_index_replica == 1:
            enable_index_replica = True
        elif enable_index_replica == 0:
            enable_index_replica = False

        if enable_index_replica is not None and enable_index_replica != result[
            "replicaIndex"]:
            log.info("Replica index enabled does not match (%s vs %s)",
                     enable_index_replica, result["replicaIndex"])
            return False

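        # Bucket priority is exposed by the REST API as a thread count:
        # "high" corresponds to 8 worker threads and "low" to 3.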
        if priority == "high":
            priority = 8
        elif priority == "low":
            priority = 3

        if priority is not None and priority != result["threadsNumber"]:
            log.info("Bucket priority does not match (%s vs %s)", priority,
                     result["threadsNumber"])
            return False

        if enable_flush is not None:
            if enable_flush == 1 and "flush" not in result["controllers"]:
                log.info("Bucket flush is not enabled, but it should be")
                return False
            elif enable_flush == 0 and "flush" in result["controllers"]:
                log.info("Bucket flush is not enabled, but it should be")
                return False

        return True

    def verifyContainsBucket(self, server, name):
        rest = RestConnection(server)
        buckets = rest.get_buckets()

        for bucket in buckets:
            if bucket.name == name:
                return True
        return False

    def verifyClusterName(self, server, name):
        rest = RestConnection(server)
        settings = rest.get_pools_default("waitChange=0")

        if name is None:
            name = ""

        if "clusterName" not in settings:
            log.info("Unable to get cluster name from server")
            return False
        if settings["clusterName"] != name:
            log.info("Cluster name does not match (%s vs %s)",
                     settings["clusterName"], name)
            return False

        return True

    def isClusterInitialized(self, server):
        """Checks whether or not the server is initialized

        Options:
        server - A TestInputServer object of the server to connect to

        Checks whether the default pool exists in order to determine whether
        the server has been initialized. Returns a boolean value indicating
        initialization.
        """
        rest = RestConnection(server)
        settings = rest.get_pools_info()
        if "pools" in settings and len(settings["pools"]) > 0:
            return True

        return False
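
    # Illustrative sketch, not part of the original helper: isClusterInitialized
    # inspects the /pools payload only once, so a caller that has just started
    # the server may want to poll it briefly before giving up.
    def _example_wait_for_init(self, server, timeout=30):
        for _ in range(timeout):
            if self.isClusterInitialized(server):
                return True
            time.sleep(1)
        return False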

    def verifyNotificationsEnabled(self, server):
        rest = RestConnection(server)
        enabled = rest.get_notifications()
        if enabled:
            return True
        return False

    def verifyIndexSettings(self, server, max_rollbacks, stable_snap_interval,
                            mem_snap_interval,
                            storage_mode, threads, log_level):
        rest = RestConnection(server)
        settings = rest.get_global_index_settings()

        if storage_mode == "default":
            storage_mode = "plasma"
        elif storage_mode == "memopt":
            storage_mode = "memory_optimized"

        if max_rollbacks and str(settings["maxRollbackPoints"]) != str(
                max_rollbacks):
            log.info("Max rollbacks does not match (%s vs. %s)",
                     str(settings["maxRollbackPoints"]), str(max_rollbacks))
            return False
        if stable_snap_interval and str(
                settings["stableSnapshotInterval"]) != str(
                stable_snap_interval):
            log.info("Stable snapshot interval does not match (%s vs. %s)",
                     str(settings["stableSnapshotInterval"]),
                     str(stable_snap_interval))
            return False
        if mem_snap_interval and str(
                settings["memorySnapshotInterval"]) != str(mem_snap_interval):
            log.info("Memory snapshot interval does not match (%s vs. %s)",
                     str(settings["memorySnapshotInterval"]),
                     str(mem_snap_interval))
            return False
        if storage_mode and str(settings["storageMode"]) != str(storage_mode):
            log.info("Storage mode does not match (%s vs. %s)",
                     str(settings["storageMode"]), str(storage_mode))
            return False
        if threads and str(settings["indexerThreads"]) != str(threads):
            log.info("Threads does not match (%s vs. %s)",
                     str(settings["indexerThreads"]), str(threads))
            return False
        if log_level and str(settings["logLevel"]) != str(log_level):
            log.info("Log level does not match (%s vs. %s)",
                     str(settings["logLevel"]), str(log_level))
            return False

        return True

    def verifyAutofailoverSettings(self, server, enabled, timeout):
        rest = RestConnection(server)
        settings = rest.get_autofailover_settings()

        if enabled and not ((str(enabled) == "1" and settings.enabled) or (
                str(enabled) == "0" and not settings.enabled)):
            log.info("Enabled does not match (%s vs. %s)", str(enabled),
                     str(settings.enabled))
            return False
        if timeout and str(settings.timeout) != str(timeout):
            log.info("Timeout does not match (%s vs. %s)", str(timeout),
                     str(settings.timeout))
            return False

        return True

    def verifyAutoreprovisionSettings(self, server, enabled, max_nodes):
        rest = RestConnection(server)
        settings = rest.get_autoreprovision_settings()

        if enabled and not ((str(enabled) == "1" and settings.enabled) or (
                str(enabled) == "0" and not settings.enabled)):
            log.info("Enabled does not match (%s vs. %s)", str(max_nodes),
                     str(settings.enabled))
            return False
        if max_nodes and str(settings.max_nodes) != str(max_nodes):
            log.info("max_nodes does not match (%s vs. %s)", str(max_nodes),
                     str(settings.max_nodes))
            return False

        return True

    def verifyAuditSettings(self, server, enabled, log_path, rotate_interval):
        rest = RestConnection(server)
        settings = rest.getAuditSettings()

        if enabled and not (
            (str(enabled) == "1" and settings["auditdEnabled"]) or (
                str(enabled) == "0" and not settings["auditdEnabled"])):
            log.info("Enabled does not match (%s vs. %s)", str(enabled),
                     str(settings["auditdEnabled"]))
            return False
        if log_path and str(str(settings["logPath"])) != str(log_path):
            log.info("Log path does not match (%s vs. %s)", str(log_path),
                     str(settings["logPath"]))
            return False

        if rotate_interval and str(str(settings["rotateInterval"])) != str(
                rotate_interval):
            log.info("Rotate interval does not match (%s vs. %s)",
                     str(rotate_interval), str(settings["rotateInterval"]))
            return False

        return True

    def verifyPendingServer(self, server, server_to_add, group_name, services):
        rest = RestConnection(server)
        settings = rest.get_all_zones_info()
        if not settings or "groups" not in settings:
            log.info("Group settings payload appears to be invalid")
            return False

        expected_services = services.replace("data", "kv")
        expected_services = expected_services.replace("query", "n1ql")
        expected_services = expected_services.split(",")

        for group in settings["groups"]:
            for node in group["nodes"]:
                if node["hostname"] == server_to_add:
                    if node["clusterMembership"] != "inactiveAdded":
                        log.info("Node `%s` not in pending status",
                                 server_to_add)
                        return False

                    if group["name"] != group_name:
                        log.info("Node `%s` not in correct group (%s vs %s)",
                                 node["hostname"], group_name,
                                 group["name"])
                        return False

                    if len(node["services"]) != len(expected_services):
                        log.info("Services do not match on %s (%s vs %s) ",
                                 node["hostname"], services,
                                 ",".join(node["services"]))
                        return False

                    for service in node["services"]:
                        if service not in expected_services:
                            log.info("Services do not match on %s (%s vs %s) ",
                                     node["hostname"], services,
                                     ",".join(node["services"]))
                            return False
                    return True

        log.info("Node `%s` not found in nodes list", server_to_add)
        return False

    def verifyPendingServerDoesNotExist(self, server, server_to_add):
        rest = RestConnection(server)
        settings = rest.get_all_zones_info()
        if not settings or "groups" not in settings:
            log.info("Group settings payload appears to be invalid")
            return False

        for group in settings["groups"]:
            for node in group["nodes"]:
                if node["hostname"] == server_to_add:
                    return False

        log.info("Node `%s` not found in nodes list", server_to_add)
        return True

    def verifyActiveServers(self, server, expected_num_servers):
        return self._verifyServersByStatus(server, expected_num_servers,
                                           "active")

    def verifyFailedServers(self, server, expected_num_servers):
        return self._verifyServersByStatus(server, expected_num_servers,
                                           "inactiveFailed")

    def _verifyServersByStatus(self, server, expected_num_servers, status):
        rest = RestConnection(server)
        settings = rest.get_pools_default()

        count = 0
        for node in settings["nodes"]:
            if node["clusterMembership"] == status:
                count += 1

        return count == expected_num_servers

    def verifyRecoveryType(self, server, recovery_servers, recovery_type):
        rest = RestConnection(server)
        settings = rest.get_all_zones_info()
        if not settings or "groups" not in settings:
            log.info("Group settings payload appears to be invalid")
            return False

        if not recovery_servers:
            return True

        num_found = 0
        recovery_servers = recovery_servers.split(",")
        for group in settings["groups"]:
            for node in group["nodes"]:
                for rs in recovery_servers:
                    if node["hostname"] == rs:
                        if node["recoveryType"] != recovery_type:
                            log.info(
                                "Node %s doesn't contain recovery type %s ",
                                rs, recovery_type)
                            return False
                        else:
                            num_found = num_found + 1

        if num_found == len(recovery_servers):
            return True

        log.info("Node `%s` not found in nodes list",
                 ",".join(recovery_servers))
        return False

    def verifyUserRoles(self, server, username, roles):
        rest = RestConnection(server)
        status, content, header = rbacmain(server)._retrieve_user_roles()
        content = json.loads(content)
        temp = rbacmain()._parse_get_user_response(content, username, username, roles)
        return temp

    def verifyLdapSettings(self, server, admins, ro_admins, default, enabled):
        rest = RestConnection(server)
        settings = rest.ldapRestOperationGetResponse()

        if admins is None:
            admins = []
        else:
            admins = admins.split(",")

        if ro_admins is None:
            ro_admins = []
        else:
            ro_admins = ro_admins.split(",")

        if str(enabled) == "0":
            admins = []
            ro_admins = []

        if default == "admins" and str(enabled) == "1":
            if settings["admins"] != "asterisk":
                log.info("Admins don't match (%s vs asterisk)",
                         settings["admins"])
                return False
        elif not self._list_compare(settings["admins"], admins):
            log.info("Admins don't match (%s vs %s)", settings["admins"],
                     admins)
            return False

        if default == "roadmins" and str(enabled) == "1":
            if settings["roAdmins"] != "asterisk":
                log.info("Read only admins don't match (%s vs asterisk)",
                         settings["roAdmins"])
                return False
        elif not self._list_compare(settings["roAdmins"], ro_admins):
            log.info("Read only admins don't match (%s vs %s)",
                     settings["roAdmins"], ro_admins)
            return False

        return True

    def verifyAlertSettings(self, server, enabled, email_recipients,
                            email_sender, email_username, email_password,
                            email_host,
                            email_port, encrypted, alert_af_node,
                            alert_af_max_reached, alert_af_node_down,
                            alert_af_small,
                            alert_af_disable, alert_ip_changed,
                            alert_disk_space, alert_meta_overhead,
                            alert_meta_oom,
                            alert_write_failed, alert_audit_dropped):
        rest = RestConnection(server)
        settings = rest.get_alerts_settings()
        print settings

        if not enabled:
            if not settings["enabled"]:
                return True
            else:
                log.info("Alerts should be disabled")
                return False

        if encrypted is None or encrypted == "0":
            encrypted = False
        else:
            encrypted = True

        if email_recipients is not None and not self._list_compare(
                email_recipients.split(","), settings["recipients"]):
            log.info("Email recipients don't match (%s vs %s)",
                     email_recipients.split(","), settings["recipients"])
            return False

        if email_sender is not None and email_sender != settings["sender"]:
            log.info("Email sender does not match (%s vs %s)", email_sender,
                     settings["sender"])
            return False

        if email_username is not None and email_username != \
                settings["emailServer"]["user"]:
            log.info("Email username does not match (%s vs %s)",
                     email_username, settings["emailServer"]["user"])
            return False

        if email_host is not None and email_host != settings["emailServer"][
            "host"]:
            log.info("Email host does not match (%s vs %s)", email_host,
                     settings["emailServer"]["host"])
            return False

        if email_port is not None and email_port != settings["emailServer"][
            "port"]:
            log.info("Email port does not match (%s vs %s)", email_port,
                     settings["emailServer"]["port"])
            return False

        if encrypted is not None and encrypted != settings["emailServer"][
            "encrypt"]:
            log.info("Email encryption does not match (%s vs %s)", encrypted,
                     settings["emailServer"]["encrypt"])
            return False

        alerts = list()
        if alert_af_node:
            alerts.append('auto_failover_node')
        if alert_af_max_reached:
            alerts.append('auto_failover_maximum_reached')
        if alert_af_node_down:
            alerts.append('auto_failover_other_nodes_down')
        if alert_af_small:
            alerts.append('auto_failover_cluster_too_small')
        if alert_af_disable:
            alerts.append('auto_failover_disabled')
        if alert_ip_changed:
            alerts.append('ip')
        if alert_disk_space:
            alerts.append('disk')
        if alert_meta_overhead:
            alerts.append('overhead')
        if alert_meta_oom:
            alerts.append('ep_oom_errors')
        if alert_write_failed:
            alerts.append('ep_item_commit_failed')
        if alert_audit_dropped:
            alerts.append('audit_dropped_events')

        if not self._list_compare(alerts, settings["alerts"]):
            log.info("Alerts don't match (%s vs %s)", alerts,
                     settings["alerts"])
            return False

        return True

    def verify_node_settings(self, server, data_path, index_path, hostname):
        rest = RestConnection(server)
        node_settings = rest.get_nodes_self()

        if data_path != node_settings.storage[0].path:
            log.info("Data path does not match (%s vs %s)", data_path,
                     node_settings.storage[0].path)
            return False
        if index_path != node_settings.storage[0].index_path:
            log.info("Index path does not match (%s vs %s)", index_path,
                     node_settings.storage[0].index_path)
            return False
        if hostname is not None:
            if hostname != node_settings.hostname:
                log.info("Hostname does not match (%s vs %s)", hostname,
                         node_settings.hostname)
                return False
        return True

    def verifyCompactionSettings(self, server, db_frag_perc, db_frag_size,
                                 view_frag_perc, view_frag_size, from_period,
                                 to_period, abort_outside, parallel_compact,
                                 purgeInt):
        rest = RestConnection(server)
        settings = rest.get_auto_compaction_settings()
        ac = settings["autoCompactionSettings"]

        if db_frag_perc is not None and str(db_frag_perc) != str(
                ac["databaseFragmentationThreshold"]["percentage"]):
            log.info("DB frag perc does not match (%s vs %s)",
                     str(db_frag_perc),
                     str(ac["databaseFragmentationThreshold"]["percentage"]))
            return False

        if db_frag_size is not None and str(db_frag_size * 1024 ** 2) != str(
                ac["databaseFragmentationThreshold"]["size"]):
            log.info("DB frag size does not match (%s vs %s)",
                     str(db_frag_size * 1024 ** 2),
                     str(ac["databaseFragmentationThreshold"]["size"]))
            return False

        if view_frag_perc is not None and str(view_frag_perc) != str(
                ac["viewFragmentationThreshold"]["percentage"]):
            log.info("View frag perc does not match (%s vs %s)",
                     str(view_frag_perc),
                     str(ac["viewFragmentationThreshold"]["percentage"]))
            return False

        if view_frag_size is not None and str(
                        view_frag_size * 1024 ** 2) != str(
                ac["viewFragmentationThreshold"]["size"]):
            log.info("View frag size does not match (%s vs %s)",
                     str(view_frag_size * 1024 ** 2),
                     str(ac["viewFragmentationThreshold"]["size"]))
            return False

        print from_period, to_period
        if from_period is not None:
            fromHour, fromMin = from_period.split(":", 1)
            if int(fromHour) != int(ac["allowedTimePeriod"]["fromHour"]):
                log.info("From hour does not match (%s vs %s)", str(fromHour),
                         str(ac["allowedTimePeriod"]["fromHour"]))
                return False
            if int(fromMin) != int(ac["allowedTimePeriod"]["fromMinute"]):
                log.info("From minute does not match (%s vs %s)", str(fromMin),
                         str(ac["allowedTimePeriod"]["fromMinute"]))
                return False

        if to_period is not None:
            toHour, toMin = to_period.split(":", 1)
            if int(toHour) != int(ac["allowedTimePeriod"]["toHour"]):
                log.info("To hour does not match (%s vs %s)", str(toHour),
                         str(ac["allowedTimePeriod"]["toHour"]))
                return False
            if int(toMin) != int(ac["allowedTimePeriod"]["toMinute"]):
                log.info("To minute does not match (%s vs %s)", str(toMin),
                         str(ac["allowedTimePeriod"]["toMinute"]))
                return False

        if str(abort_outside) == "1":
            abort_outside = True
        elif str(abort_outside) == "0":
            abort_outside = False

        if abort_outside is not None and abort_outside != \
                ac["allowedTimePeriod"]["abortOutside"]:
            log.info("Abort outside does not match (%s vs %s)", abort_outside,
                     ac["allowedTimePeriod"]["abortOutside"])
            return False

        if str(parallel_compact) == "1":
            parallel_compact = True
        elif str(parallel_compact) == "0":
            parallel_compact = False

        if parallel_compact is not None and parallel_compact != ac[
            "parallelDBAndViewCompaction"]:
            log.info("Parallel compact does not match (%s vs %s)",
                     str(parallel_compact),
                     str(ac["parallelDBAndViewCompaction"]))
            return False

        if purgeInt is not None and str(purgeInt) != str(
                settings["purgeInterval"]):
            log.info("Purge interval does not match (%s vs %s)", str(purgeInt),
                     str(settings["purgeInterval"]))
            return False

        return True

    def verify_gsi_compact_settings(self, compact_mode, compact_percent,
                                    compact_interval,
                                    from_period, to_period, enable_abort):
        rest = RestConnection(self.master)
        settings = rest.get_auto_compaction_settings()
        ac = settings["autoCompactionSettings"]["indexFragmentationThreshold"]
        cc = settings["autoCompactionSettings"]["indexCircularCompaction"]
        if compact_mode is not None:
            if compact_mode == "append":
                self.log.info("append compactino settings %s " % ac)
                if compact_percent is not None and \
                                compact_percent != ac["percentage"]:
                    raise Exception(
                        "Percentage setting does not match (set %s vs actual %s)"
                        % (compact_percent, ac["percentage"]))
            if compact_mode == "circular":
                self.log.info("circular compaction settings %s " % cc)
                if enable_abort and not cc["interval"]["abortOutside"]:
                    raise Exception("setting enable abort failed")
                if compact_interval is not None:
                    if compact_interval != cc["daysOfWeek"]:
                        raise Exception(
                            "Failed to set compaction on %s " % compact_interval)
                    elif from_period is None and int(
                            cc["interval"]["fromHour"]) != 0 and \
                                    int(cc["interval"]["fromMinute"]) != 0:
                        raise Exception(
                            "fromHour and fromMinute should be zero")
                if compact_interval is None:
                    if (from_period != str(cc["interval"][
                                                    "fromHour"]) + ":" + str(
                                cc["interval"]["fromMinute"])) \
                    and (to_period != str(cc["interval"]["toHour"]) + ":" + str(
                                cc["interval"]["toMinute"])):
                        raise Exception(
                            "fromHour and fromMinute do not set correctly")
        return True

    def verifyGroupExists(self, server, name):
        rest = RestConnection(server)
        groups = rest.get_zone_names()
        print groups

        for gname, _ in groups.iteritems():
            if name == gname:
                return True

        return False

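    # Note: _list_compare is an order-insensitive check; it requires equal
    # lengths and that every element of list1 appears somewhere in list2
    # (duplicates are not matched one-to-one), which is sufficient for the
    # settings lists compared above.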
    def _list_compare(self, list1, list2):
        if len(list1) != len(list2):
            return False
        for elem1 in list1:
            found = False
            for elem2 in list2:
                if elem1 == elem2:
                    found = True
                    break
            if not found:
                return False
        return True

    def waitForItemCount(self, server, bucket_name, count, timeout=30):
        rest = RestConnection(server)
        for sec in range(timeout):
            items = int(
                rest.get_bucket_json(bucket_name)["basicStats"]["itemCount"])
            if items != count:
                time.sleep(1)
            else:
                return True
        log.info("Waiting for item count to be %d timed out", count)
        return False
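
    # Illustrative sketch, not part of the original helper: waitForItemCount
    # polls basicStats once per second, so after loading documents a caller
    # simply asserts on its boolean result (the "default" bucket name here is
    # only an example).
    def _example_wait_for_load(self, server, expected_items):
        if not self.waitForItemCount(server, "default", expected_items,
                                     timeout=60):
            raise Exception("Bucket 'default' never reached %d items"
                            % expected_items)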
Exemple #46
0
 def setUp(self):
     self.times_teardown_called = 1
     super(CliBaseTest, self).setUp()
     self.r = random.Random()
     self.vbucket_count = 1024
     self.cluster = Cluster()
     self.clusters_dic = self.input.clusters
     if self.clusters_dic:
         if len(self.clusters_dic) > 1:
             self.dest_nodes = self.clusters_dic[1]
             self.dest_master = self.dest_nodes[0]
         elif len(self.clusters_dic) == 1:
             self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
     else:
         self.log.error("**** Cluster config is setup in ini file. ****")
     self.shell = RemoteMachineShellConnection(self.master)
     if not self.skip_init_check_cbserver:
         self.rest = RestConnection(self.master)
         self.cb_version = self.rest.get_nodes_version()
         """ cli output message """
         self.cli_bucket_create_msg = "SUCCESS: Bucket created"
         self.cli_rebalance_msg = "SUCCESS: Rebalance complete"
         if self.cb_version[:3] == "4.6":
             self.cli_bucket_create_msg = "SUCCESS: bucket-create"
             self.cli_rebalance_msg = "SUCCESS: rebalanced cluster"
     self.import_back = self.input.param("import_back", False)
     if self.import_back:
         if len(self.servers) < 3:
             self.fail("This test needs minimum of 3 vms to run ")
     self.test_type = self.input.param("test_type", "import")
     self.import_file = self.input.param("import_file", None)
     self.imex_type = self.input.param("imex_type", "json")
     self.format_type = self.input.param("format_type", "lines")
     self.import_method = self.input.param("import_method", "file://")
     self.force_failover = self.input.param("force_failover", False)
     self.json_invalid_errors = self.input.param("json-invalid-errors", None)
     self.field_separator = self.input.param("field-separator", "comma")
     self.key_gen = self.input.param("key-gen", True)
     self.skip_docs = self.input.param("skip-docs", None)
     self.limit_docs = self.input.param("limit-docs", None)
     self.limit_rows = self.input.param("limit-rows", None)
     self.skip_rows = self.input.param("skip-rows", None)
     self.omit_empty = self.input.param("omit-empty", None)
     self.infer_types = self.input.param("infer-types", None)
     self.fx_generator = self.input.param("fx-generator", None)
     self.fx_gen_start = self.input.param("fx-gen-start", None)
     self.secure_conn = self.input.param("secure-conn", False)
     self.no_cacert = self.input.param("no-cacert", False)
     self.no_ssl_verify = self.input.param("no-ssl-verify", False)
     self.verify_data = self.input.param("verify-data", False)
     self.field_substitutions = self.input.param("field-substitutions", None)
     self.check_preload_keys = self.input.param("check-preload-keys", True)
     self.debug_logs = self.input.param("debug-logs", False)
     self.should_fail = self.input.param("should-fail", False)
     info = self.shell.extract_remote_info()
     self.os_version = info.distribution_version.lower()
     self.deliverable_type = info.deliverable_type.lower()
     type = info.type.lower()
     self.excluded_commands = self.input.param("excluded_commands", None)
     self.os = 'linux'
     self.full_v = None
     self.short_v = None
     self.build_number = None
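     # The /diag/eval call below asks the server for its own bin directory
     # (path_config:component_path(bin)), which is then used to build
     # cli_command_path.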
     cmd =  'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(self.master.ip,
                                                           self.master.rest_username,
                                                           self.master.rest_password)
     cmd += '-d "path_config:component_path(bin)."'
     bin_path  = subprocess.check_output(cmd, shell=True)
     if "bin" not in bin_path:
         self.fail("Check if cb server install on %s" % self.master.ip)
     else:
         self.cli_command_path = bin_path.replace('"','') + "/"
     self.root_path = LINUX_ROOT_PATH
     self.tmp_path = "/tmp/"
     self.tmp_path_raw = "/tmp/"
     self.cmd_backup_path = LINUX_BACKUP_PATH
     self.backup_path = LINUX_BACKUP_PATH
     self.cmd_ext = ""
     self.src_file = ""
     self.des_file = ""
     self.sample_files_path = LINUX_COUCHBASE_SAMPLE_PATH
     self.log_path = LINUX_COUCHBASE_LOGS_PATH
     self.base_cb_path = LINUX_CB_PATH
     """ non root path """
     if self.nonroot:
         self.sample_files_path = "/home/%s%s" % (self.master.ssh_username,
                                                  LINUX_COUCHBASE_SAMPLE_PATH)
         self.log_path = "/home/%s%s" % (self.master.ssh_username,
                                         LINUX_COUCHBASE_LOGS_PATH)
         self.base_cb_path = "/home/%s%s" % (self.master.ssh_username,
                                             LINUX_CB_PATH)
         self.root_path = "/home/%s/" % self.master.ssh_username
     if type == 'windows':
         self.os = 'windows'
         self.cmd_ext = ".exe"
         self.root_path = WIN_ROOT_PATH
         self.tmp_path = WIN_TMP_PATH
         self.tmp_path_raw = WIN_TMP_PATH_RAW
         self.cmd_backup_path = WIN_BACKUP_C_PATH
         self.backup_path = WIN_BACKUP_PATH
         self.sample_files_path = WIN_COUCHBASE_SAMPLE_PATH_C
         self.log_path = WIN_COUCHBASE_LOGS_PATH
         win_format = "C:/Program Files"
         cygwin_format = "/cygdrive/c/Program\ Files"
         if win_format in self.cli_command_path:
             self.cli_command_path = self.cli_command_path.replace(win_format,
                                                                   cygwin_format)
         self.base_cb_path = WIN_CB_PATH
     if info.distribution_type.lower() == 'mac':
         self.os = 'mac'
     self.full_v, self.short_v, self.build_number = self.shell.get_cbversion(type)
     self.couchbase_usrname = "%s" % (self.input.membase_settings.rest_username)
     self.couchbase_password = "%s" % (self.input.membase_settings.rest_password)
     self.cb_login_info = "%s:%s" % (self.couchbase_usrname,
                                     self.couchbase_password)
     self.path_type = self.input.param("path_type", None)
     if self.path_type is None:
         self.log.info("Test command with absolute path ")
     elif self.path_type == "local":
         self.log.info("Test command at %s dir " % self.cli_command_path)
         self.cli_command_path = "cd %s; ./" % self.cli_command_path
     self.cli_command = self.input.param("cli_command", None)
     self.command_options = self.input.param("command_options", None)
     if self.command_options is not None:
         self.command_options = self.command_options.split(";")
     if str(self.__class__).find('couchbase_clitest.CouchbaseCliTest') == -1:
         if len(self.servers) > 1 and int(self.nodes_init) == 1:
             servers_in = [self.servers[i + 1] for i in range(self.num_servers - 1)]
             self.cluster.rebalance(self.servers[:1], servers_in, [])
     for bucket in self.buckets:
         testuser = [{'id': bucket.name, 'name': bucket.name, 'password': '******'}]
         rolelist = [{'id': bucket.name, 'name': bucket.name, 'roles': 'admin'}]
         self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
class XDCRCallable:
    def __init__(self, nodes, num_clusters=2):
        self.log = logger.Logger.get_logger()
        self.cluster_list = []
        self.__clusterop = Cluster()
        self.setup_xdcr(nodes, num_clusters)

    def setup_xdcr(self, nodes, num_clusters):
        self._setup_xdcr_topology(nodes, num_clusters)

    def __assign_nodes_to_clusters(self, nodes, num_nodes_per_cluster):
        for _ in range(0, len(nodes), num_nodes_per_cluster):
            yield nodes[_:_ + num_nodes_per_cluster]

    def _setup_xdcr_topology(self, nodes, num_clusters):
        num_nodes_per_cluster = len(nodes) // num_clusters
        count = 1
        for cluster_nodes in list(self.__assign_nodes_to_clusters(nodes, num_nodes_per_cluster)):
            cluster = CouchbaseCluster(name="C" + str(count), nodes=cluster_nodes, log=self.log)
            self.cleanup_cluster(cluster)
            #cluster.cleanup_cluster(test_case="xdcr upgrade")
            self.__init_cluster(cluster)
            self.log.info("Cluster {0}:{1} created".format(cluster.get_name(), cluster.get_nodes()))
            self.cluster_list.append(cluster)
            count += 1

        # TODO: implementing chain topology for now, need to extend to other xdcr topologies
        # C1->C2, C2->C3..
        for count, cluster in enumerate(self.cluster_list):
            if count < len(self.cluster_list) - 1:
                cluster.add_remote_cluster(self.cluster_list[count + 1],
                                           'C' + str(count) + "-to-" + 'C' + str(count + 1))
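
    # Illustrative sketch, not part of the original class: with six nodes and
    # num_clusters=2, __assign_nodes_to_clusters yields two groups of three,
    # which _setup_xdcr_topology names C1 and C2 and chains as C1 -> C2.
    def _example_partition(self, nodes, num_clusters):
        per_cluster = len(nodes) // num_clusters
        return list(self.__assign_nodes_to_clusters(nodes, per_cluster))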

    def ___init_nodes(self, cluster, disabled_consistent_view=None):
        """Initialize all nodes.
        """
        tasks = []
        for node in cluster.get_nodes():
            tasks.append(
                self.__clusterop.async_init_node(
                    node))
        for task in tasks:
            task.result(60)

    def __init_cluster(self, cluster):
        """Initialize cluster.
        1. Initialize all nodes.
        2. Add all nodes to the cluster.
        """
        self.___init_nodes(cluster)
        self.__clusterop.async_rebalance(
            cluster.get_nodes(),
            cluster.get_nodes()[1:],
            []).result()

    def cleanup_cluster(self, cluster):
        """Cleanup cluster.
        1. Remove all remote cluster references.
        2. Remove all replications.
        3. Remove all buckets.
        """
        self.log.info("removing xdcr/nodes settings")
        rest = RestConnection(cluster.get_master_node())
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()
        rest.remove_all_recoveries()
        cluster.cleanup_cluster("upgradeXDCR")

    def _create_replication(self):
        for cluster in self.cluster_list:
            self.log.info("Creating replication from {0}->{1}".format(cluster.get_name, cluster.get_remote_clusters()))

    def _set_replication_properties(self, param_str):
        pass

    def _get_replication_properties(self, replid):
        pass

    def __del__(self):
        for cluster in self.cluster_list:
            self.cleanup_cluster(cluster)
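
# Illustrative usage sketch, not part of the original module: XDCRCallable
# builds its chain topology in the constructor and tears the clusters down
# again in __del__, so a test would only need something like:
#
#     xdcr = XDCRCallable(self.servers, num_clusters=2)
#     xdcr._create_replication()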
class SpatialQueryTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()
        self.cluster = Cluster()
        self.servers = self.helper.servers

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make range queries with limits on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._query_test_init(data_set)

## Rebalance In
    def test_rebalance_in_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and range queries with limits on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._rebalance_cluster(data_set)

#Rebalance Out
    def test_rebalance_out_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and  limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries with limits on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._rebalance_cluster(data_set)

# Warmup Tests

    def test_warmup_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with skip and limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  range queries with limits on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._query_test_init_integration(data_set)


# Reboot Tests
    def test_reboot_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot and limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  range queries with limits on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._query_test_init_integration(data_set)

# Failover Tests
    def test_failover_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Failover and limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._failover_cluster(data_set)

    def test_failover_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._failover_cluster(data_set)

    def test_failover_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._failover_cluster(data_set)

    def test_failover_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries with limits on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._failover_cluster(data_set)

    ###
    # load the data defined for this dataset.
    # create views and query the data as it loads.
    # verification is optional, and best practice is to
    # set it to False if you plan on running _query_all_views()
    # later in the test case
    ###
    def _query_test_init(self, data_set, verify_results=True):
        views = data_set.views

        # start loading data
        t = Thread(target=data_set.load,
                   name="load_data_set",
                   args=())
        t.start()

        # run queries while loading data
        while t.is_alive():
            self._query_all_views(views, False)
            time.sleep(5)
        t.join()

        # results will be verified if verify_results is set
        if verify_results:
            self._query_all_views(views, verify_results)
        else:
            self._check_view_intergrity(views)
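
    ##
    # load the data, then run the configured cluster operation
    # (reboot, warmup, or rebalance) while all views are queried
    # from a background thread
    ##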

    def _query_test_init_integration(self, data_set, verify_results=True):
        views = data_set.views
        data_set.load()

        # pick the cluster operation to run alongside the queries
        if self.helper.num_nodes_reboot >= 1:
            cluster_op = self._reboot_cluster
        elif self.helper.num_nodes_warmup >= 1:
            cluster_op = self._warmup_cluster
        else:
            # _rebalance_cluster is a no-op unless nodes are being
            # added or removed
            cluster_op = self._rebalance_cluster

        # query all views in a background thread while the cluster
        # operation runs in the main thread
        t = Thread(target=self._query_all_views,
                   name="query_all_views",
                   args=(views, False))
        t.start()
        cluster_op(data_set)
        t.join()

        # results will be verified if verify_results is set
        if verify_results:
            self._query_all_views(views, verify_results)
        else:
            self._check_view_intergrity(views)

    ##
    # run all queries for all views in parallel
    ##
    def _query_all_views(self, views, verify_results=True):
        query_threads = []
        for view in views:
            t = RunQueriesThread(view, verify_results)
            query_threads.append(t)
            t.start()

        for t in query_threads:
            t.join()

        self._check_view_intergrity(query_threads)

    ##
    # If an error occurred while loading or querying data for a view,
    # it is queued and checked here. Fail on the first one that
    # occurs.
    ##
    def _check_view_intergrity(self, thread_results):
        for result in thread_results:
            if result.test_results.errors:
                self.fail(result.test_results.errors[0][1])
            if result.test_results.failures:
                self.fail(result.test_results.failures[0][1])

    ###
    # Rebalance
    ###
    def _rebalance_cluster(self, data_set):
        if self.helper.num_nodes_to_add >= 1:
            rebalance = self.cluster.async_rebalance(self.servers[:1],
                self.servers[1:self.helper.num_nodes_to_add + 1],
                [])
            self._query_test_init(data_set)
            rebalance.result()

        elif self.helper.num_nodes_to_remove >= 1:
            rebalance = self.cluster.async_rebalance(self.servers[:1], [],
                self.servers[1:self.helper.num_nodes_to_remove + 1])
            self._query_test_init(data_set)
            rebalance.result()
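
    ###
    # Failover
    ###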

    def _failover_cluster(self, data_set):
        failover_nodes = self.servers[1:self.helper.failover_factor + 1]
        try:
            # failover and verify loaded data
            #self.cluster.failover(self.servers, failover_nodes)
            self.cluster.failover(self.servers, self.servers[1:2])
            self.log.info("120 seconds sleep after failover before invoking rebalance...")
            time.sleep(120)
            rebalance = self.cluster.async_rebalance(self.servers,
                [], self.servers[1:2])

            self._query_test_init(data_set)

            msg = "rebalance failed while removing failover nodes {0}".format(failover_nodes)
            self.assertTrue(rebalance.result(), msg=msg)

            #verify queries after failover
            self._query_test_init(data_set)
        finally:
            self.log.info("Completed the failover testing for spatial querying")

    ###
    # Warmup
    ###
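    # restart couchbase-server on the first num_nodes_warmup nodes and
    # query the views once the nodes have had time to warm up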
    def _warmup_cluster(self, data_set):
        for server in self.servers[0:self.helper.num_nodes_warmup]:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            remote.start_server()
            remote.disconnect()
            self.log.info("Node {0} should be warming up ".format(server.ip))
            time.sleep(120)
        self._query_test_init(data_set)

    # REBOOT
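    # reboot the first num_nodes_reboot nodes; on Linux, iptables is
    # flushed once a node is back so that it can rejoin the cluster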
    def _reboot_cluster(self, data_set):
        try:
            for server in self.servers[0:self.helper.num_nodes_reboot]:
                shell = RemoteMachineShellConnection(server)
                if shell.extract_remote_info().type.lower() == 'windows':
                    o, r = shell.execute_command("shutdown -r -f -t 0")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))
                elif shell.extract_remote_info().type.lower() == 'linux':
                    o, r = shell.execute_command("reboot")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))

                    time.sleep(120)
                    shell = RemoteMachineShellConnection(server)
                    command = "/sbin/iptables -F"
                    o, r = shell.execute_command(command)
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} backup".format(server.ip))
        finally:
            self.log.info("Warming-up server ..".format(server.ip))
            time.sleep(100)
Exemple #49
0
class SGConfigTests(GatewayConfigBaseTest):
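    # These tests copy a Sync Gateway config to each node, start sync_gateway
    # remotely, and exercise users, roles, and documents over SSH.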
    def setUp(self):
        super(SGConfigTests, self).setUp()
        for server in self.servers:
            if self.case_number == 1:
                with open('pytests/sg/resources/gateway_config_walrus_template.json', 'r') as file:
                    filedata = file.read()
                    filedata = filedata.replace('LOCAL_IP', server.ip)
                with open('pytests/sg/resources/gateway_config_walrus.json', 'w') as file:
                    file.write(filedata)
                shell = RemoteMachineShellConnection(server)
                shell.execute_command("rm -rf {0}/tmp/*".format(self.folder_prefix))
                shell.copy_files_local_to_remote('pytests/sg/resources', '{0}/tmp'.format(self.folder_prefix))
                # will install sg only the first time
                self.install(shell)
                pid = self.is_sync_gateway_process_running(shell)
                self.assertNotEqual(pid, 0)
                exist = shell.file_exists('{0}/tmp/'.format(self.folder_prefix), 'gateway.log')
                self.assertTrue(exist)
                shell.disconnect()
        if self.case_number == 1:
            shutil.copy2('pytests/sg/resources/gateway_config_backup.json', 'pytests/sg/resources/gateway_config.json')
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            self.cluster = Cluster()
            shared_params = self._create_bucket_params(server=self.master, size=150)
            self.cluster.create_default_bucket(shared_params)
            task = self.cluster.async_create_sasl_bucket(name='test_%E-.5', password='******',
                                                         bucket_params=shared_params)
            task.result()
            task = self.cluster.async_create_standard_bucket(name='db', port=11219,
                                                             bucket_params=shared_params)
            task.result()

    def tearDown(self):
        super(SGConfigTests, self).tearDown()
        if self.case_number == 1:
            self.cluster.shutdown(force=True)

    def _create_bucket_params(self, server, replicas=1, size=0, port=11211, password=None,
                              bucket_type='membase', enable_replica_index=1, eviction_policy='valueOnly',
                              bucket_priority=None, flush_enabled=1, lww=False):
        """Create the set of bucket parameters passed to the bucket-creation methods.
        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)
            port - The port to create this bucket on. (String)
            password - The password for this bucket. (String)
            bucket_type - The type of bucket. (String)
            enable_replica_index - Can be 0 or 1; 1 enables indexing of replica bucket data. (int)
            eviction_policy - The eviction policy for the bucket, either valueOnly or fullEviction. (String)
            bucket_priority - The priority of the bucket: either none, low, or high. (String)
            flush_enabled - Enable or disable the flush functionality of the bucket. (int)
            lww - Determines the conflict resolution type of the bucket. (Boolean)

        Returns:
            bucket_params - A dictionary containing the parameters needed to create a bucket."""

        bucket_params = {
            'server': server,
            'replicas': replicas,
            'size': size,
            'port': port,
            'password': password,
            'bucket_type': bucket_type,
            'enable_replica_index': enable_replica_index,
            'eviction_policy': eviction_policy,
            'bucket_priority': bucket_priority,
            'flush_enabled': flush_enabled,
            'lww': lww,
        }
        return bucket_params
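
    # Illustrative usage (a sketch based on setUp above): the returned dict is
    # passed directly to the cluster's bucket-creation helpers, e.g.
    #   params = self._create_bucket_params(server=self.master, size=150)
    #   self.cluster.create_default_bucket(params)
    #   task = self.cluster.async_create_standard_bucket(name='db', port=11219,
    #                                                     bucket_params=params)
    #   task.result()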

    def configHelp(self):
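        # compare the output of 'sync_gateway -help' on each node, line by
        # line, against the expected help_string baseline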
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command_raw('/opt/couchbase-sync-gateway/bin/sync_gateway -help')
            for index, expected in enumerate(help_string):
                if error[index] != expected:
                    self.log.info('configHelp found unmatched help text. error({0}), help({1})'
                                  .format(error[index], expected))
                self.assertEqual(error[index], expected)
            shell.disconnect()

    def configCreateUser(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.config = 'gateway_config_walrus.json'
            self.assertTrue(self.start_sync_gateway(shell))
            self.assertTrue(self.create_user(shell))
            if not self.expected_stdout:
                self.assertTrue(self.get_user(shell))
                self.delete_user(shell)
            shell.disconnect()

    def configGuestUser(self):
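        # the default GUEST user should be retrievable but not deletable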
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.config = 'gateway_config_walrus.json'
            self.assertTrue(self.start_sync_gateway(shell))
            self.assertTrue(self.get_user(shell))
            self.assertFalse(self.delete_user(shell))
            shell.disconnect()

    def configCreateRole(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.config = 'gateway_config_walrus.json'
            self.assertTrue(self.start_sync_gateway(shell))
            self.assertTrue(self.create_role(shell, self.role_name, self.admin_channels))
            if not self.expected_stdout:
                self.assertTrue(self.get_role(shell))
                self.delete_role(shell)
            shell.disconnect()

    def configUserRolesChannels(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.config = 'gateway_config_walrus.json'
            self.assertTrue(self.start_sync_gateway(shell))
            self.assertTrue(self.parse_input_create_roles(shell))
            self.assertTrue(self.create_user(shell))
            if not self.expected_stdout:
                self.assertTrue(self.get_user(shell))
                self.delete_user(shell)
            shell.disconnect()

    def configUserRolesNotExist(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.config = 'gateway_config_walrus.json'
            self.assertTrue(self.start_sync_gateway(shell))
            self.assertTrue(self.create_user(shell))
            if not self.expected_stdout:
                self.assertTrue(self.get_user(shell))
                self.delete_user(shell)
            shell.disconnect()

    def configInspectDocChannel(self):
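        # when doc channels are configured, a document can be created,
        # listed via get_all_docs and then deleted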
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.config = 'gateway_config_walrus.json'
            self.assertTrue(self.start_sync_gateway(shell))
            self.assertTrue(self.parse_input_create_roles(shell))
            if self.doc_channels:
                success, revision = self.create_doc(shell)
                self.assertTrue(success)
                self.assertTrue(self.get_all_docs(shell))
                self.assertTrue(self.delete_doc(shell, revision))
            shell.disconnect()

    def configCBS(self):
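        # start sync_gateway from the given config template and verify the
        # expected message appears in the gateway log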
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            shutil.copy2('pytests/sg/resources/gateway_config_backup.json', 'pytests/sg/resources/gateway_config.json')
            self.assertTrue(self.start_sync_gateway_template(shell, self.template))
            if not self.expected_error:
                time.sleep(5)
                success, revision = self.create_doc(shell)
                self.assertTrue(success)
                self.assertTrue(self.delete_doc(shell, revision))
            self.assertTrue(self.check_message_in_gatewaylog(shell, self.expected_log))
            shell.disconnect()

    def configStartSgw(self):
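        # start sync_gateway with the backup config and, when no error is
        # expected, exercise the admin and sync ports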
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            shutil.copy2('pytests/sg/resources/gateway_config_backup.json', 'pytests/sg/resources/gateway_config.json')
            shell.copy_files_local_to_remote('pytests/sg/resources', '/tmp')
            self.assertTrue(self.start_sync_gateway(shell))
            self.assertTrue(self.check_message_in_gatewaylog(shell, self.expected_log))
            if not self.expected_error:
                if self.admin_port:
                    self.assertTrue(self.get_users(shell))
                if self.sync_port:
                    success, revision = self.create_doc(shell)
                    self.assertTrue(success)
                    self.assertTrue(self.delete_doc(shell, revision))
            shell.disconnect()