def replace_services(self, servers, server, services):
     """Reassign the set of services hosted by ``server``.

     Implemented as two rebalances: one that ejects the node and one
     that adds it back advertising ``services``, followed by a short
     pause so the new services can warm up.
     """
     # Take the node out of the cluster first ...
     Cluster().rebalance(servers, [], [server])
     # ... then bring it back in with the requested service list.
     Cluster().rebalance(servers, [server], [], services=services)
     self.sleep(15)  # brief warmup window for the freshly placed services
Example #2
0
 def setUp(self):
     """Create the test buckets and preload each with a small doc set.

     For every requested bucket: create it over REST, load a blob
     workload through a temporary task manager, then log in via the UI
     helper once all buckets are ready.
     """
     super(NodeServiceTests, self).setUp()
     self.helper = ServerHelper(self)
     # Workload shape is driven by test-input parameters.
     num_buckets = self.input.param("num_buckets", 1)
     compression = self.input.param("sdk_compression", True)
     for i in range(num_buckets):
         RestConnection(self.servers[0]).create_bucket(
             bucket='bucket%s' % i,
             ramQuotaMB=100,
             proxyPort=STANDARD_BUCKET_PORT + i + 1)
         gen_load = BlobGenerator('ui', 'ui-', 256, start=0, end=10)
         cluster = Cluster()
         try:
             # Deep-copy so each bucket's load starts from a fresh
             # generator cursor.
             gen = copy.deepcopy(gen_load)
             task = cluster.async_load_gen_docs(self.servers[0],
                                                'bucket%s' % i,
                                                gen,
                                                Bucket().kvs[1],
                                                'create',
                                                0,
                                                0,
                                                True,
                                                1,
                                                1,
                                                30,
                                                compression=compression)
             task.result()
         finally:
             # Always stop the task-manager threads, even if loading fails.
             cluster.shutdown()
     BaseHelper(self).login()
Example #3
0
    def test_load_collections_in_bucket(self):
        """Create scopes/collections in a bucket, load docs, log stats.

        Timings for the create and load phases are logged, and the
        per-bucket collection stats are logged at the end.
        """
        import time
        start = time.time()
        self.scope_num = self.input.param("num_scopes", 2)
        self.collection_num = self.input.param("num_collections", 2)
        self.bucket_name = self.input.param("bucket", self.default_bucket_name)
        try:
            self.rest.async_create_scope_collection(self.scope_num,
                                                    self.collection_num,
                                                    self.bucket_name)
        except Exception as e:
            # Creation is best-effort (scopes/collections may already
            # exist), but the failure should be visible in the log
            # rather than silently swallowed by a bare ``except: pass``.
            self.log.warning("scope/collection creation failed: %s" % e)
        create = time.time()
        self.log.info(
            "{} scopes with {} collections each created in {} s".format(
                self.scope_num, self.collection_num, round(create - start)))
        time.sleep(5)

        self.enable_bloom_filter = self.input.param("enable_bloom_filter",
                                                    False)
        self.buckets = self.conn.get_buckets()
        self.cluster = Cluster()
        self.active_resident_threshold = 100

        self.gen_create = SDKDataLoader(num_ops=self.num_items,
                                        percent_create=80,
                                        percent_update=20,
                                        percent_delete=20)
        self._load_all_buckets(self.master, self.gen_create)
        load = time.time()
        self.log.info("Done loading {} collections in bucket {} in {}s".format(
            self.collection_num, self.bucket_name, round(load - create)))
        for bkt in self.buckets:
            # Log via the framework logger (not stdout) so the stats land
            # in the test log.
            self.log.info(self.stat.get_collection_stats(bkt))
Example #4
0
 def wait_for_replication(servers, cluster_helper=None, timeout=600):
     """Wait until intra-cluster TAP replication has settled on all nodes.

     For every server/bucket pair, waits for the replication queue
     toward each peer node to report ``idle`` and ``backfill_completed``.

     servers        -- list of cluster nodes to check
     cluster_helper -- optional existing task manager; when None a
                       temporary ``Cluster()`` is created and shut down
     timeout        -- per-task wait in seconds

     Returns True once all stats converge. A timeout or task failure
     now propagates to the caller (see note below).
     """
     if cluster_helper is None:
         cluster = Cluster()
     else:
         cluster = cluster_helper
     tasks = []
     rest = RestConnection(servers[0])
     buckets = rest.get_buckets()
     for server in servers:
         for bucket in buckets:
             for server_repl in list(set(servers) - {server}):
                 tasks.append(
                     cluster.async_wait_for_stats(
                         [server], bucket, 'tap',
                         'eq_tapq:replication_ns_1@' + server_repl.ip +
                         ':idle', '==', 'true'))
                 tasks.append(
                     cluster.async_wait_for_stats(
                         [server], bucket, 'tap',
                         'eq_tapq:replication_ns_1@' + server_repl.ip +
                         ':backfill_completed', '==', 'true'))
     try:
         for task in tasks:
             task.result(timeout)
     finally:
         if cluster_helper is None:
             # stop all newly created task manager threads
             cluster.shutdown()
     # BUG FIX: the original ``return True`` lived inside ``finally``,
     # which silently discarded any exception raised by task.result()
     # (including timeouts), so callers always saw success.
     return True
 def setUp(self):
     """Resolve the XDCR source/destination clusters and their masters."""
     super(compression, self).setUp()
     # Source ('C1') and destination ('C2') ends of the replication.
     self.src_cluster = self.get_cb_cluster_by_name('C1')
     self.dest_cluster = self.get_cb_cluster_by_name('C2')
     self.src_master = self.src_cluster.get_master_node()
     self.dest_master = self.dest_cluster.get_master_node()
     # Task-manager handle used to drive async operations in the tests.
     self.cluster = Cluster()
Example #6
0
 def test_multiple_backups_merge_with_tombstoning(self):
     """Verify merged backups restore correctly after tombstone purging.

     Loads data, backs up, deletes documents, waits past the meta-data
     purge interval (so delete tombstones become eligible for removal),
     compacts, backs up again, merges the backup range, and validates
     a restore with a ``>=`` sequence-number comparison.
     """
     self.log.info("*** start to load items to all buckets")
     self.expected_error = self.input.param("expected_error", None)
     if int(self.active_resident_threshold) > 0:
         self.log.info("Disable compaction to speed up dgm")
         RestConnection(self.master).disable_auto_compaction()
     if self.expires:
         # With expirations enabled, write docs directly through the SDK.
         for bucket in self.buckets:
             cb = self._get_python_sdk_client(self.master.ip, bucket)
             for i in range(1, self.num_items + 1):
                 cb.upsert("doc" + str(i), {"key": "value"})
     else:
         self._load_all_buckets(self.master, self.initial_load_gen,
                                "create", self.expires)
     self.log.info("*** done to load items to all buckets")
     self.backup_create_validate()
     self.backup()
     self.set_meta_purge_interval()
     self._load_all_buckets(self.master, self.delete_gen, "delete",
                            self.expires)
     # Let the purge interval elapse so the delete tombstones can be
     # removed by the compaction below.
     self.sleep(
         360, "Sleep for 6 minutes for the meta-data purge "
         "interval to be completed")
     self.compact_buckets()
     self.backup()
     self.backupset.start = 1
     self.backupset.end = len(self.backups)
     self.merge()
     start = self.number_of_backups_taken
     end = self.number_of_backups_taken
     if self.reset_restore_cluster:
         # Rebuild the restore cluster from scratch before validating.
         self.log.info("*** start to reset cluster")
         self.backup_reset_clusters(self.cluster_to_restore)
         if self.same_cluster:
             self._initialize_nodes(Cluster(),
                                    self.servers[:self.nodes_init])
         else:
             self._initialize_nodes(
                 Cluster(), self.input.clusters[0][:self.nodes_init])
         self.log.info("Done reset cluster")
     self.sleep(10)
     self.backupset.start = start
     self.backupset.end = end
     self.log.info("*** start restore validation")
     self.backup_restore_validate(compare_uuid=False,
                                  seqno_compare_function=">=",
                                  expected_error=self.expected_error)
Example #7
0
    def setUp(self):
        """Stand up the spatial test cluster and cache common handles."""
        # The helper must exist before the base setUp runs.
        self.helper = SpatialHelper(self, "default")
        super(SpatialQueryTests, self).setUp()
        self.log = logger.Logger.get_logger()
        self.helper.setup_cluster()
        # Async task manager plus the server list managed by the helper.
        self.cluster = Cluster()
        self.servers = self.helper.servers
Example #8
0
 def setUp(self):
     """Read the secondary-indexing test parameters and prepare state.

     Pulls every tunable from the test input (scan consistency, query
     toggles, groups, ...), builds the query definitions for the chosen
     dataset, and creates the task manager used to drive GSI operations.
     """
     super(BaseSecondaryIndexingTests, self).setUp()
     self.index_lost_during_move_out = []
     self.verify_using_index_status = self.input.param(
         "verify_using_index_status", False)
     self.use_replica_when_active_down = self.input.param(
         "use_replica_when_active_down", True)
     self.use_where_clause_in_index = self.input.param(
         "use_where_clause_in_index", False)
     self.scan_consistency = self.input.param("scan_consistency",
                                              "request_plus")
     self.scan_vector_per_values = self.input.param(
         "scan_vector_per_values", None)
     self.timeout_for_index_online = self.input.param(
         "timeout_for_index_online", 600)
     self.verify_query_result = self.input.param("verify_query_result",
                                                 True)
     self.verify_explain_result = self.input.param("verify_explain_result",
                                                   True)
     self.defer_build = self.input.param("defer_build", True)
     self.run_query_with_explain = self.input.param(
         "run_query_with_explain", True)
     self.run_query = self.input.param("run_query", True)
     self.graceful = self.input.param("graceful", False)
     self.groups = self.input.param("groups", "all").split(":")
     self.use_rest = self.input.param("use_rest", False)
     if not self.use_rest:
         query_definition_generator = SQLDefinitionGenerator()
         # Map each dataset name to its generator instead of repeating
         # near-identical ``if`` statements.  An unknown dataset leaves
         # ``query_definitions`` unset, matching the previous behaviour.
         dataset_generators = {
             "default": query_definition_generator.generate_employee_data_query_definitions,
             "employee": query_definition_generator.generate_employee_data_query_definitions,
             "simple": query_definition_generator.generate_simple_data_query_definitions,
             "sabre": query_definition_generator.generate_sabre_data_query_definitions,
             "bigdata": query_definition_generator.generate_big_data_query_definitions,
             "array": query_definition_generator.generate_airlines_data_query_definitions,
         }
         generate = dataset_generators.get(self.dataset)
         if generate is not None:
             self.query_definitions = generate()
         self.query_definitions = query_definition_generator.filter_by_group(
             self.groups, self.query_definitions)
     self.ops_map = self._create_operation_map()
     self.find_nodes_in_list()
     self.generate_map_nodes_out_dist()
     self.memory_create_list = []
     self.memory_drop_list = []
     self.skip_cleanup = self.input.param("skip_cleanup", False)
     self.index_loglevel = self.input.param("index_loglevel", None)
     if self.index_loglevel:
         self.set_indexer_logLevel(self.index_loglevel)
     if self.dgm_run:
         self._load_doc_data_all_buckets(gen_load=self.gens_load)
     self.gsi_thread = Cluster()
     # Deferred builds only make sense when GSI backs the secondary index.
     self.defer_build = self.defer_build and self.use_gsi_for_secondary
Example #9
0
    def test_multiple_backups_merges(self):
        """Run repeated backup/merge cycles and validate a restore.

        Optionally resets the restore cluster first — either by
        re-initialising nodes on the same cluster, or by hard-ejecting
        and re-initialising the second ini cluster — then restores the
        merged range and validates with a ``>=`` seqno comparison.
        """
        self.log.info("*** start to load items to all buckets")
        self.expected_error = self.input.param("expected_error", None)
        if int(self.active_resident_threshold) > 0:
            self.log.info("Disable compaction to speed up dgm")
            RestConnection(self.master).disable_auto_compaction()
        if self.expires:
            # With expirations enabled, write docs directly via the SDK.
            for bucket in self.buckets:
                cb = self._get_python_sdk_client(self.master.ip, bucket,
                                                 self.backupset.cluster_host)
                for i in range(1, self.num_items + 1):
                    cb.upsert("doc" + str(i), {"key": "value"})
        else:
            self._load_all_buckets(self.master, self.initial_load_gen,
                                   "create", self.expires)
        self.log.info("*** done to load items to all buckets")
        self.backup_create_validate()
        for i in range(1, self.number_of_repeats + 1):
            self.do_backup_merge_actions()
        start = self.number_of_backups_taken
        end = self.number_of_backups_taken
        if self.backup_corrupted:
            # Corruption tests deliberately stop before the restore phase.
            self.log.info(
                "Stop restore due to backup files corrupted as intended")
            return
        if self.reset_restore_cluster:
            self.log.info("*** start to reset cluster")
            self.backup_reset_clusters(self.cluster_to_restore)
            if self.same_cluster:
                self._initialize_nodes(Cluster(),
                                       self.servers[:self.nodes_init])
            else:
                # Second ini cluster: hard-eject the node and re-init it.
                shell = RemoteMachineShellConnection(self.input.clusters[0][0])
                shell.enable_diag_eval_on_non_local_hosts()
                shell.disconnect()
                rest = RestConnection(self.input.clusters[0][0])
                rest.force_eject_node()
                master_services = self.get_services(
                    [self.backupset.cluster_host],
                    self.services_init,
                    start_node=0)
                info = rest.get_nodes_self()
                if info.memoryQuota and int(info.memoryQuota) > 0:
                    self.quota = info.memoryQuota
                rest.init_node()
            self.log.info("Done reset cluster")
        self.sleep(10)
        """ Add built-in user cbadminbucket to second cluster """
        self.add_built_in_server_user(
            node=self.input.clusters[0][:self.nodes_init][0])

        self.backupset.start = start
        self.backupset.end = end
        self.log.info("*** start restore validation")
        self.backup_restore_validate(compare_uuid=False,
                                     seqno_compare_function=">=",
                                     expected_error=self.expected_error)
Example #10
0
 def setUp(self):
     """Capture cluster topology handles for the nwusage tests."""
     super(nwusage, self).setUp()
     # Source ('C1') and destination ('C2') ends of the replication.
     self.src_cluster = self.get_cb_cluster_by_name('C1')
     self.dest_cluster = self.get_cb_cluster_by_name('C2')
     self.src_master = self.src_cluster.get_master_node()
     self.dest_master = self.dest_cluster.get_master_node()
     self.cluster = Cluster()
     # Node counts, used when reasoning about per-node network quotas.
     self.num_src_nodes = len(self.src_cluster.get_nodes())
     self.num_dest_nodes = len(self.dest_cluster.get_nodes())
Example #11
0
    def test_multiple_backups_merges(self):
        """Repeatedly back up and merge, then restore and validate.

        Loads either binary or JSON documents (``data_type`` param),
        runs ``number_of_repeats`` backup/merge cycles, optionally
        resets the restore cluster, and validates the restore.
        """
        if self.data_type == "binary":
            gen = BlobGenerator("ent-backup",
                                "ent-backup-",
                                self.value_size,
                                end=self.num_items)
        elif self.data_type == "json":
            # ``range`` instead of the Python-2-only ``xrange``, which
            # raises NameError under Python 3.
            gen = DocumentGenerator("ent-backup",
                                    '{{"key":"value"}}',
                                    range(100),
                                    start=0,
                                    end=self.num_items)
        else:
            # Fail fast with a clear message instead of hitting an
            # UnboundLocalError on ``gen`` below.
            self.fail("unknown data_type: %s" % self.data_type)
        self.log.info("*** start to load items to all buckets")
        self.expected_error = self.input.param("expected_error", None)
        self._load_all_buckets(self.master, gen, "create", self.expires)
        self.log.info("*** done to load items to all buckets")
        self.backup_create_validate()
        for i in range(1, self.number_of_repeats + 1):
            self.do_backup_merge_actions()
        start = self.number_of_backups_taken
        end = self.number_of_backups_taken
        if self.reset_restore_cluster:
            self.log.info("*** start to reset cluster")
            self.backup_reset_clusters(self.cluster_to_restore)
            if self.same_cluster:
                self._initialize_nodes(Cluster(),
                                       self.servers[:self.nodes_init])
            else:
                self._initialize_nodes(
                    Cluster(), self.input.clusters[0][:self.nodes_init])
            self.log.info("Done reset cluster")
        self.sleep(10)
        # Add built-in user cbadminbucket to the second cluster.
        self.add_built_in_server_user(
            node=self.input.clusters[0][:self.nodes_init][0])

        self.backupset.start = start
        self.backupset.end = end
        self.log.info("*** start restore validation")
        self.backup_restore_validate(compare_uuid=False,
                                     seqno_compare_function=">=",
                                     expected_error=self.expected_error)
 def setUp(self):
     """Resolve the clusters taking part in the replication topology."""
     super(compression, self).setUp()
     # Topology parameters come straight from the test input.
     self.chain_length = self._input.param("chain_length", 2)
     self.topology = self._input.param("ctopology", "chain")
     self.src_cluster = self.get_cb_cluster_by_name('C1')
     self.src_master = self.src_cluster.get_master_node()
     self.dest_cluster = self.get_cb_cluster_by_name('C2')
     self.dest_master = self.dest_cluster.get_master_node()
     # A third cluster only participates when the chain is longer than 2.
     if self.chain_length > 2:
         self.c3_cluster = self.get_cb_cluster_by_name('C3')
         self.c3_master = self.c3_cluster.get_master_node()
     self.cluster = Cluster()
Example #13
0
    def test_multiple_backups_merges(self):
        """Run repeated backup/merge cycles, then restore and validate.

        Loads data (via SDK upserts when expirations are in play),
        performs ``number_of_repeats`` backup/merge rounds, optionally
        resets the restore cluster, and validates the restored range
        with a ``>=`` sequence-number comparison.
        """
        self.log.info("*** start to load items to all buckets")
        self.expected_error = self.input.param("expected_error", None)
        if self.expires:
            # With expirations enabled, write docs directly via the SDK.
            for bucket in self.buckets:
                cb = self._get_python_sdk_client(self.master.ip, bucket,
                                                 self.backupset.cluster_host)
                for i in range(1, self.num_items + 1):
                    cb.upsert("doc" + str(i), {"key": "value"})
        else:
            self._load_all_buckets(self.master, self.initial_load_gen,
                                   "create", self.expires)
        self.log.info("*** done to load items to all buckets")
        self.backup_create_validate()
        for i in range(1, self.number_of_repeats + 1):
            self.do_backup_merge_actions()
        # Restore window covers the merged range.
        start = self.number_of_backups_taken
        end = self.number_of_backups_taken
        if self.reset_restore_cluster:
            self.log.info("*** start to reset cluster")
            self.backup_reset_clusters(self.cluster_to_restore)
            if self.same_cluster:
                self._initialize_nodes(Cluster(),
                                       self.servers[:self.nodes_init])
            else:
                self._initialize_nodes(
                    Cluster(), self.input.clusters[0][:self.nodes_init])
            self.log.info("Done reset cluster")
        self.sleep(10)
        """ Add built-in user cbadminbucket to second cluster """
        self.add_built_in_server_user(
            node=self.input.clusters[0][:self.nodes_init][0])

        self.backupset.start = start
        self.backupset.end = end
        self.log.info("*** start restore validation")
        self.backup_restore_validate(compare_uuid=False,
                                     seqno_compare_function=">=",
                                     expected_error=self.expected_error)
def main():
    """Entry point: parse CLI/test input and run the requested doc loader."""
    try:
        (opts, args) = getopt.getopt(sys.argv[1:], 'hi:p', [])
        for o, a in opts:
            if o == "-h":
                usage()

        input = TestInput.TestInputParser.get_test_input(sys.argv)
        if not input.servers:
            usage("ERROR: no servers specified. Please use the -i parameter.")
    except IndexError:
        usage()
    except getopt.GetoptError as error:
        usage("ERROR: " + str(error))

    # Workload parameters, all overridable from the test input.
    docs_per_day = input.param("doc_per_day", 49)
    years = input.param("years", 2)
    bucket_name = input.param("bucket_name", "default")
    bucket_port = input.param("bucket_port", None)
    bucket_sasl_pass = input.param("bucket_sasl_pass", None)
    flag = input.param("flags", 0)
    to_directory = input.param("to_dir", '')
    loader_type = input.param("loader_type", 'default')

    if to_directory:
        # Directory mode: generate docs to local files; no cluster needed.
        loader = DocLoaderDirectory(input.servers[0], to_directory,
                                    bucket_name)
        generators_load = loader.generate_docs(docs_per_day, years)
        loader.load(generators_load)
    else:
        # Dispatch table instead of an if/elif chain; an unrecognised
        # loader_type previously fell through and crashed later with an
        # unbound-local NameError on ``loader``.
        loader_classes = {
            'default': DocLoaderCouchbase,
            'join': JoinDocLoader,
            'sabre': SabreDocLoader,
            'base64': Base64DocLoader,
            'nondoc': NonDocLoader,
        }
        if loader_type not in loader_classes:
            sys.exit("ERROR: unknown loader_type '%s'" % loader_type)
        cluster = Cluster()
        try:
            bucket = initialize_bucket(bucket_name, bucket_port,
                                       bucket_sasl_pass)
            loader = loader_classes[loader_type](input.servers, cluster)
            generators_load = loader.generate_docs(docs_per_day, years)
            loader.load(generators_load, bucket, flag=flag)
        finally:
            cluster.shutdown()
    def setUp(self):
        """Prepare XDCR clusters plus the state _run_observe() expects."""
        super(ObserveXdcrTest, self).setUp()
        # Source and destination clusters with their master nodes.
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_master = self.dest_cluster.get_master_node()

        # State consumed by _run_observe() in observetest.
        self.observe_with = self._input.param("observe_with", "")
        self.mutate_by = self._input.param("mutate_by", "set")
        self.default_map_func = 'function (doc) { emit(doc.age, doc.first_name);}'
        self.default_design_doc = "Doc1"
        view_map_func = 'function (doc) { emit(null, doc);}'
        self.default_view = View("default_view", view_map_func, None)
        self.cluster = Cluster()
        self.wait_timeout = self._wait_timeout
        self.num_items = self._num_items
Example #16
0
 def setUp(self):
     """Collect upgrade/topology parameters for the Capi XDCR tests."""
     super(Capi, self).setUp()
     self.cluster = Cluster()
     # Replication endpoints.
     self.src_cluster = self.get_cb_cluster_by_name('C1')
     self.dest_cluster = self.get_cb_cluster_by_name('C2')
     self.src_master = self.src_cluster.get_master_node()
     self.dest_master = self.dest_cluster.get_master_node()
     # Install/upgrade knobs from the test input.
     self.use_hostnames = self._input.param("use_hostnames", False)
     self.src_init = self._input.param('src_init', 2)
     self.dest_init = self._input.param('dest_init', 1)
     self.product = self._input.param('product', 'couchbase-server')
     self.initial_version = self._input.param('initial_version', '2.5.1-1083')
     self.initial_vbuckets = self._input.param('initial_vbuckets', 1024)
     self.init_nodes = self._input.param('init_nodes', True)
     self.initial_build_type = self._input.param('initial_build_type', None)
     self.upgrade_build_type = self._input.param('upgrade_build_type', self.initial_build_type)
     # The source master doubles as this test's primary node.
     self.master = self.src_master
     self.rest = RestConnection(self.src_master)
Example #17
0
 def setUp(self):
     """Read magma query-test parameters and set up REST/stats helpers."""
     super(QueryMagmaTests, self).setUp()
     self.log.info(
         "==============  QueryMagmaTests setup has started ==============")
     # Workload parameters.
     self.bucket_name = self.input.param("bucket", self.default_bucket_name)
     self.active_resident_threshold = self.input.param(
         "resident_ratio", 100)
     self.num_items = self.input.param("num_items", 10000)
     self.expiry = self.input.param("expiry", 0)
     self.rollback = self.input.param("rollback", False)
     # Helpers bound to the master node.
     self.conn = RestConnection(self.master)
     self.stat = CollectionsStats(self.master)
     # Command line for the cbq shell against the local query service.
     self.cbqpath = '{0}cbq -quiet -u {1} -p {2} -e=localhost:8093 '.format(
         self.path, self.username, self.password)
     self.cluster = Cluster()
     self.log.info(
         "==============  QueryMagmaTests setup has completed =============="
     )
Example #18
0
    def setUp(self):
        """Install Sync Gateway (first case only) and create test buckets.

        When ``case_number`` is 1: render the walrus gateway config
        template with each server's IP, copy resources to the remote
        host, install SG, verify it is running, then reset and recreate
        the default/sasl/standard buckets used by the SG config tests.
        """
        super(SGConfigTests, self).setUp()
        for server in self.servers:
            if self.case_number == 1:
                # Render the walrus config template with this server's IP.
                with open(
                        'pytests/sg/resources/gateway_config_walrus_template.json',
                        'r') as file:
                    filedata = file.read()
                    filedata = filedata.replace('LOCAL_IP', server.ip)
                with open('pytests/sg/resources/gateway_config_walrus.json',
                          'w') as file:
                    file.write(filedata)
                # Ship the rendered resources to the remote host.
                shell = RemoteMachineShellConnection(server)
                shell.execute_command("rm -rf {0}/tmp/*".format(
                    self.folder_prefix))
                shell.copy_files_local_to_remote(
                    'pytests/sg/resources',
                    '{0}/tmp'.format(self.folder_prefix))
                # will install sg only the first time
                self.install(shell)
                pid = self.is_sync_gateway_process_running(shell)
                self.assertNotEqual(pid, 0)
                exist = shell.file_exists(
                    '{0}/tmp/'.format(self.folder_prefix), 'gateway.log')
                self.assertTrue(exist)
                shell.disconnect()
        if self.case_number == 1:
            # Reset bucket state and recreate the buckets SG relies on.
            shutil.copy2('pytests/sg/resources/gateway_config_backup.json',
                         'pytests/sg/resources/gateway_config.json')
            BucketOperationHelper.delete_all_buckets_or_assert(
                self.servers, self)
            self.cluster = Cluster()
            shared_params = self._create_bucket_params(server=self.master,
                                                       size=150)
            self.cluster.create_default_bucket(shared_params)
            task = self.cluster.async_create_sasl_bucket(
                name='test_%E-.5',
                password='******',
                bucket_params=shared_params)
            task.result()
            task = self.cluster.async_create_standard_bucket(
                name='db', port=11219, bucket_params=shared_params)

            task.result()
Example #19
0
 def setUp(self):
     """Enable remote diag/eval, reset alternate addresses, build doc gens."""
     # diag/eval must be reachable from non-local hosts on every node
     # before the base setUp talks to them.
     for node in TestInputSingleton.input.servers:
         shell = RemoteMachineShellConnection(node)
         shell.enable_diag_eval_on_non_local_hosts()
         shell.disconnect()
     super(AlternateAddressTests, self).setUp()
     self.remove_all_alternate_address_settings()
     self.cluster_helper = Cluster()
     self.ex_path = self.tmp_path + "export{0}/".format(self.master.ip)
     # Client-side knobs from the test input.
     self.num_items = self.input.param("items", 1000)
     self.client_os = self.input.param("client_os", "linux")
     self.localhost = self.input.param("localhost", False)
     # Paired generators: one creates "altaddr" docs, one deletes "imex".
     self.json_create_gen = JsonDocGenerator("altaddr",
                                             op_type="create",
                                             encoding="utf-8",
                                             start=0,
                                             end=self.num_items)
     self.json_delete_gen = JsonDocGenerator("imex",
                                             op_type="delete",
                                             encoding="utf-8",
                                             start=0,
                                             end=self.num_items)
Example #20
0
 def setUp(self):
     """Bring up the spatial cluster and cache backup-related settings."""
     self.input = TestInputSingleton.input
     self.servers = self.input.servers
     self.master = self.servers[0]
     self.log = logger.Logger.get_logger()
     # The spatial helper owns cluster setup for these tests.
     self.helper = SpatialHelper(self, "default")
     self.helper.setup_cluster()
     self.cluster = Cluster()
     # Bucket topology parameters.
     self.default_bucket = self.input.param("default_bucket", True)
     self.sasl_buckets = self.input.param("sasl_buckets", 0)
     self.standard_buckets = self.input.param("standard_buckets", 0)
     self.memcached_buckets = self.input.param("memcached_buckets", 0)
     # Re-read the server list now that the helper has set things up.
     self.servers = self.helper.servers
     # Remote shell on the master node, used for backup commands.
     self.shell = RemoteMachineShellConnection(self.master)
     info = self.shell.extract_remote_info()
     self.os = info.type.lower()
     self.couchbase_login_info = "%s:%s" % (
         self.input.membase_settings.rest_username,
         self.input.membase_settings.rest_password)
     self.backup_location = self.input.param("backup_location",
                                             "/tmp/backup")
     self.command_options = self.input.param("command_options", '')
    def setUp(self):
        """Initialise the cluster (unless skipped) and create the bucket.

        Reads the LWW/drift bucket parameters, optionally re-initialises
        and rebalances the cluster, then creates the test bucket.
        """
        super(BucketConfig, self).setUp()
        self.testcase = '2'
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        #self.time_synchronization = self.input.param("time_sync", "enabledWithoutDrift")
        self.lww = self.input.param("lww", True)
        self.drift = self.input.param("drift", False)
        self.bucket = 'bucket-1'
        self.master = self.servers[0]
        self.rest = RestConnection(self.master)
        self.cluster = Cluster()
        self.skip_rebalance = self.input.param("skip_rebalance", False)

        # Size the memory quota from the node's reserved memory.
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        mem_quota = int(self.rest.get_nodes_self().mcdMemoryReserved *
                        node_ram_ratio)

        if not self.skip_rebalance:
            self.rest.init_cluster(self.master.rest_username,
                self.master.rest_password)
            self.rest.init_cluster_memoryQuota(self.master.rest_username,
                self.master.rest_password,
                memoryQuota=mem_quota)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
                ClusterOperationHelper.wait_for_ns_servers_or_assert(
                    [self.master], self.testcase)
            try:
                ClusterOperationHelper.add_and_rebalance(self.servers)
            except Exception as e:
                # BUG FIX: TestCase.fail() takes a single message argument;
                # the original ``self.fail(e, '...')`` raised TypeError and
                # masked the real rebalance error.
                self.fail('cluster is not rebalanced: %s' % e)

        self._create_bucket(self.lww, self.drift)
Example #22
0
 def setUp(self):
     self.times_teardown_called = 1
     super(CliBaseTest, self).setUp()
     self.r = random.Random()
     self.vbucket_count = 1024
     self.cluster = Cluster()
     self.clusters_dic = self.input.clusters
     if self.clusters_dic:
         if len(self.clusters_dic) > 1:
             self.dest_nodes = self.clusters_dic[1]
             self.dest_master = self.dest_nodes[0]
         elif len(self.clusters_dic) == 1:
             self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
     else:
         self.log.error("**** Cluster config is setup in ini file. ****")
     self.shell = RemoteMachineShellConnection(self.master)
     if not self.skip_init_check_cbserver:
         self.rest = RestConnection(self.master)
         self.cb_version = self.rest.get_nodes_version()
         """ cli output message """
         self.cli_bucket_create_msg = "SUCCESS: Bucket created"
         self.cli_rebalance_msg = "SUCCESS: Rebalance complete"
         if self.cb_version[:3] == "4.6":
             self.cli_bucket_create_msg = "SUCCESS: bucket-create"
             self.cli_rebalance_msg = "SUCCESS: rebalanced cluster"
     self.import_back = self.input.param("import_back", False)
     if self.import_back:
         if len(self.servers) < 3:
             self.fail("This test needs minimum of 3 vms to run ")
     self.test_type = self.input.param("test_type", "import")
     self.import_file = self.input.param("import_file", None)
     self.imex_type = self.input.param("imex_type", "json")
     self.format_type = self.input.param("format_type", "lines")
     self.import_method = self.input.param("import_method", "file://")
     self.force_failover = self.input.param("force_failover", False)
     self.json_invalid_errors = self.input.param("json-invalid-errors", None)
     self.field_separator = self.input.param("field-separator", "comma")
     self.key_gen = self.input.param("key-gen", True)
     self.skip_docs = self.input.param("skip-docs", None)
     self.limit_docs = self.input.param("limit-docs", None)
     self.limit_rows = self.input.param("limit-rows", None)
     self.skip_rows = self.input.param("skip-rows", None)
     self.omit_empty = self.input.param("omit-empty", None)
     self.infer_types = self.input.param("infer-types", None)
     self.fx_generator = self.input.param("fx-generator", None)
     self.fx_gen_start = self.input.param("fx-gen-start", None)
     self.secure_conn = self.input.param("secure-conn", False)
     self.no_cacert = self.input.param("no-cacert", False)
     self.no_ssl_verify = self.input.param("no-ssl-verify", False)
     self.verify_data = self.input.param("verify-data", False)
     self.field_substitutions = self.input.param("field-substitutions", None)
     self.check_preload_keys = self.input.param("check-preload-keys", True)
     self.debug_logs = self.input.param("debug-logs", False)
     self.should_fail = self.input.param("should-fail", False)
     info = self.shell.extract_remote_info()
     self.os_version = info.distribution_version.lower()
     self.deliverable_type = info.deliverable_type.lower()
     type = info.type.lower()
     self.excluded_commands = self.input.param("excluded_commands", None)
     self.os = 'linux'
     self.full_v = None
     self.short_v = None
     self.build_number = None
     cmd =  'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(self.master.ip,
                                                           self.master.rest_username,
                                                           self.master.rest_password)
     cmd += '-d "path_config:component_path(bin)."'
     bin_path  = subprocess.check_output(cmd, shell=True)
     if "bin" not in bin_path:
         self.fail("Check if cb server install on %s" % self.master.ip)
     else:
         self.cli_command_path = bin_path.replace('"','') + "/"
     self.root_path = LINUX_ROOT_PATH
     self.tmp_path = "/tmp/"
     self.tmp_path_raw = "/tmp/"
     self.cmd_backup_path = LINUX_BACKUP_PATH
     self.backup_path = LINUX_BACKUP_PATH
     self.cmd_ext = ""
     self.src_file = ""
     self.des_file = ""
     self.sample_files_path = LINUX_COUCHBASE_SAMPLE_PATH
     self.log_path = LINUX_COUCHBASE_LOGS_PATH
     self.base_cb_path = LINUX_CB_PATH
     """ non root path """
     if self.nonroot:
         self.sample_files_path = "/home/%s%s" % (self.master.ssh_username,
                                                  LINUX_COUCHBASE_SAMPLE_PATH)
         self.log_path = "/home/%s%s" % (self.master.ssh_username,
                                         LINUX_COUCHBASE_LOGS_PATH)
         self.base_cb_path = "/home/%s%s" % (self.master.ssh_username,
                                             LINUX_CB_PATH)
         self.root_path = "/home/%s/" % self.master.ssh_username
     if type == 'windows':
         self.os = 'windows'
         self.cmd_ext = ".exe"
         self.root_path = WIN_ROOT_PATH
         self.tmp_path = WIN_TMP_PATH
         self.tmp_path_raw = WIN_TMP_PATH_RAW
         self.cmd_backup_path = WIN_BACKUP_C_PATH
         self.backup_path = WIN_BACKUP_PATH
         self.sample_files_path = WIN_COUCHBASE_SAMPLE_PATH_C
         self.log_path = WIN_COUCHBASE_LOGS_PATH
         win_format = "C:/Program Files"
         cygwin_format = "/cygdrive/c/Program\ Files"
         if win_format in self.cli_command_path:
             self.cli_command_path = self.cli_command_path.replace(win_format,
                                                                   cygwin_format)
         self.base_cb_path = WIN_CB_PATH
     if info.distribution_type.lower() == 'mac':
         self.os = 'mac'
     self.full_v, self.short_v, self.build_number = self.shell.get_cbversion(type)
     self.couchbase_usrname = "%s" % (self.input.membase_settings.rest_username)
     self.couchbase_password = "******" % (self.input.membase_settings.rest_password)
     self.cb_login_info = "%s:%s" % (self.couchbase_usrname,
                                     self.couchbase_password)
     self.path_type = self.input.param("path_type", None)
     if self.path_type is None:
         self.log.info("Test command with absolute path ")
     elif self.path_type == "local":
         self.log.info("Test command at %s dir " % self.cli_command_path)
         self.cli_command_path = "cd %s; ./" % self.cli_command_path
     self.cli_command = self.input.param("cli_command", None)
     self.command_options = self.input.param("command_options", None)
     if self.command_options is not None:
         self.command_options = self.command_options.split(";")
     if str(self.__class__).find('couchbase_clitest.CouchbaseCliTest') == -1:
         if len(self.servers) > 1 and int(self.nodes_init) == 1:
             servers_in = [self.servers[i + 1] for i in range(self.num_servers - 1)]
             self.cluster.rebalance(self.servers[:1], servers_in, [])
     for bucket in self.buckets:
         testuser = [{'id': bucket.name, 'name': bucket.name, 'password': '******'}]
         rolelist = [{'id': bucket.name, 'name': bucket.name, 'roles': 'admin'}]
         self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
    def setUp(self):
        """Prepare an alternate-address (alt_addr) CLI test run.

        Reads all test parameters from the test input, optionally resets the
        XDCR destination cluster, probes the server OS via SSH to locate the
        Couchbase ``bin`` directory, then (unless disabled) rebalances the
        configured nodes into one cluster and creates per-bucket RBAC users.

        Fixes vs. previous revision:
        * ``self.couchbase_password`` used ``"******" % (...)`` (a redacted
          ``"%s"``) which raises TypeError at runtime — restored to ``"%s"``.
        * cygwin path literal made a raw string (``\\ `` is an invalid escape).
        * bare ``except:`` narrowed to ``except Exception:``.
        * local ``type`` renamed ``os_type`` (shadowed the builtin).
        """
        self.times_teardown_called = 1
        super(AltAddrBaseTest, self).setUp()
        self.r = random.Random()
        self.cluster = Cluster()
        self.clusters_dic = self.input.clusters
        self.client_os = self.input.param("client_os", "linux")
        self.alt_addr_with_xdcr = self.input.param("alt_addr_with_xdcr", False)
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                # Second cluster in the ini file acts as the XDCR destination.
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
            elif len(self.clusters_dic) == 1:
                self.log.error(
                    "=== need 2 cluster to setup xdcr in ini file ===")
            if self.alt_addr_with_xdcr:
                self.des_name = "des_cluster"
                self.delete_xdcr_reference(self.clusters_dic[0][0].ip,
                                           self.clusters_dic[1][0].ip)
                if self.skip_init_check_cbserver:
                    for key in self.clusters_dic.keys():
                        servers = self.clusters_dic[key]
                        try:
                            self.backup_reset_clusters(servers)
                        except Exception:
                            # Best-effort cleanup: log and retry once.
                            self.log.error(
                                "was not able to cleanup cluster the first time"
                            )
                            self.backup_reset_clusters(servers)
        else:
            self.log.error("**** Cluster config is setup in ini file. ****")

        self.shell = RemoteMachineShellConnection(self.master)
        if not self.skip_init_check_cbserver:
            self.rest = RestConnection(self.master)
            self.cb_version = self.rest.get_nodes_version()

        # CLI flag parameters (names mirror the cbimport/cbexport options).
        self.key_gen = self.input.param("key-gen", True)
        self.secure_conn = self.input.param("secure-conn", False)
        self.no_cacert = self.input.param("no-cacert", False)
        self.no_ssl_verify = self.input.param("no-ssl-verify", False)
        self.verify_data = self.input.param("verify-data", False)
        self.debug_logs = self.input.param("debug-logs", False)
        self.should_fail = self.input.param("should-fail", False)
        # Alternate-address scenario switches.
        self.add_hostname_node = self.input.param("add_hostname_node", False)
        self.add_hostname_node_at_src = self.input.param(
            "add_hostname_node_at_src", False)
        self.add_hostname_node_at_des = self.input.param(
            "add_hostname_node_at_des", False)
        self.num_hostname_add = self.input.param("num_hostname_add", 1)
        self.alt_addr_services_in = self.input.param("alt_addr_services_in",
                                                     "kv")
        self.alt_addr_rebalance_out = self.input.param(
            "alt_addr_rebalance_out", False)
        self.alt_addr_rebalance_in = self.input.param("alt_addr_rebalance_in",
                                                      False)
        self.alt_addr_rebalance_in_services = self.input.param(
            "alt_addr_rebalance_in_services", "kv")
        self.alt_addr_use_public_dns = self.input.param(
            "alt_addr_use_public_dns", False)
        self.alt_addr_kv_loader = self.input.param("alt_addr_kv_loader", False)
        self.alt_addr_n1ql_query = self.input.param("alt_addr_n1ql_query",
                                                    False)
        self.alt_addr_eventing_function = self.input.param(
            "alt_addr_eventing_function", False)
        self.alt_addr_fts_loader = self.input.param("alt_addr_fts_loader",
                                                    False)
        self.run_alt_addr_loader = self.input.param("run_alt_addr_loader",
                                                    False)
        self.all_alt_addr_set = False

        # Detect remote OS and resolve the server's bin directory.
        info = self.shell.extract_remote_info()
        self.os_version = info.distribution_version.lower()
        self.deliverable_type = info.deliverable_type.lower()
        os_type = info.type.lower()
        self.excluded_commands = self.input.param("excluded_commands", None)
        self.os = 'linux'
        self.full_v = None
        self.short_v = None
        self.build_number = None
        # Ask the server itself for its bin path via the diag/eval endpoint.
        # NOTE(review): shell=True with interpolated credentials — acceptable
        # for a trusted test harness, but do not reuse with untrusted input.
        cmd = 'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(
            self.master.ip, self.master.rest_username,
            self.master.rest_password)
        cmd += '-d "path_config:component_path(bin)."'
        bin_path = check_output(cmd, shell=True)
        bin_path = bin_path.decode()
        if "bin" not in bin_path:
            self.fail("Check if cb server install on %s" % self.master.ip)
        else:
            self.cli_command_path = bin_path.replace('"', '') + "/"
        self.root_path = LINUX_ROOT_PATH
        self.tmp_path = "/tmp/"
        self.tmp_path_raw = "/tmp/"
        self.cmd_ext = ""
        self.src_file = ""
        self.des_file = ""
        self.log_path = LINUX_COUCHBASE_LOGS_PATH
        self.base_cb_path = LINUX_CB_PATH
        # Non-root installs live under the ssh user's home directory.
        if self.nonroot:
            self.log_path = "/home/%s%s" % (self.master.ssh_username,
                                            LINUX_COUCHBASE_LOGS_PATH)
            self.base_cb_path = "/home/%s%s" % (self.master.ssh_username,
                                                LINUX_CB_PATH)
            self.root_path = "/home/%s/" % self.master.ssh_username
        if os_type == 'windows':
            self.os = 'windows'
            self.cmd_ext = ".exe"
            self.root_path = WIN_ROOT_PATH
            self.tmp_path = WIN_TMP_PATH
            self.tmp_path_raw = WIN_TMP_PATH_RAW
            win_format = "C:/Program Files"
            cygwin_format = r"/cygdrive/c/Program\ Files"
            if win_format in self.cli_command_path:
                self.cli_command_path = self.cli_command_path.replace(
                    win_format, cygwin_format)
            self.base_cb_path = WIN_CB_PATH
        if info.distribution_type.lower() == 'mac':
            self.os = 'mac'
        self.full_v, self.short_v, self.build_number = self.shell.get_cbversion(
            os_type)
        self.couchbase_usrname = "%s" % (
            self.input.membase_settings.rest_username)
        self.couchbase_password = "%s" % (
            self.input.membase_settings.rest_password)
        self.cb_login_info = "%s:%s" % (self.couchbase_usrname,
                                        self.couchbase_password)
        self.path_type = self.input.param("path_type", None)
        if self.path_type is None:
            self.log.info("Test command with absolute path ")
        elif self.path_type == "local":
            # Run the CLI from inside its own directory instead of by full path.
            self.log.info("Test command at %s dir " % self.cli_command_path)
            self.cli_command_path = "cd %s; ./" % self.cli_command_path
        self.cli_command = self.input.param("cli_command", None)

        self.start_with_cluster = self.input.param("start_with_cluster", True)
        # CouchbaseCliTest manages its own cluster; everyone else rebalances
        # all configured servers into a single cluster up front.
        if str(self.__class__).find(
                'couchbase_clitest.CouchbaseCliTest') == -1:
            if len(self.servers) > 1 and int(
                    self.nodes_init) == 1 and self.start_with_cluster:
                servers_in = [
                    self.servers[i + 1] for i in range(self.num_servers - 1)
                ]
                self.cluster.rebalance(self.servers[:1], servers_in, [])
        # One admin RBAC user per bucket, named after the bucket.
        # NOTE(review): '******' below looks like a redacted literal from the
        # original source — confirm the intended password against upstream.
        for bucket in self.buckets:
            testuser = [{
                'id': bucket.name,
                'name': bucket.name,
                'password': '******'
            }]
            rolelist = [{
                'id': bucket.name,
                'name': bucket.name,
                'roles': 'admin'
            }]
            self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
 def setUp(self):
     """Set up a sub-document test: build the cluster via SubdocHelper
     on the 'default' bucket and keep handles to the helper's servers
     and a fresh Cluster task manager.
     """
     self.log = logger.Logger.get_logger()
     # Helper owns bucket/cluster preparation for sub-document tests.
     self.helper = SubdocHelper(self, "default")
     self.helper.setup_cluster()
     self.cluster = Cluster()
     # Reuse the server list the helper resolved during setup.
     self.servers = self.helper.servers
    def test_backup_restore_sanity_bwc(self):
        """
        1. Create default bucket on the cluster and loads it with given number of items
        2. Perform updates and create backups for number of times (param number_of_backups)
           If bkrs client needs to upgrade, test will perform upgrade bkrs client version
        3. Perform restores for the same number of times with random start and end values

        Fix vs. previous revision: removed a dead ``i += 1`` inside the backup
        loop (``for i in range(...)`` rebinds ``i`` each iteration, so the
        increment had no effect and only misled readers).
        """

        gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
        self.log.info("*** start to load items to all buckets")
        self._load_all_buckets(self.master, gen, "create", self.expires)
        self.log.info("*** done to load items to all buckets")
        self.ops_type = self.input.param("ops-type", "update")
        self.expected_error = self.input.param("expected_error", None)
        if self.auto_failover:
            self.log.info("Enabling auto failover on " + str(self.backupset.cluster_host))
            rest_conn = RestConnection(self.backupset.cluster_host)
            rest_conn.update_autofailover_settings(self.auto_failover, self.auto_failover_timeout)
        self.backup_create_validate()
        # Mutate data, then take and validate one backup per iteration.
        for i in range(1, self.backupset.number_of_backups + 1):
            if self.ops_type == "update":
                self.log.info("*** start to update items in all buckets")
                self._load_all_buckets(self.master, gen, "update", self.expires)
                self.log.info("*** done update items in all buckets")
            elif self.ops_type == "delete":
                self.log.info("*** start to delete items in all buckets")
                self._load_all_buckets(self.master, gen, "delete", self.expires)
                self.log.info("*** done to delete items in all buckets")
            self.sleep(10)
            if self.backupset.bkrs_client_upgrade:
                self.backupset.current_bkrs_client_version = \
                    self._get_current_bkrs_client_version()
                # Mid-run (3rd backup of >=5), upgrade the bkrs client once it
                # still matches the cluster version, to exercise BWC.
                if i == 3 and self.backupset.number_of_backups >= 5 and \
                    self.backupset.current_bkrs_client_version[:5] == self.bk_cluster_version[:5]:
                    self.log.info("\nNeed to upgrade CBM version to {0} to run bkrs bwc upgrade"\
                                      .format(self.latest_bkrs_version))
                    self._install([self.backupset.backup_host],
                                  self.latest_bkrs_version)
            self.log.info("*** start to validate backup cluster")
            self.backup_cluster_validate()

        self.targetMaster = True
        # Pick a random backup window [start, end] to restore.
        start = randrange(1, self.backupset.number_of_backups + 1)
        if start == self.backupset.number_of_backups:
            end = start
        else:
            end = randrange(start, self.backupset.number_of_backups + 1)
        self.log.info("*** start to restore cluster")
        # Track windows already restored so each iteration tries a new one.
        restored = {"{0}/{1}".format(start, end)}
        for i in range(1, self.backupset.number_of_backups + 1):
            if self.reset_restore_cluster:
                self.log.info("*** start to reset cluster")
                self.backup_reset_clusters(self.cluster_to_restore)
                if self.same_cluster:
                    self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])
                else:
                    self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])
                self.log.info("Done reset cluster")
            self.sleep(10)

            # Add built-in user cbadminbucket to second cluster
            self.add_built_in_server_user(node=self.input.clusters[0][:self.nodes_init][0])

            self.backupset.start = start
            self.backupset.end = end
            self.log.info("*** start restore validation")
            self.backup_restore_validate(compare_uuid=False,
                                         seqno_compare_function=">=",
                                         expected_error=self.expected_error)
            if self.backupset.number_of_backups == 1:
                continue
            # Re-roll until we hit a window not restored yet.
            while "{0}/{1}".format(start, end) in restored:
                start = randrange(1, self.backupset.number_of_backups + 1)
                if start == self.backupset.number_of_backups:
                    end = start
                else:
                    end = randrange(start, self.backupset.number_of_backups + 1)
            restored.add("{0}/{1}".format(start, end))
Exemple #26
0
    def common_setup(self):
        """Shared setup for swap-rebalance tests.

        Resolves the server list (collapsing to one IP for cluster_run),
        reads load/swap parameters, stops any leftover rebalance from a
        previous run, initializes the master node's cluster and memory quota,
        and creates the built-in ``cbadminbucket`` admin user. On any failure
        the cluster task helper is shut down and the test is failed.

        Fix vs. previous revision: ``except Exception, e:`` is Python-2-only
        syntax (a SyntaxError on Python 3) — replaced with
        ``except Exception as e:`` which is valid on 2.6+ and 3.x.
        """
        self.cluster_helper = Cluster()
        self.log = logger.Logger.get_logger()
        self.cluster_run = False
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        # All entries sharing one IP means a local cluster_run: normalize every
        # server entry to the node's self-reported IP.
        if len({server.ip for server in self.servers}) == 1:
            ip = rest.get_nodes_self().ip
            for server in self.servers:
                server.ip = ip
            self.cluster_run = True
        self.case_number = self.input.param("case_number", 0)
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 1000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param(
            "swap-orchestrator", False)
        self.do_access = self.input.param("do-access", True)
        self.load_started = False
        self.loaders = []
        try:
            # Clear the state from Previous invalid run
            if rest._rebalance_progress_status() == 'running':
                self.log.warning(
                    "rebalancing is still running, previous test should be verified"
                )
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            self.log.info("==============  SwapRebalanceBase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
            SwapRebalanceBase.reset(self)

            # Make sure the test is setup correctly
            min_servers = int(self.num_initial_servers) + int(self.num_swap)
            msg = "minimum {0} nodes required for running swap rebalance"
            self.assertTrue(len(self.servers) >= min_servers,
                            msg=msg.format(min_servers))

            self.log.info(
                'picking server : {0} as the master'.format(serverInfo))
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
                self.servers)
            info = rest.get_nodes_self()
            rest.init_cluster(username=serverInfo.rest_username,
                              password=serverInfo.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
            SwapRebalanceBase.enable_diag_eval_on_non_local_hosts(
                self, serverInfo)
            # Add built-in user
            # NOTE(review): '******' looks like a redacted literal from the
            # original source — confirm the intended password upstream.
            testuser = [{
                'id': 'cbadminbucket',
                'name': 'cbadminbucket',
                'password': '******'
            }]
            RbacBase().create_user_source(testuser, 'builtin', self.servers[0])

            # Assign user to role
            role_list = [{
                'id': 'cbadminbucket',
                'name': 'cbadminbucket',
                'roles': 'admin'
            }]
            RbacBase().add_user_role(role_list,
                                     RestConnection(self.servers[0]),
                                     'builtin')

            if self.num_buckets > 10:
                BaseTestCase.change_max_buckets(self, self.num_buckets)
            self.log.info(
                "==============  SwapRebalanceBase setup was finished for test #{0} {1} =============="
                .format(self.case_number, self._testMethodName))
            SwapRebalanceBase._log_start(self)
        except Exception as e:
            # Ensure background task threads are torn down before failing.
            self.cluster_helper.shutdown()
            self.fail(e)
Exemple #27
0
    def test_backup_restore_collection_sanity(self):
        """
        1. Create default bucket on the cluster and loads it with given number of items
        2. Perform updates and create backups for specified number of times (test param number_of_backups)
        3. Perform restores for the same number of times with random start and end values

        Scopes/collections are created on the cluster host before backing up;
        after all restores, scope/collection presence is verified on the
        restore cluster host.
        """
        self.log.info("*** create collection in all buckets")
        self.log.info("*** start to load items to all buckets")
        self.active_resident_threshold = 100
        self.load_all_buckets(self.backupset.cluster_host)
        self.log.info("*** done to load items to all buckets")
        self.ops_type = self.input.param("ops-type", "update")
        self.expected_error = self.input.param("expected_error", None)
        # Create scopes and collections that the backup should capture.
        self.create_scope_cluster_host()
        self.create_collection_cluster_host(self.backupset.col_per_scope)
        backup_scopes = self.get_bucket_scope_cluster_host()
        # NOTE(review): backup_collections and col_stats are assigned but never
        # used below — presumably smoke-calls; confirm before removing.
        backup_collections = self.get_bucket_collection_cluster_host()
        col_stats = self.get_collection_stats_cluster_host()
        # NOTE(review): only the last scope's id survives this loop and
        # bk_scope_id is never read afterwards — looks vestigial; confirm.
        for backup_scope in backup_scopes:
            bk_scope_id = self.get_scopes_id_cluster_host(backup_scope)
        if self.auto_failover:
            self.log.info("Enabling auto failover on " +
                          str(self.backupset.cluster_host))
            rest_conn = RestConnection(self.backupset.cluster_host)
            rest_conn.update_autofailover_settings(self.auto_failover,
                                                   self.auto_failover_timeout)
        self.backup_create_validate()
        # Mutate data, then take and validate one backup per iteration.
        for i in range(1, self.backupset.number_of_backups + 1):
            if self.ops_type == "update":
                self.log.info("*** start to update items in all buckets")
                self.load_all_buckets(self.backupset.cluster_host, ratio=0.1)
                self.log.info("*** done update items in all buckets")
            self.sleep(10)
            self.log.info("*** start to validate backup cluster")
            self.backup_cluster_validate()
        self.targetMaster = True
        # Pick a random backup window [start, end] to restore.
        start = randrange(1, self.backupset.number_of_backups + 1)
        if start == self.backupset.number_of_backups:
            end = start
        else:
            end = randrange(start, self.backupset.number_of_backups + 1)
        self.log.info("*** start to restore cluster")
        # Dict used as a set of already-restored "start/end" windows.
        restored = {"{0}/{1}".format(start, end): ""}
        for i in range(1, self.backupset.number_of_backups + 1):
            if self.reset_restore_cluster:
                self.log.info("*** start to reset cluster")
                self.backup_reset_clusters(self.cluster_to_restore)
                if self.same_cluster:
                    self._initialize_nodes(Cluster(),
                                           self.servers[:self.nodes_init])
                else:
                    # Hard-reset the dedicated restore host: eject and re-init.
                    shell = RemoteMachineShellConnection(
                        self.backupset.restore_cluster_host)
                    shell.enable_diag_eval_on_non_local_hosts()
                    shell.disconnect()
                    rest = RestConnection(self.backupset.restore_cluster_host)
                    rest.force_eject_node()
                    rest.init_node()
                self.log.info("Done reset cluster")
            self.sleep(10)
            """ Add built-in user cbadminbucket to second cluster """
            self.add_built_in_server_user(
                node=self.input.clusters[0][:self.nodes_init][0])

            self.backupset.start = start
            self.backupset.end = end
            self.log.info("*** start restore validation")
            # Build "bucket.scope=bucket.scope" mappings for every non-default
            # scope so the restore maps each scope onto itself.
            data_map_collection = []
            for scope in backup_scopes:
                if "default" in scope:
                    continue
                data_map_collection.append(self.buckets[0].name + "." + scope + "=" + \
                                           self.buckets[0].name + "." + scope)
            self.bucket_map_collection = ",".join(data_map_collection)
            self.backup_restore_validate(compare_uuid=False,
                                         seqno_compare_function=">=",
                                         expected_error=self.expected_error)
            if self.backupset.number_of_backups == 1:
                continue
            # Re-roll until we hit a window not restored yet.
            while "{0}/{1}".format(start, end) in restored:
                start = randrange(1, self.backupset.number_of_backups + 1)
                if start == self.backupset.number_of_backups:
                    end = start
                else:
                    end = randrange(start,
                                    self.backupset.number_of_backups + 1)
            restored["{0}/{1}".format(start, end)] = ""
        # NOTE(review): restore_scopes/restore_collections are fetched but not
        # compared here; verification happens inside the helper call below.
        restore_scopes = self.get_bucket_scope_restore_cluster_host()
        restore_collections = self.get_bucket_collection_restore_cluster_host()
        self.verify_collections_in_restore_cluster_host()
 def __init__(self, nodes, num_clusters=2):
     """Build an XDCR topology helper.

     :param nodes: server nodes to distribute across the clusters.
     :param num_clusters: how many clusters to form (default 2).
     """
     self.log = logger.Logger.get_logger()
     # Populated by setup_xdcr with the clusters it creates.
     self.cluster_list = []
     self.__clusterop = Cluster()
     self.setup_xdcr(nodes, num_clusters)
Exemple #29
0
        usage("ERROR: " + str(error))

    docs_per_day = input.param("doc_per_day", 49)
    years = input.param("years", 2)
    bucket_name = input.param("bucket_name", "default")
    bucket_port = input.param("bucket_port", None)
    bucket_sasl_pass = input.param("bucket_sasl_pass", None)
    flag = input.param("flags", 0)
    to_directory = input.param("to_dir", '')
    loader_type = input.param("loader_type", 'default')

    if to_directory:
        loader = DocLoaderDirectory(input.servers[0], to_directory, bucket_name)
        generators_load = loader.generate_docs(docs_per_day, years)
        loader.load(generators_load)
    else:
        cluster = Cluster()
        try:
            bucket = initialize_bucket(bucket_name, bucket_port, bucket_sasl_pass)
            if loader_type == 'default':
                loader = DocLoaderCouchbase(input.servers, cluster)
            elif loader_type == 'join':
                loader = JoinDocLoader(input.servers, cluster)
            generators_load = loader.generate_docs(docs_per_day, years)
            loader.load(generators_load, bucket, flag=flag)
        finally:
            cluster.shutdown()

# Script entry point: only run the loader when executed directly.
if __name__ == "__main__":
    main()
Exemple #30
0
    def setUp(self):
        self.failover_util = failover_utils()
        self.node_util = node_utils()
        self.views_util = views_utils()

        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.primary_index_created = False
        self.use_sdk_client = self.input.param("use_sdk_client", False)
        self.analytics = self.input.param("analytics", False)
        if self.input.param("log_level", None):
            log.setLevel(level=0)
            for hd in log.handlers:
                if str(hd.__class__).find('FileHandler') != -1:
                    hd.setLevel(level=logging.DEBUG)
                else:
                    hd.setLevel(level=getattr(
                        logging, self.input.param("log_level", None)))
        self.servers = self.input.servers
        if str(self.__class__).find('moxitests') != -1:
            self.moxi_server = self.input.moxis[0]
            self.servers = [
                server for server in self.servers
                if server.ip != self.moxi_server.ip
            ]
        self.buckets = []
        self.bucket_base_params = {}
        self.bucket_base_params['membase'] = {}
        self.master = self.servers[0]
        self.bucket_util = bucket_utils(self.master)
        self.cluster_util = cluster_utils(self.master)
        self.indexManager = self.servers[0]
        if not hasattr(self, 'cluster'):
            self.cluster = Cluster()
        self.pre_warmup_stats = {}
        self.cleanup = False
        self.nonroot = False
        shell = RemoteMachineShellConnection(self.master)
        self.os_info = shell.extract_remote_info().type.lower()
        if self.os_info != 'windows':
            if self.master.ssh_username != "root":
                self.nonroot = True
        shell.disconnect()
        """ some tests need to bypass checking cb server at set up
            to run installation """
        self.skip_init_check_cbserver = \
            self.input.param("skip_init_check_cbserver", False)
        self.data_collector = DataCollector()
        self.data_analyzer = DataAnalyzer()
        self.result_analyzer = DataAnalysisResultAnalyzer()
        #         self.set_testrunner_client()
        self.change_bucket_properties = False
        self.cbas_node = self.input.cbas
        self.cbas_servers = []
        self.kv_servers = []
        self.otpNodes = []
        for server in self.servers:
            if "cbas" in server.services:
                self.cbas_servers.append(server)
            if "kv" in server.services:
                self.kv_servers.append(server)
        if not self.cbas_node and len(self.cbas_servers) >= 1:
            self.cbas_node = self.cbas_servers[0]

        try:
            self.skip_setup_cleanup = self.input.param("skip_setup_cleanup",
                                                       False)
            self.vbuckets = self.input.param("vbuckets", 1024)
            self.upr = self.input.param("upr", None)
            # ----------------------------------------------------------------
            # Test-input parameter extraction: each self.input.param(name,
            # default) returns the value configured for this test run (conf
            # file / command line) or the supplied default.
            # NOTE(review): this is the middle of a larger setUp(); the method
            # header and any trailing statements are outside this view.
            # ----------------------------------------------------------------
            self.index_quota_percent = self.input.param(
                "index_quota_percent", None)
            self.targetIndexManager = self.input.param("targetIndexManager",
                                                       False)
            self.targetMaster = self.input.param("targetMaster", False)
            self.reset_services = self.input.param("reset_services", False)
            self.auth_mech = self.input.param("auth_mech", "PLAIN")
            self.wait_timeout = self.input.param("wait_timeout", 60)
            # number of case that is performed from testrunner( increment each time)
            self.case_number = self.input.param("case_number", 0)
            self.default_bucket = self.input.param("default_bucket", True)
            self.parallelism = self.input.param("parallelism", False)
            if self.default_bucket:
                self.default_bucket_name = "default"
            self.standard_buckets = self.input.param("standard_buckets", 0)
            self.sasl_buckets = self.input.param("sasl_buckets", 0)
            self.num_buckets = self.input.param("num_buckets", 0)
            self.verify_unacked_bytes = self.input.param(
                "verify_unacked_bytes", False)
            self.memcached_buckets = self.input.param("memcached_buckets", 0)
            self.enable_flow_control = self.input.param(
                "enable_flow_control", False)
            # default_bucket is a bool, so it contributes 1 (True) or 0
            # (False) to this arithmetic sum of bucket counts.
            self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets + self.memcached_buckets
            self.num_servers = self.input.param("servers", len(self.servers))
            # initial number of items in the cluster
            self.nodes_init = self.input.param("nodes_init", 1)
            self.nodes_in = self.input.param("nodes_in", 1)
            self.nodes_out = self.input.param("nodes_out", 1)
            self.services_init = self.input.param("services_init", None)
            self.services_in = self.input.param("services_in", None)
            self.forceEject = self.input.param("forceEject", False)
            self.force_kill_memcached = TestInputSingleton.input.param(
                'force_kill_memcached', False)
            self.num_items = self.input.param("items", 1000)
            self.value_size = self.input.param("value_size", 512)
            self.dgm_run = self.input.param("dgm_run", False)
            self.active_resident_threshold = int(
                self.input.param("active_resident_threshold", 0))
            # max items number to verify in ValidateDataTask, None - verify all
            self.max_verify = self.input.param("max_verify", None)
            # we don't change consistent_view on server by default
            self.disabled_consistent_view = self.input.param(
                "disabled_consistent_view", None)
            self.rebalanceIndexWaitingDisabled = self.input.param(
                "rebalanceIndexWaitingDisabled", None)
            self.rebalanceIndexPausingDisabled = self.input.param(
                "rebalanceIndexPausingDisabled", None)
            self.maxParallelIndexers = self.input.param(
                "maxParallelIndexers", None)
            self.maxParallelReplicaIndexers = self.input.param(
                "maxParallelReplicaIndexers", None)
            self.quota_percent = self.input.param("quota_percent", None)
            self.port = None
            self.log_message = self.input.param("log_message", None)
            self.log_info = self.input.param("log_info", None)
            self.log_location = self.input.param("log_location", None)
            self.stat_info = self.input.param("stat_info", None)
            self.port_info = self.input.param("port_info", None)
            # skip_buckets_handle may already have been set by a subclass'
            # setUp before calling super(); only read the param if not.
            if not hasattr(self, 'skip_buckets_handle'):
                self.skip_buckets_handle = self.input.param(
                    "skip_buckets_handle", False)
            self.nodes_out_dist = self.input.param("nodes_out_dist", None)
            self.absolute_path = self.input.param("absolute_path", True)
            self.test_timeout = self.input.param(
                "test_timeout", 3600)  # kill hang test and jump to next one.
            self.enable_bloom_filter = self.input.param(
                "enable_bloom_filter", False)
            self.enable_time_sync = self.input.param("enable_time_sync", False)
            self.gsi_type = self.input.param("gsi_type", 'plasma')
            # bucket parameters go here,
            self.bucket_size = self.input.param("bucket_size", None)
            self.bucket_type = self.input.param("bucket_type", 'membase')
            self.num_replicas = self.input.param("replicas", 1)
            self.enable_replica_index = self.input.param("index_replicas", 1)
            self.eviction_policy = self.input.param(
                "eviction_policy", 'valueOnly')  # or 'fullEviction'
            # for ephemeral bucket is can be noEviction or nruEviction
            if self.bucket_type == 'ephemeral' and self.eviction_policy == 'valueOnly':
                # use the ephemeral bucket default
                self.eviction_policy = 'noEviction'

            # for ephemeral buckets it
            self.sasl_password = self.input.param("sasl_password", 'password')
            self.lww = self.input.param(
                "lww", False
            )  # only applies to LWW but is here because the bucket is created here
            self.maxttl = self.input.param("maxttl", None)
            self.compression_mode = self.input.param("compression_mode",
                                                     'passive')
            self.sdk_compression = self.input.param("sdk_compression", True)
            self.sasl_bucket_name = "bucket"
            self.sasl_bucket_priority = self.input.param(
                "sasl_bucket_priority", None)
            self.standard_bucket_priority = self.input.param(
                "standard_bucket_priority", None)
            # end of bucket parameters spot (this is ongoing)

            # Fast path: when setup/cleanup is skipped entirely, just record
            # the buckets already present on the cluster and return.
            if self.skip_setup_cleanup:
                self.buckets = BucketHelper(self.master).get_buckets()
                return
            if not self.skip_init_check_cbserver:
                self.cb_version = None
                if RestHelper(RestConnection(
                        self.master)).is_ns_server_running():
                    """ since every new couchbase version, there will be new features
                        that test code will not work on previous release.  So we need
                        to get couchbase version to filter out those tests. """
                    self.cb_version = RestConnection(
                        self.master).get_nodes_version()
                else:
                    log.info("couchbase server does not run yet")
                self.protocol = self.get_protocol_type()
            self.services_map = None
            # Priority params are colon-separated strings, e.g. "high:low";
            # split them into lists so one entry can apply per bucket.
            if self.sasl_bucket_priority is not None:
                self.sasl_bucket_priority = self.sasl_bucket_priority.split(
                    ":")
            if self.standard_bucket_priority is not None:
                self.standard_bucket_priority = self.standard_bucket_priority.split(
                    ":")

            log.info("==============  basetestcase setup was started for test #{0} {1}==============" \
                          .format(self.case_number, self._testMethodName))
            if not self.skip_buckets_handle and not self.skip_init_check_cbserver:
                self._cluster_cleanup()

            # Base keyword arguments shared by all bucket types; per-type
            # copies below only differ in 'bucket_type' (and later 'size').
            shared_params = self._create_bucket_params(
                server=self.master,
                size=self.bucket_size,
                replicas=self.num_replicas,
                enable_replica_index=self.enable_replica_index,
                eviction_policy=self.eviction_policy,
                bucket_priority=None,
                lww=self.lww,
                maxttl=self.maxttl,
                compression_mode=self.compression_mode)

            membase_params = copy.deepcopy(shared_params)
            membase_params['bucket_type'] = 'membase'
            self.bucket_base_params['membase'][
                'non_ephemeral'] = membase_params

            membase_ephemeral_params = copy.deepcopy(shared_params)
            membase_ephemeral_params['bucket_type'] = 'ephemeral'
            self.bucket_base_params['membase'][
                'ephemeral'] = membase_ephemeral_params

            memcached_params = copy.deepcopy(shared_params)
            memcached_params['bucket_type'] = 'memcached'
            self.bucket_base_params['memcached'] = memcached_params

            # avoid any cluster operations in setup for new upgrade
            #  & upgradeXDCR tests
            if str(self.__class__).find('newupgradetests') != -1 or \
                            str(self.__class__).find('upgradeXDCR') != -1 or \
                            str(self.__class__).find('Upgrade_EpTests') != -1 or \
                            hasattr(self, 'skip_buckets_handle') and \
                            self.skip_buckets_handle:
                log.info("any cluster operation in setup will be skipped")
                self.primary_index_created = True
                log.info("==============  basetestcase setup was finished for test #{0} {1} ==============" \
                              .format(self.case_number, self._testMethodName))
                return
            # avoid clean up if the previous test has been tear down
            # Convention used here: a case_number above 1000 marks that the
            # previous test's tearDown failed, so tearDownEverything() is
            # retried now and the offset is removed again.
            if self.case_number == 1 or self.case_number > 1000:
                if self.case_number > 1000:
                    log.warn(
                        "teardDown for previous test failed. will retry..")
                    self.case_number -= 1000
                self.cleanup = True
                if not self.skip_init_check_cbserver:
                    self.tearDownEverything()
                self.cluster = Cluster()
            if not self.skip_init_check_cbserver:
                log.info("initializing cluster")
                self.reset_cluster()
                # Services for the first (master) node only; get_services
                # returns a list, the first entry is a comma-separated string.
                master_services = self.get_services(self.servers[:1], \
                                                    self.services_init, \
                                                    start_node=0)
                if master_services != None:
                    master_services = master_services[0].split(",")

                self.quota = self._initialize_nodes(self.cluster, self.servers, \
                                                    self.disabled_consistent_view, \
                                                    self.rebalanceIndexWaitingDisabled, \
                                                    self.rebalanceIndexPausingDisabled, \
                                                    self.maxParallelIndexers, \
                                                    self.maxParallelReplicaIndexers, \
                                                    self.port, \
                                                    self.quota_percent, \
                                                    services=master_services)

                self.change_env_variables()
                self.change_checkpoint_params()

                # Add built-in user
                if not self.skip_init_check_cbserver:
                    self.add_built_in_server_user(node=self.master)
                log.info("done initializing cluster")
            else:
                self.quota = ""
            # Optional per-run overrides, each applied only when the
            # corresponding param was supplied.
            if self.input.param("log_info", None):
                self.change_log_info()
            if self.input.param("log_location", None):
                self.change_log_location()
            if self.input.param("stat_info", None):
                self.change_stat_info()
            if self.input.param("port_info", None):
                self.change_port_info()
            if self.input.param("port", None):
                self.port = str(self.input.param("port", None))
            try:
                # Certain test classes need ALL servers rebalanced into the
                # cluster up front; they are matched by class name below.
                if (str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1) or \
                        (str(self.__class__).find('memorysanitytests.MemorySanity') != -1) or \
                                str(self.__class__).find('negativetests.NegativeTests') != -1 or \
                                str(self.__class__).find('warmuptest.WarmUpTests') != -1 or \
                                str(self.__class__).find('failover.failovertests.FailoverTests') != -1 or \
                                str(self.__class__).find('observe.observeseqnotests.ObserveSeqNoTests') != -1 or \
                                str(self.__class__).find('epengine.lwwepengine.LWW_EP_Engine') != -1:

                    self.services = self.get_services(self.servers,
                                                      self.services_init)
                    # rebalance all nodes into the cluster before each test
                    self.cluster.rebalance(self.servers[:self.num_servers],
                                           self.servers[1:self.num_servers],
                                           [],
                                           services=self.services)
                elif self.nodes_init > 1 and not self.skip_init_check_cbserver:
                    # Otherwise rebalance in only the first nodes_init nodes.
                    self.services = self.get_services(
                        self.servers[:self.nodes_init], self.services_init)
                    self.cluster.rebalance(self.servers[:1], \
                                           self.servers[1:self.nodes_init], \
                                           [], services=self.services)
                elif str(self.__class__).find('ViewQueryTests') != -1 and \
                        not self.input.param("skip_rebalance", False):
                    self.services = self.get_services(self.servers,
                                                      self.services_init)
                    self.cluster.rebalance(self.servers,
                                           self.servers[1:], [],
                                           services=self.services)
                self.setDebugLevel(service_type="index")
            # NOTE(review): Python 2-only except syntax; under Python 3 this
            # would need "except BaseException as e".
            except BaseException, e:
                # increase case_number to retry tearDown in setup for the next test
                self.case_number += 1000
                self.fail(e)

            if self.dgm_run:
                self.quota = 256
            if self.total_buckets > 10:
                log.info("================== changing max buckets from 10 to {0} =================" \
                              .format(self.total_buckets))
                self.change_max_buckets(self, self.total_buckets)
            if self.total_buckets > 0 and not self.skip_init_check_cbserver:
                """ from sherlock, we have index service that could take some
                    RAM quota from total RAM quota for couchbase server.  We need
                    to get the correct RAM quota available to create bucket(s)
                    after all services were set """
                node_info = RestConnection(self.master).get_nodes_self()
                if node_info.memoryQuota and int(node_info.memoryQuota) > 0:
                    ram_available = node_info.memoryQuota
                else:
                    ram_available = self.quota
                if self.bucket_size is None:
                    if self.dgm_run:
                        """ if dgm is set,
                            we need to set bucket size to dgm setting """
                        self.bucket_size = self.quota
                    else:
                        # Divide the available RAM across all buckets.
                        self.bucket_size = self._get_bucket_size(ram_available, \
                                                                 self.total_buckets)

            # Propagate the computed bucket size into the per-type parameter
            # templates built earlier (they were created with size=None).
            self.bucket_base_params['membase']['non_ephemeral'][
                'size'] = self.bucket_size
            self.bucket_base_params['membase']['ephemeral'][
                'size'] = self.bucket_size
            self.bucket_base_params['memcached']['size'] = self.bucket_size

            # Upgrade tests create their buckets themselves; skip here.
            if str(self.__class__).find('upgrade_tests') == -1 and \
                            str(self.__class__).find('newupgradetests') == -1:
                self._bucket_creation()
            log.info("==============  basetestcase setup was finished for test #{0} {1} ==============" \
                          .format(self.case_number, self._testMethodName))

            if not self.skip_init_check_cbserver:
                self._log_start(self)
                self.sleep(10)