Example #1
0
 def setUp(self):
     """Prepare an autofailover test.

     Reads test params, connects to the orchestrator, starts the
     failover and node-failure-detector task managers, builds the doc
     generators, performs the initial load plus async update/delete
     loads, and records the servers to fail, add and remove.
     """
     super(AutoFailoverBaseTest, self).setUp()
     self._get_params()
     self.rest = RestConnection(self.orchestrator)
     # Background executor for failover-related tasks.
     self.task_manager = TaskManager("Autofailover_thread")
     self.task_manager.start()
     # Separate executor used for node-failure detection/injection.
     self.node_failure_task_manager = TaskManager(
         "Nodes_failure_detector_thread")
     self.node_failure_task_manager.start()
     # All three generators share the same key prefix, so the update
     # and delete loads target keys written by the initial load.
     self.initial_load_gen = BlobGenerator('auto-failover',
                                           'auto-failover-',
                                           self.value_size,
                                           end=self.num_items)
     self.update_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          end=self.update_items)
     self.delete_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          start=self.update_items,
                                          end=self.delete_items)
     # Initial load is synchronous; update/delete loads keep running
     # asynchronously for the duration of the test.
     self._load_all_buckets(self.servers[0], self.initial_load_gen,
                            "create", 0)
     self._async_load_all_buckets(self.orchestrator,
                                  self.update_load_gen, "update", 0)
     self._async_load_all_buckets(self.orchestrator,
                                  self.delete_load_gen, "delete", 0)
     self.server_to_fail = self._servers_to_fail()
     # Slices of self.servers used by later rebalance in/out steps.
     self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                                        self.nodes_in]
     self.servers_to_remove = self.servers[self.nodes_init -
                                           self.nodes_out:self.nodes_init]
Example #2
0
    def test_kill_indexer_create_drop_indexes_simple(self):
        """Create indexes while the indexer process is repeatedly killed,
        then validate that index state survives.

        Flow: start task managers -> pre-create indexes -> start the doc
        load thread -> launch a large batch of index creates while a
        background thread kills the indexer in a loop -> stop everything
        -> wait for indexes online and validate.
        """
        self.test_fail = False
        self.concur_system_failure = self.input.param("concur_system_failure", False)
        self.errors = []
        # Executors for index DDL, failure injection and SDK doc loading.
        self.index_create_task_manager = TaskManager(
            "index_create_task_manager")
        self.index_create_task_manager.start()
        self.system_failure_task_manager = TaskManager(
            "system_failure_detector_thread")
        self.system_failure_task_manager.start()
        self.sdk_loader_manager = TaskManager(
            "sdk_loader_manager")
        self.sdk_loader_manager.start()
        # Scale the test timeout with the number of index nodes when a
        # fixed number of failure iterations is requested.
        if self.num_failure_iteration:
            self.test_timeout = self.failure_timeout * len(self.index_nodes)

        self._prepare_collection_for_indexing(num_scopes=self.num_scopes, num_collections=self.num_collections)
        self.run_tasks = True

        # Build the initial set of indexes synchronously.
        index_create_tasks = self.create_indexes(num=self.num_pre_indexes)
        for task in index_create_tasks:
            task.result()

        # Document load runs in the background for the rest of the test.
        load_doc_thread = threading.Thread(name="load_doc_thread",
                                           target=self.load_docs)
        load_doc_thread.start()

        self.sleep(60, "sleeping for 60 sec for index to start processing docs")

        # The dgm check is advisory: a miss is only logged, not fatal.
        if not self.check_if_indexes_in_dgm():
            self.log.error("indexes not in dgm even after {}".format(self.dgm_check_timeout))

        # Kick off a long-running batch of index creates...
        index_create_tasks = self.create_indexes(itr=300, num=25)

        # ...while a background thread repeatedly kills the indexer
        # process on one index node (controlled via self.kill_index).
        self.kill_index = True
        index_node = self.get_nodes_from_services_map(service_type="index")
        system_failure_thread = threading.Thread(name="kill_indexer_thread",
                                                 target=self._kill_all_processes_index_with_sleep,
                                                 args=(index_node, 1, 600))
        system_failure_thread.start()

        for task in index_create_tasks:
            task.result()

        # Stop the chaos and shut all helpers down before validation.
        self.kill_index = False
        self.index_ops_obj.update_stop_create_index(True)
        self.kill_loader_process()
        self.sdk_loader_manager.shutdown(True)
        self.index_create_task_manager.shutdown(True)
        self.system_failure_task_manager.shutdown(True)
        system_failure_thread.join()

        self.wait_until_indexes_online()
        self.sleep(120, "sleep for 120 secs before validation")
        self.verify_index_ops_obj()

        self.n1ql_helper.drop_all_indexes_on_keyspace()

        if self.index_ops_obj.get_errors():
            self.fail(str(self.index_ops_obj.get_errors()))
Example #3
0
    def test_shard_json_corruption(self):
        """Inject a system failure during mutation processing, restart the
        indexer, and verify that indexes and shard files are cleaned up.

        Passes when every index from the create-list has been dropped and
        "shard1" no longer exists on disk on the first index node.
        """
        self.test_fail = False
        self.concur_system_failure = self.input.param("concur_system_failure", False)
        self.errors = []
        # Executors for index DDL, failure injection and SDK doc loading.
        self.index_create_task_manager = TaskManager(
            "index_create_task_manager")
        self.index_create_task_manager.start()
        self.system_failure_task_manager = TaskManager(
            "system_failure_detector_thread")
        self.system_failure_task_manager.start()
        self.sdk_loader_manager = TaskManager(
            "sdk_loader_manager")
        self.sdk_loader_manager.start()
        # Scale the test timeout with the number of index nodes when a
        # fixed number of failure iterations is requested.
        if self.num_failure_iteration:
            self.test_timeout = self.failure_timeout * len(self.index_nodes)

        self._prepare_collection_for_indexing(num_scopes=self.num_scopes, num_collections=self.num_collections)
        self.run_tasks = True

        # Build the initial set of indexes synchronously.
        index_create_tasks = self.create_indexes(num=self.num_pre_indexes)
        for task in index_create_tasks:
            task.result()

        # Document load runs in the background while indexes catch up.
        load_doc_thread = threading.Thread(name="load_doc_thread",
                                           target=self.load_docs)
        load_doc_thread.start()

        self.sleep(60, "sleeping for 60 sec for index to start processing docs")

        self.kill_loader_process()
        self.wait_for_mutation_processing(self.index_nodes)

        # Inject the configured failure and let the indexer run against
        # it for a fixed settling period.
        self.induce_schedule_system_failure(self.failure_map[self.system_failure]["failure_task"])
        self.sleep(90, "sleeping for 90 secs for mutation processing during system failure")

        # Restart the indexer so it recovers from the corrupted state.
        remote = RemoteMachineShellConnection(self.index_nodes[0])
        remote.terminate_process(process_name="indexer")
        self.sleep(60, "sleeping for 60 sec for indexer to come back")

        self.index_ops_obj.update_stop_create_index(True)
        self.sdk_loader_manager.shutdown(True)
        self.index_create_task_manager.shutdown(True)
        self.system_failure_task_manager.shutdown(True)

        self.wait_until_indexes_online()
        # All indexes from the create list are expected to be gone.
        indexes_created = self.check_if_indexes_not_created(self.index_ops_obj.get_create_index_list())
        if indexes_created:
            self.fail(f'{indexes_created} are not dropped')

        # The shard file itself must have been removed from disk.
        if self.check_if_shard_exists("shard1", self.index_nodes[0]):
            self.fail('shard1 is not cleaned on disk')
Example #4
0
 def setUp(self):
     """Prepare an autofailover test with an optional explicit choice of
     which server(s) to fail.

     ``server_index_to_fail`` may be a plain index or a colon-separated
     string of indexes into ``self.servers``; when absent, the servers
     to fail are derived from the generic test params.
     """
     super(AutoFailoverBaseTest, self).setUp()
     self._get_params()
     self.rest = RestConnection(self.orchestrator)
     # Background executor for failover-related tasks.
     self.task_manager = TaskManager("Autofailover_thread")
     self.task_manager.start()
     self.node_failure_task_manager = TaskManager(
         "Nodes_failure_detector_thread")
     self.node_failure_task_manager.start()
     # Generators share the same key prefix, so update/delete loads
     # target keys written by the initial load.
     self.initial_load_gen = BlobGenerator('auto-failover',
                                           'auto-failover-',
                                           self.value_size,
                                           end=self.num_items)
     self.update_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          end=self.update_items)
     self.delete_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          start=self.update_items,
                                          end=self.delete_items)
     # NOTE(review): data is loaded only when ``skip_load`` is truthy,
     # which reads inverted relative to the param name — confirm whether
     # this was meant to be ``if not self.skip_load:``.
     if self.skip_load:
         self._load_all_buckets(self.servers[0], self.initial_load_gen,
                                "create", 0)
         self._async_load_all_buckets(self.orchestrator,
                                      self.update_load_gen, "update", 0)
         self._async_load_all_buckets(self.orchestrator,
                                      self.delete_load_gen, "delete", 0)
     self.server_index_to_fail = self.input.param("server_index_to_fail",
                                                  None)
     if self.server_index_to_fail is None:
         self.server_to_fail = self._servers_to_fail()
     else:
         # Either a "1:3"-style string (several nodes) or a single int.
         if isinstance(self.server_index_to_fail, str):
             self.server_to_fail = [
                 self.servers[int(node_item)]
                 for node_item in self.server_index_to_fail.split(":")
             ]
         else:
             self.server_to_fail = [self.servers[self.server_index_to_fail]]
     # Slices of self.servers used by later rebalance in/out steps.
     self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                        self.nodes_in]
     self.servers_to_remove = self.servers[self.nodes_init -
                                           self.nodes_out:self.nodes_init]
Example #5
0
 def setUp(self):
     """Prepare an aborts-during-rebalance autofailover test.

     On top of the base setup this creates the additional buckets (one
     bucket already exists from the base class), waits for memcached on
     each, and performs the initial plus async data loads.
     """
     super(AutoFailoverAbortsRebalance, self).setUp()
     self.master = self.servers[0]
     self._get_params()
     self.rest = RestConnection(self.orchestrator)
     self.task_manager = TaskManager("Autofailover_thread")
     self.task_manager.start()
     self.node_failure_task_manager = TaskManager(
         "Nodes_failure_detector_thread")
     self.node_failure_task_manager.start()
     node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
     self.num_buckets = self.num_buckets - 1  # this is done as default is created by base class
     if self.num_buckets:
         BucketOperationHelper.create_multiple_buckets(
             self.master,
             self.num_replicas,
             node_ram_ratio * (2.0 / 3.0),
             howmany=self.num_buckets)
     self.buckets = self.rest.get_buckets()
     # Every bucket must have memcached ready before loading docs.
     for bucket in self.buckets:
         ready = BucketOperationHelper.wait_for_memcached(
             self.master, bucket.name)
         self.assertTrue(ready, "wait_for_memcached failed")
     # Generators share the same key prefix, so update/delete loads
     # target keys written by the initial load.
     self.initial_load_gen = BlobGenerator('auto-failover',
                                           'auto-failover-',
                                           self.value_size,
                                           end=self.num_items)
     self.update_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          end=self.update_items)
     self.delete_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          start=self.update_items,
                                          end=self.delete_items)
     # Initial load is synchronous; update/delete loads run async.
     self._load_all_buckets(self.servers[0], self.initial_load_gen,
                            "create", 0)
     self._async_load_all_buckets(self.orchestrator, self.update_load_gen,
                                  "update", 0)
     self._async_load_all_buckets(self.orchestrator, self.delete_load_gen,
                                  "delete", 0)
Example #6
0
    def test_autocompaction_forestdb(self):
        """Run create/drop index threads alongside a doc load and verify
        that forestdb auto-compaction triggers while they run.

        The verify_fdb_compaction worker sets ``self.test_fail`` when
        compaction did not trigger the expected number of times.
        """
        self.run_tasks = True
        self.test_fail = False
        self.index_create_task_manager = TaskManager(
            "index_create_task_manager")
        self.index_create_task_manager.start()

        self._prepare_collection_for_indexing(num_scopes=self.num_scopes, num_collections=self.num_collections)

        # Background SDK document load for the duration of the test.
        sdk_data_loader = SDKDataLoader(start_seq_num=self.start_doc, num_ops=self.num_items_in_collection,
                                        percent_create=self.percent_create,
                                        percent_update=self.percent_update, percent_delete=self.percent_delete,
                                        all_collections=self.all_collections, timeout=self.test_timeout,
                                        json_template=self.dataset_template)

        self.data_ops_javasdk_loader_in_batches(sdk_data_loader, self.batch_size)

        # Worker threads: index creation, index drops and the compaction
        # verifier, all gated by self.run_tasks.
        create_thread = threading.Thread(name="create_thread",
                                         target=self.create_indexes,
                                         args=(self.num_of_indexes, False))
        self.tasks.append(create_thread)

        drop_thread = threading.Thread(name="drop_thread",
                                       target=self.drop_indexes,
                                       args=[self.drop_sleep])
        self.tasks.append(drop_thread)

        # Local renamed so it no longer shadows the target method name.
        fdb_compaction_thread = threading.Thread(name="verify_fdb_compaction",
                                                 target=self.verify_fdb_compaction)
        self.tasks.append(fdb_compaction_thread)

        # self.run_tasks is already True (set at the top); the redundant
        # second assignment that used to sit here was removed.
        for task in self.tasks:
            task.start()

        self.sleep(self.test_timeout)

        # Signal all worker loops to finish, then wait for them.
        self.run_tasks = False

        for task in self.tasks:
            task.join()

        self.index_ops_obj.update_stop_create_index(True)
        self.index_create_task_manager.shutdown(True)

        if self.test_fail:
            self.fail("Auto compaction did not trigger for expected number of times")
Example #7
0
 def __init__(self, host, logger):
     """Create a client helper for an Elasticsearch node.

     :param host: server object exposing ``.ip`` and ``.port``, used to
         build the base connection URL
     :param logger: logger used by this helper
     """
     self.__log = logger
     self.__host = host
     self.__document = {}      # per-index data, populated by later calls
     self.__mapping = {}       # index mapping definitions
     self.__STATUSOK = 200     # HTTP status treated as success
     self.__indices = []       # names of indices known to this client
     self.__index_types = {}   # index -> type information
     self.__connection_url = 'http://{0}:{1}/'.format(
         self.__host.ip, self.__host.port)
     self.es_queries = []
     # Dedicated executor for ES background tasks.
     self.task_manager = TaskManager("ES_Thread")
     self.task_manager.start()
     # NOTE(review): this stores the httplib2.Http *class*, not an
     # instance — callers must invoke self.http() to get a connection
     # object. If an instance was intended, "()" is missing here;
     # confirm against the call sites.
     self.http = httplib2.Http
Example #8
0
 def tearDown(self):
     """Tear down an autofailover test: undo failure conditions, reset
     autofailover settings and clean the cluster.

     A fresh task manager is created here — presumably the one from
     setUp may already have been shut down by the test body (confirm).
     """
     self.log.info("============AutoFailoverBaseTest teardown============")
     self._get_params()
     self.task_manager = TaskManager("Autofailover_thread")
     self.task_manager.start()
     self.server_to_fail = self._servers_to_fail()
     # Undo the induced failures: restart the server processes and lift
     # any firewall rules, then reset/disable autofailover.
     self.start_couchbase_server()
     self.sleep(10)
     self.disable_firewall()
     self.rest = RestConnection(self.orchestrator)
     self.rest.reset_autofailover()
     self.disable_autofailover()
     self._cleanup_cluster()
     super(AutoFailoverBaseTest, self).tearDown()
     # Surface any exception captured by the background node monitor.
     # NOTE(review): self.fail() here runs after super().tearDown(), so
     # cleanup has already completed when the failure is reported.
     if hasattr(self, "node_monitor_task"):
         if self.node_monitor_task._exception:
             self.fail("{}".format(self.node_monitor_task._exception))
         self.node_monitor_task.stop = True
     self.task_manager.shutdown(force=True)
Example #9
0
 def __init__(self, task_manager=None):
     """Initialise the cluster helper with its task managers.

     :param task_manager: optional jython task manager; when omitted a
         fresh one is created for this instance.

     The previous signature used ``task_manager=jython_task_manager()``,
     which Python evaluates once at function-definition time, so every
     caller that omitted the argument silently shared a single manager
     instance. Using a ``None`` sentinel creates one per instance.
     """
     self.task_manager = TaskManager("Cluster_Thread")
     if task_manager is None:
         task_manager = jython_task_manager()
     self.jython_task_manager = task_manager
Example #10
0
 def __init__(self):
     """Create the default task manager and start its worker thread."""
     manager = TaskManager()
     manager.start()
     self.task_manager = manager
Example #11
0
 def __init__(self):
     """Create the cluster task manager and start its worker thread."""
     manager = TaskManager("Cluster_Thread")
     manager.start()
     self.task_manager = manager
Example #12
0
    def test_system_failure_create_drop_indexes_simple(self):
        """Exercise simple index operations around an injected system
        failure, then validate index state.

        The failure runs either concurrently (``concur_system_failure``)
        or inline with an explicit recover step. Optional simple
        create/drop/scan/kill steps are toggled via ``self.simple_*``
        params.
        """
        self.test_fail = False
        self.concur_system_failure = self.input.param("concur_system_failure", False)
        self.errors = []
        # Executors for index DDL, failure injection and SDK doc loading.
        self.index_create_task_manager = TaskManager(
            "index_create_task_manager")
        self.index_create_task_manager.start()
        self.system_failure_task_manager = TaskManager(
            "system_failure_detector_thread")
        self.system_failure_task_manager.start()
        self.sdk_loader_manager = TaskManager(
            "sdk_loader_manager")
        self.sdk_loader_manager.start()
        # Scale the test timeout with the number of index nodes when a
        # fixed number of failure iterations is requested.
        if self.num_failure_iteration:
            self.test_timeout = self.failure_timeout * len(self.index_nodes)

        self._prepare_collection_for_indexing(num_scopes=self.num_scopes, num_collections=self.num_collections)
        self.run_tasks = True

        # Build the initial set of indexes synchronously.
        index_create_tasks = self.create_indexes(num=self.num_pre_indexes)
        for task in index_create_tasks:
            task.result()

        # Document load runs in the background for the rest of the test.
        load_doc_thread = threading.Thread(name="load_doc_thread",
                                           target=self.load_docs)
        load_doc_thread.start()

        self.sleep(60, "sleeping for 60 sec for index to start processing docs")

        # The dgm check is advisory: a miss is only logged, not fatal.
        if not self.check_if_indexes_in_dgm():
            self.log.error("indexes not in dgm even after {}".format(self.dgm_check_timeout))

        # Inject the system failure either in a background thread or
        # inline with a fixed settling period.
        if self.concur_system_failure:
            system_failure_thread = threading.Thread(name="system_failure_thread",
                                                     target=self.induce_schedule_system_failure,
                                                     args=[self.failure_map[self.system_failure]["failure_task"]])
            system_failure_thread.start()
            self.sleep(20)
        else:
            self.induce_schedule_system_failure(self.failure_map[self.system_failure]["failure_task"])
            # Message fixed to match the actual 90-second sleep.
            self.sleep(90, "sleeping for 90 secs for mutation processing during system failure")

        if self.simple_create_index:
            index_create_tasks = self.create_indexes(num=1,defer_build=False,itr=300,
                                                     expected_failure=self.failure_map[self.system_failure]["expected_failure"])
            for task in index_create_tasks:
                task.result()
            self.sleep(60, "sleeping for 1 min after creation of indexes")

        if self.simple_drop_index:
            task_thread = threading.Thread(name="drop_thread",
                                           target=self.drop_indexes,
                                           args=(2, False))
            task_thread.start()
            self.sleep(15, "sleeping for 15 sec")
            self.run_tasks = False
            task_thread.join()

        if self.simple_scan_index:
            self.run_tasks = True
            task_thread = threading.Thread(name="scan_thread",
                                           target=self.scan_indexes)
            task_thread.start()
            # Message fixed: the sleep is 30 seconds, not 10.
            self.sleep(30, "sleeping for 30 sec")
            self.run_tasks = False
            task_thread.join()

        if self.simple_kill_indexer:
            remote = RemoteMachineShellConnection(self.index_nodes[0])
            remote.terminate_process(process_name="indexer")
            self.sleep(60, "sleeping for 60 sec for indexer to come back")

        if self.simple_kill_memcached:
            remote = RemoteMachineShellConnection(self.data_nodes[1])
            remote.kill_memcached()
            self.sleep(60, "sleeping for 60 sec for memcached to come back")

        # Recover: join the concurrent failure thread, or schedule the
        # explicit recover task for the inline path.
        if self.concur_system_failure:
            system_failure_thread.join()
        else:
            self.induce_schedule_system_failure(self.failure_map[self.system_failure]["recover_task"])
        self.index_ops_obj.update_stop_create_index(True)
        self.kill_loader_process()
        self.sdk_loader_manager.shutdown(True)
        self.index_create_task_manager.shutdown(True)
        self.system_failure_task_manager.shutdown(True)

        self.wait_until_indexes_online()
        self.sleep(120, "sleep for 120 secs before validation")
        self.verify_index_ops_obj()

        self.n1ql_helper.drop_all_indexes_on_keyspace()

        if self.index_ops_obj.get_errors():
            self.fail(str(self.index_ops_obj.get_errors()))
Example #13
0
    def test_system_failure_create_drop_indexes(self):
        """Run concurrent index create/drop/build/scan workloads while a
        scheduled system failure runs in the background, then validate.

        Which workloads run is controlled by the ``self.concur_*``
        params; each enabled workload runs in its own thread for
        ``self.test_timeout`` seconds, gated by ``self.run_tasks``.
        """
        self.test_fail = False
        self.errors = []
        # Executors for index DDL, failure injection and SDK doc loading.
        self.index_create_task_manager = TaskManager(
            "index_create_task_manager")
        self.index_create_task_manager.start()
        self.system_failure_task_manager = TaskManager(
            "system_failure_detector_thread")
        self.system_failure_task_manager.start()
        self.sdk_loader_manager = TaskManager(
            "sdk_loader_manager")
        self.sdk_loader_manager.start()
        # Scale the test timeout with the number of index nodes when a
        # fixed number of failure iterations is requested.
        if self.num_failure_iteration:
            self.test_timeout = self.failure_timeout * len(self.index_nodes)

        self._prepare_collection_for_indexing(num_scopes=self.num_scopes, num_collections=self.num_collections)
        self.run_tasks = True

        # Build the initial set of indexes synchronously.
        index_create_tasks = self.create_indexes(num=self.num_pre_indexes)
        for task in index_create_tasks:
            task.result()

        # Document load runs in the background for the rest of the test.
        load_doc_thread = threading.Thread(name="load_doc_thread",
                                           target=self.load_docs)
        load_doc_thread.start()

        self.sleep(60, "sleeping for 60 sec for index to start processing docs")

        # The dgm check is advisory: a miss is only logged, not fatal.
        if not self.check_if_indexes_in_dgm():
            self.log.error("indexes not in dgm even after {}".format(self.dgm_check_timeout))

        # Assemble the optional concurrent workloads.
        if self.concur_create_indexes:
            # NOTE(review): positional args ("" and 30) — verify they
            # match create_indexes' parameter order.
            create_thread = threading.Thread(name="create_thread",
                                             target=self.create_indexes,
                                             args=(self.num_of_indexes, "", 30))
            self.tasks.append(create_thread)

        if self.concur_drop_indexes:
            drop_thread = threading.Thread(name="drop_thread",
                                           target=self.drop_indexes,
                                           args=[self.drop_sleep])
            self.tasks.append(drop_thread)

        if self.concur_build_indexes:
            build_index_thread = threading.Thread(name="build_index_thread",
                                                  target=self.build_indexes)
            self.tasks.append(build_index_thread)

        if self.concur_scan_indexes:
            scan_thread = threading.Thread(name="scan_thread",
                                           target=self.scan_indexes)

            self.tasks.append(scan_thread)

        if self.concur_system_failure:
            system_failure_thread = threading.Thread(name="system_failure_thread",
                                                     target=self.schedule_system_failure)
            self.tasks.append(system_failure_thread)

        for task in self.tasks:
            task.start()

        # load_doc_thread was started earlier; appending it here (after
        # the start loop) only adds it to the join list below — confirm
        # this ordering is intentional.
        self.tasks.append(load_doc_thread)

        self.sleep(self.test_timeout)

        # Signal all worker loops to finish, then shut everything down.
        self.run_tasks = False

        self.index_ops_obj.update_stop_create_index(True)
        self.kill_loader_process()
        self.sdk_loader_manager.shutdown(True)
        self.index_create_task_manager.shutdown(True)
        self.system_failure_task_manager.shutdown(True)

        for task in self.tasks:
            task.join()

        self.wait_until_indexes_online()
        self.sleep(600, "sleep for 10 mins before validation")
        self.verify_index_ops_obj()

        self.n1ql_helper.drop_all_indexes_on_keyspace()

        if self.index_ops_obj.get_errors():
            self.fail(str(self.index_ops_obj.get_errors()))