def setUp(self):
     super(AutoFailoverBaseTest, self).setUp()
     self._get_params()
     self.rest = RestConnection(self.orchestrator)
     self.task_manager = TaskManager("Autofailover_thread")
     self.task_manager.start()
     self.node_failure_task_manager = TaskManager(
         "Nodes_failure_detector_thread")
     self.node_failure_task_manager.start()
     self.initial_load_gen = BlobGenerator('auto-failover',
                                           'auto-failover-',
                                           self.value_size,
                                           end=self.num_items)
     self.update_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          end=self.update_items)
     self.delete_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          start=self.update_items,
                                          end=self.delete_items)
     self._load_all_buckets(self.servers[0], self.initial_load_gen,
                            "create", 0)
     self._async_load_all_buckets(self.orchestrator,
                                  self.update_load_gen, "update", 0)
     self._async_load_all_buckets(self.orchestrator,
                                  self.delete_load_gen, "delete", 0)
     self.server_to_fail = self._servers_to_fail()
     self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                                        self.nodes_in]
     self.servers_to_remove = self.servers[self.nodes_init -
                                           self.nodes_out:self.nodes_init]
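Every setUp variant in these examples drives the same TaskManager lifecycle: construct with a thread name, start(), schedule() tasks onto it, and shutdown() from tearDown. The sketch below is a minimal, runnable stand-in for that lifecycle; MiniTaskManager is hypothetical, written only to illustrate the surface these examples use, not the testrunner class they import.

import queue
import threading
import time

class MiniTaskManager:
    """Illustrative stand-in mirroring the start/schedule/shutdown
    surface used in the examples; not the real testrunner TaskManager."""

    def __init__(self, thread_name):
        self._queue = queue.Queue()
        self._worker = threading.Thread(name=thread_name, target=self._run)

    def start(self):
        self._worker.start()

    def schedule(self, task, sleep_time=0):
        # A task is any callable here; testrunner schedules Task objects.
        self._queue.put((task, sleep_time))

    def _run(self):
        while True:
            task, delay = self._queue.get()
            if task is None:  # shutdown sentinel
                return
            time.sleep(delay)
            task()

    def shutdown(self, force=False):
        if force:  # drop pending tasks so the worker exits promptly
            try:
                while True:
                    self._queue.get_nowait()
            except queue.Empty:
                pass
        self._queue.put((None, 0))
        self._worker.join()

tm = MiniTaskManager("Autofailover_thread")
tm.start()
tm.schedule(lambda: print("task ran"))
tm.shutdown(force=True)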
    def test_autocompaction_forestdb(self):
        self.run_tasks = True
        self.test_fail = False
        self.index_create_task_manager = TaskManager(
            "index_create_task_manager")
        self.index_create_task_manager.start()

        self._prepare_collection_for_indexing(num_scopes=self.num_scopes, num_collections=self.num_collections)

        sdk_data_loader = SDKDataLoader(start_seq_num=self.start_doc, num_ops=self.num_items_in_collection,
                                        percent_create=self.percent_create,
                                        percent_update=self.percent_update, percent_delete=self.percent_delete,
                                        all_collections=self.all_collections, timeout=self.test_timeout,
                                        json_template=self.dataset_template)

        self.data_ops_javasdk_loader_in_batches(sdk_data_loader, self.batch_size)

        create_thread = threading.Thread(name="create_thread",
                                         target=self.create_indexes,
                                         args=(self.num_of_indexes, False))
        self.tasks.append(create_thread)

        drop_thread = threading.Thread(name="drop_thread",
                                       target=self.drop_indexes,
                                       args=[self.drop_sleep])
        self.tasks.append(drop_thread)

        verify_fdb_compaction = threading.Thread(name="verify_fdb_compaction",
                                                 target=self.verify_fdb_compaction)
        self.tasks.append(verify_fdb_compaction)

        self.run_tasks = True

        for task in self.tasks:
            task.start()

        self.sleep(self.test_timeout)

        self.run_tasks = False

        for task in self.tasks:
            task.join()

        self.index_ops_obj.update_stop_create_index(True)
        self.index_create_task_manager.shutdown(True)


        if self.test_fail:
            self.fail("Auto compaction did not trigger for expected number of times")
Example #4
 def __init__(self, host, logger):
      # host is given as an IP address
     self.__log = logger
     self.__host = host
     self.__document = {}
     self.__mapping = {}
     self.__STATUSOK = 200
     self.__indices = []
     self.__index_types = {}
     self.__connection_url = 'http://{0}:{1}/'.format(
         self.__host.ip, self.__host.port)
     self.es_queries = []
     self.task_manager = TaskManager("ES_Thread")
     self.task_manager.start()
      self.http = httplib2.Http()
 def setUp(self):
     super(AutoFailoverBaseTest, self).setUp()
     self._get_params()
     self.rest = RestConnection(self.orchestrator)
     self.task_manager = TaskManager("Autofailover_thread")
     self.task_manager.start()
     self.node_failure_task_manager = TaskManager(
         "Nodes_failure_detector_thread")
     self.node_failure_task_manager.start()
     self.initial_load_gen = BlobGenerator('auto-failover',
                                           'auto-failover-',
                                           self.value_size,
                                           end=self.num_items)
     self.update_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          end=self.update_items)
     self.delete_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          start=self.update_items,
                                          end=self.delete_items)
      if not self.skip_load:
         self._load_all_buckets(self.servers[0], self.initial_load_gen,
                                "create", 0)
         self._async_load_all_buckets(self.orchestrator,
                                      self.update_load_gen, "update", 0)
         self._async_load_all_buckets(self.orchestrator,
                                      self.delete_load_gen, "delete", 0)
     self.server_index_to_fail = self.input.param("server_index_to_fail",
                                                  None)
     if self.server_index_to_fail is None:
         self.server_to_fail = self._servers_to_fail()
     else:
         if isinstance(self.server_index_to_fail, str):
             self.server_to_fail = [
                 self.servers[int(node_item)]
                 for node_item in self.server_index_to_fail.split(":")
             ]
         else:
             self.server_to_fail = [self.servers[self.server_index_to_fail]]
     self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                        self.nodes_in]
     self.servers_to_remove = self.servers[self.nodes_init -
                                           self.nodes_out:self.nodes_init]
Example #6
 def tearDown(self):
     self.log.info("============AutoFailoverBaseTest teardown============")
     self._get_params()
     self.task_manager = TaskManager("Autofailover_thread")
     self.task_manager.start()
     self.server_to_fail = self._servers_to_fail()
     self.start_couchbase_server()
     self.sleep(10)
     self.disable_firewall()
     self.rest = RestConnection(self.orchestrator)
     self.rest.reset_autofailover()
     self.disable_autofailover()
     self._cleanup_cluster()
     super(AutoFailoverBaseTest, self).tearDown()
     if hasattr(self, "node_monitor_task"):
         if self.node_monitor_task._exception:
             self.fail("{}".format(self.node_monitor_task._exception))
         self.node_monitor_task.stop = True
     self.task_manager.shutdown(force=True)
Example #7
 def setUp(self):
     super(AutoFailoverAbortsRebalance, self).setUp()
     self.master = self.servers[0]
     self._get_params()
     self.rest = RestConnection(self.orchestrator)
     self.task_manager = TaskManager("Autofailover_thread")
     self.task_manager.start()
     self.node_failure_task_manager = TaskManager(
         "Nodes_failure_detector_thread")
     self.node_failure_task_manager.start()
     node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
      self.num_buckets = self.num_buckets - 1  # minus one: the base class already creates the default bucket
     if self.num_buckets:
         BucketOperationHelper.create_multiple_buckets(
             self.master,
             self.num_replicas,
             node_ram_ratio * (2.0 / 3.0),
             howmany=self.num_buckets)
     self.buckets = self.rest.get_buckets()
     for bucket in self.buckets:
         ready = BucketOperationHelper.wait_for_memcached(
             self.master, bucket.name)
         self.assertTrue(ready, "wait_for_memcached failed")
     self.initial_load_gen = BlobGenerator('auto-failover',
                                           'auto-failover-',
                                           self.value_size,
                                           end=self.num_items)
     self.update_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          end=self.update_items)
     self.delete_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          start=self.update_items,
                                          end=self.delete_items)
     self._load_all_buckets(self.servers[0], self.initial_load_gen,
                            "create", 0)
     self._async_load_all_buckets(self.orchestrator, self.update_load_gen,
                                  "update", 0)
     self._async_load_all_buckets(self.orchestrator, self.delete_load_gen,
                                  "delete", 0)
    def test_kill_indexer_create_drop_indexes_simple(self):
        self.test_fail = False
        self.concur_system_failure = self.input.param("concur_system_failure", False)
        self.errors = []
        self.index_create_task_manager = TaskManager(
            "index_create_task_manager")
        self.index_create_task_manager.start()
        self.system_failure_task_manager = TaskManager(
            "system_failure_detector_thread")
        self.system_failure_task_manager.start()
        self.sdk_loader_manager = TaskManager(
            "sdk_loader_manager")
        self.sdk_loader_manager.start()
        if self.num_failure_iteration:
            self.test_timeout = self.failure_timeout * len(self.index_nodes)

        self._prepare_collection_for_indexing(num_scopes=self.num_scopes, num_collections=self.num_collections)
        self.run_tasks = True

        index_create_tasks = self.create_indexes(num=self.num_pre_indexes)
        for task in index_create_tasks:
            task.result()

        load_doc_thread = threading.Thread(name="load_doc_thread",
                                           target=self.load_docs)
        load_doc_thread.start()

        self.sleep(60, "sleeping for 60 sec for index to start processing docs")

        if not self.check_if_indexes_in_dgm():
            self.log.error("indexes not in dgm even after {}".format(self.dgm_check_timeout))

        index_create_tasks = self.create_indexes(itr=300, num=25)

        self.kill_index = True
        index_node = self.get_nodes_from_services_map(service_type="index")
        system_failure_thread = threading.Thread(name="kill_indexer_thread",
                                                 target=self._kill_all_processes_index_with_sleep,
                                                 args=(index_node, 1, 600))
        system_failure_thread.start()

        for task in index_create_tasks:
            task.result()

        self.kill_index = False
        self.index_ops_obj.update_stop_create_index(True)
        self.kill_loader_process()
        self.sdk_loader_manager.shutdown(True)
        self.index_create_task_manager.shutdown(True)
        self.system_failure_task_manager.shutdown(True)
        system_failure_thread.join()

        self.wait_until_indexes_online()
        self.sleep(120, "sleep for 120 secs before validation")
        self.verify_index_ops_obj()

        self.n1ql_helper.drop_all_indexes_on_keyspace()

        if self.index_ops_obj.get_errors():
            self.fail(str(self.index_ops_obj.get_errors()))
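Note the barrier pattern above: create_indexes returns a list of task futures, and looping task.result() forces every index build to finish (or raise) before the indexer is killed. A minimal sketch of that futures-as-barrier idiom using only the standard library (illustrative, not the framework's task classes):

from concurrent.futures import ThreadPoolExecutor

def create_index(i):
    # Stand-in for issuing one CREATE INDEX statement.
    return "idx_%d" % i

with ThreadPoolExecutor(max_workers=4) as pool:
    index_create_tasks = [pool.submit(create_index, i) for i in range(8)]
    # Barrier: block until every index exists before injecting failures.
    created = [task.result() for task in index_create_tasks]
print(created)  # ['idx_0', 'idx_1', ..., 'idx_7']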
    def test_shard_json_corruption(self):
        self.test_fail = False
        self.concur_system_failure = self.input.param("concur_system_failure", False)
        self.errors = []
        self.index_create_task_manager = TaskManager(
            "index_create_task_manager")
        self.index_create_task_manager.start()
        self.system_failure_task_manager = TaskManager(
            "system_failure_detector_thread")
        self.system_failure_task_manager.start()
        self.sdk_loader_manager = TaskManager(
            "sdk_loader_manager")
        self.sdk_loader_manager.start()
        if self.num_failure_iteration:
            self.test_timeout = self.failure_timeout * len(self.index_nodes)

        self._prepare_collection_for_indexing(num_scopes=self.num_scopes, num_collections=self.num_collections)
        self.run_tasks = True

        index_create_tasks = self.create_indexes(num=self.num_pre_indexes)
        for task in index_create_tasks:
            task.result()

        load_doc_thread = threading.Thread(name="load_doc_thread",
                                           target=self.load_docs)
        load_doc_thread.start()

        self.sleep(60, "sleeping for 60 sec for index to start processing docs")

        #if not self.check_if_indexes_in_dgm():
            #self.log.error("indexes not in dgm even after {}".format(self.dgm_check_timeout))

        self.kill_loader_process()
        self.wait_for_mutation_processing(self.index_nodes)

        self.induce_schedule_system_failure(self.failure_map[self.system_failure]["failure_task"])
        self.sleep(90, "sleeping for 90 secs for mutation processing during system failure")

        remote = RemoteMachineShellConnection(self.index_nodes[0])
        remote.terminate_process(process_name="indexer")
        self.sleep(60, "sleeping for 60 sec for indexer to come back")

        self.index_ops_obj.update_stop_create_index(True)
        self.sdk_loader_manager.shutdown(True)
        self.index_create_task_manager.shutdown(True)
        self.system_failure_task_manager.shutdown(True)

        self.wait_until_indexes_online()
        indexes_created = self.check_if_indexes_not_created(self.index_ops_obj.get_create_index_list())
        if indexes_created:
            self.fail(f'{indexes_created} are not dropped')

        if self.check_if_shard_exists("shard1", self.index_nodes[0]):
            self.fail('shard1 is not cleaned on disk')
class AutoFailoverBaseTest(BaseTestCase):
    MAX_FAIL_DETECT_TIME = 120
    ORCHESTRATOR_TIMEOUT_BUFFER = 60

    def setUp(self):
        super(AutoFailoverBaseTest, self).setUp()
        self._get_params()
        self.rest = RestConnection(self.orchestrator)
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.node_failure_task_manager = TaskManager(
            "Nodes_failure_detector_thread")
        self.node_failure_task_manager.start()
        self.initial_load_gen = BlobGenerator('auto-failover',
                                              'auto-failover-',
                                              self.value_size,
                                              end=self.num_items)
        self.update_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             end=self.update_items)
        self.delete_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             start=self.update_items,
                                             end=self.delete_items)
        self._load_all_buckets(self.servers[0], self.initial_load_gen,
                               "create", 0)
        self._async_load_all_buckets(self.orchestrator, self.update_load_gen,
                                     "update", 0)
        self._async_load_all_buckets(self.orchestrator, self.delete_load_gen,
                                     "delete", 0)
        self.server_index_to_fail = self.input.param("server_index_to_fail",
                                                     None)
        if self.server_index_to_fail is None:
            self.server_to_fail = self._servers_to_fail()
        else:
            self.server_to_fail = [self.servers[self.server_index_to_fail]]
        self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                           self.nodes_in]
        self.servers_to_remove = self.servers[self.nodes_init -
                                              self.nodes_out:self.nodes_init]

    def bareSetUp(self):
        super(AutoFailoverBaseTest, self).setUp()
        self._get_params()
        self.rest = RestConnection(self.orchestrator)
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.node_failure_task_manager = TaskManager(
            "Nodes_failure_detector_thread")
        self.node_failure_task_manager.start()
        self.initial_load_gen = BlobGenerator('auto-failover',
                                              'auto-failover-',
                                              self.value_size,
                                              end=self.num_items)
        self.update_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             end=self.update_items)
        self.delete_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             start=self.update_items,
                                             end=self.delete_items)
        self.server_to_fail = self._servers_to_fail()
        self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                           self.nodes_in]
        self.servers_to_remove = self.servers[self.nodes_init -
                                              self.nodes_out:self.nodes_init]

    def tearDown(self):
        self.log.info("============AutoFailoverBaseTest teardown============")
        self._get_params()
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.server_to_fail = self._servers_to_fail()
        self.start_couchbase_server()
        self.sleep(10)
        self.disable_firewall()
        self.rest = RestConnection(self.orchestrator)
        self.rest.reset_autofailover()
        self.disable_autofailover()
        self._cleanup_cluster()
        super(AutoFailoverBaseTest, self).tearDown()
        if hasattr(self, "node_monitor_task"):
            if self.node_monitor_task._exception:
                self.fail("{}".format(self.node_monitor_task._exception))
            self.node_monitor_task.stop = True
        self.task_manager.shutdown(force=True)

    def shuffle_nodes_between_zones_and_rebalance(self, to_remove=None):
        """
        Shuffle the nodes present in the cluster if zone > 1. Rebalance the nodes in the end.
        Nodes are divided into groups iteratively i.e. 1st node in Group 1, 2nd in Group 2, 3rd in Group 1 and so on, when
        zone=2.
        :param to_remove: List of nodes to be removed.
        """
        if not to_remove:
            to_remove = []
        serverinfo = self.orchestrator
        rest = RestConnection(serverinfo)
        zones = ["Group 1"]
        nodes_in_zone = {"Group 1": [serverinfo.ip]}
        # Create zones, if not existing, based on params zone in test.
        # Shuffle the nodes between zones.
        if int(self.zone) > 1:
            for i in range(1, int(self.zone)):
                a = "Group "
                zones.append(a + str(i + 1))
                if not rest.is_zone_exist(zones[i]):
                    rest.add_zone(zones[i])
                nodes_in_zone[zones[i]] = []
            # Divide the nodes between zones.
            nodes_in_cluster = [
                node.ip for node in self.get_nodes_in_cluster()
            ]
            nodes_to_remove = [node.ip for node in to_remove]
            for i in range(1, len(self.servers)):
                if self.servers[i].ip in nodes_in_cluster and self.servers[
                        i].ip not in nodes_to_remove:
                    server_group = i % int(self.zone)
                    nodes_in_zone[zones[server_group]].append(
                        self.servers[i].ip)
            # Shuffle the nodes between the zones
            for i in range(1, self.zone):
                node_in_zone = list(
                    set(nodes_in_zone[zones[i]]) -
                    set([node for node in rest.get_nodes_in_zone(zones[i])]))
                rest.shuffle_nodes_in_zones(node_in_zone, zones[0], zones[i])
        self.zones = nodes_in_zone
        otpnodes = [node.id for node in rest.node_statuses()]
        nodes_to_remove = [
            node.id for node in rest.node_statuses()
            if node.ip in [t.ip for t in to_remove]
        ]
        # Start rebalance and monitor it.
        started = rest.rebalance(otpNodes=otpnodes,
                                 ejectedNodes=nodes_to_remove)
        if started:
            result = rest.monitorRebalance()
            msg = "successfully rebalanced cluster {0}"
            self.log.info(msg.format(result))

    def enable_autofailover(self):
        """
        Enable the autofailover setting with the given timeout.
        :return: True If the setting was set with the timeout, else return
        False
        """
        status = self.rest.update_autofailover_settings(
            True,
            self.timeout,
            self.can_abort_rebalance,
            maxCount=self.max_count,
            enableServerGroup=self.server_group_failover)
        return status

    def disable_autofailover(self):
        """
        Disable the autofailover setting.
        :return: True If the setting was disabled, else return
        False
        """
        status = self.rest.update_autofailover_settings(False, 120, False)
        return status

    def enable_autofailover_and_validate(self):
        """
        Enable autofailover with given timeout and then validate if the
        settings.
        :return: Nothing
        """
        status = self.enable_autofailover()
        self.assertTrue(status, "Failed to enable autofailover_settings!")
        self.sleep(5)
        settings = self.rest.get_autofailover_settings()
        self.assertTrue(settings.enabled, "Failed to enable "
                        "autofailover_settings!")
        self.assertEqual(
            self.timeout, settings.timeout,
            "Incorrect timeout set. Expected timeout : {0} "
            "Actual timeout set : {1}".format(self.timeout, settings.timeout))
        self.assertEqual(
            self.can_abort_rebalance, settings.can_abort_rebalance,
            "Incorrect can_abort_rebalance set. Expected can_abort_rebalance : {0} "
            "Actual can_abort_rebalance set : {1}".format(
                self.can_abort_rebalance, settings.can_abort_rebalance))

    def disable_autofailover_and_validate(self):
        """
        Disable autofailover setting and then validate if the setting was
        disabled.
        :return: Nothing
        """
        status = self.disable_autofailover()
        self.assertTrue(status, "Failed to change autofailover_settings!")
        settings = self.rest.get_autofailover_settings()
        self.assertFalse(settings.enabled, "Failed to disable "
                         "autofailover_settings!")

    def start_node_monitors_task(self):
        """
        Start the node monitors task to analyze the node status monitors.
        :return: The NodeMonitorsAnalyserTask.
        """
        node_monitor_task = NodeMonitorsAnalyserTask(self.orchestrator)
        self.task_manager.schedule(node_monitor_task, sleep_time=5)
        return node_monitor_task

    def enable_firewall(self):
        """
        Enable the firewall on the nodes that are to be failed in the test.
        :return: Nothing
        """
        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(
            self.orchestrator,
            self.server_to_fail,
            "enable_firewall",
            self.timeout,
            self.pause_between_failover_action,
            self.failover_expected,
            self.timeout_buffer,
            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))
Example #14
 def __init__(self):
     self.task_manager = TaskManager()
     self.task_manager.start()
Example #15
class ServerTasks(object):
    """A Task API for performing various operations synchronously or asynchronously on Couchbase cluster."""

    def __init__(self, task_manager=jython_task_manager()):
        self.task_manager = TaskManager("Cluster_Thread")
        self.jython_task_manager = task_manager

    def async_create_bucket(self, server, bucket):
        """Asynchronously creates the default bucket

        Parameters:
            bucket_params - a dictionary containing bucket creation parameters. (Dict)
        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
#         bucket_params['bucket_name'] = 'default'
        _task = conc.BucketCreateTask(server, bucket, task_manager=self.task_manager)
        self.task_manager.schedule(_task)
        return _task

    def sync_create_bucket(self, server, bucket):
        """Synchronously creates the default bucket

        Parameters:
            bucket_params - a dictionary containing bucket creation parameters. (Dict)
        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
#         bucket_params['bucket_name'] = 'default'
        _task = conc.BucketCreateTask(server, bucket, task_manager=self.task_manager)
        self.task_manager.schedule(_task)
        return _task.get_result()
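
    # Note: sync_create_bucket is async_create_bucket plus a blocking
    # get_result(); callers that want to overlap other work can hold the
    # returned future, e.g. (hypothetical interleaved work):
    #
    #   task = tasks.async_create_bucket(server, bucket)
    #   ...do other setup work...
    #   task.get_result()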
    
    def async_bucket_delete(self, server, bucket='default'):
        """Asynchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            BucketDeleteTask - A task future that is a handle to the scheduled task."""
        _task = conc.BucketDeleteTask(server, self.task_manager, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_failover(self, servers=[], failover_nodes=[], graceful=False,
                       use_hostnames=False, wait_for_pending=0):
        """Asynchronously failover a set of nodes

        Parameters:
            servers - servers used for connection. (TestInputServer)
            failover_nodes - The set of servers that will undergo failover. (TestInputServer)
            graceful = True/False. True - graceful, False - hard. (Boolean)

        Returns:
            FailOverTask - A task future that is a handle to the scheduled task."""
        _task = conc.FailoverTask(servers, task_manager=self.task_manager,
                             to_failover=failover_nodes,
                             graceful=graceful, use_hostnames=use_hostnames,
                             wait_for_pending=wait_for_pending)
        self.task_manager.schedule(_task)
        return _task

    def async_init_node(self, server, disabled_consistent_view=None,
                        rebalanceIndexWaitingDisabled=None, rebalanceIndexPausingDisabled=None,
                        maxParallelIndexers=None, maxParallelReplicaIndexers=None, port=None,
                        quota_percent=None, services=None, index_quota_percent=None, gsi_type='forestdb'):
        """Asynchronously initializes a node

        The task scheduled will initialize a node's username and password and will establish
        the node's memory quota to be 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            disabled_consistent_view - disable consistent view
            rebalanceIndexWaitingDisabled - index waiting during rebalance(Boolean)
            rebalanceIndexPausingDisabled - index pausing during rebalance(Boolean)
            maxParallelIndexers - max parallel indexers threads(Int)
            index_quota_percent - index quota used by the GSI service (added in Sherlock)
            maxParallelReplicaIndexers - max parallel replica indexers threads(int)
            port - port to initialize cluster
            quota_percent - percent of memory to initialize
            services - can be kv, n1ql, index
            gsi_type - Indexer Storage Mode
        Returns:
            NodeInitTask - A task future that is a handle to the scheduled task."""
        _task = conc.NodeInitializeTask(server, self.task_manager, disabled_consistent_view,
                                        rebalanceIndexWaitingDisabled, rebalanceIndexPausingDisabled,
                                        maxParallelIndexers, maxParallelReplicaIndexers,
                                        port, quota_percent, services=services,
                                        index_quota_percent=index_quota_percent,
                                        gsi_type=gsi_type)

        self.task_manager.schedule(_task)
        return _task

    def async_load_gen_docs(self, cluster, bucket, generator, op_type, exp=0, flag=0, persist_to=0, replicate_to=0,
                            only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=5, compression=True,
                            process_concurrency=8):

        log.info("Loading documents to {}".format(bucket.name))
        client = VBucketAwareMemcached(RestConnection(cluster.master), bucket)
        _task = jython_tasks.LoadDocumentsGeneratorsTask(cluster, self.jython_task_manager, bucket, client, [generator],
                                                        op_type, exp, flag=flag, persist_to=persist_to,
                                                        replicate_to=replicate_to, only_store_hash=only_store_hash,
                                                        batch_size=batch_size,
                                                        pause_secs=pause_secs, timeout_secs=timeout_secs,
                                                        compression=compression,
                                                        process_concurrency=process_concurrency)
        self.jython_task_manager.add_new_task(_task)
        return _task

    def async_validate_docs(self, cluster, bucket, generator, opt_type, exp=0, flag=0, only_store_hash=True,
                            batch_size=1, pause_secs=1, timeout_secs=5, compression=True, process_concurrency=4):
        log.info("Validating documents")
        client = VBucketAwareMemcached(RestConnection(cluster.master), bucket)
        _task = jython_tasks.DocumentsValidatorTask(cluster, self.jython_task_manager, bucket, client, [generator],
                                                    opt_type, exp, flag=flag, only_store_hash=only_store_hash,
                                                    batch_size=batch_size, pause_secs=pause_secs,
                                                    timeout_secs=timeout_secs, compression=compression,
                                                    process_concurrency=process_concurrency)
        self.jython_task_manager.add_new_task(_task)
        return _task

#     def async_load_gen_docs_java(self, server, bucket, start_from, num_items=10000):
#         def read_json_tempelate(path):
#             import json
#             istream = open(path);
#             with istream as data_file:    
#                 data = json.load(data_file)
#             return data["key"], data["value"]
#         
#         path = "b/testdata.json"
#         k,v = read_json_tempelate(path)
#         
#         _task = conc.LoadDocumentsTask_java(self.task_manager, server, bucket, num_items, start_from, k, v)
# 
#         self.task_manager.schedule(_task)
#         return _task

    def async_rebalance(self, servers, to_add, to_remove, use_hostnames=False, services = None, check_vbucket_shuffling=True):
        """Asyncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])
            use_hostnames - True if nodes should be added using hostnames (Boolean)

        Returns:
            RebalanceTask - A task future that is a handle to the scheduled task"""
        _task = jython_tasks.rebalanceTask(servers, to_add, to_remove,
                 use_hostnames=use_hostnames, services=services, check_vbucket_shuffling=check_vbucket_shuffling)
        self.jython_task_manager.add_new_task(_task)
        return _task

    def async_wait_for_stats(self, cluster, bucket, param, stat, comparison, value):
        """Asynchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            StatsWaitTask - A task future that is a handle to the scheduled task"""
        _task = jython_tasks.StatsWaitTask(cluster, bucket, param, stat, comparison, value)
        self.jython_task_manager.add_new_task(_task)
        return _task

    def create_default_bucket(self, bucket_params, timeout=600):
        """Synchronously creates the default bucket

        Parameters:
            bucket_params - A dictionary containing a list of bucket creation parameters. (Dict)

        Returns:
            boolean - Whether or not the bucket was created."""

        _task = self.async_create_default_bucket(bucket_params)
        return _task.get_result(timeout)

    def create_sasl_bucket(self, name, password, bucket_params, timeout=None):
        """Synchronously creates a sasl bucket

        Parameters:
            bucket_params - A dictionary containing a list of bucket creation parameters. (Dict)

        Returns:
            boolean - Whether or not the bucket was created."""

        _task = self.async_create_sasl_bucket(name, password, bucket_params)
        self.task_manager.schedule(_task)
        return _task.get_result(timeout)

    def create_standard_bucket(self, name, port, bucket_params, timeout=None):
        """Synchronously creates a standard bucket
        Parameters:
            bucket_params - A dictionary containing a list of bucket creation parameters. (Dict)
        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_standard_bucket(name, port, bucket_params)
        return _task.get_result(timeout)

    def bucket_delete(self, server, bucket='default', timeout=None):
        """Synchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            boolean - Whether or not the bucket was deleted."""
        _task = self.async_bucket_delete(server, bucket)
        return _task.get_result(timeout)

    def init_node(self, server, async_init_node=True, disabled_consistent_view=None, services = None, index_quota_percent = None):
        """Synchronously initializes a node

        The task scheduled will initialize a node's username and password and will establish
        the node's memory quota to be 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            index_quota_percent - index quota percentage
            disabled_consistent_view - disable consistent view

        Returns:
            boolean - Whether or not the node was properly initialized."""
        _task = self.async_init_node(server, disabled_consistent_view=disabled_consistent_view, services=services, index_quota_percent=index_quota_percent)
        return _task.result()

    def rebalance(self, servers, to_add, to_remove, timeout=None, use_hostnames=False, services = None):
        """Syncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])
            use_hostnames - True if nodes should be added using their hostnames (Boolean)
            services - Services definition per Node, default is None (this is since Sherlock release)
        Returns:
            boolean - Whether or not the rebalance was successful"""
        _task = self.async_rebalance(servers, to_add, to_remove, use_hostnames, services = services)
        result = self.jython_task_manager.get_task_result(_task)
        return result

    def load_gen_docs(self, cluster, bucket, generator, op_type, exp=0,
                      flag=0, persist_to=0, replicate_to=0, only_store_hash=True,
                      batch_size=1, compression=True):
        _task = self.async_load_gen_docs(cluster, bucket, generator, op_type, exp, flag, persist_to=persist_to,
                                         replicate_to=replicate_to,
                                         only_store_hash=only_store_hash, batch_size=batch_size, 
                                         compression=compression)
        return self.jython_task_manager.get_task_result(_task)

    def verify_data(self, server, bucket, kv_store, timeout=None, compression=True):
        _task = self.async_verify_data(server, bucket, kv_store, compression=compression)
        return _task.result(timeout)

    def async_verify_data(self, server, bucket, kv_store, max_verify=None,
                          only_store_hash=True, batch_size=1, replica_to_read=None, timeout_sec=5, compression=True):
        if batch_size > 1:
            _task = conc.BatchedValidateDataTask(server, bucket, kv_store, max_verify, only_store_hash, batch_size, 
                                                 timeout_sec, self.task_manager, compression=compression)
        else:
            _task = conc.ValidateDataTask(server, bucket, kv_store, max_verify, only_store_hash, replica_to_read, 
                                          self.task_manager, compression=compression)
        self.task_manager.schedule(_task)
        return _task
    
    def wait_for_stats(self, cluster, bucket, param, stat, comparison, value, timeout=None):
        """Synchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            boolean - Whether or not the correct stats state was seen"""
        _task = self.async_wait_for_stats(cluster, bucket, param, stat, comparison, value)
        return self.jython_task_manager.get_task_result(_task)

    def shutdown(self, force=False):
        self.task_manager.shutdown(force)
        if force:
            log.info("Cluster instance shutdown with force")

    def async_n1ql_query_verification(self, server, bucket, query, n1ql_helper=None,
                                      expected_result=None, is_explain_query=False,
                                      index_name=None, verify_results=True, retry_time=2,
                                      scan_consistency=None, scan_vector=None):
        """Asynchronously runs n1ql querya and verifies result if required

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            query - Query params being used with the query. (dict)
            expected_result - expected result after querying
            is_explain_query - is query explain query
            index_name - index related to query
            bucket - The name of the bucket containing items for this view. (String)
            verify_results -  Verify results after query runs successfully
            retry_time - The time in seconds to wait before retrying failed queries (int)
            n1ql_helper - n1ql helper object
            scan_consistency - consistency value for querying
            scan_vector - scan vector used for consistency
        Returns:
            N1QLQueryTask - A task future that is a handle to the scheduled task."""
        _task = jython_tasks.N1QLQueryTask(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 query = query, expected_result=expected_result,
                 verify_results = verify_results,
                 is_explain_query = is_explain_query,
                 index_name = index_name,
                 retry_time= retry_time,
                 scan_consistency = scan_consistency,
                 scan_vector = scan_vector)
        self.jython_task_manager.add_new_task(_task)
        return _task

    def n1ql_query_verification(self, server, bucket, query, n1ql_helper = None,
                                expected_result=None, is_explain_query = False,
                                index_name = None, verify_results = True,
                                scan_consistency = None, scan_vector = None,
                                retry_time=2, timeout = 60):
        """Synchronously runs n1ql querya and verifies result if required

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            query - Query params being used with the query. (dict)
            expected_result - expected result after querying
            is_explain_query - is query explain query
            index_name - index related to query
            bucket - The name of the bucket containing items for this view. (String)
            verify_results -  Verify results after query runs successfully
            retry_time - The time in seconds to wait before retrying failed queries (int)
            n1ql_helper - n1ql helper object
            scan_consistency - consistency used during querying
            scan_vector - vector used during querying
            timeout - timeout for task
        Returns:
            The result of the scheduled N1QLQueryTask."""
        _task = self.async_n1ql_query_verification(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 query = query, expected_result=expected_result,
                 is_explain_query = is_explain_query,
                 index_name = index_name,
                 verify_results = verify_results,
                 retry_time= retry_time,
                 scan_consistency = scan_consistency,
                 scan_vector = scan_vector)
        return self.jython_task_manager.get_task_result(_task)

    def async_create_index(self, server, bucket, query, n1ql_helper = None,
                           index_name = None, defer_build = False, retry_time=2,
                           timeout = 240):
        """Asynchronously runs create index task

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            query - Query params being used with the query.
            bucket - The name of the bucket containing items for this view. (String)
            index_name - Name of the index to be created
            defer_build - build is defered
            retry_time - The time in seconds to wait before retrying failed queries (int)
            n1ql_helper - n1ql helper object
            timeout - timeout for index to come online
        Returns:
            CreateIndexTask - A task future that is a handle to the scheduled task."""
        _task = jython_tasks.CreateIndexTask(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 defer_build = defer_build,
                 index_name = index_name,
                 query = query,
                 retry_time= retry_time,
                 timeout = timeout)
        self.jython_task_manager.add_new_task(_task)
        return _task

    def async_monitor_index(self, server, bucket, n1ql_helper = None,
                            index_name = None, retry_time=2, timeout = 240):
        """Asynchronously runs create index task

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            bucket - The name of the bucket containing items for this view. (String)
            index_name - Name of the index to be monitored
            retry_time - The time in seconds to wait before retrying failed queries (int)
            timeout - timeout for index to come online
            n1ql_helper - n1ql helper object
        Returns:
            MonitorIndexTask - A task future that is a handle to the scheduled task."""
        _task = jython_tasks.MonitorIndexTask(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 index_name = index_name,
                 retry_time= retry_time,
                 timeout = timeout)
        self.jython_task_manager.add_new_task(_task)
        return _task

    def async_build_index(self, server, bucket, query, n1ql_helper = None, retry_time=2):
        """Asynchronously runs create index task

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            query - Query params being used with the query.
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)
            n1ql_helper - n1ql helper object
        Returns:
            BuildIndexTask - A task future that is a handle to the scheduled task."""
        _task = jython_tasks.BuildIndexTask(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 query = query,
                 retry_time= retry_time)
        self.jython_task_manager.add_new_task(_task)
        return _task

    def create_index(self, server, bucket, query, n1ql_helper = None, index_name = None,
                     defer_build = False, retry_time=2, timeout= 60):
        """Asynchronously runs drop index task

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            query - Query params being used with the query.
            bucket - The name of the bucket containing items for this view. (String)
            index_name - Name of the index to be created
            retry_time - The time in seconds to wait before retrying failed queries (int)
            n1ql_helper - n1ql helper object
            defer_build - defer the build
            timeout - timeout for the task
        Returns:
            The result of the scheduled CreateIndexTask."""
        _task = self.async_create_index(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 query = query,
                 index_name = index_name,
                 defer_build = defer_build,
                 retry_time= retry_time)
        return self.jython_task_manager.get_task_result(_task)

    def async_drop_index(self, server = None, bucket = "default", query = None,
                         n1ql_helper = None, index_name = None, retry_time=2):
        """Synchronously runs drop index task

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            query - Query params being used with the query.
            bucket - The name of the bucket containing items for this view. (String)
            index_name - Name of the index to be dropped
            retry_time - The time in seconds to wait before retrying failed queries (int)
            n1ql_helper - n1ql helper object
        Returns:
            DropIndexTask - A task future that is a handle to the scheduled task."""
        _task = jython_tasks.DropIndexTask(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 query = query,
                 index_name = index_name,
                 retry_time= retry_time)
        self.jython_task_manager.add_new_task(_task)
        return _task

    def drop_index(self, server, bucket, query, n1ql_helper = None,
                   index_name = None, retry_time=2, timeout = 60):
        """Synchronously runs drop index task

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            query - Query params being used with the query. (dict)
            bucket - The name of the bucket containing items for this view. (String)
            index_name - Name of the index to be created
            retry_time - The time in seconds to wait before retrying failed queries (int)
            n1ql_helper - n1ql helper object
            timeout - timeout for the task
        Returns:
            The result of the scheduled DropIndexTask."""
        _task = self.async_drop_index(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 query = query,
                 index_name = index_name,
                 retry_time= retry_time)
        return self.jython_task_manager.get_task_result(_task)

    def failover(self, servers=[], failover_nodes=[], graceful=False, use_hostnames=False, timeout=None):
        """Synchronously fails over a set of nodes

        Parameters:
            servers - node used for connection (TestInputServer)
            failover_nodes - servers to be failed over, i.e. removed from the cluster. (TestInputServer)
            graceful - True for graceful failover, False for hard. (Boolean)

        Returns:
            boolean - Whether or not the failover completed."""
        _task = self.async_failover(servers, failover_nodes, graceful, use_hostnames)
        return _task.result(timeout)

    def async_bucket_flush(self, server, bucket='default'):
        """Asynchronously flushes a bucket

        Parameters:
            server - The server to flush the bucket on. (TestInputServer)
            bucket - The name of the bucket to be flushed. (String)

        Returns:
            BucketFlushTask - A task future that is a handle to the scheduled task."""
        _task = conc.BucketFlushTask(server,self.task_manager,bucket)
        self.task_manager.schedule(_task)
        return _task

    def bucket_flush(self, server, bucket='default', timeout=None):
        """Synchronously flushes a bucket

        Parameters:
            server - The server to flush the bucket on. (TestInputServer)
            bucket - The name of the bucket to be flushed. (String)

        Returns:
            boolean - Whether or not the bucket was flushed."""
        _task = self.async_bucket_flush(server, bucket)
        return _task.get_result(timeout)

    def async_compact_bucket(self, server, bucket="default"):
        """Asynchronously starts bucket compaction

        Parameters:
            server - source couchbase server
            bucket - bucket to compact

        Returns:
            CompactBucketTask - A task future that is a handle to the scheduled task."""
        _task = conc.CompactBucketTask(server, self.task_manager, bucket)
        self.task_manager.schedule(_task)
        return _task

    def compact_bucket(self, server, bucket="default"):
        """Synchronously runs bucket compaction and monitors progress

        Parameters:
            server - source couchbase server
            bucket - bucket to compact

        Returns:
            boolean - Whether or not the compaction completed successfully"""
        _task = self.async_compact_bucket(server, bucket)
        status = _task.get_result()
        return status

    def async_cbas_query_execute(self, master, cbas_server, cbas_endpoint,
                                 statement, bucket='default', mode=None,
                                 pretty=True):
        """
        Asynchronously execute a CBAS query
        :param master: Master server
        :param cbas_server: CBAS server
        :param cbas_endpoint: CBAS Endpoint URL (/analytics/service)
        :param statement: Query to be executed
        :param bucket: bucket to connect
        :param mode: Query Execution mode
        :param pretty: Pretty formatting
        :return: task with the output or error message
        """
        _task = conc.CBASQueryExecuteTask(master, cbas_server, self.task_manager, cbas_endpoint, statement, bucket,
                                          mode, pretty)
        self.task_manager.schedule(_task)
        return _task
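
    # Hedged example: run a CBAS statement and block on the task future
    # (dataset name and get_result() usage are illustrative):
    #
    #   task = self.async_cbas_query_execute(master, cbas_server,
    #                                        "/analytics/service",
    #                                        "SELECT COUNT(*) FROM ds;")
    #   result = task.get_result()
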
class PlasmaCollectionsTests(BaseSecondaryIndexingTests):
    def setUp(self):
        super(PlasmaCollectionsTests, self).setUp()
        self.log.info("==============  PlasmaCollectionsTests setup has started ==============")
        self.rest.delete_all_buckets()
        self.num_scopes = self.input.param("num_scopes", 5)
        self.num_collections = self.input.param("num_collections", 10)
        self.test_timeout = self.input.param("test_timeout", 60)
        self.test_bucket = self.input.param('test_bucket', 'test_bucket')
        self.system_failure = self.input.param('system_failure', 'disk_failure')
        self.bucket_size = self.input.param('bucket_size', 100)
        self.drop_sleep = self.input.param('drop_sleep', 30)
        self.bucket_params = self._create_bucket_params(server=self.master, size=self.bucket_size,
                                                        replicas=self.num_replicas, bucket_type=self.bucket_type,
                                                        enable_replica_index=self.enable_replica_index,
                                                        eviction_policy=self.eviction_policy, lww=self.lww)
        self.cluster.create_standard_bucket(name=self.test_bucket, port=11222,
                                            bucket_params=self.bucket_params)
        self.buckets = self.rest.get_buckets()
        self.cli_rest = CollectionsRest(self.master)
        self.stat = CollectionsStats(self.master)
        self.scope_prefix = 'test_scope'
        self.collection_prefix = 'test_collection'
        self.run_cbq_query = self.n1ql_helper.run_cbq_query
        self.all_indexes = []
        self._lock_queue = threading.Lock()
        self.run_tasks = False
        self.tasks = []
        self.batch_size = self.input.param("batch_size", 100000)
        self.start_doc = self.input.param("start_doc", 1)
        self.num_items_in_collection = self.input.param("num_items_in_collection", 100000)
        self.percent_create = self.input.param("percent_create", 100)
        self.percent_update = self.input.param("percent_update", 0)
        self.percent_delete = self.input.param("percent_delete", 0)
        self.all_collections = self.input.param("all_collections", False)
        self.dataset_template = self.input.param("dataset_template", "Employee")
        self.num_of_indexes = self.input.param("num_of_indexes", 1000)
        self.failure_timeout = self.input.param("failure_timeout", 60)
        self.failure_recover_sleep = self.input.param("failure_recover_sleep", 600)
        self.index_ops_obj = ConCurIndexOps()
        self.sweep_interval = self.input.param("sweep_interval", 120)
        self.compact_sleep_duration = self.input.param("compact_sleep_duration", 300)
        self.moi_snapshot_interval = self.input.param("moi_snapshot_interval", 300000)
        self.dgm_check_timeout = self.input.param("dgm_check_timeout", 1800)
        self.concur_drop_indexes = self.input.param("concur_drop_indexes", True)
        self.concur_scan_indexes = self.input.param("concur_scan_indexes", True)
        self.concur_create_indexes = self.input.param("concur_create_indexes", True)
        self.concur_build_indexes = self.input.param("concur_build_indexes", True)
        self.concur_system_failure = self.input.param("concur_system_failure", True)

        self.simple_create_index = self.input.param("simple_create_index", False)
        self.simple_drop_index = self.input.param("simple_drop_index", False)
        self.simple_scan_index = self.input.param("simple_scan_index", False)
        self.simple_kill_indexer = self.input.param("simple_kill_indexer", False)
        self.simple_kill_memcached = self.input.param("simple_kill_memcached", False)
        self.num_pre_indexes = self.input.param("num_pre_indexes", 50)
        self.num_failure_iteration = self.input.param("num_failure_iteration", None)
        self.failure_map = {"disk_failure": {"failure_task": "induce_disk_failure",
                                             "recover_task": "recover_disk_failure",
                                             "expected_failure": ["Terminate Request due to server termination",
                                                                  "Build Already In Progress", "Timeout 1ms exceeded"]},
                            "disk_full": {"failure_task": "induce_disk_full",
                                          "recover_task": "recover_disk_full_failure",
                                          "expected_failure": ["Terminate Request due to server termination",
                                                               "Build Already In Progress", "Timeout 1ms exceeded",
                                                               "There is no available index service that can process "
                                                               "this request at this time",
                                                               "Create index or Alter replica cannot proceed "
                                                               "due to network partition, node failover or "
                                                               "indexer failure"]},
                            "restart_couchbase": {"failure_task": "stop_couchbase",
                                                "recover_task": "start_couchbase",
                                                "expected_failure": ["Terminate Request due to server termination",
                                                                     "There is no available index service that can process "
                                                                     "this request at this time",
                                                                     "Build Already In Progress", "Timeout 1ms exceeded",
                                                                     "Create index or Alter replica cannot proceed "
                                                                     "due to network partition, node failover or "
                                                                     "indexer failure"]},
                            "net_packet_loss": {"failure_task": "induce_net_packet_loss",
                                                "recover_task": "disable_net_packet_loss",
                                                "expected_failure": []},
                            "network_delay": {"failure_task": "induce_network_delay",
                                                "recover_task": "disable_network_delay",
                                                "expected_failure": []},
                            "disk_readonly": {"failure_task": "induce_disk_readonly",
                                                "recover_task": "disable_disk_readonly",
                                                "expected_failure": ["Terminate Request due to server termination",
                                                                     "There is no available index service that can process "
                                                                     "this request at this time",
                                                                     "Build Already In Progress", "Timeout 1ms exceeded"]},
                            "limit_file_limits": {"failure_task": "induce_limit_file_limits",
                                                "recover_task": "disable_limit_file_limits",
                                                "expected_failure": []},
                            "limit_file_size_limit": {"failure_task": "induce_limit_file_size_limit",
                                                "recover_task": "disable_limit_file_size_limit",
                                                "expected_failure": ["Terminate Request due to server termination"]},
                            "extra_files_in_log_dir": {"failure_task": "induce_extra_files_in_log_dir",
                                                "recover_task": "disable_extra_files_in_log_dir",
                                                "expected_failure": []},
                            "dummy_file_in_log_dir": {"failure_task": "induce_dummy_file_in_log_dir",
                                                "recover_task": "disable_dummy_file_in_log_dir",
                                                "expected_failure": []},
                            "empty_files_in_log_dir": {"failure_task": "induce_empty_files_in_log_dir",
                                                "recover_task": "disable_empty_files_in_log_dir",
                                                "expected_failure": []},
                            "shard_json_corruption": {"failure_task": "shard_json_corruption",
                                                       "recover_task": None,
                                                       "expected_failure": []},
                            "enable_firewall": {"failure_task": "induce_enable_firewall",
                                                "recover_task": "disable_firewall",
                                                "expected_failure": ["There is no available index service that can process "
                                                                     "this request at this time",
                                                                     "Build Already In Progress", "Timeout 1ms exceeded",
                                                                     "Create index or Alter replica cannot proceed "
                                                                     "due to network partition, node failover or "
                                                                     "indexer failure"]},
                            "limit_file_limits_desc": {"failure_task": "induce_limit_file_limits_desc",
                                                "recover_task": "disable_limit_file_limits_desc",
                                                "expected_failure": []},
                            "stress_cpu": {"failure_task": "stress_cpu",
                                                "recover_task": None,
                                                "expected_failure": ["Terminate Request due to server termination",
                                                                     "There is no available index service that can process "
                                                                     "this request at this time",
                                                                     "Build Already In Progress", "Timeout 1ms exceeded",
                                                                     "Create index or Alter replica cannot proceed "
                                                                     "due to network partition, node failover or "
                                                                     "indexer failure"]},
                            "stress_ram": {"failure_task": "stress_ram",
                                                "recover_task": None,
                                                "expected_failure": ["Terminate Request due to server termination",
                                                                     "There is no available index service that can process "
                                                                     "this request at this time",
                                                                     "Build Already In Progress", "Timeout 1ms exceeded",
                                                                     "Create index or Alter replica cannot proceed "
                                                                     "due to network partition, node failover or "
                                                                     "indexer failure"]}}

        self.log.info("Setting indexer memory quota to 256 MB and other settings...")
        self.index_nodes = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
        self.n1ql_nodes = self.get_nodes_from_services_map(service_type="n1ql", get_all_nodes=True)
        self.data_nodes = self.get_kv_nodes()
        for index_node in self.index_nodes:
            rest = RestConnection(index_node)
            rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=256)
            rest.set_index_settings({"indexer.settings.persisted_snapshot.moi.interval": self.moi_snapshot_interval})
            rest.set_index_settings({"indexer.settings.persisted_snapshot_init_build.moi.interval": self.moi_snapshot_interval})
            rest.set_index_settings({"indexer.metadata.compaction.sleepDuration": self.compact_sleep_duration})
            rest.set_index_settings({"indexer.plasma.mainIndex.evictSweepInterval": self.sweep_interval})
            rest.set_index_settings({"indexer.plasma.backIndex.evictSweepInterval": self.sweep_interval})
        self.disk_location = RestConnection(self.index_nodes[0]).get_index_path()
        self.key_prefix = f'bigkeybigkeybigkeybigkeybigkeybigkey_{self.master.ip}_'

        self.log.info("==============  PlasmaCollectionsTests setup has completed ==============")

    def tearDown(self):
        self.log.info("==============  PlasmaCollectionsTests tearDown has started ==============")
        super(PlasmaCollectionsTests, self).tearDown()
        try:
            self.reset_data_mount_point(self.index_nodes)
        except Exception as err:
            self.log.info(str(err))
        self.log.info("==============  PlasmaCollectionsTests tearDown has completed ==============")

    def suite_tearDown(self):
        pass

    def suite_setUp(self):
        pass

    def _prepare_collection_for_indexing(self, num_scopes=1, num_collections=1):
        self.keyspace = []
        self.cli_rest.create_scope_collection_count(scope_num=num_scopes, collection_num=num_collections,
                                                    scope_prefix=self.scope_prefix,
                                                    collection_prefix=self.collection_prefix,
                                                    bucket=self.test_bucket)
        self.scopes = self.cli_rest.get_bucket_scopes(bucket=self.test_bucket)
        self.collections = list(set(self.cli_rest.get_bucket_collections(bucket=self.test_bucket)))
        self.scopes.remove('_default')
        self.collections.remove('_default')
        self.sleep(10)
        for s_item in self.scopes:
            for c_item in self.collections:
                self.keyspace.append(f'default:{self.test_bucket}.{s_item}.{c_item}')

    def check_if_indexes_created(self, index_list, defer_build=False):
        indexes_not_created = []
        for index in index_list:
            index_created, status = self.check_if_index_created(index["name"], defer_build)
            if not index_created:
                indexes_not_created.append({"name": index["name"], "status": status})
        return indexes_not_created

    def check_if_indexes_not_created(self, index_list, defer_build=False):
        indexes_created = []
        for index in index_list:
            index_created, status = self.check_if_index_created(index["name"], defer_build)
            if index_created:
                indexes_created.append({"name": index["name"], "status": status})
        return indexes_created

    def check_if_index_recovered(self, index_list):
        indexes_not_recovered = []
        for index in index_list:
            recovered, index_count, collection_itemcount = self._verify_collection_count_with_index_count(index["query_def"])
            if not recovered:
                error_map = {"index_name": index["name"], "index_count": index_count, "bucket_count": collection_itemcount}
                indexes_not_recovered.append(error_map)
        if not indexes_not_recovered:
            self.log.info("All indexes recovered")

        return indexes_not_recovered

    def check_if_indexes_deleted(self, index_list):
        indexes_not_deleted = []
        for index in index_list:
            index_created, status = self.check_if_index_created(index["name"])
            if index_created:
                indexes_not_deleted.append({"name": index["name"], "status": status})
        return indexes_not_deleted

    def verify_index_ops_obj(self):
        indexes_not_created = self.check_if_indexes_created(self.index_ops_obj.get_create_index_list())
        if indexes_not_created:
            self.index_ops_obj.update_errors(f'Indexes expected to be created were not created: {indexes_not_created}')
            self.log.info(f'Indexes expected to be created were not created: {indexes_not_created}')
        if not self.wait_for_mutation_processing(self.index_nodes):
            self.index_ops_obj.update_errors("Some indexes mutation not processed")
        indexes_not_recovered = self.check_if_index_recovered(self.index_ops_obj.get_create_index_list())
        if indexes_not_recovered:
            self.index_ops_obj.update_errors(f'Some Indexes not recovered {indexes_not_recovered}')
            self.log.info(f'Some Indexes not recovered {indexes_not_recovered}')
        indexes_not_deleted = self.check_if_indexes_deleted(self.index_ops_obj.get_delete_index_list())
        if indexes_not_deleted:
            self.index_ops_obj.update_errors(f'Indexes expected to be deleted were not deleted: {indexes_not_deleted}')
            self.log.info(f'Indexes expected to be deleted were not deleted: {indexes_not_deleted}')
        indexes_not_defer_build = self.check_if_indexes_created(index_list=self.index_ops_obj.get_defer_index_list(), defer_build=True)
        if indexes_not_defer_build:
            self.index_ops_obj.update_errors(f'Indexes expected to be in defer_build state were not: '
                                             f'{indexes_not_defer_build}')
            self.log.info(f'Indexes expected to be in defer_build state were not: '
                          f'{indexes_not_defer_build}')

    def create_indexes(self, num=0, defer_build=False, itr=0, expected_failure=None):
        query_definition_generator = SQLDefinitionGenerator()
        index_create_tasks = []
        # Guard against the mutable-default-argument pitfall.
        if expected_failure is None:
            expected_failure = []
        if self.system_failure in self.failure_map:
            expected_failure = self.failure_map[self.system_failure]["expected_failure"]
        self.log.info(threading.currentThread().getName() + " Started")
        if len(self.keyspace) < num:
            num_indexes_collection = math.ceil(num / len(self.keyspace))
        else:
            num_indexes_collection = 1
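        # Worked example: with num=1000 and 50 keyspaces (5 scopes x 10
        # collections), the branch above assigns ceil(1000 / 50) = 20 index
        # definitions per collection.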
        for collection_keyspace in self.keyspace:
            if self.run_tasks:
                collection_name = collection_keyspace.split('.')[-1]
                scope_name = collection_keyspace.split('.')[-2]
                query_definitions = query_definition_generator.\
                    generate_employee_data_query_definitions(
                        index_name_prefix='idx_' + scope_name + "_" + collection_name,
                        keyspace=collection_keyspace)
                server = random.choice(self.n1ql_nodes)
                index_create_task = ConcurrentIndexCreateTask(
                    server, self.test_bucket, scope_name, collection_name,
                    query_definitions, self.index_ops_obj, self.n1ql_helper,
                    num_indexes_collection, defer_build, itr, expected_failure)
                self.index_create_task_manager.schedule(index_create_task)
                index_create_tasks.append(index_create_task)
        self.log.info(threading.currentThread().getName() + " Completed")

        return index_create_tasks

    def build_indexes(self):
        self.sleep(5)
        self.log.info(threading.currentThread().getName() + " Started")
        while self.run_tasks:
            index_to_build = self.index_ops_obj.all_indexes_metadata(operation="build")
            if index_to_build:
                query_def = index_to_build["query_def"]
                build_query = query_def.generate_build_query(namespace=query_def.keyspace)
                try:
                    server = random.choice(self.n1ql_nodes)
                    self.run_cbq_query(query=build_query, server=server)
                    self.index_ops_obj.build_complete_add_to_create(index_to_build)
                except Exception as err:
                    if "Build Already In Progress" not in str(err):
                        error_map = {"query": build_query, "error": str(err)}
                        self.index_ops_obj.update_errors(error_map)
            self.sleep(5)
        self.log.info(threading.currentThread().getName() + " Completed")

    def scan_indexes(self):
        self.sleep(5)
        self.log.info(threading.currentThread().getName() + " Started")
        while self.run_tasks:
            index_to_scan = self.index_ops_obj.all_indexes_metadata(operation="scan")
            if index_to_scan:
                self.log.info(f'Processing index: {index_to_scan["name"]}')
                query_def = index_to_scan["query_def"]
                query = query_def.generate_query(bucket=query_def.keyspace)
                try:
                    server = random.choice(self.n1ql_nodes)
                    self.run_cbq_query(query=query, server=server)
                except Exception as err:
                    if "No index available on keyspace" not in str(err):
                        error_map = {"query": query, "error": str(err)}
                        self.index_ops_obj.update_errors(error_map)
        self.log.info(threading.currentThread().getName() + " Completed")

    def drop_indexes(self, drop_sleep, defer_build=None):
        self.sleep(10)
        self.log.info(threading.currentThread().getName() + " Started")
        while self.run_tasks:
            if not defer_build:
                defer_build = random.choice([True, False])
            index_to_delete = self.index_ops_obj.all_indexes_metadata(operation="delete", defer_build=defer_build)
            if index_to_delete:
                query_def = index_to_delete["query_def"]
                drop_query = query_def.generate_index_drop_query(namespace=query_def.keyspace)
                try:
                    server = random.choice(self.n1ql_nodes)
                    self.run_cbq_query(query=drop_query, server=server)
                    self.index_ops_obj.add_to_deleted(index_to_delete)
                except Exception as err:
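                    # [sic] "automaticaly": presumably matches the server's
                    # error text verbatim, so the spelling is left as-is.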
                    if "the operation will automaticaly retry after cluster is back to normal" not in str(err) \
                            and "Index Not Found" not in str(err):
                        error_map = {"query": drop_query, "error": str(err)}
                        self.index_ops_obj.update_errors(error_map)
            self.sleep(drop_sleep)
        self.log.info(threading.currentThread().getName() + " Completed")

    def compare_indexes_count(self, indexes_count_before, indexes_count_after):
        indexes_with_data_loss = {}
        for index in indexes_count_after:
            if index in indexes_count_before and indexes_count_after[index] < indexes_count_before[index]:
                indexes_with_data_loss[index] = {"failure_iteration": self.failure_iteration,
                                                 "indexes_count_before": indexes_count_before[index],
                                                 "indexes_count_after": indexes_count_after[index]}
        if indexes_with_data_loss:
            self.index_ops_obj.update_errors(indexes_with_data_loss)


    def load_docs(self):
        sdk_data_loader = SDKDataLoader(start_seq_num=self.start_doc, num_ops=self.num_items_in_collection,
                                        percent_create=self.percent_create,
                                        percent_update=self.percent_update, percent_delete=self.percent_delete,
                                        all_collections=self.all_collections, timeout=1800,
                                        json_template=self.dataset_template,
                                        key_prefix=self.key_prefix, shuffle_docs=True)
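        # The percent_* knobs are assumed to sum to 100; e.g. percent_create=50,
        # percent_update=30, percent_delete=20 would drive a mixed workload
        # rather than the default create-only load.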
        for bucket in self.buckets:
            _task = SDKLoadDocumentsTask(self.master, bucket, sdk_data_loader)
            self.sdk_loader_manager.schedule(_task)


    def schedule_system_failure(self):
        self.log.info(threading.currentThread().getName() + " Started")
        self.sleep(10)
        self.failure_iteration = 1
        while self.run_tasks:
            if self.num_failure_iteration and self.failure_iteration > self.num_failure_iteration:
                self.log.info("Reached number of failure iterations")
                self.run_tasks = False
                break
            disk_location = RestConnection(self.index_nodes[0]).get_index_path()
            #indexes_count_before = self.get_server_indexes_count(self.index_nodes)
            self.index_ops_obj.update_ignore_failure_flag(True)
            system_failure_task = NodesFailureTask(self.master, self.index_nodes, self.system_failure, 300, 0, False, 3,
                                                   disk_location=disk_location, failure_timeout=self.failure_timeout)
            self.system_failure_task_manager.schedule(system_failure_task)
            try:
                system_failure_task.result()
            except Exception as e:
                self.log.info("Exception: {}".format(e))

            self.sleep(300, "wait for 5 mins after system failure for service to recover")
            self.index_ops_obj.update_ignore_failure_flag(False)
            self.sleep(self.failure_recover_sleep, "wait for {} secs more before collecting index count".format(self.failure_recover_sleep))

            #indexes_count_after = self.get_server_indexes_count(self.index_nodes)
            #self.compare_indexes_count(indexes_count_before, indexes_count_after)
            #self.log.info(indexes_count_before)
            #self.log.info(indexes_count_after)

            self.failure_iteration += 1

        self.log.info(threading.currentThread().getName() + " Completed")

    def induce_schedule_system_failure(self, failure_task):
        if failure_task:
            self.log.info(threading.currentThread().getName() + " Started")
            self.sleep(10)

            system_failure_task = NodesFailureTask(self.master, self.index_nodes, failure_task, 300, 0, False, 3,
                                                   disk_location=self.disk_location, failure_timeout=self.failure_timeout)
            self.system_failure_task_manager.schedule(system_failure_task)
            try:
                system_failure_task.result()
            except Exception as e:
                self.log.info("Exception: {}".format(e))

            self.log.info(threading.currentThread().getName() + " Completed")

    def get_num_compaction_per_node_initialized(self, initial_meta_store_size):
        num_compaction_per_node = {}
        for k in initial_meta_store_size:
            num_compaction_per_node[k['nodeip']] = 0
        return num_compaction_per_node

    def get_initial_meta_store_size(self, nodeip, initial_meta_store_size):
        for n in initial_meta_store_size:
            if n['nodeip'] == nodeip:
                return n['metastore_size']

    def verify_fdb_compaction(self):
        self.log.info(threading.currentThread().getName() + " Started")
        self.sleep(10)
        initial_meta_store_size = self.get_size_of_metastore_file()
        num_compaction_per_node = self.get_num_compaction_per_node_initialized(initial_meta_store_size)
        self.log.info(f'initial_meta_store_size: {initial_meta_store_size}')
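        # Heuristic: a ForestDB autocompaction rewrites the metastore file, so
        # any drop in its size between two polls is counted as one compaction
        # on that node.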
        while self.run_tasks:
            meta_store_size = self.get_size_of_metastore_file()
            self.log.info(f'meta_store_size: {meta_store_size}')
            for k in meta_store_size:
                if int(k['metastore_size']) < int(self.get_initial_meta_store_size(k['nodeip'], initial_meta_store_size)):
                    num_compaction_per_node[k['nodeip']] += 1
            initial_meta_store_size = meta_store_size
            self.sleep(5)
        self.log.info(f'Number of fdb autocompactions per node: {num_compaction_per_node}')

        for v in num_compaction_per_node.values():
            if v < 1:
                self.test_fail = True

        self.log.info(threading.currentThread().getName() + " Completed")

    def test_system_failure_create_drop_indexes(self):
        self.test_fail = False
        self.errors = []
        self.index_create_task_manager = TaskManager(
            "index_create_task_manager")
        self.index_create_task_manager.start()
        self.system_failure_task_manager = TaskManager(
            "system_failure_detector_thread")
        self.system_failure_task_manager.start()
        self.sdk_loader_manager = TaskManager(
            "sdk_loader_manager")
        self.sdk_loader_manager.start()
        if self.num_failure_iteration:
            self.test_timeout = self.failure_timeout * len(self.index_nodes)

        self._prepare_collection_for_indexing(num_scopes=self.num_scopes, num_collections=self.num_collections)
        self.run_tasks = True

        index_create_tasks = self.create_indexes(num=self.num_pre_indexes)
        for task in index_create_tasks:
            task.result()

        load_doc_thread = threading.Thread(name="load_doc_thread",
                                           target=self.load_docs)
        load_doc_thread.start()

        self.sleep(60, "sleeping for 60 sec for index to start processing docs")

        if not self.check_if_indexes_in_dgm():
            self.log.error("indexes not in dgm even after {}".format(self.dgm_check_timeout))

        if self.concur_create_indexes:
            create_thread = threading.Thread(name="create_thread",
                                             target=self.create_indexes,
                                             args=(self.num_of_indexes, False, 30))
            self.tasks.append(create_thread)

        if self.concur_drop_indexes:
            drop_thread = threading.Thread(name="drop_thread",
                                           target=self.drop_indexes,
                                           args=[self.drop_sleep])
            self.tasks.append(drop_thread)

        if self.concur_build_indexes:
            build_index_thread = threading.Thread(name="build_index_thread",
                                                  target=self.build_indexes)
            self.tasks.append(build_index_thread)

        if self.concur_scan_indexes:
            scan_thread = threading.Thread(name="scan_thread",
                                           target=self.scan_indexes)

            self.tasks.append(scan_thread)

        if self.concur_system_failure:
            system_failure_thread = threading.Thread(name="system_failure_thread",
                                                     target=self.schedule_system_failure)
            self.tasks.append(system_failure_thread)

        for task in self.tasks:
            task.start()

        self.tasks.append(load_doc_thread)

        self.sleep(self.test_timeout)

        self.run_tasks = False

        self.index_ops_obj.update_stop_create_index(True)
        self.kill_loader_process()
        self.sdk_loader_manager.shutdown(True)
        self.index_create_task_manager.shutdown(True)
        self.system_failure_task_manager.shutdown(True)

        for task in self.tasks:
            task.join()

        self.wait_until_indexes_online()
        self.sleep(600, "sleep for 10 mins before validation")
        self.verify_index_ops_obj()

        self.n1ql_helper.drop_all_indexes_on_keyspace()

        if self.index_ops_obj.get_errors():
            self.fail(str(self.index_ops_obj.get_errors()))


    def test_system_failure_create_drop_indexes_simple(self):
        self.test_fail = False
        self.concur_system_failure = self.input.param("concur_system_failure", False)
        self.errors = []
        self.index_create_task_manager = TaskManager(
            "index_create_task_manager")
        self.index_create_task_manager.start()
        self.system_failure_task_manager = TaskManager(
            "system_failure_detector_thread")
        self.system_failure_task_manager.start()
        self.sdk_loader_manager = TaskManager(
            "sdk_loader_manager")
        self.sdk_loader_manager.start()
        if self.num_failure_iteration:
            self.test_timeout = self.failure_timeout * len(self.index_nodes)

        self._prepare_collection_for_indexing(num_scopes=self.num_scopes, num_collections=self.num_collections)
        self.run_tasks = True

        index_create_tasks = self.create_indexes(num=self.num_pre_indexes)
        for task in index_create_tasks:
            task.result()

        load_doc_thread = threading.Thread(name="load_doc_thread",
                                           target=self.load_docs)
        load_doc_thread.start()

        self.sleep(60, "sleeping for 60 sec for index to start processing docs")

        if not self.check_if_indexes_in_dgm():
            self.log.error("indexes not in dgm even after {}".format(self.dgm_check_timeout))

        if self.concur_system_failure:
            system_failure_thread = threading.Thread(name="system_failure_thread",
                                                     target=self.induce_schedule_system_failure,
                                                     args=[self.failure_map[self.system_failure]["failure_task"]])
            system_failure_thread.start()
            self.sleep(20)
        else:
            self.induce_schedule_system_failure(self.failure_map[self.system_failure]["failure_task"])
            self.sleep(90, "sleeping for  mins for mutation processing during system failure ")

        if self.simple_create_index:
            index_create_tasks = self.create_indexes(num=1,defer_build=False,itr=300,
                                                     expected_failure=self.failure_map[self.system_failure]["expected_failure"])
            for task in index_create_tasks:
                task.result()
            self.sleep(60, "sleeping for 1 min after creation of indexes")

        if self.simple_drop_index:
            task_thread = threading.Thread(name="drop_thread",
                                           target=self.drop_indexes,
                                           args=(2, False))
            task_thread.start()
            self.sleep(15, "sleeping for 15 sec")
            self.run_tasks = False
            task_thread.join()

        if self.simple_scan_index:
            self.run_tasks = True
            task_thread = threading.Thread(name="scan_thread",
                                           target=self.scan_indexes)
            task_thread.start()
            self.sleep(30, "sleeping for 30 sec")
            self.run_tasks = False
            task_thread.join()

        if self.simple_kill_indexer:
            remote = RemoteMachineShellConnection(self.index_nodes[0])
            remote.terminate_process(process_name="indexer")
            self.sleep(60, "sleeping for 60 sec for indexer to come back")

        if self.simple_kill_memcached:
            remote = RemoteMachineShellConnection(self.data_nodes[1])
            remote.kill_memcached()
            self.sleep(60, "sleeping for 60 sec for memcached to come back")

        if self.concur_system_failure:
            system_failure_thread.join()
        else:
            self.induce_schedule_system_failure(self.failure_map[self.system_failure]["recover_task"])
        self.index_ops_obj.update_stop_create_index(True)
        self.kill_loader_process()
        self.sdk_loader_manager.shutdown(True)
        self.index_create_task_manager.shutdown(True)
        self.system_failure_task_manager.shutdown(True)

        self.wait_until_indexes_online()
        self.sleep(120, "sleep for 120 secs before validation")
        self.verify_index_ops_obj()

        self.n1ql_helper.drop_all_indexes_on_keyspace()

        if self.index_ops_obj.get_errors():
            self.fail(str(self.index_ops_obj.get_errors()))

    def test_kill_indexer_create_drop_indexes_simple(self):
        self.test_fail = False
        self.concur_system_failure = self.input.param("concur_system_failure", False)
        self.errors = []
        self.index_create_task_manager = TaskManager(
            "index_create_task_manager")
        self.index_create_task_manager.start()
        self.system_failure_task_manager = TaskManager(
            "system_failure_detector_thread")
        self.system_failure_task_manager.start()
        self.sdk_loader_manager = TaskManager(
            "sdk_loader_manager")
        self.sdk_loader_manager.start()
        if self.num_failure_iteration:
            self.test_timeout = self.failure_timeout * len(self.index_nodes)

        self._prepare_collection_for_indexing(num_scopes=self.num_scopes, num_collections=self.num_collections)
        self.run_tasks = True

        index_create_tasks = self.create_indexes(num=self.num_pre_indexes)
        for task in index_create_tasks:
            task.result()

        load_doc_thread = threading.Thread(name="load_doc_thread",
                                           target=self.load_docs)
        load_doc_thread.start()

        self.sleep(60, "sleeping for 60 sec for index to start processing docs")

        if not self.check_if_indexes_in_dgm():
            self.log.error("indexes not in dgm even after {}".format(self.dgm_check_timeout))

        index_create_tasks = self.create_indexes(itr=300, num=25)

        self.kill_index = True
        index_node = self.get_nodes_from_services_map(service_type="index")
        system_failure_thread = threading.Thread(name="kill_indexer_thread",
                                                 target=self._kill_all_processes_index_with_sleep,
                                                 args=(index_node, 1, 600))
        system_failure_thread.start()

        for task in index_create_tasks:
            task.result()

        self.kill_index = False
        self.index_ops_obj.update_stop_create_index(True)
        self.kill_loader_process()
        self.sdk_loader_manager.shutdown(True)
        self.index_create_task_manager.shutdown(True)
        self.system_failure_task_manager.shutdown(True)
        system_failure_thread.join()

        self.wait_until_indexes_online()
        self.sleep(120, "sleep for 120 secs before validation")
        self.verify_index_ops_obj()

        self.n1ql_helper.drop_all_indexes_on_keyspace()

        if self.index_ops_obj.get_errors():
            self.fail(str(self.index_ops_obj.get_errors()))

    def test_shard_json_corruption(self):
        self.test_fail = False
        self.concur_system_failure = self.input.param("concur_system_failure", False)
        self.errors = []
        self.index_create_task_manager = TaskManager(
            "index_create_task_manager")
        self.index_create_task_manager.start()
        self.system_failure_task_manager = TaskManager(
            "system_failure_detector_thread")
        self.system_failure_task_manager.start()
        self.sdk_loader_manager = TaskManager(
            "sdk_loader_manager")
        self.sdk_loader_manager.start()
        if self.num_failure_iteration:
            self.test_timeout = self.failure_timeout * len(self.index_nodes)

        self._prepare_collection_for_indexing(num_scopes=self.num_scopes, num_collections=self.num_collections)
        self.run_tasks = True

        index_create_tasks = self.create_indexes(num=self.num_pre_indexes)
        for task in index_create_tasks:
            task.result()

        load_doc_thread = threading.Thread(name="load_doc_thread",
                                           target=self.load_docs)
        load_doc_thread.start()

        self.sleep(60, "sleeping for 60 sec for index to start processing docs")

        #if not self.check_if_indexes_in_dgm():
            #self.log.error("indexes not in dgm even after {}".format(self.dgm_check_timeout))

        self.kill_loader_process()
        self.wait_for_mutation_processing(self.index_nodes)

        self.induce_schedule_system_failure(self.failure_map[self.system_failure]["failure_task"])
        self.sleep(90, "sleeping for  mins for mutation processing during system failure ")

        remote = RemoteMachineShellConnection(self.index_nodes[0])
        remote.terminate_process(process_name="indexer")
        self.sleep(60, "sleeping for 60 sec for indexer to come back")

        self.index_ops_obj.update_stop_create_index(True)
        self.sdk_loader_manager.shutdown(True)
        self.index_create_task_manager.shutdown(True)
        self.system_failure_task_manager.shutdown(True)

        self.wait_until_indexes_online()
        indexes_created = self.check_if_indexes_not_created(self.index_ops_obj.get_create_index_list())
        if indexes_created:
            self.fail(f'Indexes {indexes_created} were not dropped')

        if self.check_if_shard_exists("shard1", self.index_nodes[0]):
            self.fail('shard1 was not cleaned up on disk')

    def test_autocompaction_forestdb(self):
        self.run_tasks = True
        self.test_fail = False
        self.index_create_task_manager = TaskManager(
            "index_create_task_manager")
        self.index_create_task_manager.start()

        self._prepare_collection_for_indexing(num_scopes=self.num_scopes, num_collections=self.num_collections)

        sdk_data_loader = SDKDataLoader(start_seq_num=self.start_doc, num_ops=self.num_items_in_collection,
                                        percent_create=self.percent_create,
                                        percent_update=self.percent_update, percent_delete=self.percent_delete,
                                        all_collections=self.all_collections, timeout=self.test_timeout,
                                        json_template=self.dataset_template)

        self.data_ops_javasdk_loader_in_batches(sdk_data_loader, self.batch_size)

        create_thread = threading.Thread(name="create_thread",
                                         target=self.create_indexes,
                                         args=(self.num_of_indexes, False))
        self.tasks.append(create_thread)

        drop_thread = threading.Thread(name="drop_thread",
                                       target=self.drop_indexes,
                                       args=[self.drop_sleep])
        self.tasks.append(drop_thread)

        verify_fdb_compaction = threading.Thread(name="verify_fdb_compaction",
                                                 target=self.verify_fdb_compaction)
        self.tasks.append(verify_fdb_compaction)

        self.run_tasks = True

        for task in self.tasks:
            task.start()

        self.sleep(self.test_timeout)

        self.run_tasks = False

        for task in self.tasks:
            task.join()

        self.index_ops_obj.update_stop_create_index(True)
        self.index_create_task_manager.shutdown(True)


        if self.test_fail:
            self.fail("Auto compaction did not trigger for expected number of times")
Exemple #17
class ElasticSearchBase(object):

    def __init__(self, host, logger):
        # host is a server object exposing .ip and .port
        self.__log = logger
        self.__host = host
        self.__document = {}
        self.__mapping = {}
        self.__STATUSOK = 200
        self.__indices = []
        self.__index_types = {}
        self.__connection_url = 'http://{0}:{1}/'.format(self.__host.ip,
                                                         self.__host.port)
        self.es_queries = []
        self.task_manager = TaskManager("ES_Thread")
        self.task_manager.start()
        self.http = httplib2.Http

    def _http_request(self, api, method='GET', params='', headers=None,
                      timeout=30):
        if not headers:
            headers = {'Content-Type': 'application/json',
                       'Accept': '*/*'}
        try:
            response, content = httplib2.Http(timeout=timeout).request(api,
                                                                       method,
                                                                       params,
                                                                       headers)
            if response['status'] in ['200', '201', '202']:
                return True, content, response
            else:
                try:
                    json_parsed = json.loads(content)
                except ValueError as e:
                    json_parsed = {}
                    json_parsed["error"] = "status: {0}, content: {1}".\
                        format(response['status'], content)
                reason = "unknown"
                if "error" in json_parsed:
                    reason = json_parsed["error"]
                self.__log.error('{0} error {1} reason: {2} {3}'.format(
                    api,
                    response['status'],
                    reason,
                    content.rstrip('\n')))
                return False, content, response
        except socket.error as e:
            self.__log.error("socket error while connecting to {0} error {1} ".
                             format(api, e))
            raise ServerUnavailableException(ip=self.__host.ip)
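
    # Hedged example: issue a GET against the ES root URL via the helper
    # above; `ok` is True for 2xx responses:
    #
    #   ok, content, response = self._http_request(self.__connection_url, 'GET')
    #   if ok:
    #       info = json.loads(content)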

    def is_running(self):
        """
        Make sure ES is up and running;
        check that the service responds, if not abort the test
        """
        status, _, _ = self._http_request(self.__connection_url, 'GET')
        return status

    def delete_index(self, index_name):
        """
        Deletes index
        """
        try:
            url = self.__connection_url + index_name
            status, content, _ = self._http_request(url, 'DELETE')
        except Exception as e:
            raise e

    def delete_indices(self):
        """
        Delete all indices present
        """
        for index_name in self.__indices:
            self.delete_index(index_name)
            self.__log.info("ES index %s deleted" % index_name)

    def create_empty_index(self, index_name):
        """
        Creates an empty index, given the name
        """
        try:
            self.delete_index(index_name)
            status, content, _ = self._http_request(
                self.__connection_url + index_name,
                'PUT')
            if status:
                self.__indices.append(index_name)
        except Exception as e:
            raise Exception("Could not create ES index : %s" % e)

    def create_alias(self, name, indexes):
        """
        @name: alias name
        @indexes: list of target indexes
        """
        try:
            self.__log.info("Checking if ES alias '{0}' exists...".format(name))
            self.delete_index(name)
            alias_info = {"actions": [{"add": {"indices": indexes, "alias": name}}]}
            self.__log.info("Creating ES alias '{0}' on {1}...".format(
                name,
                indexes))
            status, content, _ = self._http_request(
                self.__connection_url + name,
                'POST',
                json.dumps(alias_info))
            if status:
                self.__log.info("ES alias '{0}' created".format(name))
                self.__indices.append(name)
        except Exception as e:
            raise Exception("Could not create ES alias : %s" % e)

    def async_load_ES(self, index_name, gen, op_type='create'):
        """
        Asynchronously run query against FTS and ES and compare result
        note: every task runs a single query
        """

        _task = ESLoadGeneratorTask(es_instance=self,
                                    index_name=index_name,
                                    generator=gen,
                                    op_type=op_type)
        self.task_manager.schedule(_task)
        return _task

    def async_bulk_load_ES(self, index_name, gen, op_type='create', batch=5000):
        _task = ESBulkLoadGeneratorTask(es_instance=self,
                                    index_name=index_name,
                                    generator=gen,
                                    op_type=op_type,
                                    batch=batch)
        self.task_manager.schedule(_task)
        return _task

    def load_bulk_data(self, filename):
        """
        Bulk load to ES from a file
        curl -s -XPOST 172.23.105.25:9200/_bulk --data-binary @req
        cat req:
        { "index" : { "_index" : "default_es_index", "_type" : "aruna", "_id" : "1" } }
        { "field1" : "value1" , "field2" : "value2"}
        { "index" : { "_index" : "default_es_index", "_type" : "aruna", "_id" : "2" } }
        { "field1" : "value1" , "field2" : "value2"}
        """
        try:
            url = self.__connection_url + "/_bulk"
            with open(filename, "rb") as f:
                data = f.read()
            status, content, _ = self._http_request(url,
                                                    'POST',
                                                    data)
            return status
        except Exception as e:
            raise e
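
    # Sketch: building a bulk request file in the format shown in the
    # docstring above before handing it to load_bulk_data (names are
    # illustrative):
    #
    #   with open("req", "w") as f:
    #       for i, doc in enumerate(docs, start=1):
    #           f.write(json.dumps({"index": {"_index": "default_es_index",
    #                                         "_type": "aruna",
    #                                         "_id": str(i)}}) + "\n")
    #           f.write(json.dumps(doc) + "\n")
    #   es.load_bulk_data("req")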

    def load_data(self, index_name, document_json, doc_type, doc_id):
        """
        index_name : name of index into which the doc is loaded
        document_json: json doc
        doc_type : type of doc. Usually the '_type' field in the doc body
        doc_id : document id
        """
        try:
            url = self.__connection_url + index_name + '/' + doc_type + '/' +\
                  doc_id
            status, content, _ = self._http_request(url,
                                                    'POST',
                                                    document_json)
        except Exception as e:
            raise e

    def update_index(self, index_name):
        """
        This procedure will refresh index when insert is performed .
        Need to call this API to take search in effect.
        :param index_name:
        :return:
        """
        try:
            status, content, _ = self._http_request(
                self.__connection_url + index_name +'/_refresh',
                'POST')
        except Exception as e:
            raise e

    def search(self, index_name, query, result_size=1000000):
        """
           This function will be used for search . based on the query
           :param index_name:
           :param query:
           :return: number of matches found, doc_ids and time taken
        """
        try:
            doc_ids = []
            url = self.__connection_url + index_name + '/_search?size='+ \
                  str(result_size)
            status, content, _ = self._http_request(
                url,
                'POST',
                json.dumps(query))
            if status:
                content = json.loads(content)
                for doc in content['hits']['hits']:
                    doc_ids.append(doc['_id'])
                return content['hits']['total'], doc_ids, content['took']
        except Exception as e:
            self.__log.error("Couldn't run query on ES: %s, reason : %s"
                             % (json.dumps(query), e))
            raise e
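
    # Hedged example: a simple term query against an index created above;
    # returns (total hits, matching doc ids, query time in ms):
    #
    #   total, doc_ids, took = es.search(
    #       "default_es_index",
    #       {"query": {"match": {"field1": "value1"}}})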

    def get_index_count(self, index_name):
        """
         Returns count of docs in the index
        """
        try:
            status, content, _ = self._http_request(
                self.__connection_url + index_name + '/_count',
                'POST')
            if status:
                return json.loads(content)['count']
        except Exception as e:
            raise e

    def get_indices(self):
        """
        Return all the indices created
        :return: List of all indices
        """
        return self.__indices
Exemple #19
0
 def __init__(self):
     self.task_manager = TaskManager("Cluster_Thread")
     self.task_manager.start()

class AutoFailoverAbortsRebalance(AutoFailoverBaseTest, BaseTestCase):
    MAX_FAIL_DETECT_TIME = 120
    ORCHESTRATOR_TIMEOUT_BUFFER = 60

    def setUp(self):
        super(AutoFailoverAbortsRebalance, self).setUp()
        self.master = self.servers[0]
        self._get_params()
        self.rest = RestConnection(self.orchestrator)
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.node_failure_task_manager = TaskManager("Nodes_failure_detector_thread")
        self.node_failure_task_manager.start()
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        self.num_buckets = self.num_buckets - 1  # minus one because the base class already creates the default bucket
        if self.num_buckets:
            BucketOperationHelper.create_multiple_buckets(self.master, self.num_replicas, node_ram_ratio * (2.0 / 3.0),
                                                          howmany=self.num_buckets)
        self.buckets = self.rest.get_buckets()
        for bucket in self.buckets:
            ready = BucketOperationHelper.wait_for_memcached(self.master, bucket.name)
            self.assertTrue(ready, "wait_for_memcached failed")
        self.initial_load_gen = BlobGenerator('auto-failover',
                                              'auto-failover-',
                                              self.value_size,
                                              end=self.num_items)
        self.update_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             end=self.update_items)
        self.delete_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             start=self.update_items,
                                             end=self.delete_items)
        self._load_all_buckets(self.servers[0], self.initial_load_gen,
                               "create", 0)
        self._async_load_all_buckets(self.orchestrator,
                                     self.update_load_gen, "update", 0)
        self._async_load_all_buckets(self.orchestrator,
                                     self.delete_load_gen, "delete", 0)

    def tearDown(self):
        super(AutoFailoverAbortsRebalance, self).tearDown()

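    # The failover action exercised by the tests below is read from the test
    # conf file and dispatched through AutoFailoverBaseTest.failover_actions.
    # A hypothetical conf entry (parameter names taken from _get_params):
    #
    #   test_failure_scenarios_during_rebalance_in_of_node_A,failover_action=stop_server,timeout=60,can_abort_rebalance=True
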
    def test_failure_scenarios_during_rebalance_in_of_node_A(self):
        # enable auto failover and canAbortRebalance
        self.enable_autofailover_and_validate()
        self.sleep(5)
        # Start rebalance in
        rebalance_task = self.cluster.async_rebalance(self.servers,
                                                      self.servers_to_add,
                                                      self.servers_to_remove)
        reached = RestHelper(self.rest).rebalance_reached(percentage=30)
        self.assertTrue(reached, "Rebalance failed or did not reach {0}%".format(30))
        try:
            # Do a fail over action - reboot, hang, kill. This is defined in the conf file
            self.failover_actions[self.failover_action](self)
            rebalance_task.result()
        except Exception as ex:
            self.log.info("Rebalance failed with : {0}".format(str(ex)))
            if "Rebalance failed. See logs for detailed reason. You can try again" in str(ex):
                self.log.info(
                    "Rebalance failed even before auto-failover of node {0} had a chance to stop it: {1}".format(
                        self.server_to_fail[0].ip, str(ex)))
            elif not RestHelper(self.rest).is_cluster_rebalanced():
                if self._auto_failover_message_present_in_logs(self.server_to_fail[0].ip):
                    self.log.info("Rebalance interrupted due to auto-failover of nodes - message was seen in logs")
                else:
                    self.fail("Rebalance interrupted message was not seen in logs")
            else:
                self.fail("Rebalance was not aborted by auto fail-over")
        # Reset auto failover settings
        self.disable_autofailover_and_validate()

    def test_failure_scenarios_during_recovery_of_node_A(self):
        self.recovery_type = self.input.param("recovery_type", 'full')
        # enable auto failover and canAbortRebalance
        self.enable_autofailover_and_validate()
        self.sleep(5)
        # do a graceful failover
        self.cluster.failover([self.master], failover_nodes=[self.servers[self.server_index_to_fail]], graceful=True)
        # wait for failover to complete
        self.wait_for_failover_or_assert(1, 500)
        # do a delta recovery
        self.rest.set_recovery_type(otpNode='ns_1@' + self.servers[self.server_index_to_fail].ip,
                                    recoveryType=self.recovery_type)
        # Start rebalance of recovered nodes
        rebalance_task = self.cluster.async_rebalance(self.servers, [], [])
        reached = RestHelper(self.rest).rebalance_reached(percentage=30)
        self.assertTrue(reached, "Rebalance failed or did not reach {0}%".format(30))
        try:
            # Do a fail over action - reboot, hang, kill. This is defined in the conf file
            self.failover_actions[self.failover_action](self)
            rebalance_task.result()
        except Exception as ex:
            self.log.info("Rebalance failed with : {0}".format(str(ex)))
            if "Rebalance failed. See logs for detailed reason. You can try again" in str(ex):
                self.log.info(
                    "Rebalance failed even before auto-failover of node {0} had a chance to stop it: {1}".format(
                        self.server_to_fail[0].ip, str(ex)))
            elif not RestHelper(self.rest).is_cluster_rebalanced():
                if self._auto_failover_message_present_in_logs(self.server_to_fail[0].ip):
                    self.log.info("Rebalance interrupted due to auto-failover of nodes - message was seen in logs")
                else:
                    self.fail("Rebalance interrupted message was not seen in logs")
            else:
                self.fail("Rebalance was not aborted by auto fail-over")
        # Reset auto failover settings
        self.disable_autofailover_and_validate()

    def test_failure_scenarios_during_rebalance_out_of_node_A(self):
        # enable auto failover and canAbortRebalance
        self.enable_autofailover_and_validate()
        self.sleep(5)
        # Start rebalance out
        rebalance_task = self.cluster.async_rebalance(self.servers,
                                                      [],
                                                      [self.servers[self.server_index_to_fail]])
        reached = RestHelper(self.rest).rebalance_reached(percentage=30)
        self.assertTrue(reached, "Rebalance failed or did not reach {0}%".format(30))
        try:
            # Do a fail over action - reboot, hang, kill. This is defined in the conf file
            self.failover_actions[self.failover_action](self)
            rebalance_task.result()
        except Exception as ex:
            self.log.info("Rebalance failed with : {0}".format(str(ex)))
            if "Rebalance failed. See logs for detailed reason. You can try again" in str(ex):
                self.log.info(
                    "Rebalance failed even before auto-failover of node {0} had a chance to stop it: {1}".format(
                        self.server_to_fail[0].ip, str(ex)))
            elif not RestHelper(self.rest).is_cluster_rebalanced():
                if self._auto_failover_message_present_in_logs(self.server_to_fail[0].ip):
                    self.log.info("Rebalance interrupted due to auto-failover of nodes - message was seen in logs")
                else:
                    self.fail("Rebalance interrupted message was not seen in logs")
            else:
                self.fail("Rebalance was not aborted by auto fail-over")
        # Reset auto failover settings
        self.disable_autofailover_and_validate()

    def test_failure_scenarios_during_rebalance_out_of_failedover_node_A(self):
        # enable auto failover and canAbortRebalance
        self.enable_autofailover_and_validate()
        # failover a node
        self.cluster.failover([self.master], failover_nodes=[self.servers[self.server_index_to_fail]], graceful=False)
        # wait for failover to complete
        self.wait_for_failover_or_assert(1, 500)
        # Start rebalance out
        rebalance_task = self.cluster.async_rebalance(self.servers,
                                                      [],
                                                      [self.servers[self.server_index_to_fail]])
        reached = RestHelper(self.rest).rebalance_reached(percentage=30)
        self.assertTrue(reached, "Rebalance failed or did not reach {0}%".format(30))
        try:
            # Do a fail over action - reboot, hang, kill. This is defined in the conf file
            self.failover_actions[self.failover_action](self)
            rebalance_task.result()
        except Exception as ex:
            self.log.info("Rebalance failed with : {0}".format(str(ex)))
            if "Rebalance failed. See logs for detailed reason. You can try again" in str(ex):
                self.fail("Rebalance failed when it was not expected to fail".format(str(ex)))
            elif not RestHelper(self.rest).is_cluster_rebalanced():
                if self._auto_failover_message_present_in_logs(self.server_to_fail[0].ip):
                    self.fail("Rebalance interrupted due to auto-failover of nodes - It was not expected")
                else:
                    self.log.info("Rebalance was not interrupted as expected")
            else:
                self.log.info("Rebalance completes successfully")
        # Reset auto failover settings
        self.disable_autofailover_and_validate()

    def test_failure_scenarios_during_rebalance_out_of_failedover_other_than_node_A(self):
        self.server_index_to_failover = self.input.param("server_index_to_failover", None)
        # enable auto failover and canAbortRebalance
        self.enable_autofailover_and_validate()
        # failover a node
        self.cluster.failover([self.master], failover_nodes=[self.servers[self.server_index_to_failover]],
                              graceful=False)
        self.sleep(5)
        # Start rebalance out
        rebalance_task = self.cluster.async_rebalance(self.servers,
                                                      [],
                                                      [self.servers[self.server_index_to_failover]])
        reached = RestHelper(self.rest).rebalance_reached(percentage=30)
        self.assertTrue(reached, "Rebalance failed or did not reach {0}%".format(30))
        try:
            # Do a fail over action - reboot, hang, kill. This is defined in the conf file
            self.failover_actions[self.failover_action](self)
            rebalance_task.result()
        except Exception as ex:
            self.log.info("Rebalance failed with : {0}".format(str(ex)))
            if "Rebalance failed. See logs for detailed reason. You can try again" in str(ex):
                self.log.info(
                    "Rebalance failed even before auto-failover of node {0} had a chance to stop it: {1}".format(
                        self.server_to_fail[0].ip, str(ex)))
            elif not RestHelper(self.rest).is_cluster_rebalanced():
                if self._auto_failover_message_present_in_logs(self.server_to_fail[0].ip):
                    self.log.info("Rebalance interrupted due to auto-failover of nodes - message was seen in logs")
                else:
                    self.fail("Rebalance interrupted message was not seen in logs")
            else:
                self.fail("Rebalance was not aborted by auto fail-over")
        # Reset auto failover settings
        self.disable_autofailover_and_validate()

    def test_failure_scenarios_during_recovery_of_node_other_than_node_A(self):
        self.server_index_to_failover = self.input.param("server_index_to_failover", None)
        self.recovery_type = self.input.param("recovery_type", 'full')
        # enable auto failover and canAbortRebalance
        self.enable_autofailover_and_validate()
        self.sleep(5)
        # do a graceful failover
        self.cluster.failover([self.master], failover_nodes=[self.servers[self.server_index_to_failover]],
                              graceful=True)
        # wait for failover to complete
        self.wait_for_failover_or_assert(1, 500)
        # do a delta recovery
        self.rest.set_recovery_type(otpNode='ns_1@' + self.servers[self.server_index_to_failover].ip,
                                    recoveryType=self.recovery_type)
        # Start rebalance of recovered nodes
        rebalance_task = self.cluster.async_rebalance(self.servers, [], [])
        reached = RestHelper(self.rest).rebalance_reached(percentage=30)
        self.assertTrue(reached, "Rebalance failed or did not reach {0}%".format(30))
        try:
            # Do a fail over action - reboot, hang, kill. This is defined in the conf file
            self.failover_actions[self.failover_action](self)
            rebalance_task.result()
        except Exception as ex:
            self.log.info("Rebalance failed with : {0}".format(str(ex)))
            if "Rebalance failed. See logs for detailed reason. You can try again" in str(ex):
                self.log.info(
                    "Rebalance failed even before auto-failover of node {0} had a chance to stop it: {1}".format(
                        self.server_to_fail[0].ip, str(ex)))
            elif not RestHelper(self.rest).is_cluster_rebalanced():
                if self._auto_failover_message_present_in_logs(self.server_to_fail[0].ip):
                    self.log.info("Rebalance interrupted due to auto-failover of nodes - message was seen in logs")
                else:
                    self.fail("Rebalance interrupted message was not seen in logs")
            else:
                self.fail("Rebalance was not aborted by auto fail-over")
        # Reset auto failover settings
        self.disable_autofailover_and_validate()

    def test_system_failure_create_drop_indexes_simple(self):
        self.test_fail = False
        self.concur_system_failure = self.input.param("concur_system_failure", False)
        self.errors = []
        self.index_create_task_manager = TaskManager(
            "index_create_task_manager")
        self.index_create_task_manager.start()
        self.system_failure_task_manager = TaskManager(
            "system_failure_detector_thread")
        self.system_failure_task_manager.start()
        self.sdk_loader_manager = TaskManager(
            "sdk_loader_manager")
        self.sdk_loader_manager.start()
        if self.num_failure_iteration:
            self.test_timeout = self.failure_timeout * len(self.index_nodes)

        self._prepare_collection_for_indexing(num_scopes=self.num_scopes, num_collections=self.num_collections)
        self.run_tasks = True

        index_create_tasks = self.create_indexes(num=self.num_pre_indexes)
        for task in index_create_tasks:
            task.result()

        load_doc_thread = threading.Thread(name="load_doc_thread",
                                           target=self.load_docs)
        load_doc_thread.start()

        self.sleep(60, "sleeping for 60 sec for index to start processing docs")

        if not self.check_if_indexes_in_dgm():
            self.log.error("indexes not in dgm even after {}".format(self.dgm_check_timeout))

        if self.concur_system_failure:
            system_failure_thread = threading.Thread(name="system_failure_thread",
                                                     target=self.induce_schedule_system_failure,
                                                     args=[self.failure_map[self.system_failure]["failure_task"]])
            system_failure_thread.start()
            self.sleep(20)
        else:
            self.induce_schedule_system_failure(self.failure_map[self.system_failure]["failure_task"])
            self.sleep(90, "sleeping for  mins for mutation processing during system failure ")

        if self.simple_create_index:
            index_create_tasks = self.create_indexes(
                num=1, defer_build=False, itr=300,
                expected_failure=self.failure_map[self.system_failure]["expected_failure"])
            for task in index_create_tasks:
                task.result()
            self.sleep(60, "sleeping for 1 min after creation of indexes")

        if self.simple_drop_index:
            task_thread = threading.Thread(name="drop_thread",
                                           target=self.drop_indexes,
                                           args=(2, False))
            task_thread.start()
            self.sleep(15, "sleeping for 15 sec")
            self.run_tasks = False
            task_thread.join()

        if self.simple_scan_index:
            self.run_tasks = True
            task_thread = threading.Thread(name="scan_thread",
                                           target=self.scan_indexes)
            task_thread.start()
            self.sleep(30, "sleeping for 10 sec")
            self.run_tasks = False
            task_thread.join()

        if self.simple_kill_indexer:
            remote = RemoteMachineShellConnection(self.index_nodes[0])
            remote.terminate_process(process_name="indexer")
            self.sleep(60, "sleeping for 60 sec for indexer to come back")

        if self.simple_kill_memcached:
            remote = RemoteMachineShellConnection(self.data_nodes[1])
            remote.kill_memcached()
            self.sleep(60, "sleeping for 60 sec for memcached to come back")

        if self.concur_system_failure:
            system_failure_thread.join()
        else:
            self.induce_schedule_system_failure(self.failure_map[self.system_failure]["recover_task"])
        self.index_ops_obj.update_stop_create_index(True)
        self.kill_loader_process()
        self.sdk_loader_manager.shutdown(True)
        self.index_create_task_manager.shutdown(True)
        self.system_failure_task_manager.shutdown(True)

        self.wait_until_indexes_online()
        self.sleep(120, "sleep for 120 secs before validation")
        self.verify_index_ops_obj()

        self.n1ql_helper.drop_all_indexes_on_keyspace()

        if self.index_ops_obj.get_errors():
            self.fail(str(self.index_ops_obj.get_errors()))

class AutoFailoverBaseTest(BaseTestCase):
    MAX_FAIL_DETECT_TIME = 120
    ORCHESTRATOR_TIMEOUT_BUFFER = 60

    def setUp(self):
        super(AutoFailoverBaseTest, self).setUp()
        self._get_params()
        self.rest = RestConnection(self.orchestrator)
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.node_failure_task_manager = TaskManager(
            "Nodes_failure_detector_thread")
        self.node_failure_task_manager.start()
        self.initial_load_gen = BlobGenerator('auto-failover',
                                              'auto-failover-',
                                              self.value_size,
                                              end=self.num_items)
        self.update_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             end=self.update_items)
        self.delete_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             start=self.update_items,
                                             end=self.delete_items)
        self._load_all_buckets(self.servers[0], self.initial_load_gen,
                               "create", 0)
        self._async_load_all_buckets(self.orchestrator,
                                     self.update_load_gen, "update", 0)
        self._async_load_all_buckets(self.orchestrator,
                                     self.delete_load_gen, "delete", 0)
        self.server_to_fail = self._servers_to_fail()
        self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                                           self.nodes_in]
        self.servers_to_remove = self.servers[self.nodes_init -
                                              self.nodes_out:self.nodes_init]
        # self.node_monitor_task = self.start_node_monitors_task()

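    # Worked example of the slice arithmetic above: with 10 servers,
    # nodes_init=4, nodes_in=2 and nodes_out=1, servers_to_add is
    # servers[4:6] (the two spare nodes) and servers_to_remove is
    # servers[3:4] (the last node currently in the cluster).
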
    def tearDown(self):
        self.log.info("============AutoFailoverBaseTest teardown============")
        self._get_params()
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.server_to_fail = self._servers_to_fail()
        self.start_couchbase_server()
        self.sleep(10)
        self.disable_firewall()
        self.rest = RestConnection(self.orchestrator)
        self.rest.reset_autofailover()
        self.disable_autofailover()
        self._cleanup_cluster()
        super(AutoFailoverBaseTest, self).tearDown()
        if hasattr(self, "node_monitor_task"):
            if self.node_monitor_task._exception:
                self.fail("{}".format(self.node_monitor_task._exception))
            self.node_monitor_task.stop = True
        self.task_manager.shutdown(force=True)

    def enable_autofailover(self):
        """
        Enable the autofailover setting with the given timeout.
        :return: True If the setting was set with the timeout, else return
        False
        """
        status = self.rest.update_autofailover_settings(True,
                                                        self.timeout)
        return status

    def disable_autofailover(self):
        """
        Disable the autofailover setting.
        :return: True If the setting was disabled, else return
        False
        """
        status = self.rest.update_autofailover_settings(False, 120)
        return status

    def enable_autofailover_and_validate(self):
        """
        Enable autofailover with the given timeout and then validate the
        settings.
        :return: Nothing
        """
        status = self.enable_autofailover()
        self.assertTrue(status, "Failed to enable autofailover_settings!")
        self.sleep(5)
        settings = self.rest.get_autofailover_settings()
        self.assertTrue(settings.enabled, "Failed to enable "
                                          "autofailover_settings!")
        self.assertEqual(self.timeout, settings.timeout,
                         "Incorrect timeout set. Expected timeout : {0} "
                         "Actual timeout set : {1}".format(self.timeout,
                                                           settings.timeout))

    def disable_autofailover_and_validate(self):
        """
        Disable autofailover setting and then validate if the setting was
        disabled.
        :return: Nothing
        """
        status = self.disable_autofailover()
        self.assertTrue(status, "Failed to change autofailover_settings!")
        settings = self.rest.get_autofailover_settings()
        self.assertFalse(settings.enabled, "Failed to disable "
                                           "autofailover_settings!")

    def start_node_monitors_task(self):
        """
        Start the node monitors task to analyze the node status monitors.
        :return: The NodeMonitorAnalyserTask.
        """
        node_monitor_task = NodeMonitorsAnalyserTask(self.orchestrator)
        self.task_manager.schedule(node_monitor_task, sleep_time=5)
        return node_monitor_task

    def enable_firewall(self):
        """
        Enable firewall on the nodes to fail in the tests.
        :return: Nothing
        """
        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(self.orchestrator,
                                            self.server_to_fail,
                                            "enable_firewall", self.timeout,
                                            self.pause_between_failover_action,
                                            self.failover_expected,
                                            self.timeout_buffer,
                                            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))
Exemple #23
0
class AutoFailoverBaseTest(BaseTestCase):
    MAX_FAIL_DETECT_TIME = 120
    ORCHESTRATOR_TIMEOUT_BUFFER = 60

    def setUp(self):
        super(AutoFailoverBaseTest, self).setUp()
        self._get_params()
        self.rest = RestConnection(self.orchestrator)
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.node_failure_task_manager = TaskManager(
            "Nodes_failure_detector_thread")
        self.node_failure_task_manager.start()
        self.initial_load_gen = BlobGenerator('auto-failover',
                                              'auto-failover-',
                                              self.value_size,
                                              end=self.num_items)
        self.update_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             end=self.update_items)
        self.delete_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             start=self.update_items,
                                             end=self.delete_items)
        self._load_all_buckets(self.servers[0], self.initial_load_gen,
                               "create", 0)
        self._async_load_all_buckets(self.orchestrator, self.update_load_gen,
                                     "update", 0)
        self._async_load_all_buckets(self.orchestrator, self.delete_load_gen,
                                     "delete", 0)
        self.server_index_to_fail = self.input.param("server_index_to_fail",
                                                     None)
        if self.server_index_to_fail is None:
            self.server_to_fail = self._servers_to_fail()
        else:
            self.server_to_fail = [self.servers[self.server_index_to_fail]]
        self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                           self.nodes_in]
        self.servers_to_remove = self.servers[self.nodes_init -
                                              self.nodes_out:self.nodes_init]

    def bareSetUp(self):
        super(AutoFailoverBaseTest, self).setUp()
        self._get_params()
        self.rest = RestConnection(self.orchestrator)
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.node_failure_task_manager = TaskManager(
            "Nodes_failure_detector_thread")
        self.node_failure_task_manager.start()
        self.initial_load_gen = BlobGenerator('auto-failover',
                                              'auto-failover-',
                                              self.value_size,
                                              end=self.num_items)
        self.update_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             end=self.update_items)
        self.delete_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             start=self.update_items,
                                             end=self.delete_items)
        self.server_to_fail = self._servers_to_fail()
        self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                           self.nodes_in]
        self.servers_to_remove = self.servers[self.nodes_init -
                                              self.nodes_out:self.nodes_init]

    def tearDown(self):
        self.log.info("============AutoFailoverBaseTest teardown============")
        self._get_params()
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.server_to_fail = self._servers_to_fail()
        self.start_couchbase_server()
        self.sleep(10)
        self.disable_firewall()
        self.rest = RestConnection(self.orchestrator)
        self.rest.reset_autofailover()
        self.disable_autofailover()
        self._cleanup_cluster()
        super(AutoFailoverBaseTest, self).tearDown()
        if hasattr(self, "node_monitor_task"):
            if self.node_monitor_task._exception:
                self.fail("{}".format(self.node_monitor_task._exception))
            self.node_monitor_task.stop = True
        self.task_manager.shutdown(force=True)

    def shuffle_nodes_between_zones_and_rebalance(self, to_remove=None):
        """
        Shuffle the nodes present in the cluster if zone > 1, then rebalance.
        Nodes are assigned to groups round-robin, i.e. with zone=2 the 1st
        node goes to Group 1, the 2nd to Group 2, the 3rd to Group 1, and so
        on.
        :param to_remove: List of nodes to be removed.
        """
        if not to_remove:
            to_remove = []
        serverinfo = self.orchestrator
        rest = RestConnection(serverinfo)
        zones = ["Group 1"]
        nodes_in_zone = {"Group 1": [serverinfo.ip]}
        # Create zones, if not existing, based on params zone in test.
        # Shuffle the nodes between zones.
        if int(self.zone) > 1:
            for i in range(1, int(self.zone)):
                a = "Group "
                zones.append(a + str(i + 1))
                if not rest.is_zone_exist(zones[i]):
                    rest.add_zone(zones[i])
                nodes_in_zone[zones[i]] = []
            # Divide the nodes between zones.
            nodes_in_cluster = [
                node.ip for node in self.get_nodes_in_cluster()
            ]
            nodes_to_remove = [node.ip for node in to_remove]
            for i in range(1, len(self.servers)):
                if self.servers[i].ip in nodes_in_cluster and self.servers[
                        i].ip not in nodes_to_remove:
                    server_group = i % int(self.zone)
                    nodes_in_zone[zones[server_group]].append(
                        self.servers[i].ip)
            # Shuffle the nodes
            for i in range(1, self.zone):
                node_in_zone = list(
                    set(nodes_in_zone[zones[i]]) -
                    set([node for node in rest.get_nodes_in_zone(zones[i])]))
                rest.shuffle_nodes_in_zones(node_in_zone, zones[0], zones[i])
        self.zones = nodes_in_zone
        otpnodes = [node.id for node in rest.node_statuses()]
        nodes_to_remove = [
            node.id for node in rest.node_statuses()
            if node.ip in [t.ip for t in to_remove]
        ]
        # Start rebalance and monitor it.
        started = rest.rebalance(otpNodes=otpnodes,
                                 ejectedNodes=nodes_to_remove)
        if started:
            result = rest.monitorRebalance()
            msg = "successfully rebalanced cluster {0}"
            self.log.info(msg.format(result))

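    # Worked example of the assignment above with zone=2: server_group =
    # i % 2, so servers at odd indices (1, 3, 5, ...) land in "Group 2" and
    # those at even indices stay in "Group 1"; the orchestrator (index 0)
    # always remains in "Group 1".
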
    def enable_autofailover(self):
        """
        Enable the autofailover setting with the given timeout.
        :return: True If the setting was set with the timeout, else return
        False
        """
        status = self.rest.update_autofailover_settings(
            True,
            self.timeout,
            self.can_abort_rebalance,
            maxCount=self.max_count,
            enableServerGroup=self.server_group_failover)
        return status

    def disable_autofailover(self):
        """
        Disable the autofailover setting.
        :return: True If the setting was disabled, else return
        False
        """
        status = self.rest.update_autofailover_settings(False, 120, False)
        return status

    def enable_autofailover_and_validate(self):
        """
        Enable autofailover with the given timeout and then validate the
        settings.
        :return: Nothing
        """
        status = self.enable_autofailover()
        self.assertTrue(status, "Failed to enable autofailover_settings!")
        self.sleep(5)
        settings = self.rest.get_autofailover_settings()
        self.assertTrue(settings.enabled, "Failed to enable "
                        "autofailover_settings!")
        self.assertEqual(
            self.timeout, settings.timeout,
            "Incorrect timeout set. Expected timeout : {0} "
            "Actual timeout set : {1}".format(self.timeout, settings.timeout))
        self.assertEqual(
            self.can_abort_rebalance, settings.can_abort_rebalance,
            "Incorrect can_abort_rebalance set. Expected can_abort_rebalance : {0} "
            "Actual can_abort_rebalance set : {1}".format(
                self.can_abort_rebalance, settings.can_abort_rebalance))

    def disable_autofailover_and_validate(self):
        """
        Disable autofailover setting and then validate if the setting was
        disabled.
        :return: Nothing
        """
        status = self.disable_autofailover()
        self.assertTrue(status, "Failed to change autofailover_settings!")
        settings = self.rest.get_autofailover_settings()
        self.assertFalse(settings.enabled, "Failed to disable "
                         "autofailover_settings!")

    def start_node_monitors_task(self):
        """
        Start the node monitors task to analyze the node status monitors.
        :return: The NodeMonitorAnalyserTask.
        """
        node_monitor_task = NodeMonitorsAnalyserTask(self.orchestrator)
        self.task_manager.schedule(node_monitor_task, sleep_time=5)
        return node_monitor_task

    def enable_firewall(self):
        """
        Enable firewall on the nodes to fail in the tests.
        :return: Nothing
        """
        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(
            self.orchestrator,
            self.server_to_fail,
            "enable_firewall",
            self.timeout,
            self.pause_between_failover_action,
            self.failover_expected,
            self.timeout_buffer,
            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))

    def disable_firewall(self):
        """
        Disable firewall on the nodes to fail in the tests
        :return: Nothing
        """
        self.time_start = time.time()
        task = AutoFailoverNodesFailureTask(self.orchestrator,
                                            self.server_to_fail,
                                            "disable_firewall", self.timeout,
                                            self.pause_between_failover_action,
                                            False, self.timeout_buffer, False)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))

    def restart_couchbase_server(self):
        """
        Restart couchbase server on the nodes to fail in the tests
        :return: Nothing
        """
        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip, node.port)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(
            self.orchestrator,
            self.server_to_fail,
            "restart_couchbase",
            self.timeout,
            self.pause_between_failover_action,
            self.failover_expected,
            self.timeout_buffer,
            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))

    def stop_couchbase_server(self):
        """
        Stop couchbase server on the nodes to fail in the tests
        :return: Nothing
        """
        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip, node.port)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(
            self.orchestrator,
            self.server_to_fail,
            "stop_couchbase",
            self.timeout,
            self.pause_between_failover_action,
            self.failover_expected,
            self.timeout_buffer,
            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))

    def start_couchbase_server(self):
        """
        Start the couchbase server on the nodes to fail in the tests
        :return: Nothing
        """
        task = AutoFailoverNodesFailureTask(self.orchestrator,
                                            self.server_to_fail,
                                            "start_couchbase", self.timeout, 0,
                                            False, self.timeout_buffer, False)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))

    def stop_restart_network(self):
        """
        Stop and restart network for said timeout period on the nodes to
        fail in the tests
        :return: Nothing
        """

        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(
            self.orchestrator,
            self.server_to_fail,
            "restart_network",
            self.timeout,
            self.pause_between_failover_action,
            self.failover_expected,
            self.timeout_buffer,
            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))

    def restart_machine(self):
        """
        Restart the nodes to fail in the tests
        :return: Nothing
        """

        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(
            self.orchestrator,
            self.server_to_fail,
            "restart_machine",
            self.timeout,
            self.pause_between_failover_action,
            self.failover_expected,
            self.timeout_buffer,
            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))
        finally:
            self.sleep(120, "Sleeping for 2 min for the machines to restart")
            for node in self.server_to_fail:
                for i in range(0, 2):
                    try:
                        shell = RemoteMachineShellConnection(node)
                        break
                    except:
                        self.log.info("Unable to connect to the host. "
                                      "Machine has not restarted")
                        self.sleep(60, "Sleep for another minute and try "
                                   "again")

    def stop_memcached(self):
        """
        Stop the memcached on the nodes to fail in the tests
        :return: Nothing
        """
        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip, 11211)
            node_down_timer_tasks.append(node_failure_timer_task)
        self.timeout_buffer += 3
        task = AutoFailoverNodesFailureTask(
            self.orchestrator,
            self.server_to_fail,
            "stop_memcached",
            self.timeout,
            self.pause_between_failover_action,
            self.failover_expected,
            self.timeout_buffer,
            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))
        finally:
            task = AutoFailoverNodesFailureTask(self.orchestrator,
                                                self.server_to_fail,
                                                "start_memcached",
                                                self.timeout,
                                                0,
                                                False,
                                                0,
                                                check_for_failover=False)
            self.task_manager.schedule(task)
            task.result()

    def split_network(self):
        """
        Split the network in the cluster. Stop network traffic from few
        nodes while allowing the traffic from rest of the cluster.
        :return: Nothing
        """
        self.time_start = time.time()
        if self.server_to_fail.__len__() < 2:
            self.fail("Need atleast 2 servers to fail")
        task = AutoFailoverNodesFailureTask(self.orchestrator,
                                            self.server_to_fail,
                                            "network_split", self.timeout,
                                            self.pause_between_failover_action,
                                            False, self.timeout_buffer)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))
        self.disable_firewall()

    def bring_back_failed_nodes_up(self):
        """
        Bring back the failed nodes.
        :return: Nothing
        """
        if self.failover_action == "firewall":
            self.disable_firewall()
        elif self.failover_action == "stop_server":
            self.start_couchbase_server()

    def _servers_to_fail(self):
        """
        Select the nodes to be failed in the tests.
        :return: Nothing
        """
        if self.failover_orchestrator:
            servers_to_fail = self.servers[0:self.num_node_failures]
        else:
            servers_to_fail = self.servers[1:self.num_node_failures + 1]
        return servers_to_fail

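    # E.g. with num_node_failures=2: servers[0:2] are failed when the
    # orchestrator itself is a failure target, otherwise servers[1:3],
    # keeping servers[0] (the orchestrator) healthy.
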
    def _get_params(self):
        """
        Initialize the test parameters.
        :return:  Nothing
        """
        self.timeout = self.input.param("timeout", 60)
        self.max_count = self.input.param("maxCount", 1)
        self.server_group_failover = self.input.param("serverGroupFailover",
                                                      False)
        self.failover_action = self.input.param("failover_action",
                                                "stop_server")
        self.failover_orchestrator = self.input.param("failover_orchestrator",
                                                      False)
        self.multiple_node_failure = self.input.param("multiple_nodes_failure",
                                                      False)
        self.num_items = self.input.param("num_items", 1000000)
        self.update_items = self.input.param("update_items", 100000)
        self.delete_items = self.input.param("delete_items", 100000)
        self.add_back_node = self.input.param("add_back_node", True)
        self.recovery_strategy = self.input.param("recovery_strategy", "delta")
        self.multi_node_failures = self.input.param("multi_node_failures",
                                                    False)
        self.can_abort_rebalance = self.input.param("can_abort_rebalance",
                                                    True)
        self.num_node_failures = self.input.param("num_node_failures", 1)
        self.services = self.input.param("services", None)
        self.zone = self.input.param("zone", 1)
        self.multi_services_node = self.input.param("multi_services_node",
                                                    False)
        self.pause_between_failover_action = self.input.param(
            "pause_between_failover_action", 0)
        self.remove_after_failover = self.input.param("remove_after_failover",
                                                      False)
        self.timeout_buffer = 120 if self.failover_orchestrator else 10
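        # Failover is not expected when the configured failures exceed what
        # the maxCount/replica settings can absorb: several nodes failing in
        # quick succession with maxCount=1, or more failures than maxCount
        # (or fewer replicas than maxCount) allows.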
        failover_not_expected = (
            self.max_count == 1 and self.num_node_failures > 1
            and self.pause_between_failover_action < self.timeout
            or self.num_replicas < 1)
        failover_not_expected = failover_not_expected or (
            1 < self.max_count < self.num_node_failures
            and self.pause_between_failover_action < self.timeout
            or self.num_replicas < self.max_count)
        self.failover_expected = not failover_not_expected
        if self.failover_action is "restart_server":
            self.num_items *= 100
        self.orchestrator = self.servers[0] if not \
            self.failover_orchestrator else self.servers[
            self.num_node_failures]

    def _cleanup_cluster(self):
        """
        Clean up the cluster. Delete all buckets on the nodes and remove
        the nodes from any cluster that has been formed.
        :return:
        """
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for node in self.servers:
            master = node
            try:
                ClusterOperationHelper.cleanup_cluster(self.servers,
                                                       master=master)
            except:
                continue

    failover_actions = {
        "firewall": enable_firewall,
        "stop_server": stop_couchbase_server,
        "restart_server": restart_couchbase_server,
        "restart_machine": restart_machine,
        "restart_network": stop_restart_network,
        "stop_memcached": stop_memcached,
        "network_split": split_network
    }
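
    # Note: the values above are plain functions captured at class-definition
    # time, so tests dispatch with an explicit instance argument, e.g.
    #   self.failover_actions[self.failover_action](self)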

    def _auto_failover_message_present_in_logs(self, ipaddress):
        return any(
            "Rebalance interrupted due to auto-failover of nodes ['ns_1@{0}']."
            .format(ipaddress) in list(d.values())[2]
            for d in self.rest.get_logs(20))

    def wait_for_failover_or_assert(self, expected_failover_count, timeout):
        time_start = time.time()
        time_max_end = time_start + timeout
        actual_failover_count = 0
        while time.time() < time_max_end:
            actual_failover_count = self.get_failover_count()
            if actual_failover_count == expected_failover_count:
                break
            time.sleep(20)
        time_end = time.time()
        self.assertTrue(
            actual_failover_count == expected_failover_count,
            "{0} nodes failed over, expected : {1}".format(
                actual_failover_count, expected_failover_count))
        self.log.info(
            "{0} nodes failed over as expected in {1} seconds".format(
                actual_failover_count, time_end - time_start))

    def get_failover_count(self):
        rest = RestConnection(self.master)
        cluster_status = rest.cluster_status()
        failover_count = 0
        # check for inactiveFailed
        for node in cluster_status['nodes']:
            if node['clusterMembership'] == "inactiveFailed":
                failover_count += 1
        return failover_count
Exemple #24
0
class Cluster(object):
    """An API for interacting with Couchbase clusters"""

    def __init__(self):
        self.task_manager = TaskManager()
        self.task_manager.start()

    def async_create_default_bucket(self, server, size, replicas=1):
        """Asynchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, 'default', replicas, size)
        self.task_manager.schedule(_task)
        return _task

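    # A minimal usage sketch of the async pattern (server assumed to be a
    # TestInputServer from the test config):
    #
    #   cluster = Cluster()
    #   task = cluster.async_create_default_bucket(server, size=256)
    #   assert task.result(60)   # block up to 60s; returns creation status
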
    def async_create_sasl_bucket(self, server, name, password, size, replicas):
        """Asynchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, password=password)
        self.task_manager.schedule(_task)
        return _task

    def async_create_standard_bucket(self, server, name, port, size, replicas):
        """Asynchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, port)
        self.task_manager.schedule(_task)
        return _task

    def async_bucket_delete(self, server, bucket='default'):
        """Asynchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            BucketDeleteTask - A task future that is a handle to the scheduled task."""
        _task = BucketDeleteTask(server, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_init_node(self, server, disabled_consistent_view=None):
        """Asynchronously initializes a node

        The task scheduled will initialize a node's username and password and will establish
        the node's memory quota to be 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            disabled_consistent_view - disable consistent view

        Returns:
            NodeInitTask - A task future that is a handle to the scheduled task."""
        _task = NodeInitializeTask(server, disabled_consistent_view)
        self.task_manager.schedule(_task)
        return _task

    def async_load_gen_docs(self, server, bucket, generator, kv_store, op_type, exp=0, flag=0, only_store_hash=False, batch_size=1, pause_secs=1, timeout_secs=5):
        if batch_size > 1:
            _task = BatchedLoadDocumentsTask(server, bucket, generator, kv_store, op_type, exp, flag, only_store_hash, batch_size, pause_secs, timeout_secs)
        else:
            _task = LoadDocumentsTask(server, bucket, generator, kv_store, op_type, exp, flag, only_store_hash)
        self.task_manager.schedule(_task)
        return _task

    def async_workload(self, server, bucket, kv_store, num_ops, create, read, update,
                       delete, exp):
        _task = WorkloadTask(server, bucket, kv_store, num_ops, create, read, update,
                             delete, exp)
        self.task_manager.schedule(_task)
        return _task

    def async_verify_data(self, server, bucket, kv_store, max_verify=None, only_store_hash=False, batch_size=1):
        if batch_size > 1:
            _task = BatchedValidateDataTask(server, bucket, kv_store, max_verify, only_store_hash, batch_size)
        else:
            _task = ValidateDataTask(server, bucket, kv_store, max_verify, only_store_hash)
        self.task_manager.schedule(_task)
        return _task

    def async_verify_revid(self, src_server, dest_server, bucket, kv_store, ops_perf):
        _task = VerifyRevIdTask(src_server, dest_server, bucket, kv_store, ops_perf)
        self.task_manager.schedule(_task)
        return _task

    def async_rebalance(self, servers, to_add, to_remove):
        """Asyncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])

        Returns:
            RebalanceTask - A task future that is a handle to the scheduled task"""
        _task = RebalanceTask(servers, to_add, to_remove)
        self.task_manager.schedule(_task)
        return _task

    def async_wait_for_stats(self, servers, bucket, param, stat, comparison, value):
        """Asynchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            StatsWaitTask - A task future that is a handle to the scheduled task"""
        _task = StatsWaitTask(servers, bucket, param, stat, comparison, value)
        self.task_manager.schedule(_task)
        return _task
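
    # A minimal usage sketch with assumed values: block until the write queues
    # of every node backing the bucket have drained.
    #   cluster.wait_for_stats(servers, "default", "", "ep_queue_size", "==", 0)
    # An empty 'param' selects the default stats group; the comparison string
    # must be one that StatsCommon.build_stat_check understands.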

    def create_default_bucket(self, server, size, replicas=1, timeout=None):
        """Synchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_default_bucket(server, size, replicas)
        return _task.result(timeout)

    def create_sasl_bucket(self, server, name, password, size, replicas, timeout=None):
        """Synchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_sasl_bucket(server, name, password, size, replicas)
        return _task.result(timeout)

    def create_standard_bucket(self, server, name, port, size, replicas, timeout=None):
        """Synchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_standard_bucket(server, name, port, size, replicas)
        return _task.result(timeout)

    def bucket_delete(self, server, bucket='default', timeout=None):
        """Synchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            boolean - Whether or not the bucket was deleted."""
        _task = self.async_bucket_delete(server, bucket)
        return _task.result(timeout)

    def init_node(self, server, async_init_node=True, disabled_consistent_view=None):
        """Synchronously initializes a node

        The scheduled task will initialize the node's username and password and will set
        the node's memory quota to 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            disabled_consistent_view - disable consistent view

        Returns:
            boolean - Whether or not the node was properly initialized."""
        _task = self.async_init_node(server, disabled_consistent_view)
        return _task.result()

    def rebalance(self, servers, to_add, to_remove, timeout=None):
        """Syncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])

        Returns:
            boolean - Whether or not the rebalance was successful"""
        _task = self.async_rebalance(servers, to_add, to_remove)
        return _task.result(timeout)

    def load_gen_docs(self, server, bucket, generator, kv_store, op_type, exp=0, timeout=None, flag=0, only_store_hash=False, batch_size=1):
        _task = self.async_load_gen_docs(server, bucket, generator, kv_store, op_type, exp, flag, only_store_hash=only_store_hash, batch_size=batch_size)
        return _task.result(timeout)

    def workload(self, server, bucket, kv_store, num_ops, create, read, update, delete, exp, timeout=None):
        _task = self.async_workload(server, bucket, kv_store, num_ops, create, read, update,
                                    delete, exp)
        return _task.result(timeout)

    def verify_data(self, server, bucket, kv_store, timeout=None):
        _task = self.async_verify_data(server, bucket, kv_store)
        return _task.result(timeout)

    def wait_for_stats(self, servers, bucket, param, stat, comparison, value, timeout=None):
        """Synchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            boolean - Whether or not the correct stats state was seen"""
        _task = self.async_wait_for_stats(servers, bucket, param, stat, comparison, value)
        return _task.result(timeout)

    def shutdown(self, force=False):
        self.task_manager.shutdown(force)

    def async_create_view(self, server, design_doc_name, view, bucket="default"):
        """Asynchronously creates a views in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            ViewCreateTask - A task future that is a handle to the scheduled task."""
        _task = ViewCreateTask(server, design_doc_name, view, bucket)
        self.task_manager.schedule(_task)
        return _task

    def create_view(self, server, design_doc_name, view, bucket="default", timeout=None):
        """Synchronously creates a views in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            string - revision number of design doc."""
        _task = self.async_create_view(server, design_doc_name, view, bucket)
        return _task.result(timeout)

    def async_delete_view(self, server, design_doc_name, view, bucket="default"):
        """Asynchronously deletes a views in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            ViewDeleteTask - A task future that is a handle to the scheduled task."""
        _task = ViewDeleteTask(server, design_doc_name, view, bucket)
        self.task_manager.schedule(_task)
        return _task

    def delete_view(self, server, design_doc_name, view, bucket="default", timeout=None):
        """Synchronously deletes a views in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            boolean - Whether or not delete view was successful."""
        _task = self.async_delete_view(server, design_doc_name, view, bucket)
        return _task.result(timeout)

    def async_query_view(self, server, design_doc_name, view_name, query,
                         expected_rows=None, bucket="default", retry_time=2):
        """Asynchronously query a views in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            ViewQueryTask - A task future that is a handle to the scheduled task."""
        _task = ViewQueryTask(server, design_doc_name, view_name, query, expected_rows, bucket, retry_time)
        self.task_manager.schedule(_task)
        return _task

    def query_view(self, server, design_doc_name, view_name, query,
                   expected_rows=None, bucket="default", retry_time=2, timeout=None):
        """Synchronously query a views in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            boolean - Whether or not the query completed with the expected results."""
        _task = self.async_query_view(server, design_doc_name, view_name, query, expected_rows, bucket, retry_time)
        return _task.result(timeout)
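
    # Hedged example (names and counts are illustrative): force a stale=false
    # query so indexing catches up, and require all 1000 loaded docs back.
    #   cluster.query_view(server, "ddoc1", "view1", {"stale": "false"},
    #                      expected_rows=1000)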

    def modify_fragmentation_config(self, server, config, bucket="default", timeout=None):
        """Synchronously modify fragmentation configuration spec

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            config - New compaction configuration (dict - see task)
            bucket - The name of the bucket the fragmentation config applies to. (String)

        Returns:
            boolean - True if config values accepted."""

        _task = ModifyFragmentationConfigTask(server, config, bucket)
        self.task_manager.schedule(_task)
        return _task.result(timeout)

    def async_monitor_active_task(self, server,
                                  type,
                                  target_value,
                                  wait_progress=100,
                                  num_iteration=100,
                                  wait_task=True):
        """Asynchronously monitor active task.

           When the active task reaches wait_progress this method will return.

        Parameters:
            server - The server on which to monitor the task. (TestInputServer)
            type - The task type: 'indexer', 'bucket_compaction' or 'view_compaction'. (String)
            target_value - The target value (for example "_design/ddoc" for indexing, bucket "default"
                for bucket_compaction or "_design/dev_view" for view_compaction). (String)
            wait_progress - The expected progress. (int)
            num_iteration - Fail the test if progress has not changed after this many iterations. (int)
            wait_task - Whether the task is expected to be found on the first attempt. (bool)

        Returns:
            MonitorActiveTask - A task future that is a handle to the scheduled task."""
        _task = MonitorActiveTask(server, type, target_value, wait_progress, num_iteration, wait_task)
        self.task_manager.schedule(_task)
        return _task

    def async_monitor_view_fragmentation(self, server,
                                         design_doc_name,
                                         fragmentation_value,
                                         bucket="default"):
        """Asynchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server on which to monitor fragmentation. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            MonitorViewFragmentationTask - A task future that is a handle to the scheduled task."""

        _task = MonitorViewFragmentationTask(server, design_doc_name,
                                             fragmentation_value, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_generate_expected_view_results(self, doc_generators, view, query):
        """Asynchronously generate expected view query results

        Parameters:
            doc_generators - Generators used for loading docs (DocumentGenerator[])
            view - The view with map function (View)
            query - Query params to filter docs from the generator. (dict)

        Returns:
            GenerateExpectedViewResultsTask - A task future that is a handle to the scheduled task."""

        _task = GenerateExpectedViewResultsTask(doc_generators, view, query)
        self.task_manager.schedule(_task)
        return _task

    def generate_expected_view_query_results(self, doc_generators, view, query, timeout=None):
        """Synchronously generate expected view query results

        Parameters:
            doc_generators - Generators used for loading docs (DocumentGenerator[])
            view - The view with map function (View)
            query - Query params to filter docs from the generator. (dict)

        Returns:
            list - A list of rows expected to be returned for given query"""

        _task = self.async_generate_expected_view_results(doc_generators, view, query)
        return _task.result(timeout)

    def async_view_query_verification(self, server, design_doc_name, view_name, query, expected_rows, num_verified_docs=20, bucket="default", query_timeout=20):
        """Asynchronously query a views in a design doc and does full verification of results

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            query - Query params being used with the query. (dict)
            expected_rows - The number of rows expected to be returned from the query (int)
            num_verified_docs - The number of docs to verify that require memcached gets (int)
            bucket - The name of the bucket containing items for this view. (String)
            query_timeout - The time to allow a query with stale=false to run. (int)

        Returns:
            ViewQueryVerificationTask - A task future that is a handle to the scheduled task."""
        _task = ViewQueryVerificationTask(server, design_doc_name, view_name, query, expected_rows, num_verified_docs, bucket, query_timeout)
        self.task_manager.schedule(_task)
        return _task

    def view_query_verification(self, server, design_doc_name, view_name, query, expected_rows, num_verified_docs=20, bucket="default", query_timeout=20, timeout=None):
        """Synchronously query a views in a design doc and does full verification of results

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            query - Query params being used with the query. (dict)
            expected_rows - The number of rows expected to be returned from the query (int)
            num_verified_docs - The number of docs to verify that require memcached gets (int)
            bucket - The name of the bucket containing items for this view. (String)
            query_timeout - The time to allow a query with stale=false to run. (int)

        Returns:
            dict - An object with keys: passed = True or False
                                        errors = reasons why verification failed """
        _task = self.async_view_query_verification(server, design_doc_name, view_name, query, expected_rows, num_verified_docs, bucket, query_timeout)
        return _task.result(timeout)

    def monitor_view_fragmentation(self, server,
                                   design_doc_name,
                                   fragmentation_value,
                                   bucket="default",
                                   timeout=None):
        """Synchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server on which to monitor fragmentation. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            boolean - True if <fragmentation_value> reached"""

        _task = self.async_monitor_view_fragmentation(server, design_doc_name,
                                                      fragmentation_value,
                                                      bucket)
        return _task.result(timeout)

    def async_compact_view(self, server, design_doc_name, bucket="default"):
        """Asynchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server on which to run the compaction. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            ViewCompactionTask - A task future that is a handle to the scheduled task."""

        _task = ViewCompactionTask(server, design_doc_name, bucket)
        self.task_manager.schedule(_task)
        return _task

    def compact_view(self, server, design_doc_name, bucket="default", timeout=None):
        """Synchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server on which to run the compaction. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            boolean - True if the file size was reduced after compaction, False if compaction succeeded but no work was done."""

        _task = self.async_compact_view(server, design_doc_name, bucket)
        return _task.result(timeout)
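
    # A typical compaction workflow, sketched with assumed names and values:
    # raise the fragmentation target, wait until the index file reaches it,
    # then trigger compaction explicitly.
    #   cluster.modify_fragmentation_config(server, config, bucket="default")
    #   cluster.monitor_view_fragmentation(server, "ddoc1", fragmentation_value=80)
    #   cluster.compact_view(server, "ddoc1")
    # Here 'config' stands for whatever dict ModifyFragmentationConfigTask
    # accepts for the view fragmentation threshold (see that task for keys).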

    def async_failover(self, servers, to_failover):
        """Asyncronously fails over nodes

        Parameters:
            servers - All servers participating in the failover ([TestInputServers])
            to_failover - All servers being failed over ([TestInputServers])

        Returns:
            FailoverTask - A task future that is a handle to the scheduled task"""
        _task = FailoverTask(servers, to_failover)
        self.task_manager.schedule(_task)
        return _task

    def failover(self, servers, to_failover, timeout=None):
        """Syncronously fails over nodes

        Parameters:
            servers - All servers participating in the failover ([TestInputServers])
            to_failover - All servers being failed over ([TestInputServers])

        Returns:
            boolean - Whether or not the failover was successful"""
        _task = self.async_failover(servers, to_failover)
        return _task.result(timeout)
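
Every async_* helper above returns a task future that has already been handed
to the TaskManager; each synchronous wrapper is just "schedule, then block on
result(timeout)". A minimal end-to-end sketch of that pattern follows; the
server objects, generator and kv_store are placeholders, not fixtures from
this suite:

    cluster = Cluster()
    cluster.create_default_bucket(master, size=256, replicas=1)
    load_task = cluster.async_load_gen_docs(master, 'default', gen, kv_store,
                                            'create')
    rebalance_task = cluster.async_rebalance(servers, [new_node], [])
    load_task.result()              # block until the load finishes
    rebalance_task.result(600)      # give the rebalance up to 10 minutes
    cluster.shutdown()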
Example #25
class ElasticSearchBase(object):
    def __init__(self, host, logger):
        # host is an object carrying the Elasticsearch node's IP address and port
        self.__log = logger
        self.__host = host
        self.__document = {}
        self.__mapping = {}
        self.__STATUSOK = 200
        self.__indices = []
        self.__index_types = {}
        self.__connection_url = 'http://{0}:{1}/'.format(
            self.__host.ip, self.__host.port)
        self.es_queries = []
        self.task_manager = TaskManager("ES_Thread")
        self.task_manager.start()
        self.http = httplib2.Http

    def _http_request(self,
                      api,
                      method='GET',
                      params='',
                      headers=None,
                      timeout=120):
        if not headers:
            headers = {'Content-Type': 'application/json', 'Accept': '*/*'}
        try:
            response, content = httplib2.Http(timeout=timeout).request(
                api, method, params, headers)
            if response['status'] in ['200', '201', '202']:
                return True, content, response
            else:
                try:
                    json_parsed = json.loads(content)
                except ValueError as e:
                    json_parsed = {}
                    json_parsed["error"] = "status: {0}, content: {1}".\
                        format(response['status'], content)
                reason = "unknown"
                if "error" in json_parsed:
                    reason = json_parsed["error"]
                self.__log.error('{0} error {1} reason: {2} {3}'.format(
                    api, response['status'], reason, content.rstrip('\n')))
                return False, content, response
        except socket.error as e:
            self.__log.error(
                "socket error while connecting to {0} error {1} ".format(
                    api, e))
            raise ServerUnavailableException(ip=self.__host.ip)
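
    # _http_request returns a (success, content, response) triple; callers use
    # the first element as the pass/fail signal. Illustrative call:
    #   ok, body, resp = self._http_request(self.__connection_url + "idx", 'PUT')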

    def restart_es(self):
        shell = RemoteMachineShellConnection(self.__host)
        es_restart_cmd = "/etc/init.d/elasticsearch restart"
        o, e = shell.execute_non_sudo_command(es_restart_cmd)
        shell.log_command_output(o, e)

        es_start = False
        for i in range(2):
            self.sleep(10)
            if self.is_running():
                es_start = True
                break
        if not es_start:
            raise Exception("Could not reach Elasticsearch server on %s" %
                            self.__host.ip)
        self.__log.info("Restarted ES server %s successfully" %
                        self.__host.ip)

    def is_running(self):
        """
         make sure ES is up and running
         check the service is running , if not abort the test
        """

        try:
            status, content, _ = self._http_request(self.__connection_url,
                                                    'GET')
            return status
        except Exception as e:
            raise e

    def delete_index(self, index_name):
        """
        Deletes index
        """
        try:
            url = self.__connection_url + index_name
            status, content, _ = self._http_request(url, 'DELETE')
        except Exception as e:
            raise e

    def delete_indices(self):
        """
        Delete all indices present
        """
        for index_name in self.__indices:
            self.delete_index(index_name)
            self.__log.info("ES index %s deleted" % index_name)

    def create_empty_index(self, index_name):
        """
        Creates an empty index, given the name
        """
        try:
            self.delete_index(index_name)
            status, content, _ = self._http_request(
                self.__connection_url + index_name, 'PUT')
            if status:
                self.__indices.append(index_name)
        except Exception as e:
            raise Exception("Could not create ES index : %s" % e)

    def create_empty_index_with_bleve_equivalent_std_analyzer(
            self, index_name):
        """
        Refer:
        https://www.elastic.co/guide/en/elasticsearch/guide/current/
        configuring-analyzers.html
        """
        try:
            self.delete_index(index_name)
            status, content, _ = self._http_request(
                self.__connection_url + index_name, 'PUT',
                json.dumps(BLEVE.STD_ANALYZER))
            if status:
                self.__indices.append(index_name)
        except Exception as e:
            raise Exception(
                "Could not create index with ES std analyzer : %s" % e)

    def create_index_mapping(self, index_name, es_mapping, fts_mapping=None):
        """
        Creates a new index with the given mapping
        """
        self.delete_index(index_name)

        if not fts_mapping:
            map = {
                "mappings": es_mapping,
                "settings": BLEVE.STD_ANALYZER['settings']
            }
        else:
            # Find the ES equivalent char_filter, token_filter and tokenizer
            es_settings = self.populate_es_settings(
                fts_mapping['params']['mapping']['analysis']['analyzers'])

            # Create an ES custom index definition
            map = {"mappings": es_mapping, "settings": es_settings['settings']}

        # Create ES index
        try:
            self.__log.info("Creating %s with mapping %s" %
                            (index_name, json.dumps(map, indent=3)))
            status, content, _ = self._http_request(
                self.__connection_url + index_name, 'PUT', json.dumps(map))
            if status:
                self.__log.info("SUCCESS: ES index created with above mapping")
            else:
                raise Exception("Could not create ES index")
        except Exception as e:
            raise Exception("Could not create ES index : %s" % e)

    def populate_es_settings(self, fts_custom_analyzers_def):
        """
        Populates the custom analyzer definition of the ES Index Definition.
        Refers to the FTS Custom Analyzers definition and creates an
            equivalent definition for each ES custom analyzer
        :param fts_custom_analyzers_def: FTS Custom Analyzer Definition
        :return:
        """

        analyzer_map = {}
        for customAnalyzerName, analyzer_def in fts_custom_analyzers_def.items():
            fts_char_filters = analyzer_def["char_filters"]
            fts_tokenizer = analyzer_def["tokenizer"]
            fts_token_filters = analyzer_def["token_filters"]

            analyzer_map[customAnalyzerName] = {"char_filter": [],
                                                "filter": [],
                                                "tokenizer": ""}

            for fts_char_filter in fts_char_filters:
                analyzer_map[customAnalyzerName]['char_filter'].append(
                    BLEVE.FTS_ES_ANALYZER_MAPPING['char_filters'][fts_char_filter])

            analyzer_map[customAnalyzerName]['tokenizer'] = \
                BLEVE.FTS_ES_ANALYZER_MAPPING['tokenizers'][fts_tokenizer]

            for fts_token_filter in fts_token_filters:
                analyzer_map[customAnalyzerName]['filter'].append(
                    BLEVE.FTS_ES_ANALYZER_MAPPING['token_filters'][fts_token_filter])

        analyzer = BLEVE.CUSTOM_ANALYZER
        analyzer['settings']['analysis']['analyzer'] = analyzer_map
        return analyzer
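
    # Shape sketch with a hypothetical analyzer: an FTS definition such as
    #   {"myAnalyzer": {"char_filters": ["html"], "tokenizer": "unicode",
    #                   "token_filters": ["to_lower"]}}
    # is rewritten, entry by entry through BLEVE.FTS_ES_ANALYZER_MAPPING, into
    #   {"myAnalyzer": {"char_filter": [...], "tokenizer": "...", "filter": [...]}}
    # and placed under settings.analysis.analyzer of BLEVE.CUSTOM_ANALYZER.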

    def create_alias(self, name, indexes):
        """
        @name: alias name
        @indexes: list of target indexes
        """
        try:
            self.__log.info(
                "Checking if ES alias '{0}' exists...".format(name))
            self.delete_index(name)
            alias_info = {"actions": []}
            for index in indexes:
                alias_info['actions'].append(
                    {"add": {
                        "index": index,
                        "alias": name
                    }})
            self.__log.info("Creating ES alias '{0}' on {1}...".format(
                name, indexes))
            status, content, _ = self._http_request(
                self.__connection_url + "_aliases", 'POST',
                json.dumps(alias_info))
            if status:
                self.__log.info("ES alias '{0}' created".format(name))
                self.__indices.append(name)
        except Exception as ex:
            raise Exception("Could not create ES alias : %s" % ex)

    def async_load_ES(self, index_name, gen, op_type='create'):
        """
        Asynchronously run query against FTS and ES and compare result
        note: every task runs a single query
        """

        _task = ESLoadGeneratorTask(es_instance=self,
                                    index_name=index_name,
                                    generator=gen,
                                    op_type=op_type)
        self.task_manager.schedule(_task)
        return _task

    def async_bulk_load_ES(self,
                           index_name,
                           gen,
                           op_type='create',
                           batch=5000):
        _task = ESBulkLoadGeneratorTask(es_instance=self,
                                        index_name=index_name,
                                        generator=gen,
                                        op_type=op_type,
                                        batch=batch)
        self.task_manager.schedule(_task)
        return _task

    def load_bulk_data(self, filename):
        """
        Bulk load to ES from a file
        curl -s -XPOST 172.23.105.25:9200/_bulk --data-binary @req
        cat req:
        { "index" : { "_index" : "default_es_index", "_type" : "aruna", "_id" : "1" } }
        { "field1" : "value1" , "field2" : "value2"}
        { "index" : { "_index" : "default_es_index", "_type" : "aruna", "_id" : "2" } }
        { "field1" : "value1" , "field2" : "value2"}
        """
        try:
            url = self.__connection_url + "_bulk"
            data = open(filename, "rb").read()
            status, content, _ = self._http_request(url, 'POST', data)
            return status
        except Exception as e:
            raise e

    def load_data(self, index_name, document_json, doc_type, doc_id):
        """
        index_name : name of index into which the doc is loaded
        document_json: json doc
        doc_type : type of doc. Usually the '_type' field in the doc body
        doc_id : document id
        """
        try:
            url = self.__connection_url + index_name + '/' + doc_type + '/' +\
                  doc_id
            status, content, _ = self._http_request(url, 'POST', document_json)
        except Exception as e:
            raise e

    def update_index(self, index_name):
        """
        Refreshes the index after inserts; must be called for new documents
        to become searchable.
        :param index_name:
        :return:
        """
        try:
            status, content, _ = self._http_request(
                self.__connection_url + index_name + '/_refresh', 'POST')
        except Exception as e:
            raise e

    def search(self, index_name, query, result_size=1000000):
        """
           Runs the given query against the index.
           :param index_name:
           :param query:
           :return: number of matches found, doc_ids and time taken
        """
        try:
            doc_ids = []
            self.__log.info("ES query '{0}' ".format(query))
            url = self.__connection_url + index_name + '/_search?size='+ \
                  str(result_size)
            status, content, _ = self._http_request(url, 'POST',
                                                    json.dumps(query))
            if status:
                content = json.loads(content)
                for doc in content['hits']['hits']:
                    doc_ids.append(doc['_id'])
                return content['hits']['total'], doc_ids, content['took']
        except Exception as e:
            self.__log.error("Couldn't run query on ES: %s, reason : %s" %
                             (json.dumps(query), e))
            raise e
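
    # Usage sketch (the query body is illustrative; any ES query DSL works):
    #   total, doc_ids, took = es.search("default_es_index",
    #                                    {"query": {"match_all": {}}})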

    def get_index_count(self, index_name):
        """
         Returns count of docs in the index
        """
        try:
            status, content, _ = self._http_request(
                self.__connection_url + index_name + '/_count', 'POST')
            if status:
                return json.loads(content)['count']
        except Exception as e:
            raise e

    def get_indices(self):
        """
        Return all the indices created
        :return: List of all indices
        """
        return self.__indices

    def sleep(self, timeout=1, message=""):
        self.__log.info("sleep for {0} secs. {1} ...".format(timeout, message))
        time.sleep(timeout)
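
A short driver for this helper, with the host and logger left as assumptions
(any host object exposing .ip and .port will do):

    es = ElasticSearchBase(es_host, logger)
    if not es.is_running():
        es.restart_es()
    es.create_empty_index("default_es_index")
    es.load_data("default_es_index", json.dumps({"field1": "value1"}),
                 "aruna", "1")
    es.update_index("default_es_index")   # refresh so the doc is searchable
    print(es.get_index_count("default_es_index"))
    es.delete_indices()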
Example #26
class ElasticSearchBase(object):

    def __init__(self, host, logger):
        # host is an object carrying the Elasticsearch node's IP address and port
        self.__log = logger
        self.__host = host
        self.__document = {}
        self.__mapping = {}
        self.__STATUSOK = 200
        self.__indices = []
        self.__index_types = {}
        self.__connection_url = 'http://{0}:{1}/'.format(self.__host.ip,
                                                        self.__host.port)
        self.es_queries = []
        self.task_manager = TaskManager("ES_Thread")
        self.task_manager.start()
        self.http = httplib2.Http

    def _http_request(self, api, method='GET', params='', headers=None,
                      timeout=30):
        if not headers:
            headers = {'Content-Type': 'application/json',
                       'Accept': '*/*'}
        try:
            response, content = httplib2.Http(timeout=timeout).request(api,
                                                                       method,
                                                                       params,
                                                                       headers)
            if response['status'] in ['200', '201', '202']:
                return True, content, response
            else:
                try:
                    json_parsed = json.loads(content)
                except ValueError as e:
                    json_parsed = {}
                    json_parsed["error"] = "status: {0}, content: {1}".\
                        format(response['status'], content)
                reason = "unknown"
                if "error" in json_parsed:
                    reason = json_parsed["error"]
                self.__log.error('{0} error {1} reason: {2} {3}'.format(
                    api,
                    response['status'],
                    reason,
                    content.rstrip('\n')))
                return False, content, response
        except socket.error as e:
            self.__log.error("socket error while connecting to {0} error {1} ".
                             format(api, e))
            raise ServerUnavailableException(ip=self.__host.ip)

    def is_running(self):
        """
         make sure ES is up and running
         check the service is running , if not abort the test
        """

        try:
            status, content, _ = self._http_request(
                self.__connection_url,
                'GET')
            return status
        except Exception as e:
            raise e

    def delete_index(self, index_name):
        """
        Deletes index
        """
        try:
            url = self.__connection_url + index_name
            status, content, _ = self._http_request(url, 'DELETE')
        except Exception as e:
            raise e

    def delete_indices(self):
        """
        Delete all indices present
        """
        for index_name in self.__indices:
            self.delete_index(index_name)
            self.__log.info("ES index %s deleted" % index_name)

    def create_empty_index(self, index_name):
        """
        Creates an empty index, given the name
        """
        try:
            self.delete_index(index_name)
            status, content, _ = self._http_request(
                self.__connection_url + index_name,
                'PUT')
            if status:
                self.__indices.append(index_name)
        except Exception as e:
            raise Exception("Could not create ES index : %s" % e)

    def create_empty_index_with_bleve_equivalent_std_analyzer(self, index_name):
        """
        Refer:
        https://www.elastic.co/guide/en/elasticsearch/guide/current/
        configuring-analyzers.html
        """
        try:
            self.delete_index(index_name)
            status, content, _ = self._http_request(
                self.__connection_url + index_name,
                'PUT', json.dumps(BLEVE.STD_ANALYZER))
            if status:
                self.__indices.append(index_name)
        except Exception as e:
            raise Exception("Could not create index with ES std analyzer : %s"
                            % e)

    def create_index_mapping(self, index_name, es_mapping, fts_mapping=None):
        """
        Creates a new index with the given mapping
        """
        self.delete_index(index_name)

        if not fts_mapping:
            map = {"mappings": es_mapping, "settings": BLEVE.STD_ANALYZER['settings']}
        else:
            # Find the ES equivalent char_filter, token_filter and tokenizer
            es_settings = self.populate_es_settings(fts_mapping['params']
                                                    ['mapping']['analysis']['analyzers'])

            # Create an ES custom index definition
            map = {"mappings": es_mapping, "settings": es_settings['settings']}

        # Create ES index
        try:
            self.__log.info("Creating %s with mapping %s"
                            % (index_name, json.dumps(map, indent=3)))
            status, content, _ = self._http_request(
                self.__connection_url + index_name,
                'PUT',
                json.dumps(map))
            if status:
                self.__log.info("SUCCESS: ES index created with above mapping")
            else:
                raise Exception("Could not create ES index")
        except Exception as e:
            raise Exception("Could not create ES index : %s" % e)

    def populate_es_settings(self, fts_custom_analyzers_def):
        """
        Populates the custom analyzer definition of the ES Index Definition.
        Refers to the FTS Custom Analyzers definition and creates an
            equivalent definition for each ES custom analyzer
        :param fts_custom_analyzers_def: FTS Custom Analyzer Definition
        :return:
        """

        num_custom_analyzers = len(fts_custom_analyzers_def)
        n = 1
        analyzer_map = {}
        while n <= num_custom_analyzers:
            customAnalyzerName = fts_custom_analyzers_def.keys()[n-1]
            fts_char_filters = fts_custom_analyzers_def[customAnalyzerName]["char_filters"]
            fts_tokenizer = fts_custom_analyzers_def[customAnalyzerName]["tokenizer"]
            fts_token_filters = fts_custom_analyzers_def[customAnalyzerName]["token_filters"]

            analyzer_map[customAnalyzerName] = {}
            analyzer_map[customAnalyzerName]["char_filter"] = []
            analyzer_map[customAnalyzerName]["filter"] = []
            analyzer_map[customAnalyzerName]["tokenizer"] = ""

            for fts_char_filter in fts_char_filters:
                analyzer_map[customAnalyzerName]['char_filter'].append( \
                    BLEVE.FTS_ES_ANALYZER_MAPPING['char_filters'][fts_char_filter])

            analyzer_map[customAnalyzerName]['tokenizer'] = \
                BLEVE.FTS_ES_ANALYZER_MAPPING['tokenizers'][fts_tokenizer]

            for fts_token_filter in fts_token_filters:
                analyzer_map[customAnalyzerName]['filter'].append( \
                    BLEVE.FTS_ES_ANALYZER_MAPPING['token_filters'][fts_token_filter])

            n += 1

        analyzer = BLEVE.CUSTOM_ANALYZER
        analyzer['settings']['analysis']['analyzer'] = analyzer_map
        return analyzer

    def create_alias(self, name, indexes):
        """
        @name: alias name
        @indexes: list of target indexes
        """
        try:
            self.__log.info("Checking if ES alias '{0}' exists...".format(name))
            self.delete_index(name)
            alias_info = {"actions": []}
            for index in indexes:
                alias_info['actions'].append({"add": {"index": index,
                                                      "alias": name}})
            self.__log.info("Creating ES alias '{0}' on {1}...".format(
                name,
                indexes))
            status, content, _ = self._http_request(
                self.__connection_url + "_aliases",
                'POST',
                json.dumps(alias_info))
            if status:
                self.__log.info("ES alias '{0}' created".format(name))
                self.__indices.append(name)
        except Exception as ex:
            raise Exception("Could not create ES alias : %s" % ex)

    def async_load_ES(self, index_name, gen, op_type='create'):
        """
        Asynchronously run query against FTS and ES and compare result
        note: every task runs a single query
        """

        _task = ESLoadGeneratorTask(es_instance=self,
                                    index_name=index_name,
                                    generator=gen,
                                    op_type=op_type)
        self.task_manager.schedule(_task)
        return _task

    def async_bulk_load_ES(self, index_name, gen, op_type='create', batch=5000):
        _task = ESBulkLoadGeneratorTask(es_instance=self,
                                        index_name=index_name,
                                        generator=gen,
                                        op_type=op_type,
                                        batch=batch)
        self.task_manager.schedule(_task)
        return _task

    def load_bulk_data(self, filename):
        """
        Bulk load to ES from a file
        curl -s -XPOST 172.23.105.25:9200/_bulk --data-binary @req
        cat req:
        { "index" : { "_index" : "default_es_index", "_type" : "aruna", "_id" : "1" } }
        { "field1" : "value1" , "field2" : "value2"}
        { "index" : { "_index" : "default_es_index", "_type" : "aruna", "_id" : "2" } }
        { "field1" : "value1" , "field2" : "value2"}
        """
        try:
            url = self.__connection_url + "_bulk"
            data = open(filename, "rb").read()
            status, content, _ = self._http_request(url,
                                                    'POST',
                                                    data)
            return status
        except Exception as e:
            raise e

    def load_data(self, index_name, document_json, doc_type, doc_id):
        """
        index_name : name of index into which the doc is loaded
        document_json: json doc
        doc_type : type of doc. Usually the '_type' field in the doc body
        doc_id : document id
        """
        try:
            url = self.__connection_url + index_name + '/' + doc_type + '/' +\
                  doc_id
            status, content, _ = self._http_request(url,
                                                    'POST',
                                                    document_json)
        except Exception as e:
            raise e

    def update_index(self, index_name):
        """
        Refreshes the index after inserts; must be called for new documents
        to become searchable.
        :param index_name:
        :return:
        """
        try:
            status, content, _ = self._http_request(
                self.__connection_url + index_name +'/_refresh',
                'POST')
        except Exception as e:
            raise e

    def search(self, index_name, query, result_size=1000000):
        """
           Runs the given query against the index.
           :param index_name:
           :param query:
           :return: number of matches found, doc_ids and time taken
        """
        try:
            doc_ids = []
            url = self.__connection_url + index_name + '/_search?size='+ \
                  str(result_size)
            status, content, _ = self._http_request(
                url,
                'POST',
                json.dumps(query))
            if status:
                content = json.loads(content)
                for doc in content['hits']['hits']:
                    doc_ids.append(doc['_id'])
                return content['hits']['total'], doc_ids, content['took']
        except Exception as e:
            self.__log.error("Couldn't run query on ES: %s, reason : %s"
                             % (json.dumps(query), e))
            raise e

    def get_index_count(self, index_name):
        """
         Returns count of docs in the index
        """
        try:
            status, content, _ = self._http_request(
                self.__connection_url + index_name + '/_count',
                'POST')
            if status:
                return json.loads(content)['count']
        except Exception as e:
            raise e

    def get_indices(self):
        """
        Return all the indices created
        :return: List of all indices
        """
        return self.__indices
Example #27
class Cluster(object):
    """An API for interacting with Couchbase clusters"""

    def __init__(self):
        self.task_manager = TaskManager("Cluster_Thread")
        self.task_manager.start()

    def async_create_default_bucket(self, server, size, replicas=1, enable_replica_index=1, eviction_policy='valueOnly'):
        """Asynchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""

        _task = BucketCreateTask(server, 'default', replicas, size,
                                 enable_replica_index=enable_replica_index, eviction_policy=eviction_policy)
        self.task_manager.schedule(_task)
        return _task

    def async_create_sasl_bucket(self, server, name, password, size, replicas, enable_replica_index=1, eviction_policy='valueOnly'):
        """Asynchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, password=password,
                                 enable_replica_index=enable_replica_index, eviction_policy=eviction_policy)
        self.task_manager.schedule(_task)
        return _task

    def async_create_standard_bucket(self, server, name, port, size, replicas, enable_replica_index=1, eviction_policy='valueOnly'):
        """Asynchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, port,
                                 enable_replica_index=enable_replica_index, eviction_policy=eviction_policy)
        self.task_manager.schedule(_task)
        return _task

    def async_create_memcached_bucket(self, server, name, port, size, replicas):
        """Asynchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, port, bucket_type="memcached")
        self.task_manager.schedule(_task)
        return _task

    def async_bucket_delete(self, server, bucket='default'):
        """Asynchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            BucketDeleteTask - A task future that is a handle to the scheduled task."""
        _task = BucketDeleteTask(server, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_init_node(self, server, disabled_consistent_view=None,
                        rebalanceIndexWaitingDisabled=None, rebalanceIndexPausingDisabled=None,
                        maxParallelIndexers=None, maxParallelReplicaIndexers=None, port=None,
                        quota_percent=None):
        """Asynchronously initializes a node

        The scheduled task will initialize the node's username and password and will set
        the node's memory quota to 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            disabled_consistent_view - disable consistent view
            rebalanceIndexWaitingDisabled - index waiting during rebalance(Boolean)
            rebalanceIndexPausingDisabled - index pausing during rebalance(Boolean)
            maxParallelIndexers - max parallel indexers threads(Int)
            maxParallelReplicaIndexers - max parallel replica indexers threads(int)
            port - port to initialize cluster
            quota_percent - percent of memory to initialize
        Returns:
            NodeInitTask - A task future that is a handle to the scheduled task."""
        _task = NodeInitializeTask(server, disabled_consistent_view, rebalanceIndexWaitingDisabled,
                          rebalanceIndexPausingDisabled, maxParallelIndexers, maxParallelReplicaIndexers,
                          port, quota_percent)
        self.task_manager.schedule(_task)
        return _task

    def async_load_gen_docs(self, server, bucket, generator, kv_store, op_type, exp=0, flag=0, only_store_hash=True,
                            batch_size=1, pause_secs=1, timeout_secs=5, proxy_client=None):
        if batch_size > 1:
            _task = BatchedLoadDocumentsTask(server, bucket, generator, kv_store, op_type, exp, flag, only_store_hash, batch_size, pause_secs, timeout_secs)
        else:
            if isinstance(generator, list):
                _task = LoadDocumentsGeneratorsTask(server, bucket, generator, kv_store, op_type, exp, flag, only_store_hash)
            else:
                _task = LoadDocumentsTask(server, bucket, generator, kv_store, op_type, exp, flag, only_store_hash, proxy_client)
        self.task_manager.schedule(_task)
        return _task

    def async_workload(self, server, bucket, kv_store, num_ops, create, read, update,
                       delete, exp):
        _task = WorkloadTask(server, bucket, kv_store, num_ops, create, read, update,
                             delete, exp)
        self.task_manager.schedule(_task)
        return _task

    def async_verify_data(self, server, bucket, kv_store, max_verify=None,
                          only_store_hash=True, batch_size=1, replica_to_read=None, timeout_sec=5):
        if batch_size > 1:
            _task = BatchedValidateDataTask(server, bucket, kv_store, max_verify, only_store_hash, batch_size, timeout_sec)
        else:
            _task = ValidateDataTask(server, bucket, kv_store, max_verify, only_store_hash, replica_to_read)
        self.task_manager.schedule(_task)
        return _task

    def async_verify_revid(self, src_server, dest_server, bucket, kv_store, ops_perf):
        _task = VerifyRevIdTask(src_server, dest_server, bucket, kv_store, ops_perf)
        self.task_manager.schedule(_task)
        return _task

    def async_rebalance(self, servers, to_add, to_remove, use_hostnames=False):
        """Asyncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])
            use_hostnames - True if nodes should be added using hostnames (Boolean)

        Returns:
            RebalanceTask - A task future that is a handle to the scheduled task"""
        _task = RebalanceTask(servers, to_add, to_remove, use_hostnames=use_hostnames)
        self.task_manager.schedule(_task)
        return _task

    def async_wait_for_stats(self, servers, bucket, param, stat, comparison, value):
        """Asynchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            StatsWaitTask - A task future that is a handle to the scheduled task"""
        _task = StatsWaitTask(servers, bucket, param, stat, comparison, value)
        self.task_manager.schedule(_task)
        return _task
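    # A hedged sketch: block until the summed curr_items across two nodes
    # reaches 10000; the "" param and "==" comparison strings mirror how
    # StatsWaitTask is typically driven, but the fixtures are assumptions:
    #
    #     task = cluster.async_wait_for_stats([node_a, node_b], "default",
    #                                         "", "curr_items", "==", 10000)
    #     task.result(120)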

    def create_default_bucket(self, server, size, replicas=1, timeout=600,
                              enable_replica_index=1, eviction_policy='valueOnly'):
        """Synchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)
            enable_replica_index - Whether or not to index replicas. (int)
            eviction_policy - The eviction policy for the bucket, e.g. 'valueOnly'. (String)

        Returns:
            boolean - Whether or not the bucket was created."""

        _task = self.async_create_default_bucket(server, size, replicas,
                                                 enable_replica_index=enable_replica_index, eviction_policy=eviction_policy)
        return _task.result(timeout)

    def create_sasl_bucket(self, server, name, password, size, replicas, timeout=None):
        """Synchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_sasl_bucket(server, name, password, size,
                                              replicas)
        return _task.result(timeout)

    def create_standard_bucket(self, server, name, port, size, replicas, timeout=None):
        """Synchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_standard_bucket(server, name, port, size, replicas)
        return _task.result(timeout)

    def bucket_delete(self, server, bucket='default', timeout=None):
        """Synchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            boolean - Whether or not the bucket was deleted."""
        _task = self.async_bucket_delete(server, bucket)
        return _task.result(timeout)

    def init_node(self, server, async_init_node=True, disabled_consistent_view=None):
        """Synchronously initializes a node

        The task scheduled will initialize the node's username and password and will
        set the node's memory quota to 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            disabled_consistent_view - disable consistent view

        Returns:
            boolean - Whether or not the node was properly initialized."""
        _task = self.async_init_node(server, async_init_node, disabled_consistent_view)
        return _task.result()

    def rebalance(self, servers, to_add, to_remove, timeout=None, use_hostnames=False):
        """Syncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])
            use_hostnames - True if nodes should be added using their hostnames (Boolean)

        Returns:
            boolean - Whether or not the rebalance was successful"""
        _task = self.async_rebalance(servers, to_add, to_remove, use_hostnames)
        return _task.result(timeout)

    def load_gen_docs(self, server, bucket, generator, kv_store, op_type, exp=0, timeout=None,
                      flag=0, only_store_hash=True, batch_size=1, proxy_client=None):
        _task = self.async_load_gen_docs(server, bucket, generator, kv_store, op_type, exp, flag,
                                         only_store_hash=only_store_hash, batch_size=batch_size, proxy_client=proxy_client)
        return _task.result(timeout)

    def workload(self, server, bucket, kv_store, num_ops, create, read, update, delete, exp, timeout=None):
        _task = self.async_workload(server, bucket, kv_store, num_ops, create, read, update,
                                    delete, exp)
        return _task.result(timeout)

    def verify_data(self, server, bucket, kv_store, timeout=None):
        _task = self.async_verify_data(server, bucket, kv_store)
        return _task.result(timeout)

    def wait_for_stats(self, servers, bucket, param, stat, comparison, value, timeout=None):
        """Synchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            boolean - Whether or not the correct stats state was seen"""
        _task = self.async_wait_for_stats(servers, bucket, param, stat, comparison, value)
        return _task.result(timeout)

    def shutdown(self, force=False):
        self.task_manager.shutdown(force)

    def async_create_view(self, server, design_doc_name, view, bucket="default", with_query=True,
                          check_replication=False, ddoc_options=None):
        """Asynchronously creates a views in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String) or (Bucket)
            with_query - Wait for indexing and query the view after creation (Boolean)
            check_replication - Should the test check replication or not (Boolean)
            ddoc_options - DDoc options to define automatic index building (minUpdateChanges, updateInterval ...) (Dict)
        Returns:
            ViewCreateTask - A task future that is a handle to the scheduled task."""
        _task = ViewCreateTask(server, design_doc_name, view, bucket, with_query, check_replication, ddoc_options)
        self.task_manager.schedule(_task)
        return _task
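    # Hypothetical usage; the View constructor arguments are assumptions
    # following the view-document conventions used elsewhere in this suite:
    #
    #     view = View("by_type", "function (doc) { emit(doc.type, null); }")
    #     task = cluster.async_create_view(master, "ddoc1", view, "default")
    #     rev = task.result(60)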

    def create_view(self, server, design_doc_name, view, bucket="default", timeout=None, with_query=True, check_replication=False):
        """Synchronously creates a views in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String) or (Bucket)
            with_query - Wait for indexing and query the view after creation (Boolean)
            check_replication - Should the test check replication or not (Boolean)

        Returns:
            string - revision number of design doc."""
        _task = self.async_create_view(server, design_doc_name, view, bucket, with_query, check_replication)
        return _task.result(timeout)

    def async_delete_view(self, server, design_doc_name, view, bucket="default"):
        """Asynchronously deletes a views in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String) or (Bucket)

        Returns:
            ViewDeleteTask - A task future that is a handle to the scheduled task."""
        _task = ViewDeleteTask(server, design_doc_name, view, bucket)
        self.task_manager.schedule(_task)
        return _task

    def delete_view(self, server, design_doc_name, view, bucket="default", timeout=None):
        """Synchronously deletes a views in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String) or (Bucket)

        Returns:
            boolean - Whether or not delete view was successful."""
        _task = self.async_delete_view(server, design_doc_name, view, bucket)
        return _task.result(timeout)


    def async_query_view(self, server, design_doc_name, view_name, query,
                         expected_rows=None, bucket="default", retry_time=2):
        """Asynchronously query a views in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            ViewQueryTask - A task future that is a handle to the scheduled task."""
        _task = ViewQueryTask(server, design_doc_name, view_name, query, expected_rows, bucket, retry_time)
        self.task_manager.schedule(_task)
        return _task

    def query_view(self, server, design_doc_name, view_name, query,
                   expected_rows=None, bucket="default", retry_time=2, timeout=None):
        """Synchronously query a views in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            boolean - Whether or not the query returned the expected results."""
        _task = self.async_query_view(server, design_doc_name, view_name, query, expected_rows, bucket, retry_time)
        return _task.result(timeout)


    def modify_fragmentation_config(self, server, config, bucket="default", timeout=None):
        """Synchronously modify fragmentation configuration spec

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            config - New compaction configuration (dict - see task)
            bucket - The name of the bucket fragmentation config applies to. (String)

        Returns:
            boolean - True if config values accepted."""

        _task = ModifyFragmentationConfigTask(server, config, bucket)
        self.task_manager.schedule(_task)
        return _task.result(timeout)

    def async_monitor_active_task(self, servers,
                                  type_task,
                                  target_value,
                                  wait_progress=100,
                                  num_iteration=100,
                                  wait_task=True):
        """Asynchronously monitor active task.

           Returns when the active task reaches wait_progress.

        Parameters:
            servers - the server, or list of servers, to monitor. (TestInputServer or list)
            type_task - task type ('indexer', 'bucket_compaction', 'view_compaction') (String)
            target_value - target value (for example "_design/ddoc" for indexing, bucket "default"
                for bucket_compaction or "_design/dev_view" for view_compaction) (String)
            wait_progress - expected progress (int)
            num_iteration - fail the test if progress does not change within this many iterations (int)
            wait_task - expect to find the task on the first attempt (bool)

        Returns:
            list of MonitorActiveTask - task futures that are handles to the scheduled tasks."""
        _tasks = []
        if not isinstance(servers, list):
            servers = [servers]
        for server in servers:
            _task = MonitorActiveTask(server, type_task, target_value, wait_progress, num_iteration, wait_task)
            self.task_manager.schedule(_task)
            _tasks.append(_task)
        return _tasks
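    # Sketch: a single server is accepted and wrapped in a list, so both
    # calls below are equivalent (fixtures are illustrative):
    #
    #     tasks = cluster.async_monitor_active_task(master, "bucket_compaction",
    #                                               "default")
    #     tasks = cluster.async_monitor_active_task([master], "bucket_compaction",
    #                                               "default")
    #     for t in tasks:
    #         t.result()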

    def async_monitor_view_fragmentation(self, server,
                                         design_doc_name,
                                         fragmentation_value,
                                         bucket="default"):
        """Asynchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            MonitorViewFragmentationTask - A task future that is a handle to the scheduled task."""

        _task = MonitorViewFragmentationTask(server, design_doc_name,
                                             fragmentation_value, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_generate_expected_view_results(self, doc_generators, view, query, type_query="view"):
        """Asynchronously generate expected view query results

        Parameters:
            doc_generators - Generators used for loading docs (DocumentGenerator[])
            view - The view with map function (View)
            query - Query params to filter docs from the generator. (dict)
            type_query - type of query: "view" or "all_doc" (String)

        Returns:
            GenerateExpectedViewResultsTask - A task future that is a handle to the scheduled task."""

        _task = GenerateExpectedViewResultsTask(doc_generators, view, query, type_query)
        self.task_manager.schedule(_task)
        return _task

    def generate_expected_view_query_results(self, doc_generators, view, query, timeout=None, type_query='view'):
        """Synchronously generate expected view query results

        Parameters:
            doc_generators - Generators used for loading docs (DocumentGenerator[])
            view - The view with map function (View)
            query - Query params to filter docs from the generator. (dict)

        Returns:
            list - A list of rows expected to be returned for given query"""

        _task = self.async_generate_expected_view_results(doc_generators, view, query, type_query)
        return _task.result(timeout)
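    # Hedged example: compute the rows a view query should return for docs
    # produced by a generator, then check a live query against that count
    # (doc_gen, view and master are assumed fixtures):
    #
    #     expected = cluster.generate_expected_view_query_results(
    #         [doc_gen], view, {"stale": "false"})
    #     cluster.query_view(master, "ddoc1", view.name,
    #                        {"stale": "false"}, expected_rows=len(expected))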

    def async_monitor_view_query(self, servers, design_doc_name, view_name,
                                 query, expected_docs=None, bucket="default",
                                 retries=100, error=None, verify_rows=False,
                                 server_to_query=0, type_query="view"):
        """
        Asynchronously monitor view query results:
        waits until the returned row count matches the expected row count

        Parameters:
            servers - servers to be checked (List of TestInputServer)
            design_doc_name - name of ddoc to query (String)
            view_name - name of view to query (String)
            query - query params (dict)
            expected_docs - expected emitted rows(list)
            bucket - bucket which contains ddoc (String or Bucket)
            retries - how many times to retry to get the correct result (int)
            error - for negative tests, expected error raised by query results (String)
            verify_rows - verify values of returned results (Boolean)
            server_to_query - index of server to query (int)
            type_query - "view" or "all_doc" (String)

        Returns:
            MonitorViewQueryResultsTask - A task future that is a handle to the scheduled task."""
        _task = MonitorViewQueryResultsTask(servers, design_doc_name, view_name,
                 query, expected_docs, bucket, retries, error, verify_rows, server_to_query,
                 type_query)
        self.task_manager.schedule(_task)
        return _task

    def async_view_query_verification(self, design_doc_name, view_name, query, expected_rows, num_verified_docs=20, bucket="default", query_timeout=20,
                                      results=None, server=None):
        """Asynchronously query a views in a design doc and does full verification of results

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            query - Query params being used with the query. (dict)
            expected_rows - The number of rows expected to be returned from the query (int)
            num_verified_docs - The number of docs to verify that require memcached gets (int)
            bucket - The name of the bucket containing items for this view. (String)
            query_timeout - The time to allow a query with stale=false to run. (int)
            results - previously fetched results to check; if None, the task fetches fresh results (dict)

        Returns:
            ViewQueryVerificationTask - A task future that is a handle to the scheduled task."""
        _task = ViewQueryVerificationTask(design_doc_name, view_name, query, expected_rows, server, num_verified_docs, bucket, query_timeout, results=results)
        self.task_manager.schedule(_task)
        return _task

    def view_query_verification(self, server, design_doc_name, view_name, query,
                                expected_rows, num_verified_docs=20,
                                bucket="default", query_timeout=20, timeout=None,
                                results=None):
        """Synchronously query a views in a design doc and does full verification of results

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            query - Query params being used with the query. (dict)
            expected_rows - The number of rows expected to be returned from the query (int)
            num_verified_docs - The number of docs to verify that require memcached gets (int)
            bucket - The name of the bucket containing items for this view. (String)
            query_timeout - The time to allow a query with stale=false to run. (int)
            results - previously fetched results to check; if None, the task fetches fresh results (dict)

        Returns:
            dict - An object with keys: passed = True or False
                                        errors = reasons why verification failed """
        _task = self.async_view_query_verification(server, design_doc_name, view_name, query, expected_rows, num_verified_docs, bucket, query_timeout, results)
        return _task.result(timeout)
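    # The synchronous form returns a dict; a hedged consumer might assert on
    # it like this (fixture names are illustrative):
    #
    #     res = cluster.view_query_verification(master, "ddoc1", "by_type",
    #                                           {"stale": "false"}, 1000)
    #     assert res["passed"], res["errors"]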


    def monitor_view_fragmentation(self, server,
                                   design_doc_name,
                                   fragmentation_value,
                                   bucket="default",
                                   timeout=None):
        """Synchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            boolean - True if <fragmentation_value> reached"""

        _task = self.async_monitor_view_fragmentation(server, design_doc_name,
                                                      fragmentation_value,
                                                      bucket)
        return _task.result(timeout)

    def async_compact_view(self, server, design_doc_name, bucket="default", with_rebalance=False):
        """Asynchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)
            with_rebalance - two behaviors depend on this flag:
                "Error occured reading set_view _info" errors are ignored if True
                (this applies to the rebalance case), and with concurrent updates
                (for instance, during rebalance) it is possible that the
                fragmentation value has not changed significantly

        Returns:
            ViewCompactionTask - A task future that is a handle to the scheduled task."""


        _task = ViewCompactionTask(server, design_doc_name, bucket, with_rebalance)
        self.task_manager.schedule(_task)
        return _task

    def compact_view(self, server, design_doc_name, bucket="default", timeout=None, with_rebalance=False):
        """Synchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)
            with_rebalance - "Error occured reading set_view _info" will be ignored if True
                and with concurrent updates(for instance, with rebalance)
                it's possible that compaction value has not changed significantly

        Returns:
            boolean - True if the file size was reduced after compaction, False if compaction succeeded but no work was done"""

        _task = self.async_compact_view(server, design_doc_name, bucket, with_rebalance)
        return _task.result(timeout)
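    # A hedged compaction flow: wait until fragmentation crosses a threshold,
    # then compact and check whether the index file shrank (the 80% threshold
    # is an assumption):
    #
    #     cluster.monitor_view_fragmentation(master, "ddoc1", 80)
    #     shrank = cluster.compact_view(master, "ddoc1")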

    def async_failover(self, servers, to_failover):
        """Asyncronously fails over nodes

        Parameters:
            servers - All servers participating in the failover ([TestInputServers])
            to_failover - All servers being failed over ([TestInputServers])

        Returns:
            FailoverTask - A task future that is a handle to the scheduled task"""
        _task = FailoverTask(servers, to_failover)
        self.task_manager.schedule(_task)
        return _task

    def failover(self, servers, to_failover, timeout=None):
        """Syncronously fails over nodes

        Parameters:
            servers - All servers participating in the failover ([TestInputServers])
            to_failover - All servers being failed over ([TestInputServers])

        Returns:
            boolean - Whether or not the failover was successful"""
        _task = self.async_failover(servers, to_failover)
        return _task.result(timeout)
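    # Illustrative failover-then-rebalance sequence (server indices are
    # assumptions):
    #
    #     cluster.failover(servers[:3], [servers[2]], timeout=120)
    #     cluster.rebalance(servers[:3], [], [servers[2]])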

    def async_bucket_flush(self, server, bucket='default'):
        """Asynchronously flushes a bucket

        Parameters:
            server - The server to flush the bucket on. (TestInputServer)
            bucket - The name of the bucket to be flushed. (String)

        Returns:
            BucketFlushTask - A task future that is a handle to the scheduled task."""
        _task = BucketFlushTask(server, bucket)
        self.task_manager.schedule(_task)
        return _task

    def bucket_flush(self, server, bucket='default', timeout=None):
        """Synchronously flushes a bucket

        Parameters:
            server - The server to flush the bucket on. (TestInputServer)
            bucket - The name of the bucket to be flushed. (String)

        Returns:
            boolean - Whether or not the bucket was flushed."""
        _task = self.async_bucket_flush(server, bucket)
        return _task.result(timeout)

    def async_monitor_db_fragmentation(self, server, fragmentation, bucket):
        """Asyncronously monitor db fragmentation

        Parameters:
            servers - server to check(TestInputServers)
            bucket - bucket to check
            fragmentation - fragmentation to reach

        Returns:
            MonitorDBFragmentationTask - A task future that is a handle to the scheduled task"""
        _task = MonitorDBFragmentationTask(server, fragmentation, bucket)
        self.task_manager.schedule(_task)
        return _task
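    # Sketch (values assumed): resolve the task future once the bucket's data
    # files reach 50% fragmentation:
    #
    #     task = cluster.async_monitor_db_fragmentation(master, 50, "default")
    #     task.result(600)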

    def cbrecovery(self, src_server, dest_server, bucket_src='', bucket_dest='', username='', password='',
                 username_dest='', password_dest='', verbose=False, wait_completed=True, timeout=None):
        """Synchronously run and monitor cbrecovery

        Parameters:
            src_server - source cluster to restore data from(TestInputServers)
            dest_server - destination cluster to restore data to(TestInputServers)
            bucket_src - source bucket to recover from
            bucket_dest - destination bucket to recover to
            username - REST username for source cluster
            password - REST password for source cluster
            username_dest - REST username for destination cluster or server node
            password_dest - REST password for destination cluster or server node
            verbose - verbose logging; more -v's provide more verbosity
            wait_completed - wait for the end of the cbrecovery

        Returns:
            boolean - Whether or not the cbrecovery completed successfully"""
        _task = self.async_cbrecovery(src_server, dest_server, bucket_src, bucket_dest, username, password,
                 username_dest, password_dest, verbose, wait_completed)
        return _task.result(timeout)

    def async_cbrecovery(self, src_server, dest_server, bucket_src='', bucket_dest='', username='', password='',
                 username_dest='', password_dest='', verbose=False, wait_completed=True):
        """Asyncronously run/monitor cbrecovery

        Parameters:
            src_server - source cluster to restore data from(TestInputServers)
            dest_server - destination cluster to restore data to(TestInputServers)
            bucket_src - source bucket to recover from
            bucket_dest - destination bucket to recover to
            username - REST username for source cluster
            password - REST password for source cluster
            username_dest - REST username for destination cluster or server node
            password_dest - REST password for destination cluster or server node
            verbose - verbose logging; more -v's provide more verbosity
            wait_completed - wait for the end of the cbrecovery

        Returns:
            CBRecoveryTask - A task future that is a handle to the scheduled task"""
        _task = CBRecoveryTask(src_server, dest_server, bucket_src, bucket_dest, username, password,
                 username_dest, password_dest, verbose, wait_completed)
        self.task_manager.schedule(_task)
        return _task
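
# A minimal end-to-end sketch of driving this helper class. Everything below
# is an assumption for illustration: real suites obtain TestInputServer
# fixtures from the testrunner harness rather than building them by hand.
#
#     cluster = Cluster()
#     try:
#         cluster.create_default_bucket(servers[0], size=256)
#         cluster.rebalance(servers, servers[1:3], [], timeout=600)
#         cluster.bucket_flush(servers[0], "default", timeout=120)
#     finally:
#         cluster.shutdown(force=True)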
Exemple #28
class Cluster(object):
    """An API for interacting with Couchbase clusters"""

    def __init__(self):
        self.task_manager = TaskManager()
        self.task_manager.start()

    def async_create_default_bucket(self, server, size, replicas=1):
        """Asynchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, 'default', replicas, size)
        self.task_manager.schedule(_task)
        return _task

    def async_create_sasl_bucket(self, server, name, password, size, replicas):
        """Asynchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, password=password)
        self.task_manager.schedule(_task)
        return _task

    def async_create_standard_bucket(self, server, name, port, size, replicas):
        """Asynchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, port)
        self.task_manager.schedule(_task)
        return _task

    def async_bucket_delete(self, server, bucket='default'):
        """Asynchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            BucketDeleteTask - A task future that is a handle to the scheduled task."""
        _task = BucketDeleteTask(server, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_init_node(self, server):
        """Asynchronously initializes a node

        The task scheduled will initialize the node's username and password and will
        set the node's memory quota to 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)

        Returns:
            NodeInitTask - A task future that is a handle to the scheduled task."""
        _task = NodeInitializeTask(server)
        self.task_manager.schedule(_task)
        return _task

    def async_load_gen_docs(self, server, bucket, generator, kv_store, op_type, exp=0):
        _task = LoadDocumentsTask(server, bucket, generator, kv_store, op_type, exp)
        self.task_manager.schedule(_task)
        return _task

    def async_workload(self, server, bucket, kv_store, num_ops, create, read, update,
                       delete, exp):
        _task = WorkloadTask(server, bucket, kv_store, num_ops, create, read, update,
                             delete, exp)
        self.task_manager.schedule(_task)
        return _task

    def async_verify_data(self, server, bucket, kv_store):
        _task = ValidateDataTask(server, bucket, kv_store)
        self.task_manager.schedule(_task)
        return _task

    def async_rebalance(self, servers, to_add, to_remove):
        """Asyncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])

        Returns:
            RebalanceTask - A task future that is a handle to the scheduled task"""
        _task = RebalanceTask(servers, to_add, to_remove)
        self.task_manager.schedule(_task)
        return _task

    def async_wait_for_stats(self, servers, bucket, param, stat, comparison, value):
        """Asynchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            StatsWaitTask - A task future that is a handle to the scheduled task"""
        _task = StatsWaitTask(servers, bucket, param, stat, comparison, value)
        self.task_manager.schedule(_task)
        return _task

    def create_default_bucket(self, server, size, replicas=1, timeout=None):
        """Synchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_default_bucket(server, size, replicas)
        return _task.result(timeout)

    def create_sasl_bucket(self, server, name, password, size, replicas, timeout=None):
        """Synchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_sasl_bucket(server, name, password, size,
                                              replicas)
        return _task.result(timeout)

    def create_standard_bucket(self, server, name, port, size, replicas, timeout=None):
        """Synchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_standard_bucket(server, name, port, size, replicas)
        return _task.result(timeout)

    def bucket_delete(self, server, bucket='default', timeout=None):
        """Synchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            boolean - Whether or not the bucket was deleted."""
        _task = self.async_bucket_delete(server, bucket)
        return _task.result(timeout)

    def init_node(self, server):
        """Synchronously initializes a node

        The task scheduled will initialize the node's username and password and will
        set the node's memory quota to 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)

        Returns:
            boolean - Whether or not the node was properly initialized."""
        _task = self.async_init_node(server)
        return _task.result()

    def rebalance(self, servers, to_add, to_remove, timeout=None):
        """Syncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])

        Returns:
            boolean - Whether or not the rebalance was successful"""
        _task = self.async_rebalance(servers, to_add, to_remove)
        return _task.result(timeout)

    def load_gen_docs(self, server, bucket, generator, kv_store, op_type, exp=0, timeout=None):
        _task = self.async_load_gen_docs(server, bucket, generator, kv_store, op_type, exp)
        return _task.result(timeout)

    def workload(self, server, bucket, kv_store, num_ops, create, read, update, delete, exp, timeout=None):
        _task = self.async_workload(server, bucket, kv_store, num_ops, create, read, update,
                                    delete, exp)
        return _task.result(timeout)

    def verify_data(self, server, bucket, kv_store, timeout=None):
        _task = self.async_verify_data(server, bucket, kv_store)
        return _task.result(timeout)

    def wait_for_stats(self, servers, bucket, param, stat, comparison, value, timeout=None):
        """Synchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            boolean - Whether or not the correct stats state was seen"""
        _task = self.async_wait_for_stats(servers, bucket, param, stat, comparison, value)
        return _task.result(timeout)

    def shutdown(self, force=False):
        self.task_manager.shutdown(force)

    def async_create_view(self, server, design_doc_name, view, bucket="default"):
        """Asynchronously creates a view in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            ViewCreateTask - A task future that is a handle to the scheduled task."""
        _task = ViewCreateTask(server, design_doc_name, view, bucket)
        self.task_manager.schedule(_task)
        return _task

    def create_view(self, server, design_doc_name, view, bucket="default", timeout=None):
        """Synchronously creates a view in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            string - revision number of design doc."""
        _task = self.async_create_view(server, design_doc_name, view, bucket)
        return _task.result(timeout)

    def async_delete_view(self, server, design_doc_name, view, bucket="default"):
        """Asynchronously deletes a view in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            ViewDeleteTask - A task future that is a handle to the scheduled task."""
        _task = ViewDeleteTask(server, design_doc_name, view, bucket)
        self.task_manager.schedule(_task)
        return _task

    def delete_view(self, server, design_doc_name, view, bucket="default", timeout=None):
        """Synchronously deletes a view in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            boolean - Whether or not delete view was successful."""
        _task = self.async_delete_view(server, design_doc_name, view, bucket)
        return _task.result(timeout)


    def async_query_view(self, server, design_doc_name, view_name, query,
                         expected_rows=None, bucket="default", retry_time=2):
        """Asynchronously queries a view in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            ViewQueryTask - A task future that is a handle to the scheduled task."""
        _task = ViewQueryTask(server, design_doc_name, view_name, query, expected_rows, bucket, retry_time)
        self.task_manager.schedule(_task)
        return _task

    def query_view(self, server, design_doc_name, view_name, query,
                   expected_rows=None, bucket="default", retry_time=2, timeout=None):
        """Synchronously queries a view in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            boolean - Whether or not the query returned the expected results."""
        _task = self.async_query_view(server, design_doc_name, view_name, query, expected_rows, bucket, retry_time)
        return _task.result(timeout)


    def modify_fragmentation_config(self, server, config, bucket="default", timeout=None):
        """Synchronously modify fragmentation configuration spec

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            config - New compaction configuration (dict - see task)
            bucket - The name of the bucket fragmentation config applies to. (String)

        Returns:
            boolean - True if config values accepted."""

        _task = ModifyFragmentationConfigTask(server, config, bucket)
        self.task_manager.schedule(_task)
        return _task.result(timeout)


    def async_monitor_view_fragmentation(self, server,
                                         design_doc_name,
                                         fragmentation_value,
                                         bucket="default"):
        """Asynchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            MonitorViewFragmentationTask - A task future that is a handle to the scheduled task."""

        _task = MonitorViewFragmentationTask(server, design_doc_name,
                                             fragmentation_value, bucket)
        self.task_manager.schedule(_task)
        return _task


    def monitor_view_fragmentation(self, server,
                                   design_doc_name,
                                   fragmentation_value,
                                   bucket="default",
                                   timeout=None):
        """Synchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            boolean - True if <fragmentation_value> reached"""

        _task = self.async_monitor_view_fragmentation(server, design_doc_name,
                                                      fragmentation_value,
                                                      bucket)
        return _task.result(timeout)

    def async_compact_view(self, server, design_doc_name, bucket="default"):
        """Asynchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            ViewCompactionTask - A task future that is a handle to the scheduled task."""


        _task = ViewCompactionTask(server, design_doc_name, bucket)
        self.task_manager.schedule(_task)
        return _task

    def compact_view(self, server, design_doc_name, bucket="default", timeout=None):
        """Synchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            boolean - True if the file size was reduced after compaction, False if compaction succeeded but no work was done"""

        _task = self.async_compact_view(server, design_doc_name, bucket)
        return _task.result(timeout)
Exemple #29
class AutoFailoverBaseTest(BaseTestCase):
    MAX_FAIL_DETECT_TIME = 120
    ORCHESTRATOR_TIMEOUT_BUFFER = 60

    def setUp(self):
        super(AutoFailoverBaseTest, self).setUp()
        self._get_params()
        self.rest = RestConnection(self.orchestrator)
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.node_failure_task_manager = TaskManager(
            "Nodes_failure_detector_thread")
        self.node_failure_task_manager.start()
        self.initial_load_gen = BlobGenerator('auto-failover',
                                              'auto-failover-',
                                              self.value_size,
                                              end=self.num_items)
        self.update_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             end=self.update_items)
        self.delete_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             start=self.update_items,
                                             end=self.delete_items)
        self._load_all_buckets(self.servers[0], self.initial_load_gen,
                               "create", 0)
        self._async_load_all_buckets(self.orchestrator,
                                     self.update_load_gen, "update", 0)
        self._async_load_all_buckets(self.orchestrator,
                                     self.delete_load_gen, "delete", 0)
        self.server_to_fail = self._servers_to_fail()
        self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                                           self.nodes_in]
        self.servers_to_remove = self.servers[self.nodes_init -
                                              self.nodes_out:self.nodes_init]
        # self.node_monitor_task = self.start_node_monitors_task()

    def tearDown(self):
        self.log.info("============AutoFailoverBaseTest teardown============")
        self._get_params()
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.server_to_fail = self._servers_to_fail()
        self.start_couchbase_server()
        self.sleep(10)
        self.disable_firewall()
        self.rest = RestConnection(self.orchestrator)
        self.rest.reset_autofailover()
        self.disable_autofailover()
        self._cleanup_cluster()
        super(AutoFailoverBaseTest, self).tearDown()
        if hasattr(self, "node_monitor_task"):
            if self.node_monitor_task._exception:
                self.fail("{}".format(self.node_monitor_task._exception))
            self.node_monitor_task.stop = True
        self.task_manager.shutdown(force=True)

    def enable_autofailover(self):
        """
        Enable the autofailover setting with the given timeout.
        :return: True if the setting was set with the timeout, else False
        """
        status = self.rest.update_autofailover_settings(True,
                                                        self.timeout)
        return status

    def disable_autofailover(self):
        """
        Disable the autofailover setting.
        :return: True if the setting was disabled, else False
        """
        status = self.rest.update_autofailover_settings(False, 120)
        return status

    def enable_autofailover_and_validate(self):
        """
        Enable autofailover with the given timeout and then validate the
        settings.
        :return: Nothing
        """
        status = self.enable_autofailover()
        self.assertTrue(status, "Failed to enable autofailover_settings!")
        self.sleep(5)
        settings = self.rest.get_autofailover_settings()
        self.assertTrue(settings.enabled, "Failed to enable "
                                          "autofailover_settings!")
        self.assertEqual(self.timeout, settings.timeout,
                         "Incorrect timeout set. Expected timeout : {0} "
                         "Actual timeout set : {1}".format(self.timeout,
                                                           settings.timeout))

    def disable_autofailover_and_validate(self):
        """
        Disable the autofailover setting and then validate that it was
        disabled.
        :return: Nothing
        """
        status = self.disable_autofailover()
        self.assertTrue(status, "Failed to change autofailover_settings!")
        settings = self.rest.get_autofailover_settings()
        self.assertFalse(settings.enabled, "Failed to disable "
                                           "autofailover_settings!")

    def start_node_monitors_task(self):
        """
        Start the node monitors task to analyze node status.
        :return: The NodeMonitorsAnalyserTask.
        """
        node_monitor_task = NodeMonitorsAnalyserTask(self.orchestrator)
        self.task_manager.schedule(node_monitor_task, sleep_time=5)
        return node_monitor_task

    def enable_firewall(self):
        """
        Enable firewall on the nodes to fail in the tests.
        :return: Nothing
        """
        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(self.orchestrator,
                                            self.server_to_fail,
                                            "enable_firewall", self.timeout,
                                            self.pause_between_failover_action,
                                            self.failover_expected,
                                            self.timeout_buffer,
                                            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception, e:
            self.fail("Exception: {}".format(e))
Exemple #30
 def __init__(self):
     self.task_manager = TaskManager()
     self.task_manager.start()
Exemple #31
 def __init__(self):
     self.task_manager = TaskManager("Cluster_Thread")
     self.task_manager.start()
Exemple #32
class Cluster(object):
    """An API for interacting with Couchbase clusters"""
    def __init__(self):
        self.task_manager = TaskManager()
        self.task_manager.start()

    def async_create_default_bucket(self, server, size, replicas=1):
        """Asynchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, 'default', replicas, size)
        self.task_manager.schedule(_task)
        return _task
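
    # Usage sketch (hypothetical server object): the async_* methods return
    # task futures; call result() only when the outcome is actually needed.
    #
    #   cluster = Cluster()
    #   task = cluster.async_create_default_bucket(server, size=256)
    #   created = task.result(timeout=60)   # boolean per the docstring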

    def async_create_sasl_bucket(self, server, name, password, size, replicas):
        """Asynchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server,
                                 name,
                                 replicas,
                                 size,
                                 password=password)
        self.task_manager.schedule(_task)
        return _task

    def async_create_standard_bucket(self, server, name, port, size, replicas):
        """Asynchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, port)
        self.task_manager.schedule(_task)
        return _task

    def async_bucket_delete(self, server, bucket='default'):
        """Asynchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            BucketDeleteTask - A task future that is a handle to the scheduled task."""
        _task = BucketDeleteTask(server, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_init_node(self, server, disabled_consistent_view=None):
        """Asynchronously initializes a node

        The task scheduled will initialize the node's username and password and will set
        the node's memory quota to 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            disabled_consistent_view - disable consistent view

        Returns:
            NodeInitTask - A task future that is a handle to the scheduled task."""
        _task = NodeInitializeTask(server, disabled_consistent_view)
        self.task_manager.schedule(_task)
        return _task

    def async_load_gen_docs(self,
                            server,
                            bucket,
                            generator,
                            kv_store,
                            op_type,
                            exp=0,
                            flag=0,
                            only_store_hash=True,
                            batch_size=1,
                            pause_secs=1,
                            timeout_secs=5):
        """Asynchronously loads documents into a bucket from a generator

        Uses a batched load task when batch_size > 1, otherwise loads
        documents one at a time.

        Returns:
            A task future that is a handle to the scheduled load task."""
        if batch_size > 1:
            _task = BatchedLoadDocumentsTask(server, bucket, generator,
                                             kv_store, op_type, exp, flag,
                                             only_store_hash, batch_size,
                                             pause_secs, timeout_secs)
        else:
            _task = LoadDocumentsTask(server, bucket, generator, kv_store,
                                      op_type, exp, flag, only_store_hash)
        self.task_manager.schedule(_task)
        return _task
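
    # Usage sketch (hypothetical generator and kv_store, following the
    # harness's BlobGenerator conventions):
    #
    #   gen = BlobGenerator('load', 'load-', value_size, end=10000)
    #   task = cluster.async_load_gen_docs(server, 'default', gen, kv_store,
    #                                      "create", batch_size=100)
    #   task.result(timeout=300)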

    def async_workload(self, server, bucket, kv_store, num_ops, create, read,
                       update, delete, exp):
        """Asynchronously runs a mixed create/read/update/delete workload

        Returns:
            WorkloadTask - A task future that is a handle to the scheduled task."""
        _task = WorkloadTask(server, bucket, kv_store, num_ops, create, read,
                             update, delete, exp)
        self.task_manager.schedule(_task)
        return _task

    def async_verify_data(self,
                          server,
                          bucket,
                          kv_store,
                          max_verify=None,
                          only_store_hash=True,
                          batch_size=1):
        """Asynchronously validates bucket data against the kv_store

        Uses a batched validation task when batch_size > 1.

        Returns:
            A task future that is a handle to the scheduled validation task."""
        if batch_size > 1:
            _task = BatchedValidateDataTask(server, bucket, kv_store,
                                            max_verify, only_store_hash,
                                            batch_size)
        else:
            _task = ValidateDataTask(server, bucket, kv_store, max_verify,
                                     only_store_hash)
        self.task_manager.schedule(_task)
        return _task

    def async_verify_revid(self, src_server, dest_server, bucket, kv_store,
                           ops_perf):
        """Asynchronously verifies document rev-ids between two servers

        Returns:
            VerifyRevIdTask - A task future that is a handle to the scheduled task."""
        _task = VerifyRevIdTask(src_server, dest_server, bucket, kv_store,
                                ops_perf)
        self.task_manager.schedule(_task)
        return _task

    def async_rebalance(self, servers, to_add, to_remove):
        """Asyncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])

        Returns:
            RebalanceTask - A task future that is a handle to the scheduled task"""
        _task = RebalanceTask(servers, to_add, to_remove)
        self.task_manager.schedule(_task)
        return _task
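
    # Usage sketch (hypothetical server lists): kick off the rebalance and
    # overlap other work before blocking on the future.
    #
    #   task = cluster.async_rebalance(servers[:2], servers[2:4], [])
    #   ...run a workload here...
    #   assert task.result(timeout=1200)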

    def async_wait_for_stats(self, servers, bucket, param, stat, comparison,
                             value):
        """Asynchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            StatsWaitTask - A task future that is a handle to the scheduled task"""
        _task = StatsWaitTask(servers, bucket, param, stat, comparison, value)
        self.task_manager.schedule(_task)
        return _task
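
    # Usage sketch (assumed param/stat names; see StatsCommon for the stat
    # vocabulary): wait until the disk write queue drains.
    #
    #   task = cluster.async_wait_for_stats([server], 'default', '',
    #                                       'ep_queue_size', '==', 0)
    #   task.result(timeout=60)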

    def create_default_bucket(self, server, size, replicas=1, timeout=None):
        """Synchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_default_bucket(server, size, replicas)
        return _task.result(timeout)

    def create_sasl_bucket(self,
                           server,
                           name,
                           password,
                           size,
                           replicas,
                           timeout=None):
        """Synchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_sasl_bucket(server, name, password, size,
                                              replicas)
        return _task.result(timeout)

    def create_standard_bucket(self,
                               server,
                               name,
                               port,
                               size,
                               replicas,
                               timeout=None):
        """Synchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_standard_bucket(server, name, port, size,
                                                  replicas)
        return _task.result(timeout)

    def bucket_delete(self, server, bucket='default', timeout=None):
        """Synchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            boolean - Whether or not the bucket was deleted."""
        _task = self.async_bucket_delete(server, bucket)
        return _task.result(timeout)

    def init_node(self,
                  server,
                  async_init_node=True,
                  disabled_consistent_view=None):
        """Synchronously initializes a node

        The task scheduled will initialize the node's username and password and will set
        the node's memory quota to 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            disabled_consistent_view - disable consistent view

        Returns:
            boolean - Whether or not the node was properly initialized."""
        _task = self.async_init_node(server, disabled_consistent_view)
        return _task.result()

    def rebalance(self, servers, to_add, to_remove, timeout=None):
        """Syncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])

        Returns:
            boolean - Whether or not the rebalance was successful"""
        _task = self.async_rebalance(servers, to_add, to_remove)
        return _task.result(timeout)

    def load_gen_docs(self,
                      server,
                      bucket,
                      generator,
                      kv_store,
                      op_type,
                      exp=0,
                      timeout=None,
                      flag=0,
                      only_store_hash=True,
                      batch_size=1):
        _task = self.async_load_gen_docs(server,
                                         bucket,
                                         generator,
                                         kv_store,
                                         op_type,
                                         exp,
                                         flag,
                                         only_store_hash=only_store_hash,
                                         batch_size=batch_size)
        return _task.result(timeout)

    def workload(self,
                 server,
                 bucket,
                 kv_store,
                 num_ops,
                 create,
                 read,
                 update,
                 delete,
                 exp,
                 timeout=None):
        _task = self.async_workload(server, bucket, kv_store, num_ops, create,
                                    read, update, delete, exp)
        return _task.result(timeout)

    def verify_data(self, server, bucket, kv_store, timeout=None):
        _task = self.async_verify_data(server, bucket, kv_store)
        return _task.result(timeout)

    def wait_for_stats(self,
                       servers,
                       bucket,
                       param,
                       stat,
                       comparison,
                       value,
                       timeout=None):
        """Synchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            boolean - Whether or not the correct stats state was seen"""
        _task = self.async_wait_for_stats(servers, bucket, param, stat,
                                          comparison, value)
        return _task.result(timeout)

    def shutdown(self, force=False):
        self.task_manager.shutdown(force)

    def async_create_view(self,
                          server,
                          design_doc_name,
                          view,
                          bucket="default",
                          with_query=True):
        """Asynchronously creates a views in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String)
            with_query - Wait indexing to get view query results after creation

        Returns:
            ViewCreateTask - A task future that is a handle to the scheduled task."""
        _task = ViewCreateTask(server, design_doc_name, view, bucket,
                               with_query)
        self.task_manager.schedule(_task)
        return _task

    def create_view(self,
                    server,
                    design_doc_name,
                    view,
                    bucket="default",
                    timeout=None,
                    with_query=True):
        """Synchronously creates a views in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String)
            with_query - Wait indexing to get view query results after creation

        Returns:
            string - revision number of design doc."""
        _task = self.async_create_view(server, design_doc_name, view, bucket,
                                       with_query)
        return _task.result(timeout)
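
    # Usage sketch (hypothetical View constructor from the harness's
    # document module):
    #
    #   view = View("by_id", "function (doc) { emit(doc._id, null); }")
    #   rev = cluster.create_view(server, "ddoc1", view, timeout=120)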

    def async_delete_view(self,
                          server,
                          design_doc_name,
                          view,
                          bucket="default"):
        """Asynchronously deletes a views in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            ViewDeleteTask - A task future that is a handle to the scheduled task."""
        _task = ViewDeleteTask(server, design_doc_name, view, bucket)
        self.task_manager.schedule(_task)
        return _task

    def delete_view(self,
                    server,
                    design_doc_name,
                    view,
                    bucket="default",
                    timeout=None):
        """Synchronously deletes a views in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            boolean - Whether or not delete view was successful."""
        _task = self.async_delete_view(server, design_doc_name, view, bucket)
        return _task.result(timeout)

    def async_query_view(self,
                         server,
                         design_doc_name,
                         view_name,
                         query,
                         expected_rows=None,
                         bucket="default",
                         retry_time=2):
        """Asynchronously query a views in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            ViewQueryTask - A task future that is a handle to the scheduled task."""
        _task = ViewQueryTask(server, design_doc_name, view_name, query,
                              expected_rows, bucket, retry_time)
        self.task_manager.schedule(_task)
        return _task

    def query_view(self,
                   server,
                   design_doc_name,
                   view_name,
                   query,
                   expected_rows=None,
                   bucket="default",
                   retry_time=2,
                   timeout=None):
        """Synchronously query a views in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            ViewQueryTask - A task future that is a handle to the scheduled task."""
        _task = self.async_query_view(server, design_doc_name, view_name,
                                      query, expected_rows, bucket, retry_time)
        return _task.result(timeout)
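
    # Usage sketch (hypothetical query params): block until the view returns
    # the expected number of rows, retrying failed queries every 2 seconds.
    #
    #   query = {"stale": "false", "full_set": "true"}
    #   cluster.query_view(server, "ddoc1", "by_id", query,
    #                      expected_rows=10000, timeout=300)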

    def modify_fragmentation_config(self,
                                    server,
                                    config,
                                    bucket="default",
                                    timeout=None):
        """Synchronously modify fragmentation configuration spec

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            config - New compaction configuration (dict - see task)
            bucket - The name of the bucket the fragmentation config applies to. (String)

        Returns:
            boolean - True if config values accepted."""

        _task = ModifyFragmentationConfigTask(server, config, bucket)
        self.task_manager.schedule(_task)
        return _task.result(timeout)

    def async_monitor_active_task(self,
                                  server,
                                  type,
                                  target_value,
                                  wait_progress=100,
                                  num_iteration=100,
                                  wait_task=True):
        """Asynchronously monitor active task.

           When the active task reaches wait_progress, this method will return.

        Parameters:
            server - The server to monitor the active task on. (TestInputServer)
            type - task type ('indexer', 'bucket_compaction', 'view_compaction') (String)
            target_value - target value (for example "_design/ddoc" for indexing, bucket "default"
                for bucket_compaction or "_design/dev_view" for view_compaction) (String)
            wait_progress - expected progress (int)
            num_iteration - fail the test if progress has not changed after this many iterations (int)
            wait_task - expect to find the task on the first attempt (bool)

        Returns:
            MonitorActiveTask - A task future that is a handle to the scheduled task."""
        _task = MonitorActiveTask(server, type, target_value, wait_progress,
                                  num_iteration, wait_task)
        self.task_manager.schedule(_task)
        return _task
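
    # Usage sketch (assumed design doc name): watch the indexer until it
    # reports 100% progress for the design doc.
    #
    #   task = cluster.async_monitor_active_task(server, 'indexer',
    #                                            '_design/ddoc1')
    #   task.result()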

    def async_monitor_view_fragmentation(self,
                                         server,
                                         design_doc_name,
                                         fragmentation_value,
                                         bucket="default"):
        """Asynchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            MonitorViewFragmentationTask - A task future that is a handle to the scheduled task."""

        _task = MonitorViewFragmentationTask(server, design_doc_name,
                                             fragmentation_value, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_generate_expected_view_results(self, doc_generators, view,
                                             query):
        """Asynchronously generate expected view query results

        Parameters:
            doc_generators - Generators used for loading docs (DocumentGenerator[])
            view - The view with map function (View)
            query - Query params to filter docs from the generator. (dict)

        Returns:
            GenerateExpectedViewResultsTask - A task future that is a handle to the scheduled task."""

        _task = GenerateExpectedViewResultsTask(doc_generators, view, query)
        self.task_manager.schedule(_task)
        return _task

    def generate_expected_view_query_results(self,
                                             doc_generators,
                                             view,
                                             query,
                                             timeout=None):
        """Synchronously generate expected view query results

        Parameters:
            doc_generators - Generators used for loading docs (DocumentGenerator[])
            view - The view with map function (View)
            query - Query params to filter docs from the generator. (dict)

        Returns:
            list - A list of rows expected to be returned for given query"""

        _task = self.async_generate_expected_view_results(
            doc_generators, view, query)
        return _task.result(timeout)

    def async_view_query_verification(self,
                                      server,
                                      design_doc_name,
                                      view_name,
                                      query,
                                      expected_rows,
                                      num_verified_docs=20,
                                      bucket="default",
                                      query_timeout=20):
        """Asynchronously query a views in a design doc and does full verification of results

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            query - Query params being used with the query. (dict)
            expected_rows - The number of rows expected to be returned from the query (int)
            num_verified_docs - The number of docs to verify that require memcached gets (int)
            bucket - The name of the bucket containing items for this view. (String)
            query_timeout - The time to allow a query with stale=false to run. (int)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            ViewQueryVerificationTask - A task future that is a handle to the scheduled task."""
        _task = ViewQueryVerificationTask(server, design_doc_name, view_name,
                                          query, expected_rows,
                                          num_verified_docs, bucket,
                                          query_timeout)
        self.task_manager.schedule(_task)
        return _task

    def view_query_verification(self,
                                server,
                                design_doc_name,
                                view_name,
                                query,
                                expected_rows,
                                num_verified_docs=20,
                                bucket="default",
                                query_timeout=20,
                                timeout=None):
        """Synchronously query a views in a design doc and does full verification of results

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            query - Query params being used with the query. (dict)
            expected_rows - The number of rows expected to be returned from the query (int)
            num_verified_docs - The number of docs to verify that require memcached gets (int)
            bucket - The name of the bucket containing items for this view. (String)
            query_timeout - The time to allow a query with stale=false to run. (int)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            dict - An object with keys: passed = True or False
                                        errors = reasons why verification failed """
        _task = self.async_view_query_verification(server, design_doc_name,
                                                   view_name, query,
                                                   expected_rows,
                                                   num_verified_docs, bucket,
                                                   query_timeout)
        return _task.result(timeout)

    def monitor_view_fragmentation(self,
                                   server,
                                   design_doc_name,
                                   fragmentation_value,
                                   bucket="default",
                                   timeout=None):
        """Synchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            boolean - True if <fragmentation_value> reached"""

        _task = self.async_monitor_view_fragmentation(server, design_doc_name,
                                                      fragmentation_value,
                                                      bucket)
        return _task.result(timeout)

    def async_compact_view(self,
                           server,
                           design_doc_name,
                           bucket="default",
                           with_rebalance=False):
        """Asynchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)
            with_rebalance - covers two rebalance-related cases:
                the error "Error occured reading set_view _info" is ignored
                if True (this applies to the rebalance case), and with
                concurrent updates (for instance, during rebalance) it is
                possible that the compaction value has not changed significantly

        Returns:
            ViewCompactionTask - A task future that is a handle to the scheduled task."""

        _task = ViewCompactionTask(server, design_doc_name, bucket,
                                   with_rebalance)
        self.task_manager.schedule(_task)
        return _task

    def compact_view(self,
                     server,
                     design_doc_name,
                     bucket="default",
                     timeout=None,
                     with_rebalance=False):
        """Synchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)
            with_rebalance - "Error occured reading set_view _info" will be ignored if True
                and with concurrent updates(for instance, with rebalance)
                it's possible that compaction value has not changed significantly

        Returns:
            boolean - True if the file size was reduced after compaction, False if compaction succeeded but no work was done"""

        _task = self.async_compact_view(server, design_doc_name, bucket,
                                        with_rebalance)
        return _task.result(timeout)
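
    # Usage sketch (hypothetical fragmentation threshold): wait for the index
    # to fragment, then compact it.
    #
    #   cluster.monitor_view_fragmentation(server, "ddoc1", 80)
    #   reduced = cluster.compact_view(server, "ddoc1")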

    def async_failover(self, servers, to_failover):
        """Asyncronously fails over nodes

        Parameters:
            servers - All servers participating in the failover ([TestInputServers])
            to_failover - All servers being failed over ([TestInputServers])

        Returns:
            FailoverTask - A task future that is a handle to the scheduled task"""
        _task = FailoverTask(servers, to_failover)
        self.task_manager.schedule(_task)
        return _task

    def failover(self, servers, to_failover, timeout=None):
        """Syncronously fails over nodes

        Parameters:
            servers - All servers participating in the failover ([TestInputServers])
            to_failover - All servers being failed over ([TestInputServers])

        Returns:
            boolean - Whether or not the failover was successful"""
        _task = self.async_failover(servers, to_failover)
        return _task.result(timeout)
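
    # Usage sketch (hypothetical node lists): fail over the last node, then
    # rebalance it out of the cluster.
    #
    #   cluster.failover(servers, servers[-1:], timeout=600)
    #   cluster.rebalance(servers, [], servers[-1:])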

    def async_bucket_flush(self, server, bucket='default'):
        """Asynchronously flushes a bucket

        Parameters:
            server - The server to flush the bucket on. (TestInputServer)
            bucket - The name of the bucket to be flushed. (String)

        Returns:
            BucketFlushTask - A task future that is a handle to the scheduled task."""
        _task = BucketFlushTask(server, bucket)
        self.task_manager.schedule(_task)
        return _task

    def bucket_flush(self, server, bucket='default', timeout=None):
        """Synchronously flushes a bucket

        Parameters:
            server - The server to flush the bucket on. (TestInputServer)
            bucket - The name of the bucket to be flushed. (String)

        Returns:
            boolean - Whether or not the bucket was flushed."""
        _task = self.async_bucket_flush(server, bucket)
        return _task.result(timeout)
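
# End-to-end sketch (hypothetical servers list): the Cluster facade hides the
# TaskManager, so a test only handles task futures and their results.
#
#   cluster = Cluster()
#   try:
#       cluster.create_default_bucket(servers[0], size=256)
#       cluster.rebalance(servers, servers[1:3], [])
#       cluster.bucket_flush(servers[0])
#   finally:
#       cluster.shutdown()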