Example 1
class AutoFailoverBaseTest(BaseTestCase):
    MAX_FAIL_DETECT_TIME = 120
    ORCHESTRATOR_TIMEOUT_BUFFER = 60

    def setUp(self):
        super(AutoFailoverBaseTest, self).setUp()
        self._get_params()
        self.rest = RestConnection(self.orchestrator)
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.node_failure_task_manager = TaskManager(
            "Nodes_failure_detector_thread")
        self.node_failure_task_manager.start()
        self.initial_load_gen = BlobGenerator('auto-failover',
                                              'auto-failover-',
                                              self.value_size,
                                              end=self.num_items)
        self.update_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             end=self.update_items)
        self.delete_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             start=self.update_items,
                                             end=self.delete_items)
        self._load_all_buckets(self.servers[0], self.initial_load_gen,
                               "create", 0)
        self._async_load_all_buckets(self.orchestrator,
                                     self.update_load_gen, "update", 0)
        self._async_load_all_buckets(self.orchestrator,
                                     self.delete_load_gen, "delete", 0)
        self.server_to_fail = self._servers_to_fail()
        self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                                           self.nodes_in]
        self.servers_to_remove = self.servers[self.nodes_init -
                                              self.nodes_out:self.nodes_init]
        # self.node_monitor_task = self.start_node_monitors_task()

    def tearDown(self):
        self.log.info("============AutoFailoverBaseTest teardown============")
        self._get_params()
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.server_to_fail = self._servers_to_fail()
        self.start_couchbase_server()
        self.sleep(10)
        self.disable_firewall()
        self.rest = RestConnection(self.orchestrator)
        self.rest.reset_autofailover()
        self.disable_autofailover()
        self._cleanup_cluster()
        super(AutoFailoverBaseTest, self).tearDown()
        if hasattr(self, "node_monitor_task"):
            if self.node_monitor_task._exception:
                self.fail("{}".format(self.node_monitor_task._exception))
            self.node_monitor_task.stop = True
        self.task_manager.shutdown(force=True)

    def enable_autofailover(self):
        """
        Enable the autofailover setting with the given timeout.
        :return: True if the setting was applied with the given timeout,
        False otherwise
        """
        status = self.rest.update_autofailover_settings(True,
                                                        self.timeout)
        return status

    def disable_autofailover(self):
        """
        Disable the autofailover setting.
        :return: True if the setting was disabled, False otherwise
        """
        status = self.rest.update_autofailover_settings(False, 120)
        return status

    def enable_autofailover_and_validate(self):
        """
        Enable autofailover with the given timeout and then validate the
        settings.
        :return: Nothing
        """
        status = self.enable_autofailover()
        self.assertTrue(status, "Failed to enable autofailover_settings!")
        self.sleep(5)
        settings = self.rest.get_autofailover_settings()
        self.assertTrue(settings.enabled, "Failed to enable "
                                          "autofailover_settings!")
        self.assertEqual(self.timeout, settings.timeout,
                         "Incorrect timeout set. Expected timeout : {0} "
                         "Actual timeout set : {1}".format(self.timeout,
                                                           settings.timeout))

    def disable_autofailover_and_validate(self):
        """
        Disable autofailover setting and then validate if the setting was
        disabled.
        :return: Nothing
        """
        status = self.disable_autofailover()
        self.assertTrue(status, "Failed to change autofailover_settings!")
        settings = self.rest.get_autofailover_settings()
        self.assertFalse(settings.enabled, "Failed to disable "
                                           "autofailover_settings!")

    def start_node_monitors_task(self):
        """
        Start the node monitor task to analyse the status of the nodes.
        :return: The NodeMonitorsAnalyserTask.
        """
        node_monitor_task = NodeMonitorsAnalyserTask(self.orchestrator)
        self.task_manager.schedule(node_monitor_task, sleep_time=5)
        return node_monitor_task

    def enable_firewall(self):
        """
        Enable the firewall on the nodes that are to be failed in the test.
        :return: Nothing
        """
        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(self.orchestrator,
                                            self.server_to_fail,
                                            "enable_firewall", self.timeout,
                                            self.pause_between_failover_action,
                                            self.failover_expected,
                                            self.timeout_buffer,
                                            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))
Example 2
class ElasticSearchBase(object):
    def __init__(self, host, logger):
        # host is an object exposing the ES node's ip and port
        self.__log = logger
        self.__host = host
        self.__document = {}
        self.__mapping = {}
        self.__STATUSOK = 200
        self.__indices = []
        self.__index_types = {}
        self.__connection_url = 'http://{0}:{1}/'.format(
            self.__host.ip, self.__host.port)
        self.es_queries = []
        self.task_manager = TaskManager("ES_Thread")
        self.task_manager.start()
        self.http = httplib2.Http

    def _http_request(self,
                      api,
                      method='GET',
                      params='',
                      headers=None,
                      timeout=120):
        if not headers:
            headers = {'Content-Type': 'application/json', 'Accept': '*/*'}
        try:
            response, content = httplib2.Http(timeout=timeout).request(
                api, method, params, headers)
            if response['status'] in ['200', '201', '202']:
                return True, content, response
            else:
                try:
                    json_parsed = ast.literal_eval(content)
                except ValueError as e:
                    json_parsed = {}
                    json_parsed["error"] = "status: {0}, content: {1}".\
                        format(response['status'], content)
                reason = "unknown"
                if "error" in json_parsed:
                    reason = json_parsed["error"]
                self.__log.error('{0} error {1} reason: {2} {3}'.format(
                    api, response['status'], reason, content.rstrip('\n')))
                return False, content, response
        except socket.error as e:
            self.__log.error(
                "socket error while connecting to {0} error {1} ".format(
                    api, e))
            raise ServerUnavailableException(ip=self.__host.ip)

    def restart_es(self):
        shell = RemoteMachineShellConnection(self.__host)
        es_restart_cmd = "/etc/init.d/elasticsearch restart"
        o, e = shell.execute_non_sudo_command(es_restart_cmd)
        shell.log_command_output(o, e)

        es_start = False
        for i in range(2):
            self.sleep(10)
            if self.is_running():
                es_start = True
                break
        if not es_start:
            raise Exception("Could not reach Elastic Search server on %s" %
                            self.__host.ip)
        else:
            self.__log.info("Restarted ES server %s successfully" %
                            self.__host.ip)

    def is_running(self):
        """
         make sure ES is up and running
         check the service is running , if not abort the test
        """

        try:
            status, content, _ = self._http_request(self.__connection_url,
                                                    'GET')
            if status:
                return True
            else:
                return False
        except Exception as e:
            raise e

    def delete_index(self, index_name):
        """
        Deletes index
        """
        try:
            url = self.__connection_url + index_name
            status, content, _ = self._http_request(url, 'DELETE')
        except Exception as e:
            raise e

    def delete_indices(self):
        """
        Delete all indices present
        """
        for index_name in self.__indices:
            self.delete_index(index_name)
            self.__log.info("ES index %s deleted" % index_name)

    def create_empty_index(self, index_name):
        """
        Creates an empty index, given the name
        """
        try:
            self.delete_index(index_name)
            status, content, _ = self._http_request(
                self.__connection_url + index_name, 'PUT')
            if status:
                self.__indices.append(index_name)
        except Exception as e:
            raise Exception("Could not create ES index : %s" % e)

    def create_empty_index_with_bleve_equivalent_std_analyzer(
            self, index_name):
        """
        Refer:
        https://www.elastic.co/guide/en/elasticsearch/guide/current/
        configuring-analyzers.html
        """
        try:
            self.delete_index(index_name)
            status, content, _ = self._http_request(
                self.__connection_url + index_name, 'PUT',
                json.dumps(BLEVE.STD_ANALYZER))
            if status:
                self.__indices.append(index_name)
        except Exception as e:
            raise Exception(
                "Could not create index with ES std analyzer : %s" % e)

    def create_index_mapping(self, index_name, es_mapping, fts_mapping=None):
        """
        Creates a new default index, with the given mapping
        """
        self.delete_index(index_name)

        if not fts_mapping:
            map = {
                "mappings": es_mapping,
                "settings": BLEVE.STD_ANALYZER['settings']
            }
        else:
            # Find the ES equivalent char_filter, token_filter and tokenizer
            es_settings = self.populate_es_settings(
                fts_mapping['params']['mapping']['analysis']['analyzers'])

            # Create an ES custom index definition
            map = {"mappings": es_mapping, "settings": es_settings['settings']}

        # Create ES index
        try:
            self.__log.info("Creating %s with mapping %s" %
                            (index_name, json.dumps(map, indent=3)))
            status, content, _ = self._http_request(
                self.__connection_url + index_name, 'PUT', json.dumps(map))
            if status:
                self.__log.info("SUCCESS: ES index created with above mapping")
            else:
                raise Exception("Could not create ES index")
        except Exception as e:
            raise Exception("Could not create ES index : %s" % e)

    def populate_es_settings(self, fts_custom_analyzers_def):
        """
        Populates the custom analyzer definition of the ES Index Definition.
        Refers to the FTS Custom Analyzers definition and creates an
            equivalent definition for each ES custom analyzer
        :param fts_custom_analyzers_def: FTS Custom Analyzer Definition
        :return:
        """

        analyzer_map = {}
        for customAnalyzerName in fts_custom_analyzers_def:
            fts_char_filters = fts_custom_analyzers_def[customAnalyzerName][
                "char_filters"]
            fts_tokenizer = fts_custom_analyzers_def[customAnalyzerName][
                "tokenizer"]
            fts_token_filters = fts_custom_analyzers_def[customAnalyzerName][
                "token_filters"]

            analyzer_map[customAnalyzerName] = {
                "char_filter": [],
                "filter": [],
                "tokenizer": ""
            }

            for fts_char_filter in fts_char_filters:
                analyzer_map[customAnalyzerName]['char_filter'].append(
                    BLEVE.FTS_ES_ANALYZER_MAPPING['char_filters'][fts_char_filter])

            analyzer_map[customAnalyzerName]['tokenizer'] = \
                BLEVE.FTS_ES_ANALYZER_MAPPING['tokenizers'][fts_tokenizer]

            for fts_token_filter in fts_token_filters:
                analyzer_map[customAnalyzerName]['filter'].append(
                    BLEVE.FTS_ES_ANALYZER_MAPPING['token_filters'][fts_token_filter])

        analyzer = BLEVE.CUSTOM_ANALYZER
        analyzer['settings']['analysis']['analyzer'] = analyzer_map
        return analyzer

    def create_alias(self, name, indexes):
        """
        @name: alias name
        @indexes: list of target indexes
        """
        try:
            self.__log.info(
                "Checking if ES alias '{0}' exists...".format(name))
            self.delete_index(name)
            alias_info = {"actions": []}
            for index in indexes:
                alias_info['actions'].append(
                    {"add": {
                        "index": index,
                        "alias": name
                    }})
            self.__log.info("Creating ES alias '{0}' on {1}...".format(
                name, indexes))
            status, content, _ = self._http_request(
                self.__connection_url + "_aliases", 'POST',
                json.dumps(alias_info))
            if status:
                self.__log.info("ES alias '{0}' created".format(name))
                self.__indices.append(name)
        except Exception as ex:
            raise Exception("Could not create ES alias : %s" % ex)

    def async_load_ES(self, index_name, gen, op_type='create'):
        """
        Asynchronously load documents into the given ES index using the
        supplied generator.
        """

        _task = ESLoadGeneratorTask(es_instance=self,
                                    index_name=index_name,
                                    generator=gen,
                                    op_type=op_type)
        self.task_manager.schedule(_task)
        return _task

    def async_bulk_load_ES(self,
                           index_name,
                           gen,
                           op_type='create',
                           batch=5000):
        _task = ESBulkLoadGeneratorTask(es_instance=self,
                                        index_name=index_name,
                                        generator=gen,
                                        op_type=op_type,
                                        batch=batch)
        self.task_manager.schedule(_task)
        return _task

    def load_bulk_data(self, filename):
        """
        Bulk load to ES from a file
        curl -s -XPOST 172.23.105.25:9200/_bulk --data-binary @req
        cat req:
        { "index" : { "_index" : "default_es_index", "_type" : "aruna", "_id" : "1" } }
        { "field1" : "value1" , "field2" : "value2"}
        { "index" : { "_index" : "default_es_index", "_type" : "aruna", "_id" : "2" } }
        { "field1" : "value1" , "field2" : "value2"}
        """
        try:
            url = self.__connection_url + "_bulk"
            with open(filename, "rb") as f:
                data = f.read()
            status, content, _ = self._http_request(url, 'POST', data)
            return status
        except Exception as e:
            raise e

    def load_data(self, index_name, document_json, doc_type, doc_id):
        """
        index_name : name of index into which the doc is loaded
        document_json: json doc
        doc_type : type of doc. Usually the '_type' field in the doc body
        doc_id : document id
        """
        try:
            url = self.__connection_url + index_name + '/' + doc_type + '/' +\
                  doc_id
            status, content, _ = self._http_request(url, 'POST', document_json)
        except Exception as e:
            raise e

    def update_index(self, index_name):
        """
        This procedure will refresh index when insert is performed .
        Need to call this API to take search in effect.
        :param index_name:
        :return:
        """
        try:
            status, content, _ = self._http_request(
                self.__connection_url + index_name + '/_refresh', 'POST')
        except Exception as e:
            raise e

    def search(self, index_name, query, result_size=1000000):
        """
           This function will be used for search . based on the query
           :param index_name:
           :param query:
           :return: number of matches found, doc_ids and time taken
        """
        try:
            doc_ids = []
            self.__log.info("ES query '{0}' ".format(query))
            url = self.__connection_url + index_name + '/_search?size='+ \
                  str(result_size)
            status, content, _ = self._http_request(url, 'POST',
                                                    json.dumps(query))
            if status:
                content = json.loads(content)
                for doc in content['hits']['hits']:
                    doc_ids.append(doc['_id'])
                return content['hits']['total'], doc_ids, content['took']
        except Exception as e:
            self.__log.error("Couldn't run query on ES: %s, reason : %s" %
                             (json.dumps(query), e))
            raise e

    def get_index_count(self, index_name):
        """
         Returns count of docs in the index
        """
        try:
            status, content, _ = self._http_request(
                self.__connection_url + index_name + '/_count', 'POST')
            if status:
                return json.loads(content)['count']
        except Exception as e:
            raise e

    def get_indices(self):
        """
        Return all the indices created
        :return: List of all indices
        """
        return self.__indices

    def sleep(self, timeout=1, message=""):
        self.__log.info("sleep for {0} secs. {1} ...".format(timeout, message))
        time.sleep(timeout)
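
A minimal usage sketch (the host and logger objects are assumed, not defined above; host must expose ip and port, and only methods shown above are called):

es = ElasticSearchBase(host, logger)
if not es.is_running():
    es.restart_es()
es.create_empty_index("default_es_index")
es.load_data("default_es_index",
             json.dumps({"field1": "value1", "field2": "value2"}),
             doc_type="aruna", doc_id="1")
es.update_index("default_es_index")   # refresh so the new doc is searchable
total, doc_ids, took = es.search("default_es_index",
                                 {"query": {"match": {"field1": "value1"}}})
es.delete_indices()
es.task_manager.shutdown(force=True)
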
Example 3
class Cluster(object):
    """An API for interacting with Couchbase clusters"""

    def __init__(self):
        self.task_manager = TaskManager("Cluster_Thread")
        self.task_manager.start()

    def async_create_default_bucket(self, server, size, replicas=1, enable_replica_index=1, eviction_policy='valueOnly'):
        """Asynchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""

        _task = BucketCreateTask(server, 'default', replicas, size,
                                 enable_replica_index=enable_replica_index, eviction_policy=eviction_policy)
        self.task_manager.schedule(_task)
        return _task

    def async_create_sasl_bucket(self, server, name, password, size, replicas, enable_replica_index=1, eviction_policy='valueOnly'):
        """Asynchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, password=password,
                                 enable_replica_index=enable_replica_index, eviction_policy=eviction_policy)
        self.task_manager.schedule(_task)
        return _task

    def async_create_standard_bucket(self, server, name, port, size, replicas, enable_replica_index=1, eviction_policy='valueOnly'):
        """Asynchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, port,
                                 enable_replica_index=enable_replica_index, eviction_policy=eviction_policy)
        self.task_manager.schedule(_task)
        return _task

    def async_create_memcached_bucket(self, server, name, port, size, replicas):
        """Asynchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, port, bucket_type="memcached")
        self.task_manager.schedule(_task)
        return _task

    def async_bucket_delete(self, server, bucket='default'):
        """Asynchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            BucketDeleteTask - A task future that is a handle to the scheduled task."""
        _task = BucketDeleteTask(server, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_init_node(self, server, disabled_consistent_view=None,
                        rebalanceIndexWaitingDisabled=None, rebalanceIndexPausingDisabled=None,
                        maxParallelIndexers=None, maxParallelReplicaIndexers=None, port=None,
                        quota_percent=None):
        """Asynchronously initializes a node

        The scheduled task will initialize the node's username and password and
        set the node's memory quota to 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            disabled_consistent_view - disable consistent view
            rebalanceIndexWaitingDisabled - index waiting during rebalance(Boolean)
            rebalanceIndexPausingDisabled - index pausing during rebalance(Boolean)
            maxParallelIndexers - max parallel indexers threads(Int)
            maxParallelReplicaIndexers - max parallel replica indexers threads(int)
            port - port to initialize cluster
            quota_percent - percent of memory to initialize
        Returns:
            NodeInitTask - A task future that is a handle to the scheduled task."""
        _task = NodeInitializeTask(server, disabled_consistent_view, rebalanceIndexWaitingDisabled,
                          rebalanceIndexPausingDisabled, maxParallelIndexers, maxParallelReplicaIndexers,
                          port, quota_percent)
        self.task_manager.schedule(_task)
        return _task

    def async_load_gen_docs(self, server, bucket, generator, kv_store, op_type, exp=0, flag=0, only_store_hash=True,
                            batch_size=1, pause_secs=1, timeout_secs=5, proxy_client=None):
        if batch_size > 1:
            _task = BatchedLoadDocumentsTask(server, bucket, generator, kv_store, op_type, exp, flag, only_store_hash, batch_size, pause_secs, timeout_secs)
        else:
            if isinstance(generator, list):
                _task = LoadDocumentsGeneratorsTask(server, bucket, generator, kv_store, op_type, exp, flag, only_store_hash)
            else:
                _task = LoadDocumentsTask(server, bucket, generator, kv_store, op_type, exp, flag, only_store_hash, proxy_client)
        self.task_manager.schedule(_task)
        return _task

    def async_workload(self, server, bucket, kv_store, num_ops, create, read, update,
                       delete, exp):
        _task = WorkloadTask(server, bucket, kv_store, num_ops, create, read, update,
                             delete, exp)
        self.task_manager.schedule(_task)
        return _task

    def async_verify_data(self, server, bucket, kv_store, max_verify=None,
                          only_store_hash=True, batch_size=1, replica_to_read=None, timeout_sec=5):
        if batch_size > 1:
            _task = BatchedValidateDataTask(server, bucket, kv_store, max_verify, only_store_hash, batch_size, timeout_sec)
        else:
            _task = ValidateDataTask(server, bucket, kv_store, max_verify, only_store_hash, replica_to_read)
        self.task_manager.schedule(_task)
        return _task

    def async_verify_revid(self, src_server, dest_server, bucket, kv_store, ops_perf):
        _task = VerifyRevIdTask(src_server, dest_server, bucket, kv_store, ops_perf)
        self.task_manager.schedule(_task)
        return _task

    def async_rebalance(self, servers, to_add, to_remove, use_hostnames=False):
        """Asyncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])
            use_hostnames - True if nodes should be added using hostnames (Boolean)

        Returns:
            RebalanceTask - A task future that is a handle to the scheduled task"""
        _task = RebalanceTask(servers, to_add, to_remove, use_hostnames=use_hostnames)
        self.task_manager.schedule(_task)
        return _task

    def async_wait_for_stats(self, servers, bucket, param, stat, comparison, value):
        """Asynchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            StatsWaitTask - A task future that is a handle to the scheduled task"""
        _task = StatsWaitTask(servers, bucket, param, stat, comparison, value)
        self.task_manager.schedule(_task)
        return _task

    def create_default_bucket(self, server, size, replicas=1, timeout=600,
                              enable_replica_index=1, eviction_policy='valueOnly'):
        """Synchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)

        Returns:
            boolean - Whether or not the bucket was created."""

        _task = self.async_create_default_bucket(server, size, replicas,
                                                 enable_replica_index=enable_replica_index, eviction_policy=eviction_policy)
        return _task.result(timeout)

    def create_sasl_bucket(self, server, name, password, size, replicas, timeout=None):
        """Synchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_sasl_bucket(server, name, password, size, replicas)
        return _task.result(timeout)

    def create_standard_bucket(self, server, name, port, size, replicas, timeout=None):
        """Synchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_standard_bucket(server, name, port, size, replicas)
        return _task.result(timeout)

    def bucket_delete(self, server, bucket='default', timeout=None):
        """Synchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            boolean - Whether or not the bucket was deleted."""
        _task = self.async_bucket_delete(server, bucket)
        return _task.result(timeout)

    def init_node(self, server, async_init_node=True, disabled_consistent_view=None):
        """Synchronously initializes a node

        The scheduled task will initialize the node's username and password and
        set the node's memory quota to 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            disabled_consistent_view - disable consistent view

        Returns:
            boolean - Whether or not the node was properly initialized."""
        _task = self.async_init_node(server, disabled_consistent_view)
        return _task.result()

    def rebalance(self, servers, to_add, to_remove, timeout=None, use_hostnames=False):
        """Syncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])
            use_hostnames - True if nodes should be added using their hostnames (Boolean)

        Returns:
            boolean - Whether or not the rebalance was successful"""
        _task = self.async_rebalance(servers, to_add, to_remove, use_hostnames)
        return _task.result(timeout)

    def load_gen_docs(self, server, bucket, generator, kv_store, op_type, exp=0, timeout=None,
                      flag=0, only_store_hash=True, batch_size=1, proxy_client=None):
        _task = self.async_load_gen_docs(server, bucket, generator, kv_store, op_type, exp, flag,
                                         only_store_hash=only_store_hash, batch_size=batch_size, proxy_client=proxy_client)
        return _task.result(timeout)

    def workload(self, server, bucket, kv_store, num_ops, create, read, update, delete, exp, timeout=None):
        _task = self.async_workload(server, bucket, kv_store, num_ops, create, read, update,
                                    delete, exp)
        return _task.result(timeout)

    def verify_data(self, server, bucket, kv_store, timeout=None):
        _task = self.async_verify_data(server, bucket, kv_store)
        return _task.result(timeout)

    def wait_for_stats(self, servers, bucket, param, stat, comparison, value, timeout=None):
        """Synchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            boolean - Whether or not the correct stats state was seen"""
        _task = self.async_wait_for_stats(servers, bucket, param, stat, comparison, value)
        return _task.result(timeout)

    def shutdown(self, force=False):
        self.task_manager.shutdown(force)

    def async_create_view(self, server, design_doc_name, view, bucket="default", with_query=True,
                          check_replication=False, ddoc_options=None):
        """Asynchronously creates a views in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String) or (Bucket)
            with_query - Wait for indexing and query the view after creation (Boolean)
            check_replication - Should the test check replication or not (Boolean)
            ddoc_options - DDoc options to define automatic index building (minUpdateChanges, updateInterval ...) (Dict)
        Returns:
            ViewCreateTask - A task future that is a handle to the scheduled task."""
        _task = ViewCreateTask(server, design_doc_name, view, bucket, with_query, check_replication, ddoc_options)
        self.task_manager.schedule(_task)
        return _task

    def create_view(self, server, design_doc_name, view, bucket="default", timeout=None, with_query=True, check_replication=False):
        """Synchronously creates a views in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String) or (Bucket)
            with_query - Wait for indexing and query the view after creation (Boolean)

        Returns:
            string - revision number of design doc."""
        _task = self.async_create_view(server, design_doc_name, view, bucket, with_query, check_replication)
        return _task.result(timeout)

    def async_delete_view(self, server, design_doc_name, view, bucket="default"):
        """Asynchronously deletes a views in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String) or (Bucket)

        Returns:
            ViewDeleteTask - A task future that is a handle to the scheduled task."""
        _task = ViewDeleteTask(server, design_doc_name, view, bucket)
        self.task_manager.schedule(_task)
        return _task

    def delete_view(self, server, design_doc_name, view, bucket="default", timeout=None):
        """Synchronously deletes a views in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String) or (Bucket)

        Returns:
            boolean - Whether or not delete view was successful."""
        _task = self.async_delete_view(server, design_doc_name, view, bucket)
        return _task.result(timeout)


    def async_query_view(self, server, design_doc_name, view_name, query,
                         expected_rows=None, bucket="default", retry_time=2):
        """Asynchronously query a views in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            ViewQueryTask - A task future that is a handle to the scheduled task."""
        _task = ViewQueryTask(server, design_doc_name, view_name, query, expected_rows, bucket, retry_time)
        self.task_manager.schedule(_task)
        return _task

    def query_view(self, server, design_doc_name, view_name, query,
                   expected_rows=None, bucket="default", retry_time=2, timeout=None):
        """Synchronously query a views in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            The result of the scheduled ViewQueryTask."""
        _task = self.async_query_view(server, design_doc_name, view_name, query, expected_rows, bucket, retry_time)
        return _task.result(timeout)


    def modify_fragmentation_config(self, server, config, bucket="default", timeout=None):
        """Synchronously modify fragmentation configuration spec

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            config - New compaction configuration (dict - see task)
            bucket - The name of the bucket the fragmentation config applies to. (String)

        Returns:
            boolean - True if config values accepted."""

        _task = ModifyFragmentationConfigTask(server, config, bucket)
        self.task_manager.schedule(_task)
        return _task.result(timeout)

    def async_monitor_active_task(self, servers,
                                  type_task,
                                  target_value,
                                  wait_progress=100,
                                  num_iteration=100,
                                  wait_task=True):
        """Asynchronously monitor active task.

           When the active task reaches wait_progress, this method will return.

        Parameters:
            servers - list of servers or The server to handle fragmentation config task. (TestInputServer)
            type_task - task type('indexer' , 'bucket_compaction', 'view_compaction' ) (String)
            target_value - target value (for example "_design/ddoc" for indexing, bucket "default"
                for bucket_compaction or "_design/dev_view" for view_compaction) (String)
            wait_progress - expected progress (int)
            num_iteration - fail the test if progress has not changed after this many iterations (int)
            wait_task - expect to find task in the first attempt(bool)

        Returns:
            list of MonitorActiveTask - A task future that is a handle to the scheduled task."""
        _tasks = []
        if not isinstance(servers, list):
            servers = [servers, ]
        for server in servers:
            _task = MonitorActiveTask(server, type_task, target_value, wait_progress, num_iteration, wait_task)
            self.task_manager.schedule(_task)
            _tasks.append(_task)
        return _tasks

    def async_monitor_view_fragmentation(self, server,
                                         design_doc_name,
                                         fragmentation_value,
                                         bucket="default"):
        """Asynchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            MonitorViewFragmentationTask - A task future that is a handle to the scheduled task."""

        _task = MonitorViewFragmentationTask(server, design_doc_name,
                                             fragmentation_value, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_generate_expected_view_results(self, doc_generators, view, query, type_query="view"):
        """Asynchronously generate expected view query results

        Parameters:
            doc_generators - Generators used for loading docs (DocumentGenerator[])
            view - The view with map function (View)
            query - Query params to filter docs from the generator. (dict)
            type_query - type of query: "view" or "all_doc" (String)

        Returns:
            GenerateExpectedViewResultsTask - A task future that is a handle to the scheduled task."""

        _task = GenerateExpectedViewResultsTask(doc_generators, view, query, type_query)
        self.task_manager.schedule(_task)
        return _task

    def generate_expected_view_query_results(self, doc_generators, view, query, timeout=None, type_query='view'):
        """Synchronously generate expected view query results

        Parameters:
            doc_generators - Generators used for loading docs (DocumentGenerator[])
            view - The view with map function (View)
            query - Query params to filter docs from the generator. (dict)

        Returns:
            list - A list of rows expected to be returned for given query"""

        _task = self.async_generate_expected_view_results(doc_generators, view, query, type_query)
        return _task.result(timeout)

    def async_monitor_view_query(self, servers, design_doc_name, view_name,
                                 query, expected_docs=None, bucket="default",
                                 retries=100, error=None, verify_rows=False,
                                 server_to_query=0, type_query="view"):
        """
        Asynchronously monitor view query results:
        waits until the number of returned rows matches the number of expected rows

        Parameters:
            servers - servers to be checked (List of TestInputServer)
            design_doc_name - name of ddoc to query (String)
            view_name - name of view to query (String)
            query - query params (dict)
            expected_docs - expected emitted rows(list)
            bucket - bucket which contains ddoc (String or Bucket)
            retries - how many times to retry until the correct result is returned
            error - for negative tests, expected error raised by query results (String)
            verify_rows - verify values of returned results
            server_to_query - index of server to query (int)
            type_query - "view" or "all_doc" (String)
        """
        _task = MonitorViewQueryResultsTask(servers, design_doc_name, view_name,
                 query, expected_docs, bucket, retries, error, verify_rows, server_to_query,
                 type_query)
        self.task_manager.schedule(_task)
        return _task

    def async_view_query_verification(self, design_doc_name, view_name, query, expected_rows, num_verified_docs=20, bucket="default", query_timeout=20,
                                      results=None, server=None):
        """Asynchronously query a views in a design doc and does full verification of results

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            query - Query params being used with the query. (dict)
            expected_rows - The number of rows expected to be returned from the query (int)
            num_verified_docs - The number of docs to verify that require memcached gets (int)
            bucket - The name of the bucket containing items for this view. (String)
            query_timeout - The time to allow a query with stale=false to run. (int)
            retry_time - The time in seconds to wait before retrying failed queries (int)
            results - already gotten results to check, if None task will newly get results(dict)

        Returns:
            ViewQueryVerificationTask - A task future that is a handle to the scheduled task."""
        _task = ViewQueryVerificationTask(design_doc_name, view_name, query, expected_rows, server, num_verified_docs, bucket, query_timeout, results=results)
        self.task_manager.schedule(_task)
        return _task

    def view_query_verification(self, server, design_doc_name, view_name, query,
                                expected_rows, num_verified_docs=20,
                                bucket="default", query_timeout=20, timeout=None,
                                results=None):
        """Synchronously query a views in a design doc and does full verification of results

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            query - Query params being used with the query. (dict)
            expected_rows - The number of rows expected to be returned from the query (int)
            num_verified_docs - The number of docs to verify that require memcached gets (int)
            bucket - The name of the bucket containing items for this view. (String)
            query_timeout - The time to allow a query with stale=false to run. (int)
            retry_time - The time in seconds to wait before retrying failed queries (int)
            results - already gotten results to check, if None task will newly get results(dict)

        Returns:
            dict - An object with keys: passed = True or False
                                        errors = reasons why verification failed """
        _task = self.async_view_query_verification(design_doc_name, view_name, query, expected_rows, num_verified_docs, bucket, query_timeout, results=results, server=server)
        return _task.result(timeout)


    def monitor_view_fragmentation(self, server,
                                   design_doc_name,
                                   fragmentation_value,
                                   bucket="default",
                                   timeout=None):
        """Synchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            boolean - True if <fragmentation_value> reached"""

        _task = self.async_monitor_view_fragmentation(server, design_doc_name,
                                                      fragmentation_value,
                                                      bucket)
        return _task.result(timeout)

    def async_compact_view(self, server, design_doc_name, bucket="default", with_rebalance=False):
        """Asynchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)
            with_rebalance - there are two cases that process this parameter:
                "Error occured reading set_view _info" will be ignored if True
                (This applies to rebalance in case),
                and with concurrent updates(for instance, with rebalance)
                it's possible that compaction value has not changed significantly

        Returns:
            ViewCompactionTask - A task future that is a handle to the scheduled task."""


        _task = ViewCompactionTask(server, design_doc_name, bucket, with_rebalance)
        self.task_manager.schedule(_task)
        return _task

    def compact_view(self, server, design_doc_name, bucket="default", timeout=None, with_rebalance=False):
        """Synchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)
            with_rebalance - "Error occured reading set_view _info" will be ignored if True
                and with concurrent updates(for instance, with rebalance)
                it's possible that compaction value has not changed significantly

        Returns:
            boolean - True if the file size was reduced after compaction, False if successful but no work was done"""

        _task = self.async_compact_view(server, design_doc_name, bucket, with_rebalance)
        return _task.result(timeout)

    def async_failover(self, servers, to_failover):
        """Asyncronously fails over nodes

        Parameters:
            servers - All servers participating in the failover ([TestInputServers])
            to_failover - All servers being failed over ([TestInputServers])

        Returns:
            FailoverTask - A task future that is a handle to the scheduled task"""
        _task = FailoverTask(servers, to_failover)
        self.task_manager.schedule(_task)
        return _task

    def failover(self, servers, to_failover, timeout=None):
        """Syncronously fails over nodes

        Parameters:
            servers - All servers participating in the failover ([TestInputServers])
            to_failover - All servers being failed over ([TestInputServers])

        Returns:
            boolean - Whether or not the failover was successful"""
        _task = self.async_failover(servers, to_failover)
        return _task.result(timeout)

    def async_bucket_flush(self, server, bucket='default'):
        """Asynchronously flushes a bucket

        Parameters:
            server - The server to flush the bucket on. (TestInputServer)
            bucket - The name of the bucket to be flushed. (String)

        Returns:
            BucketFlushTask - A task future that is a handle to the scheduled task."""
        _task = BucketFlushTask(server, bucket)
        self.task_manager.schedule(_task)
        return _task

    def bucket_flush(self, server, bucket='default', timeout=None):
        """Synchronously flushes a bucket

        Parameters:
            server - The server to flush the bucket on. (TestInputServer)
            bucket - The name of the bucket to be flushed. (String)

        Returns:
            boolean - Whether or not the bucket was flushed."""
        _task = self.async_bucket_flush(server, bucket)
        return _task.result(timeout)

    def async_monitor_db_fragmentation(self, server, fragmentation, bucket):
        """Asyncronously monitor db fragmentation

        Parameters:
            server - server to check (TestInputServer)
            bucket - bucket to check
            fragmentation - fragmentation to reach

        Returns:
            MonitorDBFragmentationTask - A task future that is a handle to the scheduled task"""
        _task = MonitorDBFragmentationTask(server, fragmentation, bucket)
        self.task_manager.schedule(_task)
        return _task

    def cbrecovery(self, src_server, dest_server, bucket_src='', bucket_dest='', username='', password='',
                 username_dest='', password_dest='', verbose=False, wait_completed=True, timeout=None):
        """Synchronously run and monitor cbrecovery

        Parameters:
            src_server - source cluster to restore data from(TestInputServers)
            dest_server - destination cluster to restore data to(TestInputServers)
            bucket_src - source bucket to recover from
            bucket_dest - destination bucket to recover to
            username - REST username for source cluster
            password - REST password for source cluster
            username_dest - REST username for destination cluster or server node
            password_dest - REST password for destination cluster or server node
            verbose - verbose logging; more -v's provide more verbosity
            wait_completed - wait for the end of the cbrecovery

        Returns:
            boolean - Whether or not the cbrecovery completed successfully"""
        _task = self.async_cbrecovery(src_server, dest_server, bucket_src, bucket_dest, username, password,
                 username_dest, password_dest, verbose, wait_completed)
        return _task.result(timeout)

    def async_cbrecovery(self, src_server, dest_server, bucket_src='', bucket_dest='', username='', password='',
                 username_dest='', password_dest='', verbose=False, wait_completed=True):
        """Asyncronously run/monitor cbrecovery

        Parameters:
            src_server - source cluster to restore data from(TestInputServers)
            dest_server - destination cluster to restore data to(TestInputServers)
            bucket_src - source bucket to recover from
            bucket_dest - destination bucket to recover to
            username - REST username for source cluster
            password - REST password for source cluster
            username_dest - REST username for destination cluster or server node
            password_dest - REST password for destination cluster or server node
            verbose - verbose logging; more -v's provide more verbosity
            wait_completed - wait for the end of the cbrecovery

        Returns:
            CBRecoveryTask - A task future that is a handle to the scheduled task"""
        _task = CBRecoveryTask(src_server, dest_server, bucket_src, bucket_dest, username, password,
                 username_dest, password_dest, verbose, wait_completed)
        self.task_manager.schedule(_task)
        return _task
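
    # Illustrative usage sketch (object names such as `cluster`, `src` and
    # `dest` are assumptions): restore a bucket from a source cluster to a
    # destination cluster and wait for the recovery to finish.
    #
    #     task = cluster.async_cbrecovery(src, dest,
    #                                     bucket_src="default",
    #                                     bucket_dest="default",
    #                                     username="Administrator",
    #                                     password="password")
    #     task.result(timeout=600)
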
Example no. 4
class ServerTasks(object):
    """A Task API for performing various operations synchronously or asynchronously on Couchbase cluster."""

    def __init__(self, task_manager=None):
        self.task_manager = TaskManager("Cluster_Thread")
        self.jython_task_manager = task_manager if task_manager is not None else jython_task_manager()

    def async_create_bucket(self, server, bucket):
        """Asynchronously creates the default bucket

        Parameters:
            bucket_params - a dictionary containing bucket creation parameters. (Dict)
        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
#         bucket_params['bucket_name'] = 'default'
        _task = conc.BucketCreateTask(server, bucket, task_manager=self.task_manager)
        self.task_manager.schedule(_task)
        return _task

    def sync_create_bucket(self, server, bucket):
        """Synchronously creates the default bucket

        Parameters:
            bucket_params - a dictionary containing bucket creation parameters. (Dict)
        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
#         bucket_params['bucket_name'] = 'default'
        _task = conc.BucketCreateTask(server, bucket, task_manager=self.task_manager)
        self.task_manager.schedule(_task)
        return _task.get_result()
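
    # Illustrative usage sketch (assumes `tasks = ServerTasks()`, a
    # TestInputServer `server` and a bucket object `bucket` built elsewhere):
    #
    #     create_task = tasks.async_create_bucket(server, bucket)  # fire and forget
    #     created = create_task.get_result()                       # wait when needed
    #
    #     # or create and wait in one call:
    #     created = tasks.sync_create_bucket(server, bucket)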
    
    def async_bucket_delete(self, server, bucket='default'):
        """Asynchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            BucketDeleteTask - A task future that is a handle to the scheduled task."""
        _task = conc.BucketDeleteTask(server, self.task_manager, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_failover(self, servers=[], failover_nodes=[], graceful=False,
                       use_hostnames=False, wait_for_pending=0):
        """Asynchronously failover a set of nodes

        Parameters:
            servers - servers used for connection. (TestInputServer)
            failover_nodes - The set of servers that will undergo failover. (TestInputServer)
            graceful - True for graceful failover, False for hard failover. (Boolean)

        Returns:
            FailOverTask - A task future that is a handle to the scheduled task."""
        _task = conc.FailoverTask(servers, task_manager=self.task_manager,
                             to_failover=failover_nodes,
                             graceful=graceful, use_hostnames=use_hostnames,
                             wait_for_pending=wait_for_pending)
        self.task_manager.schedule(_task)
        return _task
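
    # Illustrative usage sketch (the server slices are assumptions): trigger a
    # graceful failover of the last initialized node and wait on the task.
    #
    #     failover_task = tasks.async_failover(
    #         servers=self.servers[:self.nodes_init],
    #         failover_nodes=[self.servers[self.nodes_init - 1]],
    #         graceful=True)
    #     failover_task.result()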

    def async_init_node(self, server, disabled_consistent_view=None,
                        rebalanceIndexWaitingDisabled=None, rebalanceIndexPausingDisabled=None,
                        maxParallelIndexers=None, maxParallelReplicaIndexers=None, port=None,
                        quota_percent=None, services=None, index_quota_percent=None, gsi_type='forestdb'):
        """Asynchronously initializes a node

        The scheduled task will initialize the node's username and password and will set
        the node's memory quota to 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            disabled_consistent_view - disable consistent view
            rebalanceIndexWaitingDisabled - index waiting during rebalance(Boolean)
            rebalanceIndexPausingDisabled - index pausing during rebalance(Boolean)
            maxParallelIndexers - max parallel indexers threads(Int)
            index_quota_percent - index quota used by the GSI service (added in the Sherlock release)
            maxParallelReplicaIndexers - max parallel replica indexers threads(int)
            port - port to initialize cluster
            quota_percent - percent of memory to initialize
            services - can be kv, n1ql, index
            gsi_type - Indexer Storage Mode
        Returns:
            NodeInitTask - A task future that is a handle to the scheduled task."""
        _task = conc.NodeInitializeTask(server, self.task_manager, disabled_consistent_view,
                                        rebalanceIndexWaitingDisabled, rebalanceIndexPausingDisabled,
                                        maxParallelIndexers, maxParallelReplicaIndexers,
                                        port, quota_percent, services=services,
                                        index_quota_percent=index_quota_percent,
                                        gsi_type=gsi_type)

        self.task_manager.schedule(_task)
        return _task
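
    # Illustrative usage sketch (the node and quota values are assumptions):
    # initialize a KV+index node asynchronously and wait for the result.
    #
    #     init_task = tasks.async_init_node(new_node, services=["kv", "index"],
    #                                       quota_percent=80,
    #                                       index_quota_percent=20)
    #     init_task.result()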

    def async_load_gen_docs(self, cluster, bucket, generator, op_type, exp=0, flag=0, persist_to=0, replicate_to=0,
                            only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=5, compression=True,
                            process_concurrency=8):

        log.info("Loading documents to {}".format(bucket.name))
        client = VBucketAwareMemcached(RestConnection(cluster.master), bucket)
        _task = jython_tasks.LoadDocumentsGeneratorsTask(cluster, self.jython_task_manager, bucket, client, [generator],
                                                        op_type, exp, flag=flag, persist_to=persist_to,
                                                        replicate_to=replicate_to, only_store_hash=only_store_hash,
                                                        batch_size=batch_size,
                                                        pause_secs=pause_secs, timeout_secs=timeout_secs,
                                                        compression=compression,
                                                        process_concurrency=process_concurrency)
        self.jython_task_manager.add_new_task(_task)
        return _task
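
    # Illustrative usage sketch (`doc_gen`, `cluster` and `bucket` are
    # assumptions): load documents in the background and wait for the load to
    # complete through the jython task manager.
    #
    #     load_task = tasks.async_load_gen_docs(cluster, bucket, doc_gen,
    #                                           op_type="create", exp=0,
    #                                           batch_size=100,
    #                                           process_concurrency=8)
    #     tasks.jython_task_manager.get_task_result(load_task)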

    def async_validate_docs(self, cluster, bucket, generator, opt_type, exp=0, flag=0, only_store_hash=True,
                            batch_size=1, pause_secs=1, timeout_secs=5, compression=True, process_concurrency=4):
        log.info("Validating documents")
        client = VBucketAwareMemcached(RestConnection(cluster.master), bucket)
        _task = jython_tasks.DocumentsValidatorTask(cluster, self.jython_task_manager, bucket, client, [generator],
                                                    opt_type, exp, flag=flag, only_store_hash=only_store_hash, batch_size=batch_size,
                                                        pause_secs=pause_secs, timeout_secs=timeout_secs, compression=compression,
                                                        process_concurrency=process_concurrency)
        self.jython_task_manager.add_new_task(_task)
        return _task

#     def async_load_gen_docs_java(self, server, bucket, start_from, num_items=10000):
#         def read_json_tempelate(path):
#             import json
#             istream = open(path);
#             with istream as data_file:    
#                 data = json.load(data_file)
#             return data["key"], data["value"]
#         
#         path = "b/testdata.json"
#         k,v = read_json_tempelate(path)
#         
#         _task = conc.LoadDocumentsTask_java(self.task_manager, server, bucket, num_items, start_from, k, v)
# 
#         self.task_manager.schedule(_task)
#         return _task

    def async_rebalance(self, servers, to_add, to_remove, use_hostnames=False, services = None, check_vbucket_shuffling=True):
        """Asyncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])
            use_hostnames - True if nodes should be added using hostnames (Boolean)

        Returns:
            RebalanceTask - A task future that is a handle to the scheduled task"""
        _task = jython_tasks.rebalanceTask(servers, to_add, to_remove,
                 use_hostnames=use_hostnames, services=services, check_vbucket_shuffling=check_vbucket_shuffling)
        self.jython_task_manager.add_new_task(_task)
        return _task
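
    # Illustrative usage sketch (node lists are assumptions): a swap rebalance
    # that adds one node, removes another and then blocks on the result.
    #
    #     rebalance_task = tasks.async_rebalance(cluster_nodes,
    #                                            to_add=[new_node],
    #                                            to_remove=[old_node],
    #                                            services=["kv"])
    #     tasks.jython_task_manager.get_task_result(rebalance_task)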

    def async_wait_for_stats(self, cluster, bucket, param, stat, comparison, value):
        """Asynchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            cluster - The cluster to get stats from. The result from each node is
                added together before comparing. (Cluster)
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            StatsWaitTask - A task future that is a handle to the scheduled task"""
        _task = jython_tasks.StatsWaitTask(cluster, bucket, param, stat, comparison, value)
        self.jython_task_manager.add_new_task(_task)
        return _task
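
    # Illustrative usage sketch (the stat name is a common choice, not taken
    # from the original snippet): wait until the disk write queue of a bucket
    # drains to zero.
    #
    #     stats_task = tasks.async_wait_for_stats(cluster, bucket, "",
    #                                             stat="ep_queue_size",
    #                                             comparison="==", value=0)
    #     tasks.jython_task_manager.get_task_result(stats_task)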

    def create_default_bucket(self, bucket_params, timeout=600):
        """Synchronously creates the default bucket

        Parameters:
            bucket_params - A dictionary containing a list of bucket creation parameters. (Dict)

        Returns:
            boolean - Whether or not the bucket was created."""

        _task = self.async_create_default_bucket(bucket_params)
        return _task.get_result(timeout)

    def create_sasl_bucket(self, name, password, bucket_params, timeout=None):
        """Synchronously creates a sasl bucket

        Parameters:
            bucket_params - A dictionary containing a list of bucket creation parameters. (Dict)

        Returns:
            boolean - Whether or not the bucket was created."""

        _task = self.async_create_sasl_bucket(name, password, bucket_params)
        self.task_manager.schedule(_task)
        return _task.get_result(timeout)

    def create_standard_bucket(self, name, port, bucket_params, timeout=None):
        """Synchronously creates a standard bucket
        Parameters:
            bucket_params - A dictionary containing a list of bucket creation parameters. (Dict)
        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_standard_bucket(name, port, bucket_params)
        return _task.get_result(timeout)

    def bucket_delete(self, server, bucket='default', timeout=None):
        """Synchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            boolean - Whether or not the bucket was deleted."""
        _task = self.async_bucket_delete(server, bucket)
        return _task.get_result(timeout)

    def init_node(self, server, async_init_node=True, disabled_consistent_view=None, services = None, index_quota_percent = None):
        """Synchronously initializes a node

        The scheduled task will initialize the node's username and password and will set
        the node's memory quota to 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            index_quota_percent - index quota percentage
            disabled_consistent_view - disable consistent view

        Returns:
            boolean - Whether or not the node was properly initialized."""
        _task = self.async_init_node(server, disabled_consistent_view=disabled_consistent_view,
                                     services=services, index_quota_percent=index_quota_percent)
        return _task.result()

    def rebalance(self, servers, to_add, to_remove, timeout=None, use_hostnames=False, services = None):
        """Syncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])
            use_hostnames - True if nodes should be added using their hostnames (Boolean)
            services - Services definition per node, default is None (available since the Sherlock release)
        Returns:
            boolean - Whether or not the rebalance was successful"""
        _task = self.async_rebalance(servers, to_add, to_remove, use_hostnames, services = services)
        result = self.jython_task_manager.get_task_result(_task)
        return result

    def load_gen_docs(self, cluster, bucket, generator, op_type, exp=0,
                      flag=0, persist_to=0, replicate_to=0, only_store_hash=True,
                      batch_size=1, compression=True):
        _task = self.async_load_gen_docs(cluster, bucket, generator, op_type, exp, flag, persist_to=persist_to,
                                         replicate_to=replicate_to,
                                         only_store_hash=only_store_hash, batch_size=batch_size, 
                                         compression=compression)
        return self.jython_task_manager.get_task_result(_task)

    def verify_data(self, server, bucket, kv_store, timeout=None, compression=True):
        _task = self.async_verify_data(server, bucket, kv_store, compression=compression)
        return _task.result(timeout)

    def async_verify_data(self, server, bucket, kv_store, max_verify=None,
                          only_store_hash=True, batch_size=1, replica_to_read=None, timeout_sec=5, compression=True):
        if batch_size > 1:
            _task = conc.BatchedValidateDataTask(server, bucket, kv_store, max_verify, only_store_hash, batch_size, 
                                                 timeout_sec, self.task_manager, compression=compression)
        else:
            _task = conc.ValidateDataTask(server, bucket, kv_store, max_verify, only_store_hash, replica_to_read, 
                                          self.task_manager, compression=compression)
        self.task_manager.schedule(_task)
        return _task
    
    def wait_for_stats(self, cluster, bucket, param, stat, comparison, value, timeout=None):
        """Synchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            cluster - The cluster to get stats from. The result from each node is
                added together before comparing. (Cluster)
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            boolean - Whether or not the correct stats state was seen"""
        _task = self.async_wait_for_stats(cluster, bucket, param, stat, comparison, value)
        return self.jython_task_manager.get_task_result(_task)

    def shutdown(self, force=False):
        self.task_manager.shutdown(force)
        if force:
            log.info("Cluster instance shutdown with force")

    def async_n1ql_query_verification(self, server, bucket, query, n1ql_helper=None,
                                      expected_result=None, is_explain_query=False,
                                      index_name=None, verify_results=True, retry_time=2,
                                      scan_consistency=None, scan_vector=None):
        """Asynchronously runs n1ql querya and verifies result if required

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            query - Query params being used with the query. (dict)
            expected_result - expected result after querying
            is_explain_query - is query explain query
            index_name - index related to query
            bucket - The name of the bucket containing items for this view. (String)
            verify_results -  Verify results after query runs successfully
            retry_time - The time in seconds to wait before retrying failed queries (int)
            n1ql_helper - n1ql helper object
            scan_consistency - consistency value for querying
            scan_vector - scan vector used for consistency
        Returns:
            N1QLQueryTask - A task future that is a handle to the scheduled task."""
        _task = jython_tasks.N1QLQueryTask(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 query = query, expected_result=expected_result,
                 verify_results = verify_results,
                 is_explain_query = is_explain_query,
                 index_name = index_name,
                 retry_time= retry_time,
                 scan_consistency = scan_consistency,
                 scan_vector = scan_vector)
        self.jython_task_manager.add_new_task(_task)
        return _task

    def n1ql_query_verification(self, server, bucket, query, n1ql_helper = None,
                                expected_result=None, is_explain_query = False,
                                index_name = None, verify_results = True,
                                scan_consistency = None, scan_vector = None,
                                retry_time=2, timeout = 60):
        """Synchronously runs n1ql querya and verifies result if required

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            query - Query params being used with the query. (dict)
            expected_result - expected result after querying
            is_explain_query - is query explain query
            index_name - index related to query
            bucket - The name of the bucket containing items for this view. (String)
            verify_results -  Verify results after query runs successfully
            retry_time - The time in seconds to wait before retrying failed queries (int)
            n1ql_helper - n1ql helper object
            scan_consistency - consistency used during querying
            scan_vector - vector used during querying
            timeout - timeout for task
        Returns:
            The result of the scheduled N1QLQueryTask once it completes."""
        _task = self.async_n1ql_query_verification(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 query = query, expected_result=expected_result,
                 is_explain_query = is_explain_query,
                 index_name = index_name,
                 verify_results = verify_results,
                 retry_time= retry_time,
                 scan_consistency = scan_consistency,
                 scan_vector = scan_vector)
        return self.jython_task_manager.get_task_result(_task)
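
    # Illustrative usage sketch (the query text, helper object and node are
    # assumptions): run a N1QL query synchronously and check it against an
    # expected result set.
    #
    #     query = "SELECT name FROM default WHERE age > 30"
    #     tasks.n1ql_query_verification(server=query_node, bucket="default",
    #                                   query=query, n1ql_helper=helper,
    #                                   expected_result=expected_rows)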

    def async_create_index(self, server, bucket, query, n1ql_helper = None,
                           index_name = None, defer_build = False, retry_time=2,
                           timeout = 240):
        """Asynchronously runs create index task

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            query - Query params being used with the query.
            bucket - The name of the bucket containing items for this view. (String)
            index_name - Name of the index to be created
            defer_build - build is defered
            retry_time - The time in seconds to wait before retrying failed queries (int)
            n1ql_helper - n1ql helper object
            timeout - timeout for index to come online
        Returns:
            CreateIndexTask - A task future that is a handle to the scheduled task."""
        _task = jython_tasks.CreateIndexTask(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 defer_build = defer_build,
                 index_name = index_name,
                 query = query,
                 retry_time= retry_time,
                 timeout = timeout)
        self.jython_task_manager.add_new_task(_task)
        return _task

    def async_monitor_index(self, server, bucket, n1ql_helper = None,
                            index_name = None, retry_time=2, timeout = 240):
        """Asynchronously runs create index task

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            query - Query params being used with the query.
            bucket - The name of the bucket containing items for this view. (String)
            index_name - Name of the index to be created
            retry_time - The time in seconds to wait before retrying failed queries (int)
            timeout - timeout for index to come online
            n1ql_helper - n1ql helper object
        Returns:
            MonitorIndexTask - A task future that is a handle to the scheduled task."""
        _task = jython_tasks.MonitorIndexTask(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 index_name = index_name,
                 retry_time= retry_time,
                 timeout = timeout)
        self.jython_task_manager.add_new_task(_task)
        return _task

    def async_build_index(self, server, bucket, query, n1ql_helper = None, retry_time=2):
        """Asynchronously runs create index task

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            query - Query params being used with the query.
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)
            n1ql_helper - n1ql helper object
        Returns:
            BuildIndexTask - A task future that is a handle to the scheduled task."""
        _task = jython_tasks.BuildIndexTask(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 query = query,
                 retry_time= retry_time)
        self.jython_task_manager.add_new_task(_task)
        return _task

    def create_index(self, server, bucket, query, n1ql_helper = None, index_name = None,
                     defer_build = False, retry_time=2, timeout= 60):
        """Asynchronously runs drop index task

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            query - Query params being used with the query.
            bucket - The name of the bucket containing items for this view. (String)
            index_name - Name of the index to be created
            retry_time - The time in seconds to wait before retrying failed queries (int)
            n1ql_helper - n1ql helper object
            defer_build - defer the build
            timeout - timeout for the task
        Returns:
            The result of the scheduled CreateIndexTask once it completes."""
        _task = self.async_create_index(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 query = query,
                 index_name = index_name,
                 defer_build = defer_build,
                 retry_time= retry_time)
        return self.jython_task_manager.get_task_result(_task)

    def async_drop_index(self, server = None, bucket = "default", query = None,
                         n1ql_helper = None, index_name = None, retry_time=2):
        """Synchronously runs drop index task

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            query - Query params being used with the query.
            bucket - The name of the bucket containing items for this view. (String)
            index_name - Name of the index to be dropped
            retry_time - The time in seconds to wait before retrying failed queries (int)
            n1ql_helper - n1ql helper object
        Returns:
            DropIndexTask - A task future that is a handle to the scheduled task."""
        _task = jython_tasks.DropIndexTask(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 query = query,
                 index_name = index_name,
                 retry_time= retry_time)
        self.jython_task_manager.add_new_task(_task)
        return _task

    def drop_index(self, server, bucket, query, n1ql_helper = None,
                   index_name = None, retry_time=2, timeout = 60):
        """Synchronously runs drop index task

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            query - Query params being used with the query. (dict)
            bucket - The name of the bucket containing items for this view. (String)
            index_name - Name of the index to be dropped
            retry_time - The time in seconds to wait before retrying failed queries (int)
            n1ql_helper - n1ql helper object
            timeout - timeout for the task
        Returns:
            The result of the scheduled DropIndexTask once it completes."""
        _task = self.async_drop_index(n1ql_helper = n1ql_helper,
                 server = server, bucket = bucket,
                 query = query,
                 index_name = index_name,
                 retry_time= retry_time)
        return self.jython_task_manager.get_task_result(_task)
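
    # Illustrative usage sketch (index name, statements and helper objects are
    # assumptions): a typical secondary-index lifecycle built from the helpers
    # above - create the index deferred, build it, wait for it to come online,
    # then drop it.
    #
    #     create_q = 'CREATE INDEX idx_age ON default(age) WITH {"defer_build": true}'
    #     tasks.create_index(server=query_node, bucket="default", query=create_q,
    #                        n1ql_helper=helper, index_name="idx_age",
    #                        defer_build=True)
    #     build_task = tasks.async_build_index(server=query_node, bucket="default",
    #                                          query="BUILD INDEX ON default(idx_age)",
    #                                          n1ql_helper=helper)
    #     tasks.jython_task_manager.get_task_result(build_task)
    #     monitor_task = tasks.async_monitor_index(server=query_node, bucket="default",
    #                                              n1ql_helper=helper,
    #                                              index_name="idx_age")
    #     tasks.jython_task_manager.get_task_result(monitor_task)
    #     tasks.drop_index(server=query_node, bucket="default",
    #                      query="DROP INDEX default.idx_age",
    #                      n1ql_helper=helper, index_name="idx_age")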

    def failover(self, servers=[], failover_nodes=[], graceful=False, use_hostnames=False, timeout=None):
        """Synchronously fails over a set of nodes

        Parameters:
            servers - Servers used for the REST connection. (TestInputServer)
            failover_nodes - Servers to be failed over, i.e. removed from the cluster. (TestInputServer)
            graceful - True for graceful failover, False for hard failover. (Boolean)

        Returns:
            boolean - Whether or not the failover was successful."""
        _task = self.async_failover(servers, failover_nodes, graceful, use_hostnames)
        return _task.result(timeout)

    def async_bucket_flush(self, server, bucket='default'):
        """Asynchronously flushes a bucket

        Parameters:
            server - The server to flush the bucket on. (TestInputServer)
            bucket - The name of the bucket to be flushed. (String)

        Returns:
            BucketFlushTask - A task future that is a handle to the scheduled task."""
        _task = conc.BucketFlushTask(server,self.task_manager,bucket)
        self.task_manager.schedule(_task)
        return _task

    def bucket_flush(self, server, bucket='default', timeout=None):
        """Synchronously flushes a bucket

        Parameters:
            server - The server to flush the bucket on. (TestInputServer)
            bucket - The name of the bucket to be flushed. (String)

        Returns:
            boolean - Whether or not the bucket was flushed."""
        _task = self.async_bucket_flush(server, bucket)
        return _task.get_result(timeout)

    def async_compact_bucket(self, server, bucket="default"):
        """Asynchronously starts bucket compaction

        Parameters:
            server - source couchbase server
            bucket - bucket to compact

        Returns:
            CompactBucketTask - A task future that is a handle to the scheduled task."""
        _task = conc.CompactBucketTask(server, self.task_manager, bucket)
        self.task_manager.schedule(_task)
        return _task

    def compact_bucket(self, server, bucket="default"):
        """Synchronously runs bucket compaction and monitors progress

        Parameters:
            server - source couchbase server
            bucket - bucket to compact

        Returns:
            boolean - Whether or not the compaction completed successfully"""
        _task = self.async_compact_bucket(server, bucket)
        status = _task.get_result()
        return status
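
    # Illustrative usage sketch (`server` is an assumption): trigger compaction
    # on a bucket and assert on the outcome.
    #
    #     compaction_task = tasks.async_compact_bucket(server, bucket="default")
    #     if not compaction_task.get_result():
    #         raise AssertionError("Bucket compaction failed")
    #
    #     # or synchronously:
    #     assert tasks.compact_bucket(server, bucket="default")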

    def async_cbas_query_execute(self, master, cbas_server, cbas_endpoint, statement, bucket='default', mode=None, pretty=True):
        """
        Asynchronously execute a CBAS query
        :param master: Master server
        :param cbas_server: CBAS server
        :param cbas_endpoint: CBAS Endpoint URL (/analytics/service)
        :param statement: Query to be executed
        :param bucket: bucket to connect
        :param mode: Query Execution mode
        :param pretty: Pretty formatting
        :return: task with the output or error message
        """
        _task = conc.CBASQueryExecuteTask(master, cbas_server, self.task_manager, cbas_endpoint, statement, bucket,
                                          mode, pretty)
        self.task_manager.schedule(_task)
        return _task
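
    # Illustrative usage sketch (the dataset name and node objects are
    # assumptions): run an analytics statement against a CBAS node and wait
    # for the task to finish.
    #
    #     cbas_task = tasks.async_cbas_query_execute(
    #         master, cbas_node, "/analytics/service",
    #         statement="SELECT COUNT(*) FROM default_ds;")
    #     cbas_task.get_result()
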
class AutoFailoverBaseTest(BaseTestCase):
    MAX_FAIL_DETECT_TIME = 120
    ORCHESTRATOR_TIMEOUT_BUFFER = 60

    def setUp(self):
        super(AutoFailoverBaseTest, self).setUp()
        self._get_params()
        self.rest = RestConnection(self.orchestrator)
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.node_failure_task_manager = TaskManager(
            "Nodes_failure_detector_thread")
        self.node_failure_task_manager.start()
        self.initial_load_gen = BlobGenerator('auto-failover',
                                              'auto-failover-',
                                              self.value_size,
                                              end=self.num_items)
        self.update_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             end=self.update_items)
        self.delete_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             start=self.update_items,
                                             end=self.delete_items)
        self._load_all_buckets(self.servers[0], self.initial_load_gen,
                               "create", 0)
        self._async_load_all_buckets(self.orchestrator, self.update_load_gen,
                                     "update", 0)
        self._async_load_all_buckets(self.orchestrator, self.delete_load_gen,
                                     "delete", 0)
        self.server_index_to_fail = self.input.param("server_index_to_fail",
                                                     None)
        if self.server_index_to_fail is None:
            self.server_to_fail = self._servers_to_fail()
        else:
            self.server_to_fail = [self.servers[self.server_index_to_fail]]
        self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                           self.nodes_in]
        self.servers_to_remove = self.servers[self.nodes_init -
                                              self.nodes_out:self.nodes_init]

    def bareSetUp(self):
        super(AutoFailoverBaseTest, self).setUp()
        self._get_params()
        self.rest = RestConnection(self.orchestrator)
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.node_failure_task_manager = TaskManager(
            "Nodes_failure_detector_thread")
        self.node_failure_task_manager.start()
        self.initial_load_gen = BlobGenerator('auto-failover',
                                              'auto-failover-',
                                              self.value_size,
                                              end=self.num_items)
        self.update_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             end=self.update_items)
        self.delete_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             start=self.update_items,
                                             end=self.delete_items)
        self.server_to_fail = self._servers_to_fail()
        self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                           self.nodes_in]
        self.servers_to_remove = self.servers[self.nodes_init -
                                              self.nodes_out:self.nodes_init]

    def tearDown(self):
        self.log.info("============AutoFailoverBaseTest teardown============")
        self._get_params()
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.server_to_fail = self._servers_to_fail()
        self.start_couchbase_server()
        self.sleep(10)
        self.disable_firewall()
        self.rest = RestConnection(self.orchestrator)
        self.rest.reset_autofailover()
        self.disable_autofailover()
        self._cleanup_cluster()
        super(AutoFailoverBaseTest, self).tearDown()
        if hasattr(self, "node_monitor_task"):
            if self.node_monitor_task._exception:
                self.fail("{}".format(self.node_monitor_task._exception))
            self.node_monitor_task.stop = True
        self.task_manager.shutdown(force=True)

    def shuffle_nodes_between_zones_and_rebalance(self, to_remove=None):
        """
        Shuffle the nodes present in the cluster if zone > 1. Rebalance the nodes in the end.
        Nodes are divided into groups iteratively i.e. 1st node in Group 1, 2nd in Group 2, 3rd in Group 1 and so on, when
        zone=2.
        :param to_remove: List of nodes to be removed.
        """
        if not to_remove:
            to_remove = []
        serverinfo = self.orchestrator
        rest = RestConnection(serverinfo)
        zones = ["Group 1"]
        nodes_in_zone = {"Group 1": [serverinfo.ip]}
        # Create zones, if not existing, based on params zone in test.
        # Shuffle the nodes between zones.
        if int(self.zone) > 1:
            for i in range(1, int(self.zone)):
                a = "Group "
                zones.append(a + str(i + 1))
                if not rest.is_zone_exist(zones[i]):
                    rest.add_zone(zones[i])
                nodes_in_zone[zones[i]] = []
            # Divide the nodes between zones.
            nodes_in_cluster = [
                node.ip for node in self.get_nodes_in_cluster()
            ]
            nodes_to_remove = [node.ip for node in to_remove]
            for i in range(1, len(self.servers)):
                if self.servers[i].ip in nodes_in_cluster and self.servers[
                        i].ip not in nodes_to_remove:
                    server_group = i % int(self.zone)
                    nodes_in_zone[zones[server_group]].append(
                        self.servers[i].ip)
            # Shuffle the nodes between zones.
            for i in range(1, self.zone):
                node_in_zone = list(
                    set(nodes_in_zone[zones[i]]) -
                    set([node for node in rest.get_nodes_in_zone(zones[i])]))
                rest.shuffle_nodes_in_zones(node_in_zone, zones[0], zones[i])
        self.zones = nodes_in_zone
        otpnodes = [node.id for node in rest.node_statuses()]
        nodes_to_remove = [
            node.id for node in rest.node_statuses()
            if node.ip in [t.ip for t in to_remove]
        ]
        # Start rebalance and monitor it.
        started = rest.rebalance(otpNodes=otpnodes,
                                 ejectedNodes=nodes_to_remove)
        if started:
            result = rest.monitorRebalance()
            msg = "successfully rebalanced cluster {0}"
            self.log.info(msg.format(result))

    def enable_autofailover(self):
        """
        Enable the autofailover setting with the given timeout.
        :return: True If the setting was set with the timeout, else return
        False
        """
        status = self.rest.update_autofailover_settings(
            True,
            self.timeout,
            self.can_abort_rebalance,
            maxCount=self.max_count,
            enableServerGroup=self.server_group_failover)
        return status

    def disable_autofailover(self):
        """
        Disable the autofailover setting.
        :return: True If the setting was disabled, else return
        False
        """
        status = self.rest.update_autofailover_settings(False, 120, False)
        return status

    def enable_autofailover_and_validate(self):
        """
        Enable autofailover with the given timeout and then validate the
        settings.
        :return: Nothing
        """
        status = self.enable_autofailover()
        self.assertTrue(status, "Failed to enable autofailover_settings!")
        self.sleep(5)
        settings = self.rest.get_autofailover_settings()
        self.assertTrue(settings.enabled, "Failed to enable "
                        "autofailover_settings!")
        self.assertEqual(
            self.timeout, settings.timeout,
            "Incorrect timeout set. Expected timeout : {0} "
            "Actual timeout set : {1}".format(self.timeout, settings.timeout))
        self.assertEqual(
            self.can_abort_rebalance, settings.can_abort_rebalance,
            "Incorrect can_abort_rebalance set. Expected can_abort_rebalance : {0} "
            "Actual can_abort_rebalance set : {1}".format(
                self.can_abort_rebalance, settings.can_abort_rebalance))

    def disable_autofailover_and_validate(self):
        """
        Disable autofailover setting and then validate if the setting was
        disabled.
        :return: Nothing
        """
        status = self.disable_autofailover()
        self.assertTrue(status, "Failed to change autofailover_settings!")
        settings = self.rest.get_autofailover_settings()
        self.assertFalse(settings.enabled, "Failed to disable "
                         "autofailover_settings!")

    def start_node_monitors_task(self):
        """
        Start the node monitors task to analyze the node status monitors.
        :return: The NodeMonitorAnalyserTask.
        """
        node_monitor_task = NodeMonitorsAnalyserTask(self.orchestrator)
        self.task_manager.schedule(node_monitor_task, sleep_time=5)
        return node_monitor_task

    def enable_firewall(self):
        """
        Enable firewall on the nodes to fail in the tests.
        :return: Nothing
        """
        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(
            self.orchestrator,
            self.server_to_fail,
            "enable_firewall",
            self.timeout,
            self.pause_between_failover_action,
            self.failover_expected,
            self.timeout_buffer,
            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))
Example no. 6
class Cluster(object):
    """An API for interacting with Couchbase clusters"""

    def __init__(self):
        self.task_manager = TaskManager()
        self.task_manager.start()

    def async_create_default_bucket(self, server, size, replicas=1):
        """Asynchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, 'default', replicas, size)
        self.task_manager.schedule(_task)
        return _task

    def async_create_sasl_bucket(self, server, name, password, size, replicas):
        """Asynchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, password=password)
        self.task_manager.schedule(_task)
        return _task

    def async_create_standard_bucket(self, server, name, port, size, replicas):
        """Asynchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, port)
        self.task_manager.schedule(_task)
        return _task
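
    # Illustrative usage sketch (sizes in MB are assumptions): create the
    # default bucket plus a sasl bucket through this Cluster API.
    #
    #     cluster = Cluster()
    #     cluster.create_default_bucket(server, size=256, replicas=1)
    #     cluster.create_sasl_bucket(server, name="sasl_bucket",
    #                                password="password", size=256, replicas=1)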

    def async_bucket_delete(self, server, bucket='default'):
        """Asynchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            BucketDeleteTask - A task future that is a handle to the scheduled task."""
        _task = BucketDeleteTask(server, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_init_node(self, server):
        """Asynchronously initializes a node

        The scheduled task will initialize the node's username and password and will set
        the node's memory quota to 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)

        Returns:
            NodeInitTask - A task future that is a handle to the scheduled task."""
        _task = NodeInitializeTask(server)
        self.task_manager.schedule(_task)
        return _task

    def async_load_gen_docs(self, server, bucket, generator, kv_store, op_type, exp = 0):
        _task = LoadDocumentsTask(server, bucket, generator, kv_store, op_type, exp)
        self.task_manager.schedule(_task)
        return _task

    def async_workload(self, server, bucket, kv_store, num_ops, create, read, update,
                       delete, exp):
        _task = WorkloadTask(server, bucket, kv_store, num_ops, create, read, update,
                             delete, exp)
        self.task_manager.schedule(_task)
        return _task

    def async_verify_data(self, server, bucket, kv_store):
        _task = ValidateDataTask(server, bucket, kv_store)
        self.task_manager.schedule(_task)
        return _task

    def async_rebalance(self, servers, to_add, to_remove):
        """Asyncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])

        Returns:
            RebalanceTask - A task future that is a handle to the scheduled task"""
        _task = RebalanceTask(servers, to_add, to_remove)
        self.task_manager.schedule(_task)
        return _task

    def async_wait_for_stats(self, servers, bucket, param, stat, comparison, value):
        """Asynchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            StatsWaitTask - A task future that is a handle to the scheduled task"""
        _task = StatsWaitTask(servers, bucket, param, stat, comparison, value)
        self.task_manager.schedule(_task)
        return _task

    def create_default_bucket(self, server, size, replicas=1, timeout=None):
        """Synchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_default_bucket(server, size, replicas)
        return _task.result(timeout)

    def create_sasl_bucket(self, server, name, password, size, replicas, timeout=None):
        """Synchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_sasl_bucket(server, name, password, size, replicas)
        return _task.result(timeout)

    def create_standard_bucket(self, server, name, port, size, replicas, timeout=None):
        """Synchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_standard_bucket(server, name, port, size, replicas)
        return _task.result(timeout)

    def bucket_delete(self, server, bucket='default', timeout=None):
        """Synchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            boolean - Whether or not the bucket was deleted."""
        _task = self.async_bucket_delete(server, bucket)
        return _task.result(timeout)

    def init_node(self, server):
        """Synchronously initializes a node

        The scheduled task will initialize the node's username and password and will set
        the node's memory quota to 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)

        Returns:
            boolean - Whether or not the node was properly initialized."""
        _task = self.async_init_node(server)
        return _task.result()

    def rebalance(self, servers, to_add, to_remove, timeout=None):
        """Syncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])

        Returns:
            boolean - Whether or not the rebalance was successful"""
        _task = self.async_rebalance(servers, to_add, to_remove)
        return _task.result(timeout)

    def load_gen_docs(self, server, bucket, generator, kv_store, op_type, exp = 0, timeout=None):
        _task = self.async_load_gen_docs(server, bucket, generator, kv_store, op_type, exp)
        return _task.result(timeout)

    def workload(self, server, bucket, kv_store, num_ops, create, read, update, delete, exp, timeout=None):
        _task = self.async_workload(server, bucket, kv_store, num_ops, create, read, update,
                                    delete, exp)
        return _task.result(timeout)

    def verify_data(self, server, bucket, kv_store, timeout=None):
        _task = self.async_verify_data(server, bucket, kv_store)
        return _task.result(timeout)

    def wait_for_stats(self, servers, bucket, param, stat, comparison, value, timeout=None):
        """Synchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            boolean - Whether or not the correct stats state was seen"""
        _task = self.async_wait_for_stats(servers, bucket, param, stat, comparison, value)
        return _task.result(timeout)

    def shutdown(self, force=False):
        self.task_manager.shutdown(force)

    def async_create_view(self, server, design_doc_name, view, bucket = "default"):
        """Asynchronously creates a views in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            ViewCreateTask - A task future that is a handle to the scheduled task."""
        _task = ViewCreateTask(server, design_doc_name, view, bucket)
        self.task_manager.schedule(_task)
        return _task

    def create_view(self, server, design_doc_name, view, bucket = "default", timeout=None):
        """Synchronously creates a views in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            string - revision number of design doc."""
        _task = self.async_create_view(server, design_doc_name, view, bucket)
        return _task.result(timeout)

    def async_delete_view(self, server, design_doc_name, view, bucket = "default"):
        """Asynchronously deletes a views in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            ViewDeleteTask - A task future that is a handle to the scheduled task."""
        _task = ViewDeleteTask(server, design_doc_name, view, bucket)
        self.task_manager.schedule(_task)
        return _task

    def delete_view(self, server, design_doc_name, view, bucket = "default", timeout=None):
        """Synchronously deletes a views in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            boolean - Whether or not delete view was successful."""
        _task = self.async_delete_view(server, design_doc_name, view, bucket)
        return _task.result(timeout)


    def async_query_view(self, server, design_doc_name, view_name, query,
                         expected_rows = None, bucket = "default", retry_time = 2):
        """Asynchronously query a views in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            ViewQueryTask - A task future that is a handle to the scheduled task."""
        _task = ViewQueryTask(server, design_doc_name, view_name, query, expected_rows, bucket, retry_time)
        self.task_manager.schedule(_task)
        return _task

    def query_view(self, server, design_doc_name, view_name, query,
                   expected_rows = None, bucket = "default", retry_time = 2, timeout=None):
        """Synchronously query a views in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            ViewQueryTask - A task future that is a handle to the scheduled task."""
        _task = self.async_query_view(server, design_doc_name, view_name, query, expected_rows, bucket, retry_time)
        return _task.result(timeout)


    def modify_fragmentation_config(self, server, config, bucket = "default", timeout=None):
        """Synchronously modify fragmentation configuration spec

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            config - New compaction configuration (dict - see task)
            bucket - The name of the bucket fragmentation config applies to. (String)

        Returns:
            boolean - True if config values accepted."""

        _task = ModifyFragmentationConfigTask(server, config, bucket)
        self.task_manager.schedule(_task)
        return _task.result(timeout)


    def async_monitor_view_fragmentation(self, server,
                                         design_doc_name,
                                         fragmentation_value,
                                         bucket = "default",
                                         timeout = None):
        """Asynchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            MonitorViewFragmentationTask - A task future that is a handle to the scheduled task."""

        _task = MonitorViewFragmentationTask(server, design_doc_name,
                                             fragmentation_value, bucket)
        self.task_manager.schedule(_task)
        return _task


    def monitor_view_fragmentation(self, server,
                                   design_doc_name,
                                   fragmentation_value,
                                   bucket = "default",
                                   timeout = None):
        """Synchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            boolean - True if <fragmentation_value> reached"""

        _task = self.async_monitor_view_fragmentation(server, design_doc_name,
                                                      fragmentation_value,
                                                      bucket)
        return _task.result(timeout)

    def async_compact_view(self, server, design_doc_name, bucket = "default"):
        """Asynchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            ViewCompactionTask - A task future that is a handle to the scheduled task."""


        _task = ViewCompactionTask(server, design_doc_name, bucket)
        self.task_manager.schedule(_task)
        return _task

    def compact_view(self, server, design_doc_name, bucket = "default", timeout=None):
        """Synchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            boolean - True if the file size was reduced after compaction, False if compaction succeeded but there was no work to do."""

        _task = self.async_compact_view(server, design_doc_name, bucket)
        return _task.result(timeout)
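
A minimal usage sketch of the fragmentation and compaction helpers above (not part of the original example): cluster_helper stands for an instance of the helper class shown, server for a TestInputServer, and the design doc name, threshold, and config key are illustrative assumptions only.

# Hypothetical flow: tighten the compaction trigger, wait until the view index
# reaches the target fragmentation, then compact it and check the outcome.
frag_config = {"viewFragmntThresholdPercentage": 80}   # illustrative key/value
cluster_helper.modify_fragmentation_config(server, frag_config, bucket="default")
reached = cluster_helper.monitor_view_fragmentation(server, "ddoc1", 80,
                                                    bucket="default", timeout=600)
if reached:
    compacted = cluster_helper.compact_view(server, "ddoc1",
                                            bucket="default", timeout=300)
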
Example #7
class Cluster(object):
    """An API for interacting with Couchbase clusters"""
    def __init__(self):
        self.task_manager = TaskManager()
        self.task_manager.start()

    def async_create_default_bucket(self, server, size, replicas=1):
        """Asynchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, 'default', replicas, size)
        self.task_manager.schedule(_task)
        return _task

    def async_create_sasl_bucket(self, server, name, password, size, replicas):
        """Asynchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server,
                                 name,
                                 replicas,
                                 size,
                                 password=password)
        self.task_manager.schedule(_task)
        return _task

    def async_create_standard_bucket(self, server, name, port, size, replicas):
        """Asynchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, port)
        self.task_manager.schedule(_task)
        return _task

    def async_bucket_delete(self, server, bucket='default'):
        """Asynchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            BucketDeleteTask - A task future that is a handle to the scheduled task."""
        _task = BucketDeleteTask(server, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_init_node(self, server, disabled_consistent_view=None):
        """Asynchronously initializes a node

        The task scheduled will initialize a node's username and password and will establish
        the node's memory quota to be 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            disabled_consistent_view - disable consistent view

        Returns:
            NodeInitTask - A task future that is a handle to the scheduled task."""
        _task = NodeInitializeTask(server, disabled_consistent_view)
        self.task_manager.schedule(_task)
        return _task

    def async_load_gen_docs(self,
                            server,
                            bucket,
                            generator,
                            kv_store,
                            op_type,
                            exp=0,
                            flag=0,
                            only_store_hash=True,
                            batch_size=1,
                            pause_secs=1,
                            timeout_secs=5):
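        """Asynchronously loads generated documents into the bucket.

        Schedules a BatchedLoadDocumentsTask when batch_size > 1, otherwise a
        LoadDocumentsTask, and returns the task future."""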
        if batch_size > 1:
            _task = BatchedLoadDocumentsTask(server, bucket, generator,
                                             kv_store, op_type, exp, flag,
                                             only_store_hash, batch_size,
                                             pause_secs, timeout_secs)
        else:
            _task = LoadDocumentsTask(server, bucket, generator, kv_store,
                                      op_type, exp, flag, only_store_hash)
        self.task_manager.schedule(_task)
        return _task

    def async_workload(self, server, bucket, kv_store, num_ops, create, read,
                       update, delete, exp):
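        """Asynchronously runs a mixed create/read/update/delete workload.

        Schedules a WorkloadTask for num_ops operations against the bucket and
        returns the task future."""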
        _task = WorkloadTask(server, bucket, kv_store, num_ops, create, read,
                             update, delete, exp)
        self.task_manager.schedule(_task)
        return _task

    def async_verify_data(self,
                          server,
                          bucket,
                          kv_store,
                          max_verify=None,
                          only_store_hash=True,
                          batch_size=1):
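        """Asynchronously validates bucket data against the kv_store.

        Schedules a BatchedValidateDataTask when batch_size > 1, otherwise a
        ValidateDataTask, and returns the task future."""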
        if batch_size > 1:
            _task = BatchedValidateDataTask(server, bucket, kv_store,
                                            max_verify, only_store_hash,
                                            batch_size)
        else:
            _task = ValidateDataTask(server, bucket, kv_store, max_verify,
                                     only_store_hash)
        self.task_manager.schedule(_task)
        return _task

    def async_verify_revid(self, src_server, dest_server, bucket, kv_store,
                           ops_perf):
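        """Asynchronously verifies revision ids of the keys in kv_store
        between src_server and dest_server and returns the task future."""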
        _task = VerifyRevIdTask(src_server, dest_server, bucket, kv_store,
                                ops_perf)
        self.task_manager.schedule(_task)
        return _task

    def async_rebalance(self, servers, to_add, to_remove):
        """Asyncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])

        Returns:
            RebalanceTask - A task future that is a handle to the scheduled task"""
        _task = RebalanceTask(servers, to_add, to_remove)
        self.task_manager.schedule(_task)
        return _task

    def async_wait_for_stats(self, servers, bucket, param, stat, comparison,
                             value):
        """Asynchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            StatsWaitTask - A task future that is a handle to the scheduled task"""
        _task = StatsWaitTask(servers, bucket, param, stat, comparison, value)
        self.task_manager.schedule(_task)
        return _task

    def create_default_bucket(self, server, size, replicas=1, timeout=None):
        """Synchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_default_bucket(server, size, replicas)
        return _task.result(timeout)

    def create_sasl_bucket(self,
                           server,
                           name,
                           password,
                           size,
                           replicas,
                           timeout=None):
        """Synchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_sasl_bucket(server, name, password, size,
                                              replicas)
        return _task.result(timeout)

    def create_standard_bucket(self,
                               server,
                               name,
                               port,
                               size,
                               replicas,
                               timeout=None):
        """Synchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_standard_bucket(server, name, port, size,
                                                  replicas)
        return _task.result(timeout)

    def bucket_delete(self, server, bucket='default', timeout=None):
        """Synchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            boolean - Whether or not the bucket was deleted."""
        _task = self.async_bucket_delete(server, bucket)
        return _task.result(timeout)

    def init_node(self,
                  server,
                  async_init_node=True,
                  disabled_consistent_view=None):
        """Synchronously initializes a node

        The task scheduled will initialize a node's username and password and will establish
        the node's memory quota to be 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            disabled_consistent_view - disable consistent view

        Returns:
            boolean - Whether or not the node was properly initialized."""
        _task = self.async_init_node(server, disabled_consistent_view)
        return _task.result()

    def rebalance(self, servers, to_add, to_remove, timeout=None):
        """Syncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])

        Returns:
            boolean - Whether or not the rebalance was successful"""
        _task = self.async_rebalance(servers, to_add, to_remove)
        return _task.result(timeout)

    def load_gen_docs(self,
                      server,
                      bucket,
                      generator,
                      kv_store,
                      op_type,
                      exp=0,
                      timeout=None,
                      flag=0,
                      only_store_hash=True,
                      batch_size=1):
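        """Synchronous wrapper around async_load_gen_docs; blocks until the
        document load completes or the timeout expires."""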
        _task = self.async_load_gen_docs(server,
                                         bucket,
                                         generator,
                                         kv_store,
                                         op_type,
                                         exp,
                                         flag,
                                         only_store_hash=only_store_hash,
                                         batch_size=batch_size)
        return _task.result(timeout)

    def workload(self,
                 server,
                 bucket,
                 kv_store,
                 num_ops,
                 create,
                 read,
                 update,
                 delete,
                 exp,
                 timeout=None):
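        """Synchronous wrapper around async_workload; blocks until the
        workload completes or the timeout expires."""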
        _task = self.async_workload(server, bucket, kv_store, num_ops, create,
                                    read, update, delete, exp)
        return _task.result(timeout)

    def verify_data(self, server, bucket, kv_store, timeout=None):
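        """Synchronous wrapper around async_verify_data; blocks until
        validation completes or the timeout expires."""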
        _task = self.async_verify_data(server, bucket, kv_store)
        return _task.result(timeout)

    def wait_for_stats(self,
                       servers,
                       bucket,
                       param,
                       stat,
                       comparison,
                       value,
                       timeout=None):
        """Synchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            boolean - Whether or not the correct stats state was seen"""
        _task = self.async_wait_for_stats(servers, bucket, param, stat,
                                          comparison, value)
        return _task.result(timeout)

    def shutdown(self, force=False):
        self.task_manager.shutdown(force)

    def async_create_view(self,
                          server,
                          design_doc_name,
                          view,
                          bucket="default",
                          with_query=True):
        """Asynchronously creates a views in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String)
            with_query - Whether to wait for indexing and query the view after creation (Boolean)

        Returns:
            ViewCreateTask - A task future that is a handle to the scheduled task."""
        _task = ViewCreateTask(server, design_doc_name, view, bucket,
                               with_query)
        self.task_manager.schedule(_task)
        return _task

    def create_view(self,
                    server,
                    design_doc_name,
                    view,
                    bucket="default",
                    timeout=None,
                    with_query=True):
        """Synchronously creates a views in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String)
            with_query - Whether to wait for indexing and query the view after creation (Boolean)

        Returns:
            string - revision number of design doc."""
        _task = self.async_create_view(server, design_doc_name, view, bucket,
                                       with_query)
        return _task.result(timeout)

    def async_delete_view(self,
                          server,
                          design_doc_name,
                          view,
                          bucket="default"):
        """Asynchronously deletes a views in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            ViewDeleteTask - A task future that is a handle to the scheduled task."""
        _task = ViewDeleteTask(server, design_doc_name, view, bucket)
        self.task_manager.schedule(_task)
        return _task

    def delete_view(self,
                    server,
                    design_doc_name,
                    view,
                    bucket="default",
                    timeout=None):
        """Synchronously deletes a views in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            boolean - Whether or not delete view was successful."""
        _task = self.async_delete_view(server, design_doc_name, view, bucket)
        return _task.result(timeout)

    def async_query_view(self,
                         server,
                         design_doc_name,
                         view_name,
                         query,
                         expected_rows=None,
                         bucket="default",
                         retry_time=2):
        """Asynchronously query a views in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            ViewQueryTask - A task future that is a handle to the scheduled task."""
        _task = ViewQueryTask(server, design_doc_name, view_name, query,
                              expected_rows, bucket, retry_time)
        self.task_manager.schedule(_task)
        return _task

    def query_view(self,
                   server,
                   design_doc_name,
                   view_name,
                   query,
                   expected_rows=None,
                   bucket="default",
                   retry_time=2,
                   timeout=None):
        """Synchronously query a views in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            ViewQueryTask - A task future that is a handle to the scheduled task."""
        _task = self.async_query_view(server, design_doc_name, view_name,
                                      query, expected_rows, bucket, retry_time)
        return _task.result(timeout)

    def modify_fragmentation_config(self,
                                    server,
                                    config,
                                    bucket="default",
                                    timeout=None):
        """Synchronously modify fragmentation configuration spec

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            config - New compaction configuration (dict - see task)
            bucket - The name of the bucket fragementation config applies to. (String)

        Returns:
            boolean - True if config values accepted."""

        _task = ModifyFragmentationConfigTask(server, config, bucket)
        self.task_manager.schedule(_task)
        return _task.result(timeout)

    def async_monitor_active_task(self,
                                  server,
                                  type,
                                  target_value,
                                  wait_progress=100,
                                  num_iteration=100,
                                  wait_task=True):
        """Asynchronously monitor active task.

           When the active task reaches wait_progress this method will return.

        Parameters:
            server - The server to handle the monitor task. (TestInputServer)
            type - task type ('indexer', 'bucket_compaction', 'view_compaction') (String)
            target_value - target value (for example "_design/ddoc" for indexing, bucket "default"
                for bucket_compaction or "_design/dev_view" for view_compaction) (String)
            wait_progress - expected progress (int)
            num_iteration - fail the test if progress does not change within num_iteration iterations (int)
            wait_task - expect to find the task on the first attempt (bool)

        Returns:
            MonitorActiveTask - A task future that is a handle to the scheduled task."""
        _task = MonitorActiveTask(server, type, target_value, wait_progress,
                                  num_iteration, wait_task)
        self.task_manager.schedule(_task)
        return _task

    def async_monitor_view_fragmentation(self,
                                         server,
                                         design_doc_name,
                                         fragmentation_value,
                                         bucket="default"):
        """Asynchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            MonitorViewFragmentationTask - A task future that is a handle to the scheduled task."""

        _task = MonitorViewFragmentationTask(server, design_doc_name,
                                             fragmentation_value, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_generate_expected_view_results(self, doc_generators, view,
                                             query):
        """Asynchronously generate expected view query results

        Parameters:
            doc_generators - Generators used for loading docs (DocumentGenerator[])
            view - The view with map function (View)
            query - Query params to filter docs from the generator. (dict)

        Returns:
            GenerateExpectedViewResultsTask - A task future that is a handle to the scheduled task."""

        _task = GenerateExpectedViewResultsTask(doc_generators, view, query)
        self.task_manager.schedule(_task)
        return _task

    def generate_expected_view_query_results(self,
                                             doc_generators,
                                             view,
                                             query,
                                             timeout=None):
        """Synchronously generate expected view query results

        Parameters:
            doc_generators - Generators used for loading docs (DocumentGenerator[])
            view - The view with map function (View)
            query - Query params to filter docs from the generator. (dict)

        Returns:
            list - A list of rows expected to be returned for given query"""

        _task = self.async_generate_expected_view_results(
            doc_generators, view, query)
        return _task.result(timeout)

    def async_view_query_verification(self,
                                      server,
                                      design_doc_name,
                                      view_name,
                                      query,
                                      expected_rows,
                                      num_verified_docs=20,
                                      bucket="default",
                                      query_timeout=20):
        """Asynchronously query a views in a design doc and does full verification of results

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            query - Query params being used with the query. (dict)
            expected_rows - The number of rows expected to be returned from the query (int)
            num_verified_docs - The number of docs to verify that require memcached gets (int)
            bucket - The name of the bucket containing items for this view. (String)
            query_timeout - The time to allow a query with stale=false to run. (int)

        Returns:
            ViewQueryVerificationTask - A task future that is a handle to the scheduled task."""
        _task = ViewQueryVerificationTask(server, design_doc_name, view_name,
                                          query, expected_rows,
                                          num_verified_docs, bucket,
                                          query_timeout)
        self.task_manager.schedule(_task)
        return _task

    def view_query_verification(self,
                                server,
                                design_doc_name,
                                view_name,
                                query,
                                expected_rows,
                                num_verified_docs=20,
                                bucket="default",
                                query_timeout=20,
                                timeout=None):
        """Synchronously query a views in a design doc and does full verification of results

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            query - Query params being used with the query. (dict)
            expected_rows - The number of rows expected to be returned from the query (int)
            num_verified_docs - The number of docs to verify that require memcached gets (int)
            bucket - The name of the bucket containing items for this view. (String)
            query_timeout - The time to allow a query with stale=false to run. (int)

        Returns:
            dict - An object with keys: passed = True or False
                                        errors = reasons why verification failed """
        _task = self.async_view_query_verification(server, design_doc_name,
                                                   view_name, query,
                                                   expected_rows,
                                                   num_verified_docs, bucket,
                                                   query_timeout)
        return _task.result(timeout)

    def monitor_view_fragmentation(self,
                                   server,
                                   design_doc_name,
                                   fragmentation_value,
                                   bucket="default",
                                   timeout=None):
        """Synchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            boolean - True if <fragmentation_value> reached"""

        _task = self.async_monitor_view_fragmentation(server, design_doc_name,
                                                      fragmentation_value,
                                                      bucket)
        return _task.result(timeout)

    def async_compact_view(self,
                           server,
                           design_doc_name,
                           bucket="default",
                           with_rebalance=False):
        """Asynchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)
            with_rebalance - when True, "Error occurred reading set_view _info"
                errors are ignored (this applies to the rebalance case), and
                with concurrent updates (for instance, during a rebalance) it is
                possible that the fragmentation value has not changed significantly

        Returns:
            ViewCompactionTask - A task future that is a handle to the scheduled task."""

        _task = ViewCompactionTask(server, design_doc_name, bucket,
                                   with_rebalance)
        self.task_manager.schedule(_task)
        return _task

    def compact_view(self,
                     server,
                     design_doc_name,
                     bucket="default",
                     timeout=None,
                     with_rebalance=False):
        """Synchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)
            with_rebalance - "Error occured reading set_view _info" will be ignored if True
                and with concurrent updates(for instance, with rebalance)
                it's possible that compaction value has not changed significantly

        Returns:
            boolean - True file size reduced after compaction, False if successful but no work done """

        _task = self.async_compact_view(server, design_doc_name, bucket,
                                        with_rebalance)
        return _task.result(timeout)

    def async_failover(self, servers, to_failover):
        """Asyncronously fails over nodes

        Parameters:
            servers - All servers participating in the failover ([TestInputServers])
            to_failover - All servers being failed over ([TestInputServers])

        Returns:
            FailoverTask - A task future that is a handle to the scheduled task"""
        _task = FailoverTask(servers, to_failover)
        self.task_manager.schedule(_task)
        return _task

    def failover(self, servers, to_failover, timeout=None):
        """Syncronously fails over nodes

        Parameters:
            servers - All servers participating in the failover ([TestInputServers])
            to_failover - All servers being failed over ([TestInputServers])

        Returns:
            boolean - Whether or not the failover was successful"""
        _task = self.async_failover(servers, to_failover)
        return _task.result(timeout)

    def async_bucket_flush(self, server, bucket='default'):
        """Asynchronously flushes a bucket

        Parameters:
            server - The server to flush the bucket on. (TestInputServer)
            bucket - The name of the bucket to be flushed. (String)

        Returns:
            BucketFlushTask - A task future that is a handle to the scheduled task."""
        _task = BucketFlushTask(server, bucket)
        self.task_manager.schedule(_task)
        return _task

    def bucket_flush(self, server, bucket='default', timeout=None):
        """Synchronously flushes a bucket

        Parameters:
            server - The server to flush the bucket on. (TestInputServer)
            bucket - The name of the bucket to be flushed. (String)

        Returns:
            boolean - Whether or not the bucket was flushed."""
        _task = self.async_bucket_flush(server, bucket)
        return _task.result(timeout)
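
A minimal usage sketch for the Cluster API above (a sketch under assumptions, not part of the original example): servers is assumed to be a list of TestInputServer objects, and gen / kv_store an already prepared document generator and key-value store; the bucket size, item count, and stat check are illustrative only.

# Hypothetical test flow built on the Cluster helper shown above.
cluster = Cluster()
try:
    cluster.init_node(servers[0])                        # credentials + memory quota
    cluster.create_default_bucket(servers[0], size=256)  # 256 MB bucket, 1 replica
    cluster.rebalance(servers[:1], servers[1:3], [])     # add two nodes
    cluster.load_gen_docs(servers[0], "default", gen, kv_store, "create",
                          timeout=600)
    cluster.wait_for_stats(servers[:3], "default", "", "curr_items", "==",
                           10000, timeout=300)           # illustrative item count
finally:
    cluster.shutdown(force=True)
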
Example #8
class AutoFailoverBaseTest(BaseTestCase):
    MAX_FAIL_DETECT_TIME = 120
    ORCHESTRATOR_TIMEOUT_BUFFER = 60

    def setUp(self):
        super(AutoFailoverBaseTest, self).setUp()
        self._get_params()
        self.rest = RestConnection(self.orchestrator)
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.node_failure_task_manager = TaskManager(
            "Nodes_failure_detector_thread")
        self.node_failure_task_manager.start()
        self.initial_load_gen = BlobGenerator('auto-failover',
                                              'auto-failover-',
                                              self.value_size,
                                              end=self.num_items)
        self.update_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             end=self.update_items)
        self.delete_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             start=self.update_items,
                                             end=self.delete_items)
        self._load_all_buckets(self.servers[0], self.initial_load_gen,
                               "create", 0)
        self._async_load_all_buckets(self.orchestrator,
                                     self.update_load_gen, "update", 0)
        self._async_load_all_buckets(self.orchestrator,
                                     self.delete_load_gen, "delete", 0)
        self.server_to_fail = self._servers_to_fail()
        self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                                           self.nodes_in]
        self.servers_to_remove = self.servers[self.nodes_init -
                                              self.nodes_out:self.nodes_init]
        # self.node_monitor_task = self.start_node_monitors_task()

    def tearDown(self):
        self.log.info("============AutoFailoverBaseTest teardown============")
        self._get_params()
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.server_to_fail = self._servers_to_fail()
        self.start_couchbase_server()
        self.sleep(10)
        self.disable_firewall()
        self.rest = RestConnection(self.orchestrator)
        self.rest.reset_autofailover()
        self.disable_autofailover()
        self._cleanup_cluster()
        super(AutoFailoverBaseTest, self).tearDown()
        if hasattr(self, "node_monitor_task"):
            if self.node_monitor_task._exception:
                self.fail("{}".format(self.node_monitor_task._exception))
            self.node_monitor_task.stop = True
        self.task_manager.shutdown(force=True)

    def enable_autofailover(self):
        """
        Enable the autofailover setting with the given timeout.
        :return: True If the setting was set with the timeout, else return
        False
        """
        status = self.rest.update_autofailover_settings(True,
                                                        self.timeout)
        return status

    def disable_autofailover(self):
        """
        Disable the autofailover setting.
        :return: True If the setting was disabled, else return
        False
        """
        status = self.rest.update_autofailover_settings(False, 120)
        return status

    def enable_autofailover_and_validate(self):
        """
        Enable autofailover with the given timeout and then validate the
        settings.
        :return: Nothing
        """
        status = self.enable_autofailover()
        self.assertTrue(status, "Failed to enable autofailover_settings!")
        self.sleep(5)
        settings = self.rest.get_autofailover_settings()
        self.assertTrue(settings.enabled, "Failed to enable "
                                          "autofailover_settings!")
        self.assertEqual(self.timeout, settings.timeout,
                         "Incorrect timeout set. Expected timeout : {0} "
                         "Actual timeout set : {1}".format(self.timeout,
                                                           settings.timeout))

    def disable_autofailover_and_validate(self):
        """
        Disable autofailover setting and then validate if the setting was
        disabled.
        :return: Nothing
        """
        status = self.disable_autofailover()
        self.assertTrue(status, "Failed to change autofailover_settings!")
        settings = self.rest.get_autofailover_settings()
        self.assertFalse(settings.enabled, "Failed to disable "
                                           "autofailover_settings!")

    def start_node_monitors_task(self):
        """
        Start the node monitors task to analyze the node status monitors.
        :return: The NodeMonitorAnalyserTask.
        """
        node_monitor_task = NodeMonitorsAnalyserTask(self.orchestrator)
        self.task_manager.schedule(node_monitor_task, sleep_time=5)
        return node_monitor_task

    def enable_firewall(self):
        """
        Enable firewall on the nodes to fail in the tests.
        :return: Nothing
        """
        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(self.orchestrator,
                                            self.server_to_fail,
                                            "enable_firewall", self.timeout,
                                            self.pause_between_failover_action,
                                            self.failover_expected,
                                            self.timeout_buffer,
                                            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))
Example #9
class AutoFailoverBaseTest(BaseTestCase):
    MAX_FAIL_DETECT_TIME = 120
    ORCHESTRATOR_TIMEOUT_BUFFER = 60

    def setUp(self):
        super(AutoFailoverBaseTest, self).setUp()
        self._get_params()
        self.rest = RestConnection(self.orchestrator)
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.node_failure_task_manager = TaskManager(
            "Nodes_failure_detector_thread")
        self.node_failure_task_manager.start()
        self.initial_load_gen = BlobGenerator('auto-failover',
                                              'auto-failover-',
                                              self.value_size,
                                              end=self.num_items)
        self.update_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             end=self.update_items)
        self.delete_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             start=self.update_items,
                                             end=self.delete_items)
        self._load_all_buckets(self.servers[0], self.initial_load_gen,
                               "create", 0)
        self._async_load_all_buckets(self.orchestrator, self.update_load_gen,
                                     "update", 0)
        self._async_load_all_buckets(self.orchestrator, self.delete_load_gen,
                                     "delete", 0)
        self.server_index_to_fail = self.input.param("server_index_to_fail",
                                                     None)
        if self.server_index_to_fail is None:
            self.server_to_fail = self._servers_to_fail()
        else:
            self.server_to_fail = [self.servers[self.server_index_to_fail]]
        self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                           self.nodes_in]
        self.servers_to_remove = self.servers[self.nodes_init -
                                              self.nodes_out:self.nodes_init]

    def bareSetUp(self):
        super(AutoFailoverBaseTest, self).setUp()
        self._get_params()
        self.rest = RestConnection(self.orchestrator)
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.node_failure_task_manager = TaskManager(
            "Nodes_failure_detector_thread")
        self.node_failure_task_manager.start()
        self.initial_load_gen = BlobGenerator('auto-failover',
                                              'auto-failover-',
                                              self.value_size,
                                              end=self.num_items)
        self.update_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             end=self.update_items)
        self.delete_load_gen = BlobGenerator('auto-failover',
                                             'auto-failover-',
                                             self.value_size,
                                             start=self.update_items,
                                             end=self.delete_items)
        self.server_to_fail = self._servers_to_fail()
        self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                           self.nodes_in]
        self.servers_to_remove = self.servers[self.nodes_init -
                                              self.nodes_out:self.nodes_init]

    def tearDown(self):
        self.log.info("============AutoFailoverBaseTest teardown============")
        self._get_params()
        self.task_manager = TaskManager("Autofailover_thread")
        self.task_manager.start()
        self.server_to_fail = self._servers_to_fail()
        self.start_couchbase_server()
        self.sleep(10)
        self.disable_firewall()
        self.rest = RestConnection(self.orchestrator)
        self.rest.reset_autofailover()
        self.disable_autofailover()
        self._cleanup_cluster()
        super(AutoFailoverBaseTest, self).tearDown()
        if hasattr(self, "node_monitor_task"):
            if self.node_monitor_task._exception:
                self.fail("{}".format(self.node_monitor_task._exception))
            self.node_monitor_task.stop = True
        self.task_manager.shutdown(force=True)

    def shuffle_nodes_between_zones_and_rebalance(self, to_remove=None):
        """
        Shuffle the nodes present in the cluster if zone > 1. Rebalance the nodes in the end.
        Nodes are divided into groups in a round-robin fashion, e.g. with zone=2
        the 1st node goes to Group 1, the 2nd to Group 2, the 3rd to Group 1, and so on.
        :param to_remove: List of nodes to be removed.
        """
        if not to_remove:
            to_remove = []
        serverinfo = self.orchestrator
        rest = RestConnection(serverinfo)
        zones = ["Group 1"]
        nodes_in_zone = {"Group 1": [serverinfo.ip]}
        # Create zones, if not existing, based on params zone in test.
        # Shuffle the nodes between zones.
        if int(self.zone) > 1:
            for i in range(1, int(self.zone)):
                a = "Group "
                zones.append(a + str(i + 1))
                if not rest.is_zone_exist(zones[i]):
                    rest.add_zone(zones[i])
                nodes_in_zone[zones[i]] = []
            # Divide the nodes between zones.
            nodes_in_cluster = [
                node.ip for node in self.get_nodes_in_cluster()
            ]
            nodes_to_remove = [node.ip for node in to_remove]
            for i in range(1, len(self.servers)):
                if self.servers[i].ip in nodes_in_cluster and self.servers[
                        i].ip not in nodes_to_remove:
                    server_group = i % int(self.zone)
                    nodes_in_zone[zones[server_group]].append(
                        self.servers[i].ip)
            # Shuffle the nodes between the zones
            for i in range(1, self.zone):
                node_in_zone = list(
                    set(nodes_in_zone[zones[i]]) -
                    set([node for node in rest.get_nodes_in_zone(zones[i])]))
                rest.shuffle_nodes_in_zones(node_in_zone, zones[0], zones[i])
        self.zones = nodes_in_zone
        otpnodes = [node.id for node in rest.node_statuses()]
        nodes_to_remove = [
            node.id for node in rest.node_statuses()
            if node.ip in [t.ip for t in to_remove]
        ]
        # Start rebalance and monitor it.
        started = rest.rebalance(otpNodes=otpnodes,
                                 ejectedNodes=nodes_to_remove)
        if started:
            result = rest.monitorRebalance()
            msg = "successfully rebalanced cluster {0}"
            self.log.info(msg.format(result))
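
    # Illustration (not part of the original test) of the i % zone assignment
    # above: with self.zone == 2 and all of self.servers in the cluster,
    #   servers[0] (the orchestrator) stays in "Group 1",
    #   servers[1] -> "Group 2"  (1 % 2 == 1 -> zones[1]),
    #   servers[2] -> "Group 1"  (2 % 2 == 0 -> zones[0]),
    #   servers[3] -> "Group 2", and so on; nodes listed in to_remove are skipped.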

    def enable_autofailover(self):
        """
        Enable the autofailover setting with the given timeout.
        :return: True If the setting was set with the timeout, else return
        False
        """
        status = self.rest.update_autofailover_settings(
            True,
            self.timeout,
            self.can_abort_rebalance,
            maxCount=self.max_count,
            enableServerGroup=self.server_group_failover)
        return status

    def disable_autofailover(self):
        """
        Disable the autofailover setting.
        :return: True If the setting was disabled, else return
        False
        """
        status = self.rest.update_autofailover_settings(False, 120, False)
        return status

    def enable_autofailover_and_validate(self):
        """
        Enable autofailover with the given timeout and then validate the
        settings.
        :return: Nothing
        """
        status = self.enable_autofailover()
        self.assertTrue(status, "Failed to enable autofailover_settings!")
        self.sleep(5)
        settings = self.rest.get_autofailover_settings()
        self.assertTrue(settings.enabled, "Failed to enable "
                        "autofailover_settings!")
        self.assertEqual(
            self.timeout, settings.timeout,
            "Incorrect timeout set. Expected timeout : {0} "
            "Actual timeout set : {1}".format(self.timeout, settings.timeout))
        self.assertEqual(
            self.can_abort_rebalance, settings.can_abort_rebalance,
            "Incorrect can_abort_rebalance set. Expected can_abort_rebalance : {0} "
            "Actual can_abort_rebalance set : {1}".format(
                self.can_abort_rebalance, settings.can_abort_rebalance))

    def disable_autofailover_and_validate(self):
        """
        Disable autofailover setting and then validate if the setting was
        disabled.
        :return: Nothing
        """
        status = self.disable_autofailover()
        self.assertTrue(status, "Failed to change autofailover_settings!")
        settings = self.rest.get_autofailover_settings()
        self.assertFalse(settings.enabled, "Failed to disable "
                         "autofailover_settings!")

    def start_node_monitors_task(self):
        """
        Start the node monitors task to analyze the node status monitors.
        :return: The NodeMonitorAnalyserTask.
        """
        node_monitor_task = NodeMonitorsAnalyserTask(self.orchestrator)
        self.task_manager.schedule(node_monitor_task, sleep_time=5)
        return node_monitor_task

    def enable_firewall(self):
        """
        Enable firewall on the nodes to fail in the tests.
        :return: Nothing
        """
        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(
            self.orchestrator,
            self.server_to_fail,
            "enable_firewall",
            self.timeout,
            self.pause_between_failover_action,
            self.failover_expected,
            self.timeout_buffer,
            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))

    def disable_firewall(self):
        """
        Disable firewall on the nodes to fail in the tests
        :return: Nothing
        """
        self.time_start = time.time()
        task = AutoFailoverNodesFailureTask(self.orchestrator,
                                            self.server_to_fail,
                                            "disable_firewall", self.timeout,
                                            self.pause_between_failover_action,
                                            False, self.timeout_buffer, False)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))

    def restart_couchbase_server(self):
        """
        Restart couchbase server on the nodes to fail in the tests
        :return: Nothing
        """
        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip, node.port)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(
            self.orchestrator,
            self.server_to_fail,
            "restart_couchbase",
            self.timeout,
            self.pause_between_failover_action,
            self.failover_expected,
            self.timeout_buffer,
            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))

    def stop_couchbase_server(self):
        """
        Stop couchbase server on the nodes to fail in the tests
        :return: Nothing
        """
        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip, node.port)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(
            self.orchestrator,
            self.server_to_fail,
            "stop_couchbase",
            self.timeout,
            self.pause_between_failover_action,
            self.failover_expected,
            self.timeout_buffer,
            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))

    def start_couchbase_server(self):
        """
        Start the couchbase server on the nodes to fail in the tests
        :return: Nothing
        """
        task = AutoFailoverNodesFailureTask(self.orchestrator,
                                            self.server_to_fail,
                                            "start_couchbase", self.timeout, 0,
                                            False, self.timeout_buffer, False)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))

    def stop_restart_network(self):
        """
        Stop and restart network for said timeout period on the nodes to
        fail in the tests
        :return: Nothing
        """

        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(
            self.orchestrator,
            self.server_to_fail,
            "restart_network",
            self.timeout,
            self.pause_between_failover_action,
            self.failover_expected,
            self.timeout_buffer,
            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))

    def restart_machine(self):
        """
        Restart the nodes to fail in the tests
        :return: Nothing
        """

        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip)
            node_down_timer_tasks.append(node_failure_timer_task)
        task = AutoFailoverNodesFailureTask(
            self.orchestrator,
            self.server_to_fail,
            "restart_machine",
            self.timeout,
            self.pause_between_failover_action,
            self.failover_expected,
            self.timeout_buffer,
            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))
        finally:
            self.sleep(120, "Sleeping for 2 min for the machines to restart")
            for node in self.server_to_fail:
                for i in range(0, 2):
                    try:
                        # Probe the node; a successful SSH connection means the
                        # machine has come back up.
                        shell = RemoteMachineShellConnection(node)
                        shell.disconnect()
                        break
                    except Exception:
                        self.log.info("Unable to connect to the host. "
                                      "Machine has not restarted")
                        self.sleep(60, "Sleep for another minute and try "
                                   "again")

    def stop_memcached(self):
        """
        Stop the memcached on the nodes to fail in the tests
        :return: Nothing
        """
        node_down_timer_tasks = []
        for node in self.server_to_fail:
            node_failure_timer_task = NodeDownTimerTask(node.ip, 11211)
            node_down_timer_tasks.append(node_failure_timer_task)
        self.timeout_buffer += 3
        task = AutoFailoverNodesFailureTask(
            self.orchestrator,
            self.server_to_fail,
            "stop_memcached",
            self.timeout,
            self.pause_between_failover_action,
            self.failover_expected,
            self.timeout_buffer,
            failure_timers=node_down_timer_tasks)
        for node_down_timer_task in node_down_timer_tasks:
            self.node_failure_task_manager.schedule(node_down_timer_task, 2)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))
        finally:
            task = AutoFailoverNodesFailureTask(self.orchestrator,
                                                self.server_to_fail,
                                                "start_memcached",
                                                self.timeout,
                                                0,
                                                False,
                                                0,
                                                check_for_failover=False)
            self.task_manager.schedule(task)
            task.result()

    def split_network(self):
        """
        Split the network in the cluster. Stop network traffic from few
        nodes while allowing the traffic from rest of the cluster.
        :return: Nothing
        """
        self.time_start = time.time()
        if len(self.server_to_fail) < 2:
            self.fail("Need at least 2 servers to fail")
        task = AutoFailoverNodesFailureTask(self.orchestrator,
                                            self.server_to_fail,
                                            "network_split", self.timeout,
                                            self.pause_between_failover_action,
                                            False, self.timeout_buffer)
        self.task_manager.schedule(task)
        try:
            task.result()
        except Exception as e:
            self.fail("Exception: {}".format(e))
        self.disable_firewall()

    def bring_back_failed_nodes_up(self):
        """
        Bring back the failed nodes.
        :return: Nothing
        """
        if self.failover_action == "firewall":
            self.disable_firewall()
        elif self.failover_action == "stop_server":
            self.start_couchbase_server()

    def _servers_to_fail(self):
        """
        Select the nodes to be failed in the tests.
        :return: List of nodes to be failed.
        """
        if self.failover_orchestrator:
            servers_to_fail = self.servers[0:self.num_node_failures]
        else:
            servers_to_fail = self.servers[1:self.num_node_failures + 1]
        return servers_to_fail

    def _get_params(self):
        """
        Initialize the test parameters.
        :return:  Nothing
        """
        self.timeout = self.input.param("timeout", 60)
        self.max_count = self.input.param("maxCount", 1)
        self.server_group_failover = self.input.param("serverGroupFailover",
                                                      False)
        self.failover_action = self.input.param("failover_action",
                                                "stop_server")
        self.failover_orchestrator = self.input.param("failover_orchestrator",
                                                      False)
        self.multiple_node_failure = self.input.param("multiple_nodes_failure",
                                                      False)
        self.num_items = self.input.param("num_items", 1000000)
        self.update_items = self.input.param("update_items", 100000)
        self.delete_items = self.input.param("delete_items", 100000)
        self.add_back_node = self.input.param("add_back_node", True)
        self.recovery_strategy = self.input.param("recovery_strategy", "delta")
        self.multi_node_failures = self.input.param("multi_node_failures",
                                                    False)
        self.can_abort_rebalance = self.input.param("can_abort_rebalance",
                                                    True)
        self.num_node_failures = self.input.param("num_node_failures", 1)
        self.services = self.input.param("services", None)
        self.zone = self.input.param("zone", 1)
        self.multi_services_node = self.input.param("multi_services_node",
                                                    False)
        self.pause_between_failover_action = self.input.param(
            "pause_between_failover_action", 0)
        self.remove_after_failover = self.input.param("remove_after_failover",
                                                      False)
        self.timeout_buffer = 120 if self.failover_orchestrator else 10
        failover_not_expected = (
            (self.max_count == 1 and self.num_node_failures > 1
             and self.pause_between_failover_action < self.timeout)
            or self.num_replicas < 1)
        failover_not_expected = failover_not_expected or (
            (1 < self.max_count < self.num_node_failures
             and self.pause_between_failover_action < self.timeout)
            or self.num_replicas < self.max_count)
        self.failover_expected = not failover_not_expected
        if self.failover_action == "restart_server":
            self.num_items *= 100
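        # Drive REST calls from servers[0] unless the orchestrator itself is
        # among the nodes being failed; then use the first node after them.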
        self.orchestrator = self.servers[0] if not \
            self.failover_orchestrator else self.servers[
            self.num_node_failures]

    def _cleanup_cluster(self):
        """
        Clean up the cluster. Delete all the buckets in the nodes and remove
        the nodes from any cluster that has been formed.
        :return:
        """
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for node in self.servers:
            master = node
            try:
                ClusterOperationHelper.cleanup_cluster(self.servers,
                                                       master=master)
            except:
                continue

    failover_actions = {
        "firewall": enable_firewall,
        "stop_server": stop_couchbase_server,
        "restart_server": restart_couchbase_server,
        "restart_machine": restart_machine,
        "restart_network": stop_restart_network,
        "stop_memcached": stop_memcached,
        "network_split": split_network
    }

    def _auto_failover_message_present_in_logs(self, ipaddress):
        return any(
            "Rebalance interrupted due to auto-failover of nodes ['ns_1@{0}']."
            .format(ipaddress) in d.values()[2]
            for d in self.rest.get_logs(20))

    def wait_for_failover_or_assert(self, expected_failover_count, timeout):
        time_start = time.time()
        time_max_end = time_start + timeout
        actual_failover_count = 0
        while time.time() < time_max_end:
            actual_failover_count = self.get_failover_count()
            if actual_failover_count == expected_failover_count:
                break
            time.sleep(20)
        time_end = time.time()
        self.assertTrue(
            actual_failover_count == expected_failover_count,
            "{0} nodes failed over, expected : {1}".format(
                actual_failover_count, expected_failover_count))
        self.log.info(
            "{0} nodes failed over as expected in {1} seconds".format(
                actual_failover_count, time_end - time_start))

    def get_failover_count(self):
        rest = RestConnection(self.master)
        cluster_status = rest.cluster_status()
        failover_count = 0
        # check for inactiveFailed
        for node in cluster_status['nodes']:
            if node['clusterMembership'] == "inactiveFailed":
                failover_count += 1
        return failover_count
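
The failover_actions dict above maps the failover_action test parameter onto the unbound failure helpers of the class, so a concrete test can trigger the configured failure and then wait for auto-failover to kick in. A minimal, hypothetical sketch of that flow (the test class and test name are illustrative; everything else comes from the base class above):

class AutoFailoverTests(AutoFailoverBaseTest):
    def test_autofailover(self):
        # Enable auto-failover and verify the settings were applied.
        self.enable_autofailover_and_validate()
        # Trigger the configured failure, e.g. "stop_server" -> stop_couchbase_server.
        self.failover_actions[self.failover_action](self)
        # Wait until the expected number of nodes show up as failed over.
        if self.failover_expected:
            self.wait_for_failover_or_assert(self.num_node_failures,
                                             self.timeout + self.timeout_buffer)
        # Bring the failed nodes back so teardown can clean up the cluster.
        self.bring_back_failed_nodes_up()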
Example No. 10
0
class Cluster(object):
    """An API for interacting with Couchbase clusters"""

    def __init__(self):
        self.task_manager = TaskManager()
        self.task_manager.start()

    def async_create_default_bucket(self, server, size, replicas=1):
        """Asynchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, 'default', replicas, size)
        self.task_manager.schedule(_task)
        return _task

    def async_create_sasl_bucket(self, server, name, password, size, replicas):
        """Asynchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, password=password)
        self.task_manager.schedule(_task)
        return _task

    def async_create_standard_bucket(self, server, name, port, size, replicas):
        """Asynchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            BucketCreateTask - A task future that is a handle to the scheduled task."""
        _task = BucketCreateTask(server, name, replicas, size, port)
        self.task_manager.schedule(_task)
        return _task

    def async_bucket_delete(self, server, bucket='default'):
        """Asynchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            BucketDeleteTask - A task future that is a handle to the scheduled task."""
        _task = BucketDeleteTask(server, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_init_node(self, server, disabled_consistent_view=None):
        """Asynchronously initializes a node

        The task scheduled will initialize a node's username and password and will establish
        the node's memory quota to be 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            disabled_consistent_view - disable consistent view

        Returns:
            NodeInitTask - A task future that is a handle to the scheduled task."""
        _task = NodeInitializeTask(server, disabled_consistent_view)
        self.task_manager.schedule(_task)
        return _task

    def async_load_gen_docs(self, server, bucket, generator, kv_store, op_type, exp=0, flag=0, only_store_hash=False, batch_size=1, pause_secs=1, timeout_secs=5):
        if batch_size > 1:
            _task = BatchedLoadDocumentsTask(server, bucket, generator, kv_store, op_type, exp, flag, only_store_hash, batch_size, pause_secs, timeout_secs)
        else:
            _task = LoadDocumentsTask(server, bucket, generator, kv_store, op_type, exp, flag, only_store_hash)
        self.task_manager.schedule(_task)
        return _task

    def async_workload(self, server, bucket, kv_store, num_ops, create, read, update,
                       delete, exp):
        _task = WorkloadTask(server, bucket, kv_store, num_ops, create, read, update,
                             delete, exp)
        self.task_manager.schedule(_task)
        return _task

    def async_verify_data(self, server, bucket, kv_store, max_verify=None, only_store_hash=False, batch_size=1):
        if batch_size > 1:
            _task = BatchedValidateDataTask(server, bucket, kv_store, max_verify, only_store_hash, batch_size)
        else:
            _task = ValidateDataTask(server, bucket, kv_store, max_verify, only_store_hash)
        self.task_manager.schedule(_task)
        return _task

    def async_verify_revid(self, src_server, dest_server, bucket, kv_store, ops_perf):
        _task = VerifyRevIdTask(src_server, dest_server, bucket, kv_store, ops_perf)
        self.task_manager.schedule(_task)
        return _task

    def async_rebalance(self, servers, to_add, to_remove):
        """Asyncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])

        Returns:
            RebalanceTask - A task future that is a handle to the scheduled task"""
        _task = RebalanceTask(servers, to_add, to_remove)
        self.task_manager.schedule(_task)
        return _task

    def async_wait_for_stats(self, servers, bucket, param, stat, comparison, value):
        """Asynchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            StatsWaitTask - A task future that is a handle to the scheduled task"""
        _task = StatsWaitTask(servers, bucket, param, stat, comparison, value)
        self.task_manager.schedule(_task)
        return _task

    def create_default_bucket(self, server, size, replicas=1, timeout=None):
        """Synchronously creates the default bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            size - The size of the bucket to be created. (int)
            replicas - The number of replicas for this bucket. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_default_bucket(server, size, replicas)
        return _task.result(timeout)

    def create_sasl_bucket(self, server, name, password, size, replicas, timeout=None):
        """Synchronously creates a sasl bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            password - The password for this bucket. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_sasl_bucket(server, name, password, size, replicas)
        return _task.result(timeout)

    def create_standard_bucket(self, server, name, port, size, replicas, timeout=None):
        """Synchronously creates a standard bucket

        Parameters:
            server - The server to create the bucket on. (TestInputServer)
            name - The name of the bucket to be created. (String)
            port - The port to create this bucket on. (String)
            replicas - The number of replicas for this bucket. (int)
            size - The size of the bucket to be created. (int)

        Returns:
            boolean - Whether or not the bucket was created."""
        _task = self.async_create_standard_bucket(server, name, port, size, replicas)
        return _task.result(timeout)

    def bucket_delete(self, server, bucket='default', timeout=None):
        """Synchronously deletes a bucket

        Parameters:
            server - The server to delete the bucket on. (TestInputServer)
            bucket - The name of the bucket to be deleted. (String)

        Returns:
            boolean - Whether or not the bucket was deleted."""
        _task = self.async_bucket_delete(server, bucket)
        return _task.result(timeout)

    def init_node(self, server, async_init_node=True, disabled_consistent_view=None):
        """Synchronously initializes a node

        The task scheduled will initialize a node's username and password and will establish
        the node's memory quota to be 2/3 of the available system memory.

        Parameters:
            server - The server to initialize. (TestInputServer)
            disabled_consistent_view - disable consistent view

        Returns:
            boolean - Whether or not the node was properly initialized."""
        _task = self.async_init_node(server, disabled_consistent_view)
        return _task.result()

    def rebalance(self, servers, to_add, to_remove, timeout=None):
        """Syncronously rebalances a cluster

        Parameters:
            servers - All servers participating in the rebalance ([TestInputServers])
            to_add - All servers being added to the cluster ([TestInputServers])
            to_remove - All servers being removed from the cluster ([TestInputServers])

        Returns:
            boolean - Whether or not the rebalance was successful"""
        _task = self.async_rebalance(servers, to_add, to_remove)
        return _task.result(timeout)

    def load_gen_docs(self, server, bucket, generator, kv_store, op_type, exp=0, timeout=None, flag=0, only_store_hash=False, batch_size=1):
        _task = self.async_load_gen_docs(server, bucket, generator, kv_store, op_type, exp, flag, only_store_hash=only_store_hash, batch_size=batch_size)
        return _task.result(timeout)

    def workload(self, server, bucket, kv_store, num_ops, create, read, update, delete, exp, timeout=None):
        _task = self.async_workload(server, bucket, kv_store, num_ops, create, read, update,
                                    delete, exp)
        return _task.result(timeout)

    def verify_data(self, server, bucket, kv_store, timeout=None):
        _task = self.async_verify_data(server, bucket, kv_store)
        return _task.result(timeout)

    def wait_for_stats(self, servers, bucket, param, stat, comparison, value, timeout=None):
        """Synchronously wait for stats

        Waits for stats to match the criteria passed by the stats variable. See
        couchbase.stats_tool.StatsCommon.build_stat_check(...) for a description of
        the stats structure and how it can be built.

        Parameters:
            servers - The servers to get stats from. Specifying multiple servers will
                cause the result from each server to be added together before
                comparing. ([TestInputServer])
            bucket - The name of the bucket (String)
            param - The stats parameter to use. (String)
            stat - The stat that we want to get the value from. (String)
            comparison - How to compare the stat result to the value specified.
            value - The value to compare to.

        Returns:
            boolean - Whether or not the correct stats state was seen"""
        _task = self.async_wait_for_stats(servers, bucket, param, stat, comparison, value)
        return _task.result(timeout)

    def shutdown(self, force=False):
        self.task_manager.shutdown(force)

    def async_create_view(self, server, design_doc_name, view, bucket="default"):
        """Asynchronously creates a views in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            ViewCreateTask - A task future that is a handle to the scheduled task."""
        _task = ViewCreateTask(server, design_doc_name, view, bucket)
        self.task_manager.schedule(_task)
        return _task

    def create_view(self, server, design_doc_name, view, bucket="default", timeout=None):
        """Synchronously creates a views in a design doc

        Parameters:
            server - The server to handle create view task. (TestInputServer)
            design_doc_name - Design doc to be created or updated with view(s) being created (String)
            view - The view being created (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            string - revision number of design doc."""
        _task = self.async_create_view(server, design_doc_name, view, bucket)
        return _task.result(timeout)

    def async_delete_view(self, server, design_doc_name, view, bucket="default"):
        """Asynchronously deletes a views in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            ViewDeleteTask - A task future that is a handle to the scheduled task."""
        _task = ViewDeleteTask(server, design_doc_name, view, bucket)
        self.task_manager.schedule(_task)
        return _task

    def delete_view(self, server, design_doc_name, view, bucket="default", timeout=None):
        """Synchronously deletes a views in a design doc

        Parameters:
            server - The server to handle delete view task. (TestInputServer)
            design_doc_name - Design doc to be deleted or updated with view(s) being deleted (String)
            view - The view being deleted (document.View)
            bucket - The name of the bucket containing items for this view. (String)

        Returns:
            boolean - Whether or not delete view was successful."""
        _task = self.async_delete_view(server, design_doc_name, view, bucket)
        return _task.result(timeout)


    def async_query_view(self, server, design_doc_name, view_name, query,
                         expected_rows=None, bucket="default", retry_time=2):
        """Asynchronously query a views in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            ViewQueryTask - A task future that is a handle to the scheduled task."""
        _task = ViewQueryTask(server, design_doc_name, view_name, query, expected_rows, bucket, retry_time)
        self.task_manager.schedule(_task)
        return _task

    def query_view(self, server, design_doc_name, view_name, query,
                   expected_rows=None, bucket="default", retry_time=2, timeout=None):
        """Synchronously query a views in a design doc

        Parameters:
            server - The server to handle query view task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            expected_rows - The number of rows expected to be returned from the query (int)
            bucket - The name of the bucket containing items for this view. (String)
            retry_time - The time in seconds to wait before retrying failed queries (int)

        Returns:
            The result of the scheduled ViewQueryTask."""
        _task = self.async_query_view(server, design_doc_name, view_name, query, expected_rows, bucket, retry_time)
        return _task.result(timeout)


    def modify_fragmentation_config(self, server, config, bucket="default", timeout=None):
        """Synchronously modify fragmentation configuration spec

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            config - New compaction configuration (dict - see task)
            bucket - The name of the bucket fragementation config applies to. (String)

        Returns:
            boolean - True if config values accepted."""

        _task = ModifyFragmentationConfigTask(server, config, bucket)
        self.task_manager.schedule(_task)
        return _task.result(timeout)

    def async_monitor_active_task(self, server,
                                  type,
                                  target_value,
                                  wait_progress=100,
                                  num_iteration=100,
                                  wait_task=True):
        """Asynchronously monitor active task.

           When the active task reaches wait_progress, this method will return.

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            type - task type('indexer' , 'bucket_compaction', 'view_compaction' ) (String)
            target_value - target value (for example "_design/ddoc" for indexing, bucket "default"
                for bucket_compaction or "_design/dev_view" for view_compaction) (String)
            wait_progress - expected progress (int)
            num_iteration - failed test if progress is not changed during num iterations(int)
            wait_task - expect to find task in the first attempt(bool)

        Returns:
            MonitorActiveTask - A task future that is a handle to the scheduled task."""
        _task = MonitorActiveTask(server, type, target_value, wait_progress, num_iteration, wait_task)
        self.task_manager.schedule(_task)
        return _task

    def async_monitor_view_fragmentation(self, server,
                                         design_doc_name,
                                         fragmentation_value,
                                         bucket="default"):
        """Asynchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            MonitorViewFragmentationTask - A task future that is a handle to the scheduled task."""

        _task = MonitorViewFragmentationTask(server, design_doc_name,
                                             fragmentation_value, bucket)
        self.task_manager.schedule(_task)
        return _task

    def async_generate_expected_view_results(self, doc_generators, view, query):
        """Asynchronously generate expected view query results

        Parameters:
            doc_generators - Generators used for loading docs (DocumentGenerator[])
            view - The view with map function (View)
            query - Query params to filter docs from the generator. (dict)

        Returns:
            GenerateExpectedViewResultsTask - A task future that is a handle to the scheduled task."""

        _task = GenerateExpectedViewResultsTask(doc_generators, view, query)
        self.task_manager.schedule(_task)
        return _task

    def generate_expected_view_query_results(self, doc_generators, view, query, timeout=None):
        """Synchronously generate expected view query results

        Parameters:
            doc_generators - Generators used for loading docs (DocumentGenerator[])
            view - The view with map function (View)
            query - Query params to filter docs from the generator. (dict)

        Returns:
            list - A list of rows expected to be returned for given query"""

        _task = self.async_generate_expected_view_results(doc_generators, view, query)
        return _task.result(timeout)


    def async_view_query_verification(self, server, design_doc_name, view_name, query, expected_rows, num_verified_docs=20, bucket="default", query_timeout=20):
        """Asynchronously query a views in a design doc and does full verification of results

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            query - Query params being used with the query. (dict)
            expected_rows - The number of rows expected to be returned from the query (int)
            num_verified_docs - The number of docs to verify that require memcached gets (int)
            bucket - The name of the bucket containing items for this view. (String)
            query_timeout - The time to allow a query with stale=false to run. (int)

        Returns:
            ViewQueryVerificationTask - A task future that is a handle to the scheduled task."""
        _task = ViewQueryVerificationTask(server, design_doc_name, view_name, query, expected_rows, num_verified_docs, bucket, query_timeout)
        self.task_manager.schedule(_task)
        return _task

    def view_query_verification(self, server, design_doc_name, view_name, query, expected_rows, num_verified_docs=20, bucket="default", query_timeout=20, timeout=None):
        """Synchronously query a views in a design doc and does full verification of results

        Parameters:
            server - The server to handle query verification task. (TestInputServer)
            design_doc_name - Design doc with view(s) being queried(String)
            view_name - The view being queried (String)
            query - Query params being used with the query. (dict)
            expected_rows - The number of rows expected to be returned from the query (int)
            num_verified_docs - The number of docs to verify that require memcached gets (int)
            bucket - The name of the bucket containing items for this view. (String)
            query_timeout - The time to allow a query with stale=false to run. (int)

        Returns:
            dict - An object with keys: passed = True or False
                                        errors = reasons why verification failed """
        _task = self.async_view_query_verification(server, design_doc_name, view_name, query, expected_rows, num_verified_docs, bucket, query_timeout)
        return _task.result(timeout)


    def monitor_view_fragmentation(self, server,
                                   design_doc_name,
                                   fragmentation_value,
                                   bucket="default",
                                   timeout=None):
        """Synchronously monitor view fragmentation.

           When <fragmentation_value> is reached on the
           index file for <design_doc_name> this method
           will return.

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            fragmentation_value - target amount of fragmentation within index file to detect. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            boolean - True if <fragmentation_value> reached"""

        _task = self.async_monitor_view_fragmentation(server, design_doc_name,
                                                      fragmentation_value,
                                                      bucket)
        return _task.result(timeout)

    def async_compact_view(self, server, design_doc_name, bucket="default"):
        """Asynchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            ViewCompactionTask - A task future that is a handle to the scheduled task."""


        _task = ViewCompactionTask(server, design_doc_name, bucket)
        self.task_manager.schedule(_task)
        return _task

    def compact_view(self, server, design_doc_name, bucket="default", timeout=None):
        """Synchronously run view compaction.

        Compacts index file represented by views within the specified <design_doc_name>

        Parameters:
            server - The server to handle fragmentation config task. (TestInputServer)
            design_doc_name - design doc with views represented in index file. (String)
            bucket - The name of the bucket design_doc belongs to. (String)

        Returns:
            boolean - True if file size was reduced after compaction, False if successful but no work was done """

        _task = self.async_compact_view(server, design_doc_name, bucket)
        return _task.result(timeout)

    def async_failover(self, servers, to_failover):
        """Asyncronously fails over nodes

        Parameters:
            servers - All servers participating in the failover ([TestInputServers])
            to_failover - All servers being failed over ([TestInputServers])

        Returns:
            FailoverTask - A task future that is a handle to the scheduled task"""
        _task = FailoverTask(servers, to_failover)
        self.task_manager.schedule(_task)
        return _task

    def failover(self, servers, to_failover, timeout=None):
        """Syncronously fails over nodes

        Parameters:
            servers - All servers participating in the failover ([TestInputServers])
            to_failover - All servers being failed over ([TestInputServers])

        Returns:
            boolean - Whether or not the failover was successful"""
        _task = self.async_failover(servers, to_failover)
        return _task.result(timeout)
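
Every synchronous method on Cluster is a thin wrapper that schedules the corresponding async task and blocks on its result, so callers can mix both styles freely. A short, hypothetical usage sketch (master_server, all_servers and new_servers are placeholder TestInputServer objects):

cluster = Cluster()
try:
    # Blocking call: create the default bucket and wait up to 60 seconds.
    created = cluster.create_default_bucket(master_server, size=256, replicas=1,
                                            timeout=60)
    assert created, "default bucket was not created"
    # Non-blocking call: start a rebalance and collect the result later.
    rebalance_task = cluster.async_rebalance(all_servers, new_servers, [])
    # ... other work can run here while the rebalance proceeds ...
    assert rebalance_task.result(timeout=600), "rebalance failed"
finally:
    cluster.shutdown(force=True)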
Example No. 11
0
class ElasticSearchBase(object):

    def __init__(self, host, logger):
        #host is in the form IP address
        self.__log = logger
        self.__host = host
        self.__document = {}
        self.__mapping = {}
        self.__STATUSOK = 200
        self.__indices = []
        self.__index_types = {}
        self.__connection_url = 'http://{0}:{1}/'.format(self.__host.ip,
                                                        self.__host.port)
        self.es_queries = []
        self.task_manager = TaskManager("ES_Thread")
        self.task_manager.start()
        self.http = httplib2.Http

    def _http_request(self, api, method='GET', params='', headers=None,
                      timeout=30):
        if not headers:
            headers = {'Content-Type': 'application/json',
                       'Accept': '*/*'}
        try:
            response, content = httplib2.Http(timeout=timeout).request(api,
                                                                       method,
                                                                       params,
                                                                       headers)
            if response['status'] in ['200', '201', '202']:
                return True, content, response
            else:
                try:
                    json_parsed = json.loads(content)
                except ValueError as e:
                    json_parsed = {}
                    json_parsed["error"] = "status: {0}, content: {1}".\
                        format(response['status'], content)
                reason = "unknown"
                if "error" in json_parsed:
                    reason = json_parsed["error"]
                self.__log.error('{0} error {1} reason: {2} {3}'.format(
                    api,
                    response['status'],
                    reason,
                    content.rstrip('\n')))
                return False, content, response
        except socket.error as e:
            self.__log.error("socket error while connecting to {0} error {1} ".
                             format(api, e))
            raise ServerUnavailableException(ip=self.__host.ip)

    def is_running(self):
        """
         Make sure ES is up and running.
         Check that the service is running; if not, abort the test.
        """

        try:
            status, content, _ = self._http_request(
                self.__connection_url,
                'GET')
            if status:
                return True
            else:
                return False
        except Exception as e:
            raise e

    def delete_index(self, index_name):
        """
        Deletes index
        """
        try:
            url = self.__connection_url + index_name
            status, content, _ = self._http_request(url, 'DELETE')
        except Exception as e:
            raise e

    def delete_indices(self):
        """
        Delete all indices present
        """
        for index_name in self.__indices:
            self.delete_index(index_name)
            self.__log.info("ES index %s deleted" % index_name)

    def create_empty_index(self, index_name):
        """
        Creates an empty index, given the name
        """
        try:
            self.delete_index(index_name)
            status, content, _ = self._http_request(
                self.__connection_url + index_name,
                'PUT')
            if status:
                self.__indices.append(index_name)
        except Exception as e:
            raise Exception("Could not create ES index : %s" % e)

    def create_alias(self, name, indexes):
        """
        @name: alias name
        @indexes: list of target indexes
        """
        try:
            self.__log.info("Checking if ES alias '{0}' exists...".format(name))
            self.delete_index(name)
            alias_info = {"actions": [{"add": {"indices": indexes, "alias": name}}]}
            self.__log.info("Creating ES alias '{0}' on {1}...".format(
                name,
                indexes))
            status, content, _ = self._http_request(
                self.__connection_url + "_aliases",
                'POST',
                json.dumps(alias_info))
            if status:
                self.__log.info("ES alias '{0}' created".format(name))
                self.__indices.append(name)
        except Exception as e:
            raise Exception("Could not create ES alias : %s" % e)

    def async_load_ES(self, index_name, gen, op_type='create'):
        """
        Asynchronously load documents into the given ES index from the
        doc generator.
        """

        _task = ESLoadGeneratorTask(es_instance=self,
                                    index_name=index_name,
                                    generator=gen,
                                    op_type=op_type)
        self.task_manager.schedule(_task)
        return _task

    def async_bulk_load_ES(self, index_name, gen, op_type='create', batch=5000):
        _task = ESBulkLoadGeneratorTask(es_instance=self,
                                    index_name=index_name,
                                    generator=gen,
                                    op_type=op_type,
                                    batch=batch)
        self.task_manager.schedule(_task)
        return _task

    def load_bulk_data(self, filename):
        """
        Bulk load to ES from a file
        curl -s -XPOST 172.23.105.25:9200/_bulk --data-binary @req
        cat req:
        { "index" : { "_index" : "default_es_index", "_type" : "aruna", "_id" : "1" } }
        { "field1" : "value1" , "field2" : "value2"}
        { "index" : { "_index" : "default_es_index", "_type" : "aruna", "_id" : "2" } }
        { "field1" : "value1" , "field2" : "value2"}
        """
        try:
            import os
            url = self.__connection_url + "/_bulk"
            data = open(filename, "rb").read()
            status, content, _ = self._http_request(url,
                                                    'POST',
                                                    data)
            return status
        except Exception as e:
            raise e

    def load_data(self, index_name, document_json, doc_type, doc_id):
        """
        index_name : name of index into which the doc is loaded
        document_json: json doc
        doc_type : type of doc. Usually the '_type' field in the doc body
        doc_id : document id
        """
        try:
            url = self.__connection_url + index_name + '/' + doc_type + '/' +\
                  doc_id
            status, content, _ = self._http_request(url,
                                                    'POST',
                                                    document_json)
        except Exception as e:
            raise e

    def update_index(self, index_name):
        """
        Refresh the index after documents are inserted.
        This API must be called for new documents to become searchable.
        :param index_name:
        :return:
        """
        try:
            status, content, _ = self._http_request(
                self.__connection_url + index_name +'/_refresh',
                'POST')
        except Exception as e:
            raise e

    def search(self, index_name, query, result_size=1000000):
        """
           Run a search against the given index based on the query.
           :param index_name:
           :param query:
           :return: number of matches found, doc_ids and time taken
        """
        try:
            doc_ids = []
            url = self.__connection_url + index_name + '/_search?size='+ \
                  str(result_size)
            status, content, _ = self._http_request(
                url,
                'POST',
                json.dumps(query))
            if status:
                content = json.loads(content)
                for doc in content['hits']['hits']:
                    doc_ids.append(doc['_id'])
                return content['hits']['total'], doc_ids, content['took']
        except Exception as e:
            self.__log.error("Couldn't run query on ES: %s, reason : %s"
                             % (json.dumps(query), e))
            raise e

    def get_index_count(self, index_name):
        """
         Returns count of docs in the index
        """
        try:
            status, content, _ = self._http_request(
                self.__connection_url + index_name + '/_count',
                'POST')
            if status:
                return json.loads(content)['count']
        except Exception as e:
            raise e

    def get_indices(self):
        """
        Return all the indices created
        :return: List of all indices
        """
        return self.__indices
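
The helper keeps its REST plumbing in _http_request and builds small index and document operations on top of it. A hypothetical end-to-end sketch (es_host and logger are placeholders; the index name and document contents are illustrative):

es = ElasticSearchBase(es_host, logger)
assert es.is_running(), "Elasticsearch is not reachable"
es.create_empty_index("default_es_index")
es.load_data("default_es_index",
             json.dumps({"field1": "value1", "field2": "value2"}),
             doc_type="aruna", doc_id="1")
# Newly indexed documents only become searchable after a refresh.
es.update_index("default_es_index")
total, doc_ids, took_ms = es.search("default_es_index",
                                    {"query": {"match": {"field1": "value1"}}})
es.delete_indices()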
Example No. 12
0
class ElasticSearchBase(object):

    def __init__(self, host, logger):
        #host is in the form IP address
        self.__log = logger
        self.__host = host
        self.__document = {}
        self.__mapping = {}
        self.__STATUSOK = 200
        self.__indices = []
        self.__index_types = {}
        self.__connection_url = 'http://{0}:{1}/'.format(self.__host.ip,
                                                        self.__host.port)
        self.es_queries = []
        self.task_manager = TaskManager("ES_Thread")
        self.task_manager.start()
        self.http = httplib2.Http

    def _http_request(self, api, method='GET', params='', headers=None,
                      timeout=30):
        if not headers:
            headers = {'Content-Type': 'application/json',
                       'Accept': '*/*'}
        try:
            response, content = httplib2.Http(timeout=timeout).request(api,
                                                                       method,
                                                                       params,
                                                                       headers)
            if response['status'] in ['200', '201', '202']:
                return True, content, response
            else:
                try:
                    json_parsed = json.loads(content)
                except ValueError as e:
                    json_parsed = {}
                    json_parsed["error"] = "status: {0}, content: {1}".\
                        format(response['status'], content)
                reason = "unknown"
                if "error" in json_parsed:
                    reason = json_parsed["error"]
                self.__log.error('{0} error {1} reason: {2} {3}'.format(
                    api,
                    response['status'],
                    reason,
                    content.rstrip('\n')))
                return False, content, response
        except socket.error as e:
            self.__log.error("socket error while connecting to {0} error {1} ".
                             format(api, e))
            raise ServerUnavailableException(ip=self.__host.ip)

    def is_running(self):
        """
         Make sure ES is up and running.
         Check that the service is running; if not, abort the test.
        """

        try:
            status, content, _ = self._http_request(
                self.__connection_url,
                'GET')
            if status:
                return True
            else:
                return False
        except Exception as e:
            raise e

    def delete_index(self, index_name):
        """
        Deletes index
        """
        try:
            url = self.__connection_url + index_name
            status, content, _ = self._http_request(url, 'DELETE')
        except Exception as e:
            raise e

    def delete_indices(self):
        """
        Delete all indices present
        """
        for index_name in self.__indices:
            self.delete_index(index_name)
            self.__log.info("ES index %s deleted" % index_name)

    def create_empty_index(self, index_name):
        """
        Creates an empty index, given the name
        """
        try:
            self.delete_index(index_name)
            status, content, _ = self._http_request(
                self.__connection_url + index_name,
                'PUT')
            if status:
                self.__indices.append(index_name)
        except Exception as e:
            raise Exception("Could not create ES index : %s" % e)

    def create_empty_index_with_bleve_equivalent_std_analyzer(self, index_name):
        """
        Refer:
        https://www.elastic.co/guide/en/elasticsearch/guide/current/
        configuring-analyzers.html
        """
        try:
            self.delete_index(index_name)
            status, content, _ = self._http_request(
                self.__connection_url + index_name,
                'PUT', json.dumps(BLEVE.STD_ANALYZER))
            if status:
                self.__indices.append(index_name)
        except Exception as e:
            raise Exception("Could not create index with ES std analyzer : %s"
                            % e)

    def create_index_mapping(self, index_name, es_mapping, fts_mapping=None):
        """
        Creates a new default index, with the given mapping
        """
        self.delete_index(index_name)

        if not fts_mapping:
            map = {"mappings": es_mapping, "settings": BLEVE.STD_ANALYZER['settings']}
        else :
            # Find the ES equivalent char_filter, token_filter and tokenizer
            es_settings = self.populate_es_settings(fts_mapping['params']
                                                    ['mapping']['analysis']['analyzers'])

            # Create an ES custom index definition
            map = {"mappings": es_mapping, "settings": es_settings['settings']}

        # Create ES index
        try:
            self.__log.info("Creating %s with mapping %s"
                            % (index_name, json.dumps(map, indent=3)))
            status, content, _ = self._http_request(
                self.__connection_url + index_name,
                'PUT',
                json.dumps(map))
            if status:
                self.__log.info("SUCCESS: ES index created with above mapping")
            else:
                raise Exception("Could not create ES index")
        except Exception as e:
            raise Exception("Could not create ES index : %s" % e)

    def populate_es_settings(self, fts_custom_analyzers_def):
        """
        Populates the custom analyzer defintion of the ES Index Definition.
        Refers to the FTS Custom Analyzers definition and creates an
            equivalent definition for each ES custom analyzer
        :param fts_custom_analyzers_def: FTS Custom Analyzer Definition
        :return:
        """

        analyzer_map = {}
        for customAnalyzerName in fts_custom_analyzers_def:
            fts_char_filters = fts_custom_analyzers_def[customAnalyzerName]["char_filters"]
            fts_tokenizer = fts_custom_analyzers_def[customAnalyzerName]["tokenizer"]
            fts_token_filters = fts_custom_analyzers_def[customAnalyzerName]["token_filters"]

            analyzer_map[customAnalyzerName] = {}
            analyzer_map[customAnalyzerName]["char_filter"] = []
            analyzer_map[customAnalyzerName]["filter"] = []
            analyzer_map[customAnalyzerName]["tokenizer"] = ""

            for fts_char_filter in fts_char_filters:
                analyzer_map[customAnalyzerName]['char_filter'].append(
                    BLEVE.FTS_ES_ANALYZER_MAPPING['char_filters'][fts_char_filter])

            analyzer_map[customAnalyzerName]['tokenizer'] = \
                BLEVE.FTS_ES_ANALYZER_MAPPING['tokenizers'][fts_tokenizer]

            for fts_token_filter in fts_token_filters:
                analyzer_map[customAnalyzerName]['filter'].append(
                    BLEVE.FTS_ES_ANALYZER_MAPPING['token_filters'][fts_token_filter])

        analyzer = BLEVE.CUSTOM_ANALYZER
        analyzer['settings']['analysis']['analyzer'] = analyzer_map
        return analyzer
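
    # Illustrative sketch (assumed component names, not from the original
    # source): given an FTS custom analyzer definition such as
    #     {"customAnalyzer1": {"char_filters": ["html"],
    #                          "tokenizer": "unicode",
    #                          "token_filters": ["to_lower"]}}
    # populate_es_settings() returns BLEVE.CUSTOM_ANALYZER with its
    # settings.analysis.analyzer entry replaced by
    #     {"customAnalyzer1": {"char_filter": [<ES equivalent of "html">],
    #                          "tokenizer": <ES equivalent of "unicode">,
    #                          "filter": [<ES equivalent of "to_lower">]}}
    # where each ES equivalent is looked up in BLEVE.FTS_ES_ANALYZER_MAPPING.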

    def create_alias(self, name, indexes):
        """
        @name: alias name
        @indexes: list of target indexes
        """
        try:
            self.__log.info("Checking if ES alias '{0}' exists...".format(name))
            self.delete_index(name)
            alias_info = {"actions": []}
            for index in indexes:
                alias_info['actions'].append({"add": {"index": index,
                                                      "alias": name}})
            self.__log.info("Creating ES alias '{0}' on {1}...".format(
                name,
                indexes))
            status, content, _ = self._http_request(
                self.__connection_url + "_aliases",
                'POST',
                json.dumps(alias_info))
            if status:
                self.__log.info("ES alias '{0}' created".format(name))
                self.__indices.append(name)
        except Exception as ex:
            raise Exception("Could not create ES alias : %s" % ex)

    def async_load_ES(self, index_name, gen, op_type='create'):
        """
        Asynchronously run query against FTS and ES and compare result
        note: every task runs a single query
        """

        _task = ESLoadGeneratorTask(es_instance=self,
                                    index_name=index_name,
                                    generator=gen,
                                    op_type=op_type)
        self.task_manager.schedule(_task)
        return _task

    def async_bulk_load_ES(self, index_name, gen, op_type='create', batch=5000):
        """
        Asynchronously bulk-loads docs into the given ES index,
        'batch' docs per bulk request.
        """
        _task = ESBulkLoadGeneratorTask(es_instance=self,
                                        index_name=index_name,
                                        generator=gen,
                                        op_type=op_type,
                                        batch=batch)
        self.task_manager.schedule(_task)
        return _task

    def load_bulk_data(self, filename):
        """
        Bulk load to ES from a file
        curl -s -XPOST 172.23.105.25:9200/_bulk --data-binary @req
        cat req:
        { "index" : { "_index" : "default_es_index", "_type" : "aruna", "_id" : "1" } }
        { "field1" : "value1" , "field2" : "value2"}
        { "index" : { "_index" : "default_es_index", "_type" : "aruna", "_id" : "2" } }
        { "field1" : "value1" , "field2" : "value2"}
        """
        try:
            url = self.__connection_url + "_bulk"
            # Read the pre-built bulk request file and POST it as-is
            with open(filename, "rb") as bulk_file:
                data = bulk_file.read()
            status, content, _ = self._http_request(url,
                                                    'POST',
                                                    data)
            return status
        except Exception as e:
            raise e
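
    # Minimal sketch (hypothetical doc ids/values, not part of the original
    # class) of building a bulk request file in the format shown in the
    # load_bulk_data() docstring, then posting it:
    #
    #     with open("req", "w") as req_file:
    #         for doc_id, doc in [("1", {"field1": "value1"}),
    #                             ("2", {"field1": "value2"})]:
    #             req_file.write(json.dumps(
    #                 {"index": {"_index": "default_es_index",
    #                            "_type": "aruna",
    #                            "_id": doc_id}}) + "\n")
    #             req_file.write(json.dumps(doc) + "\n")
    #     es_conn.load_bulk_data("req")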

    def load_data(self, index_name, document_json, doc_type, doc_id):
        """
        index_name : name of index into which the doc is loaded
        document_json: json doc
        doc_type : type of doc. Usually the '_type' field in the doc body
        doc_id : document id
        """
        try:
            url = self.__connection_url + index_name + '/' + doc_type + '/' +\
                  doc_id
            status, content, _ = self._http_request(url,
                                                    'POST',
                                                    document_json)
        except Exception as e:
            raise e

    def update_index(self, index_name):
        """
        Refreshes the given index so that newly indexed documents become
        visible to search; call this after inserts/updates.
        :param index_name:
        :return:
        """
        try:
            status, content, _ = self._http_request(
                self.__connection_url + index_name +'/_refresh',
                'POST')
        except Exception as e:
            raise e
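
    # Usage sketch (hypothetical doc values; es_conn is an instance of this
    # class): index a single document via load_data(), then refresh the index
    # so it becomes searchable:
    #
    #     es_conn.load_data("default_es_index",
    #                       json.dumps({"name": "John", "age": 30}),
    #                       doc_type="emp",
    #                       doc_id="100")
    #     es_conn.update_index("default_es_index")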

    def search(self, index_name, query, result_size=1000000):
        """
        Runs the given query against the index.
        :param index_name: name of the index to search
        :param query: ES query body (dict)
        :return: number of matches found, doc_ids and time taken (ms)
        """
        try:
            doc_ids = []
            url = self.__connection_url + index_name + '/_search?size='+ \
                  str(result_size)
            status, content, _ = self._http_request(
                url,
                'POST',
                json.dumps(query))
            if status:
                content = json.loads(content)
                for doc in content['hits']['hits']:
                    doc_ids.append(doc['_id'])
                return content['hits']['total'], doc_ids, content['took']
        except Exception as e:
            self.__log.error("Couldn't run query on ES: %s, reason : %s"
                             % (json.dumps(query), e))
            raise e
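
    # Usage sketch (hypothetical query): a simple match query; search()
    # returns the total hit count, the matching doc ids and ES's 'took'
    # time in milliseconds:
    #
    #     total, doc_ids, took = es_conn.search(
    #         "default_es_index",
    #         {"query": {"match": {"name": "John"}}})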

    def get_index_count(self, index_name):
        """
        Returns the count of docs in the index
        """
        try:
            status, content, _ = self._http_request(
                self.__connection_url + index_name + '/_count',
                'POST')
            if status:
                return json.loads(content)['count']
        except Exception as e:
            raise e

    def get_indices(self):
        """
        Return all the indices created
        :return: List of all indices
        """
        return self.__indices