Example #1
class SpatialViewQueriesTests(BaseTestCase):
    def setUp(self):
        super(SpatialViewQueriesTests, self).setUp()
        self.thread_crashed = Event()
        self.thread_stopped = Event()
        self.skip_rebalance = self.input.param("skip_rebalance", False)
        self.use_dev_views = self.input.param("use-dev-views", False)
        self.all_view_one_ddoc = self.input.param("all-view-one-ddoc", False)
        self.default_ddoc_name = "test-ddoc-query"
        self.default_view_name = "test-view-query"
        self.params = self.get_query_params()
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        # Create the helper once the bucket name has been resolved.
        self.helper = SpatialHelper(self, self.bucket_name)

        if not self.skip_rebalance:
            self.cluster.rebalance(self.servers[:], self.servers[1:], [])
        #load some items to verify
        self.docs = self.helper.insert_docs(self.num_items,
                                            'spatial-doc',
                                            return_docs=True)
        self.ddocs = self.helper.create_default_views(
            is_one_ddoc=self.all_view_one_ddoc)

    def suite_setUp(self):
        pass

    def tearDown(self):
        super(SpatialViewQueriesTests, self).tearDown()

    def suite_tearDown(self):
        pass

    def test_spatial_view_queries(self):
        error = self.input.param('error', None)
        try:
            self.query_and_verify_result(self.docs, self.params)
        except Exception as ex:
            if error and str(ex).find(error) != -1:
                self.log.info("Error caught as expected %s" % error)
                return
            else:
                self.fail("Unexpected error appeared during run %s" % ex)
        if error:
            self.fail("Expected error '%s' didn't appear" % error)

    def test_add_spatial_view_queries_threads(self):
        diff_nodes = self.input.param("diff-nodes", False)
        query_threads = []
        for i in range(len(self.servers)):
            node = self.servers[i] if diff_nodes else self.master
            self.query_and_verify_result(self.docs, self.params, node=node)
            q_thread = Thread(target=self.query_and_verify_result,
                              name="query_thread" + str(i),
                              args=(self.docs, self.params, node))
            query_threads.append(q_thread)
            q_thread.start()
        for q_thread in query_threads:
            q_thread.join()
        if self.thread_crashed.is_set():
            self.fail("Error occurred during run")

    def test_view_queries_during_rebalance(self):
        start_cluster = self.input.param('start-cluster', 1)
        servers_in = self.input.param('servers_in', 0)
        servers_out = self.input.param('servers_out', 0)
        if start_cluster > 1:
            rebalance = self.cluster.async_rebalance(
                self.servers[:1], self.servers[1:start_cluster], [])
            rebalance.result()
        servs_in = []
        servs_out = []
        if servers_in:
            servs_in = self.servers[start_cluster:servers_in + 1]
        if servers_out:
            if start_cluster > 1:
                servs_out = self.servers[1:start_cluster]
                servs_out = servs_out[-servers_out:]
            else:
                servs_out = self.servers[-servers_out:]
        rebalance = self.cluster.async_rebalance(self.servers, servs_in,
                                                 servs_out)
        self.query_and_verify_result(self.docs, self.params)
        rebalance.result()

    def test_view_queries_node_pending_state(self):
        operation = self.input.param('operation', 'add_node')
        rest = RestConnection(self.master)
        if operation == 'add_node':
            self.log.info("adding the node %s:%s" %
                          (self.servers[1].ip, self.servers[1].port))
            otpNode = rest.add_node(self.master.rest_username,
                                    self.master.rest_password,
                                    self.servers[1].ip, self.servers[1].port)
        elif operation == 'failover':
            nodes = rest.node_statuses()
            nodes = [
                node for node in nodes
                if node.ip != self.master.ip or node.port != self.master.port
            ]
            rest.fail_over(nodes[0].id)
        else:
            self.fail("There is no operation %s" % operation)
        self.query_and_verify_result(self.docs, self.params)

    def test_view_queries_failover(self):
        num_nodes = self.input.param('num-nodes', 1)
        self.cluster.failover(self.servers, self.servers[1:num_nodes])
        self.cluster.rebalance(self.servers, [], self.servers[1:num_nodes])
        self.query_and_verify_result(self.docs, self.params)

    def test_views_with_warm_up(self):
        warmup_node = self.servers[-1]
        shell = RemoteMachineShellConnection(warmup_node)
        shell.stop_couchbase()
        # Give the server time to stop completely; after restart the node
        # goes through warmup while the views are queried.
        time.sleep(20)
        shell.start_couchbase()
        shell.disconnect()
        self.query_and_verify_result(self.docs, self.params)

    def test_view_queries_during_ddoc_compaction(self):
        fragmentation_value = self.input.param("fragmentation_value", 80)
        self.disable_compaction()
        fragmentation_monitor = self.cluster.async_monitor_view_fragmentation(
            self.master, self.ddocs[0].name, fragmentation_value,
            self.default_bucket_name)
        end_time = time.time() + self.wait_timeout * 30
        while (fragmentation_monitor.state != "FINISHED"
               and end_time > time.time()):
            self.docs = self.helper.insert_docs(self.num_items,
                                                'spatial-doc',
                                                return_docs=True)

        if (end_time < time.time()
                and fragmentation_monitor.state != "FINISHED"):
            self.fail("failed to reach fragmentation value after %s sec" %
                      (self.wait_timeout * 30))
        fragmentation_monitor.result()
        compaction_task = self.cluster.async_compact_view(
            self.master, self.ddocs[0].name, self.default_bucket_name)
        self.query_and_verify_result(self.docs, self.params)
        result = compaction_task.result(self.wait_timeout * 10)
        self.assertTrue(
            result, "Compaction didn't finish correctly. Please check diags")

    def get_query_params(self):
        current_params = {}
        for key in self.input.test_params:
            if key == 'skip' or key == 'limit':
                current_params[key] = int(self.input.test_params[key])
            elif key == 'bbox':
                current_params[key] = [
                    int(x)
                    for x in self.input.test_params[key][1:-1].split(",")
                ]
            elif key == 'stale':
                current_params[key] = self.input.test_params[key]
        return current_params

    def query_and_verify_result(self, doc_inserted, params, node=None):
        try:
            rest = RestConnection(self.master)
            if node:
                rest = RestConnection(node)
            expected_ddocs = self.helper.generate_matching_docs(
                doc_inserted, params)
            for ddoc in self.ddocs:
                for view in ddoc.spatial_views:
                    result_ddocs = self.helper.query_view(
                        rest,
                        ddoc,
                        view,
                        bucket=self.bucket_name,
                        extra_params=params,
                        num_expected=len(expected_ddocs),
                        num_tries=20)
                    self.helper.verify_matching_keys(expected_ddocs,
                                                     result_ddocs)
        except Exception as ex:
            self.thread_crashed.set()
            self.log.error(
                "****ERROR***** \n At least one of the threads crashed: %s" %
                (ex))
            raise ex
        finally:
            if not self.thread_stopped.is_set():
                self.thread_stopped.set()
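
The bbox, skip, limit and stale query parameters above arrive as flat strings in self.input.test_params and are converted by get_query_params. Below is a minimal standalone sketch of that conversion; the function name parse_query_params and the sample input are illustrative only, and the exact wrapping of the bbox string (one leading and one trailing character stripped by the [1:-1] slice) is inferred from the code above.

# Standalone sketch of the conversion done in get_query_params.
# Assumes skip/limit arrive as integer strings, bbox as a bracketed
# comma-separated string, and stale is passed through unchanged.
def parse_query_params(test_params):
    current_params = {}
    for key, value in test_params.items():
        if key in ('skip', 'limit'):
            current_params[key] = int(value)
        elif key == 'bbox':
            # "[-180,-90,180,90]" -> [-180, -90, 180, 90]
            current_params[key] = [int(x) for x in value[1:-1].split(",")]
        elif key == 'stale':
            current_params[key] = value
    return current_params


if __name__ == '__main__':
    sample = {'skip': '10', 'limit': '100',
              'bbox': '[-180,-90,180,90]', 'stale': 'ok'}
    print(parse_query_params(sample))
    # -> {'skip': 10, 'limit': 100, 'bbox': [-180, -90, 180, 90], 'stale': 'ok'}
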
Example #2
class SpatialViewQueriesTests(BaseTestCase):

    def setUp(self):
        super(SpatialViewQueriesTests, self).setUp()
        self.thread_crashed = Event()
        self.thread_stopped = Event()
        self.skip_rebalance = self.input.param("skip_rebalance", False)
        self.use_dev_views = self.input.param("use-dev-views", False)
        self.all_view_one_ddoc = self.input.param("all-view-one-ddoc", False)
        self.default_ddoc_name = "test-ddoc-query"
        self.default_view_name = "test-view-query"
        self.params = self.get_query_params()
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        if not self.skip_rebalance:
            self.cluster.rebalance(self.servers[:], self.servers[1:], [])
        #load some items to verify
        self.docs = self.helper.insert_docs(self.num_items, 'spatial-doc',
                                            return_docs=True)
        self.ddocs = self.helper.create_default_views(
                                        is_one_ddoc=self.all_view_one_ddoc)

    def tearDown(self):
        super(SpatialViewQueriesTests, self).tearDown()

    def test_spatial_view_queries(self):
        error = self.input.param('error', None)
        try:
            self.query_and_verify_result(self.docs, self.params)
        except Exception as ex:
            if error and str(ex).find(error) != -1:
                self.log.info("Error caught as expected %s" % error)
                return
            else:
                self.fail("Unexpected error appeared during run %s" % ex)
        if error:
            self.fail("Expected error '%s' didn't appear" % error)

    def test_add_spatial_view_queries_threads(self):
        diff_nodes = self.input.param("diff-nodes", False)
        query_threads = []
        for i in xrange(len(self.servers)):
            node = self.servers[i] if diff_nodes else self.master
            self.query_and_verify_result(self.docs, self.params, node=node)
            q_thread = Thread(target=self.query_and_verify_result,
                              name="query_thread" + str(i),
                              args=(self.docs, self.params, node))
            query_threads.append(q_thread)
            q_thread.start()
        for q_thread in query_threads:
            q_thread.join()
        if self.thread_crashed.is_set():
            self.fail("Error occurred during run")

    def test_view_queries_during_rebalance(self):
        start_cluster = self.input.param('start-cluster', 1)
        servers_in = self.input.param('servers_in', 0)
        servers_out = self.input.param('servers_out', 0)
        if start_cluster > 1:
            rebalance = self.cluster.async_rebalance(self.servers[:1],
                                                     self.servers[1:start_cluster], [])
            rebalance.result()
        servs_in = []
        servs_out = []
        if servers_in:
            servs_in = self.servers[start_cluster:servers_in + 1]
        if servers_out:
            if start_cluster > 1:
                servs_out = self.servers[1:start_cluster]
                servs_out = servs_out[-servers_out:]
            else:
                servs_out = self.servers[-servers_out:]
        rebalance = self.cluster.async_rebalance(self.servers, servs_in, servs_out)
        self.query_and_verify_result(self.docs, self.params)
        rebalance.result()

    def test_view_queries_node_pending_state(self):
        operation = self.input.param('operation', 'add_node')
        rest = RestConnection(self.master)
        if operation == 'add_node':
            self.log.info("adding the node %s:%s" % (
                        self.servers[1].ip, self.servers[1].port))
            otpNode = rest.add_node(self.master.rest_username, self.master.rest_password,
                                    self.servers[1].ip, self.servers[1].port)
        elif operation == 'failover':
            nodes = rest.node_statuses()
            nodes = [node for node in nodes
                     if node.ip != self.master.ip or node.port != self.master.port]
            rest.fail_over(nodes[0].id)
        else:
            self.fail("There is no operation %s" % operation)
        self.query_and_verify_result(self.docs, self.params)

    def test_view_queries_failover(self):
        num_nodes = self.input.param('num-nodes', 1)
        self.cluster.failover(self.servers,
                              self.servers[1:num_nodes])
        self.cluster.rebalance(self.servers, [], self.servers[1:num_nodes])
        self.query_and_verify_result(self.docs, self.params)

    def test_views_with_warm_up(self):
        warmup_node = self.servers[-1]
        shell = RemoteMachineShellConnection(warmup_node)
        shell.stop_couchbase()
        # Give the server time to stop completely; after restart the node
        # goes through warmup while the views are queried.
        time.sleep(20)
        shell.start_couchbase()
        shell.disconnect()
        self.query_and_verify_result(self.docs, self.params)

    def test_view_queries_during_ddoc_compaction(self):
        fragmentation_value = self.input.param("fragmentation_value", 80)
        self.disable_compaction()
        fragmentation_monitor = self.cluster.async_monitor_view_fragmentation(
            self.master, self.ddocs[0].name, fragmentation_value,
            self.default_bucket_name)
        end_time = time.time() + self.wait_timeout * 30
        while fragmentation_monitor.state != "FINISHED" and end_time > time.time():
            self.docs = self.helper.insert_docs(self.num_items, 'spatial-doc',
                                                return_docs=True)

        if end_time < time.time() and fragmentation_monitor.state != "FINISHED":
            self.fail("failed to reach fragmentation value after %s sec"
                      % (self.wait_timeout * 30))
        fragmentation_monitor.result()
        compaction_task = self.cluster.async_compact_view(self.master, self.ddocs[0].name,
                                                          self.default_bucket_name)
        self.query_and_verify_result(self.docs, self.params)
        result = compaction_task.result(self.wait_timeout * 10)
        self.assertTrue(result, "Compaction didn't finish correctly. Please check diags")

    def get_query_params(self):
        current_params = {}
        for key in self.input.test_params:
            if key == 'skip' or key == 'limit':
                current_params[key] = int(self.input.test_params[key])
            elif key == 'bbox':
                current_params[key] = [int(x) for x in
                                       self.input.test_params[key][1:-1].split(",")]
            elif key == 'stale':
                current_params[key] = self.input.test_params[key]
        return current_params

    def query_and_verify_result(self, doc_inserted, params, node=None):
        try:
            rest = RestConnection(self.master)
            if node:
                rest = RestConnection(node)
            expected_ddocs = self.helper.generate_matching_docs(doc_inserted, params)
            for ddoc in self.ddocs:
                for view in ddoc.spatial_views:
                    result_ddocs = self.helper.query_view(rest, ddoc, view,
                                                          bucket=self.bucket_name,
                                                          extra_params=params,
                                                          num_expected=len(expected_ddocs),
                                                          num_tries=20)
                    self.helper.verify_matching_keys(expected_ddocs, result_ddocs)
        except Exception as ex:
            self.thread_crashed.set()
            self.log.error("****ERROR***** \n At least one of the threads crashed: %s" % (ex))
            raise ex
        finally:
            if not self.thread_stopped.is_set():
                self.thread_stopped.set()
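
Both versions coordinate their query threads through the two threading.Event flags created in setUp: a worker that raises sets thread_crashed before re-raising, and the parent test fails after joining all threads if the flag is set. Here is a minimal self-contained sketch of that pattern, with the worker body as a stand-in for the real spatial query (the failing task and the worker function are illustrative, not part of the suite).

# Sketch of the Event-based crash signalling used by
# test_add_spatial_view_queries_threads and query_and_verify_result.
from threading import Event, Thread

thread_crashed = Event()
thread_stopped = Event()


def worker(task_id):
    try:
        if task_id == 2:  # stand-in for a query that fails
            raise RuntimeError("query failed on worker %s" % task_id)
    except Exception:
        thread_crashed.set()  # remember that at least one worker failed
        raise
    finally:
        if not thread_stopped.is_set():
            thread_stopped.set()


threads = [Thread(target=worker, name="query_thread" + str(i), args=(i,))
           for i in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()

if thread_crashed.is_set():
    # the tests call self.fail("Error occurred during run") at this point
    print("Error occurred during run")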