Example #1
    def setUp(self):
        super(SpatialViewsTests, self).setUp()
        self.thread_crashed = Event()
        self.thread_stopped = Event()
        self.skip_rebalance = self.input.param("skip_rebalance", False)
        self.use_dev_views = self.input.param("use-dev-views", False)
        self.default_map = "function (doc) {emit(doc.geometry, doc.age);}"
        self.map_updated = "function (doc) {emit(doc.geometry, doc.name);}"
        self.default_ddoc_name = self.input.param("default_ddoc_name",
                                                  "test-ddoc")
        self.default_view_name = self.input.param("default_view_name",
                                                  "test-view")
        self.ddoc_op = self.input.param("ddoc-ops",
                                        "create")  # create/update/delete
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        if not self.skip_rebalance:
            self.cluster.rebalance(self.servers[:], self.servers[1:], [])
        # load some items to verify
        self.docs = self.helper.insert_docs(self.num_items,
                                            'spatial-doc',
                                            return_docs=True)
        self.num_ddoc = self.input.param('num-ddoc', 1)
        self.views_per_ddoc = self.input.param('views-per-ddoc', 1)
        self.non_spatial_views_per_ddoc = self.input.param(
            'non-spatial-views-per-ddoc', 0)
        if self.ddoc_op == 'update' or self.ddoc_op == 'delete':
            ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc,
                                    self.non_spatial_views_per_ddoc)
            self.create_ddocs(ddocs)
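
Note: every setUp in these examples reads its tunables through self.input.param(name, default). The real semantics live in testrunner's TestInput class, which is not shown here; the sketch below is only an assumption of the behaviour the examples rely on (missing keys fall back to the default, and string values from the command line are coerced to the default's type).

# Hypothetical stand-in for testrunner's TestInput.param -- illustrative
# only, not the verified implementation.
class FakeTestInput(object):
    def __init__(self, test_params):
        self.test_params = test_params

    def param(self, name, default):
        if name not in self.test_params:
            return default
        value = self.test_params[name]
        if isinstance(default, bool):
            # command-line values arrive as strings like "True"
            return str(value).lower() in ("true", "1")
        if isinstance(default, int):
            return int(value)
        return value

# Mirrors self.input.param("num-ddoc", 1) from the setUp above.
fake_input = FakeTestInput({"num-ddoc": "2", "use-dev-views": "True"})
assert fake_input.param("num-ddoc", 1) == 2
assert fake_input.param("use-dev-views", False) is True
assert fake_input.param("skip_rebalance", False) is False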
Example #2
    def setUp(self):
        super(SpatialViewQueriesTests, self).setUp()
        self.thread_crashed = Event()
        self.thread_stopped = Event()
        self.skip_rebalance = self.input.param("skip_rebalance", False)
        self.use_dev_views = self.input.param("use-dev-views", False)
        self.all_view_one_ddoc = self.input.param("all-view-one-ddoc", False)
        self.default_ddoc_name = "test-ddoc-query"
        self.default_view_name = "test-view-query"
        self.params = self.get_query_params()
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        # create the helper only after bucket_name is known; the original
        # built it before super().setUp(), when self.bucket_name and
        # self.input did not exist yet
        self.helper = SpatialHelper(self, self.bucket_name)

        if not self.skip_rebalance:
            self.cluster.rebalance(self.servers[:], self.servers[1:], [])
        # load some items to verify
        self.docs = self.helper.insert_docs(self.num_items,
                                            'spatial-doc',
                                            return_docs=True)
        self.ddocs = self.helper.create_default_views(
            is_one_ddoc=self.all_view_one_ddoc)
Example #3
    def setUp(self):
        try:
            if 'first_case' not in TestInputSingleton.input.test_params:
                TestInputSingleton.input.test_params['default_bucket'] = False
                TestInputSingleton.input.test_params['skip_cleanup'] = True
            self.default_bucket_name = 'default'
            super(SpatialQueryErrorsTests, self).setUp()
            if 'first_case' in TestInputSingleton.input.test_params:
                self.cluster.rebalance(self.servers[:], self.servers[1:], [])
            # We use only one bucket in this test suite
            self.rest = RestConnection(self.master)
            self.bucket = self.rest.get_bucket(
                Bucket(name=self.default_bucket_name))
            # num_docs must be a multiple of the number of vbuckets
            self.num_docs = self.input.param("num_docs", 2000)
            # `testname` is used for the design document name as well as the
            # spatial function name
            self.testname = 'query-errors'
            self.helper = SpatialHelper(self, "default")
            if 'first_case' in TestInputSingleton.input.test_params:
                self.create_ddoc()
                self.helper.insert_docs(self.num_docs, self.testname)
        except Exception as ex:
            self.input.test_params["stop-on-failure"] = True
            self.log.error("SETUP FAILED. ALL TESTS WILL BE SKIPPED")
            self.fail(ex)
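
The 'first_case' branching above works because TestInputSingleton exposes one shared test_params dict for the whole run: flags written during one case's setUp (skip_cleanup, default_bucket) are still visible when later cases run. A minimal sketch of that shared-state pattern, with illustrative names rather than testrunner's real classes:

# Illustrative singleton: one test_params dict shared by every test case,
# which is what the 'first_case' branching above relies on.
class _SharedInput(object):
    def __init__(self):
        # assumption: the conf file sets 'first_case' for the first test only
        self.test_params = {'first_case': True}

class InputSingletonSketch(object):
    input = _SharedInput()

def set_up(params):
    # mirrors the branch at the top of the setUp above
    if 'first_case' not in params:
        params['default_bucket'] = False
        params['skip_cleanup'] = True

set_up(InputSingletonSketch.input.test_params)   # first case: no-op
InputSingletonSketch.input.test_params.pop('first_case')  # assumed runner behaviour
set_up(InputSingletonSketch.input.test_params)   # later case flips the flags
assert InputSingletonSketch.input.test_params['skip_cleanup'] is True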
Example #4
class SpatialCompactionTests(BaseTestCase):
    def setUp(self):
        super(SpatialCompactionTests, self).setUp()
        self.start_cluster = self.input.param('start-cluster', len(self.servers))
        self.servers_in = self.input.param('servers_in', 0)
        self.servers_out = self.input.param('servers_out', 0)
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        if self.start_cluster > 1:
            rebalance = self.cluster.async_rebalance(
                self.servers[:1], self.servers[1:self.start_cluster], [])
            rebalance.result()

    def tearDown(self):
        super(SpatialCompactionTests, self).tearDown()


    def test_spatial_compaction(self):
        self.log.info(
            "description : test manual compaction for spatial indexes")
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_compaction"

        self.helper.create_index_fun(design_name, prefix)

        # Insert (resp. update, as they have the same prefix) and query
        # the spatial index several times so that the compaction makes sense
        for i in range(0, 8):
            self.helper.insert_docs(2000, prefix)
            self.helper.get_results(design_name)

        # Get the index size prior to compaction
        status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]

        if self.servers_in or self.servers_out:
            servs_in, servs_out = [], []
            if self.servers_in:
                servs_in = self.servers[self.start_cluster:self.servers_in + 1]
            if self.servers_out:
                servs_out = self.servers[-self.servers_out:]
            rebalance = self.cluster.async_rebalance(self.servers, servs_in, servs_out)

        # Do the compaction
        self.helper.compact(design_name)

        # Check if the index size got smaller
        status, info = self.helper.info(design_name)
        self.assertTrue(info["spatial_index"]["disk_size"] < disk_size,
                        "The file size ({0}) isn't smaller than the "
                        "pre compaction size ({1})."
                        .format(info["spatial_index"]["disk_size"],
                                disk_size))
        if self.servers_in or self.servers_out:
            rebalance.result()
Example #5
    def setUp(self):
        self.helper = SpatialHelper(self, "default")
        super(SpatialQueryTests, self).setUp()
        self.log = logger.Logger.get_logger()

        self.helper.setup_cluster()
        self.cluster = Cluster()
        self.servers = self.helper.servers
Example #6
    def setUp(self):
        try:
            if 'first_case' not in TestInputSingleton.input.test_params:
                TestInputSingleton.input.test_params['default_bucket'] = False
                TestInputSingleton.input.test_params['skip_cleanup'] = True
                TestInputSingleton.input.test_params['skip_buckets_handle'] = True
            self.default_bucket_name = 'default'
            super(SpatialQueryErrorsTests, self).setUp()
            if 'first_case' in TestInputSingleton.input.test_params:
                self.cluster.rebalance(self.servers[:], self.servers[1:], [])
            # We use only one bucket in this test suite
            self.rest = RestConnection(self.master)
            self.bucket = self.rest.get_bucket(Bucket(name=self.default_bucket_name))
            # num_docs must be a multiple of the number of vbuckets
            self.num_docs = self.input.param("num_docs", 2000)
            # `testname` is used for the design document name as well as the
            # spatial function name
            self.testname = 'query-errors'
            self.helper = SpatialHelper(self, "default")
            if 'first_case' in TestInputSingleton.input.test_params:
                self.create_ddoc()
                self.helper.insert_docs(self.num_docs, self.testname)
        except Exception as ex:
            self.input.test_params["stop-on-failure"] = True
            self.log.error("SETUP FAILED. ALL TESTS WILL BE SKIPPED")
            self.fail(ex)
Example #7
    def setUp(self):
        super(SpatialViewsTests, self).setUp()
        self.thread_crashed = Event()
        self.thread_stopped = Event()
        self.skip_rebalance = self.input.param("skip_rebalance", False)
        self.use_dev_views = self.input.param("use-dev-views", False)
        self.default_map = "function (doc) {emit(doc.geometry, doc.age);}"
        self.map_updated = "function (doc) {emit(doc.geometry, doc.name);}"
        self.default_ddoc_name = self.input.param("default_ddoc_name", "test-ddoc")
        self.default_view_name = self.input.param("default_view_name", "test-view")
        self.ddoc_op = self.input.param("ddoc-ops", "create")  # create/update/delete
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        if not self.skip_rebalance:
            self.cluster.rebalance(self.servers[:], self.servers[1:], [])
        # load some items to verify
        self.docs = self.helper.insert_docs(self.num_items, 'spatial-doc',
                                            return_docs=True)
        self.num_ddoc = self.input.param('num-ddoc', 1)
        self.views_per_ddoc = self.input.param('views-per-ddoc', 1)
        self.non_spatial_views_per_ddoc = self.input.param('non-spatial-views-per-ddoc', 0)
        if self.ddoc_op == 'update' or self.ddoc_op == 'delete':
            ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc,
                                    self.non_spatial_views_per_ddoc)
            self.create_ddocs(ddocs)
Example #8
    def setUp(self):
        self.helper = SpatialHelper(self, "default")
        super(SpatialQueryTests, self).setUp()
        self.log = logger.Logger.get_logger()

        self.helper.setup_cluster()
        self.cluster = Cluster()
        self.servers = self.helper.servers
Example #9
    def setUp(self):
        super(SpatialCompactionTests, self).setUp()
        self.start_cluster = self.input.param('start-cluster',
                                              len(self.servers))
        self.servers_in = self.input.param('servers_in', 0)
        self.servers_out = self.input.param('servers_out', 0)
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        try:
            if self.start_cluster > 1:
                rebalance = self.cluster.async_rebalance(
                    self.servers[:1], self.servers[1:self.start_cluster], [])
                rebalance.result()
        except Exception:
            super(SpatialCompactionTests, self).tearDown()
Example #10
    def setUp(self):
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.master = self.servers[0]
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()
        self.cluster = Cluster()
        self.default_bucket = self.input.param("default_bucket", True)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.memcached_buckets = self.input.param("memcached_buckets", 0)
        self.servers = self.helper.servers
        self.shell = RemoteMachineShellConnection(self.master)
        info = self.shell.extract_remote_info()
        self.os = info.type.lower()
        self.couchbase_login_info = "%s:%s" % (
            self.input.membase_settings.rest_username,
            self.input.membase_settings.rest_password)
        self.backup_location = self.input.param("backup_location",
                                                "/tmp/backup")
        self.command_options = self.input.param("command_options", '')
Example #11
    def setUp(self):
        super(SpatialViewsTests, self).setUp()
        self.skip_rebalance = self.input.param("skip_rebalance", False)
        self.use_dev_views = self.input.param("use-dev-views", False)
        self.default_map = "function (doc) {emit(doc.geometry, doc.age);}"
        self.default_ddoc_name = self.input.param("default_ddoc_name",
                                                  "test-ddoc")
        self.default_view_name = self.input.param("default_view_name",
                                                  "test-view")
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        if not self.skip_rebalance:
            self.cluster.rebalance(self.servers[:], self.servers[1:], [])
        # load some items to verify
        self.docs = self.helper.insert_docs(self.num_items,
                                            'spatial-doc',
                                            wait_for_persistence=True,
                                            return_docs=True)
Example #12
    def setUp(self):
        super(SpatialCompactionTests, self).setUp()
        self.start_cluster = self.input.param('start-cluster', len(self.servers))
        self.servers_in = self.input.param('servers_in', 0)
        self.servers_out = self.input.param('servers_out', 0)
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        if self.start_cluster > 1:
            rebalance = self.cluster.async_rebalance(
                self.servers[:1], self.servers[1:self.start_cluster], [])
            rebalance.result()
Example #13
class IBRSpatialTests(SpatialQueryTests):
    def setUp(self):
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.master = self.servers[0]
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()
        self.cluster = Cluster()
        self.default_bucket = self.input.param("default_bucket", True)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.memcached_buckets = self.input.param("memcached_buckets", 0)
        self.servers = self.helper.servers
        self.shell = RemoteMachineShellConnection(self.master)
        info = self.shell.extract_remote_info()
        self.os = info.type.lower()
        self.couchbase_login_info = "%s:%s" % (
            self.input.membase_settings.rest_username,
            self.input.membase_settings.rest_password)
        self.backup_location = self.input.param("backup_location",
                                                "/tmp/backup")
        self.command_options = self.input.param("command_options", '')

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_backup_with_spatial_data(self):
        num_docs = self.helper.input.param("num-docs", 5000)
        self.log.info("description : Make limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))
        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

        if not self.command_options:
            self.command_options = []
        options = self.command_options + [' -m full']

        self.total_backups = 1
        self.shell.execute_cluster_backup(self.couchbase_login_info,
                                          self.backup_location, options)
        time.sleep(2)

        self.buckets = RestConnection(self.master).get_buckets()
        bucket_names = [bucket.name for bucket in self.buckets]
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        gc.collect()

        self.helper._create_default_bucket()
        self.shell.restore_backupFile(self.couchbase_login_info,
                                      self.backup_location, bucket_names)

        SimpleDataSet(self.helper, num_docs)._create_views()
        self._query_test_init(data_set)
Example #14
class IBRSpatialTests(SpatialQueryTests):
    def setUp(self):
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.master = self.servers[0]
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()
        self.cluster = Cluster()
        self.default_bucket = self.input.param("default_bucket", True)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.memcached_buckets = self.input.param("memcached_buckets", 0)
        self.servers = self.helper.servers
        self.shell = RemoteMachineShellConnection(self.master)
        info = self.shell.extract_remote_info()
        self.os = info.type.lower()
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = self.input.param("backup_location", "/tmp/backup")
        self.command_options = self.input.param("command_options", '')

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_backup_with_spatial_data(self):
        num_docs = self.helper.input.param("num-docs", 5000)
        self.log.info("description : Make limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))
        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

        if not self.command_options:
            self.command_options = []
        options = self.command_options + [' -m full']

        self.total_backups = 1
        self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
        time.sleep(2)

        self.buckets = RestConnection(self.master).get_buckets()
        bucket_names = [bucket.name for bucket in self.buckets]
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        gc.collect()

        self.helper._create_default_bucket()
        self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)

        SimpleDataSet(self.helper, num_docs)._create_views()
        self._query_test_init(data_set)
Example #15
    def setUp(self):
        super(SpatialViewsTests, self).setUp()
        self.skip_rebalance = self.input.param("skip_rebalance", False)
        self.use_dev_views = self.input.param("use-dev-views", False)
        self.default_map = "function (doc) {emit(doc.geometry, doc.age);}"
        self.default_ddoc_name = self.input.param("default_ddoc_name", "test-ddoc")
        self.default_view_name = self.input.param("default_view_name", "test-view")
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        if not self.skip_rebalance:
            self.cluster.rebalance(self.servers[:], self.servers[1:], [])
        # load some items to verify
        self.docs = self.helper.insert_docs(self.num_items, 'spatial-doc',
                                            wait_for_persistence=True,
                                            return_docs=True)
Example #16
    def setUp(self):
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.master = self.servers[0]
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()
        self.cluster = Cluster()
        self.default_bucket = self.input.param("default_bucket", True)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.memcached_buckets = self.input.param("memcached_buckets", 0)
        self.servers = self.helper.servers
        self.shell = RemoteMachineShellConnection(self.master)
        info = self.shell.extract_remote_info()
        self.os = info.type.lower()
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = self.input.param("backup_location", "/tmp/backup")
        self.command_options = self.input.param("command_options", '')
Example #17
    def setUp(self):
        super(SpatialViewQueriesTests, self).setUp()
        self.thread_crashed = Event()
        self.thread_stopped = Event()
        self.skip_rebalance = self.input.param("skip_rebalance", False)
        self.use_dev_views = self.input.param("use-dev-views", False)
        self.all_view_one_ddoc = self.input.param("all-view-one-ddoc", False)
        self.default_ddoc_name = "test-ddoc-query"
        self.default_view_name = "test-view-query"
        self.params = self.get_query_params()
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        if not self.skip_rebalance:
            self.cluster.rebalance(self.servers[:], self.servers[1:], [])
        # load some items to verify
        self.docs = self.helper.insert_docs(self.num_items, 'spatial-doc',
                                            return_docs=True)
        self.ddocs = self.helper.create_default_views(
            is_one_ddoc=self.all_view_one_ddoc)
Example #18
class SpatialViewQueriesTests(BaseTestCase):

    def setUp(self):
        super(SpatialViewQueriesTests, self).setUp()
        self.thread_crashed = Event()
        self.thread_stopped = Event()
        self.skip_rebalance = self.input.param("skip_rebalance", False)
        self.use_dev_views = self.input.param("use-dev-views", False)
        self.all_view_one_ddoc = self.input.param("all-view-one-ddoc", False)
        self.default_ddoc_name = "test-ddoc-query"
        self.default_view_name = "test-view-query"
        self.params = self.get_query_params()
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        if not self.skip_rebalance:
            self.cluster.rebalance(self.servers[:], self.servers[1:], [])
        # load some items to verify
        self.docs = self.helper.insert_docs(self.num_items, 'spatial-doc',
                                            return_docs=True)
        self.ddocs = self.helper.create_default_views(
                                        is_one_ddoc=self.all_view_one_ddoc)

    def tearDown(self):
        super(SpatialViewQueriesTests, self).tearDown()

    def test_spatial_view_queries(self):
        error = self.input.param('error', None)
        try:
            self.query_and_verify_result(self.docs, self.params)
        except Exception as ex:
            if error and str(ex).find(error) != -1:
                self.log.info("Error caught as expected %s" % error)
                return
            else:
                self.fail("Unexpected error appeared during run %s" % ex)
        if error:
            self.fail("Expected error '%s' didn't appear" % error)

    def test_add_spatial_view_queries_threads(self):
        diff_nodes = self.input.param("diff-nodes", False)
        query_threads = []
        for i in xrange(len(self.servers)):
            node = (self.master, self.servers[i])[diff_nodes]
            self.query_and_verify_result(self.docs, self.params, node=node)
            q_thread = Thread(target=self.query_and_verify_result,
                              name="query_thread" + str(i),
                              args=(self.docs, self.params, node))
            query_threads.append(q_thread)
            q_thread.start()
        for q_thread in query_threads:
            q_thread.join()
        if self.thread_crashed.is_set():
            self.fail("Error occured during run")

    def test_view_queries_during_rebalance(self):
        start_cluster = self.input.param('start-cluster', 1)
        servers_in = self.input.param('servers_in', 0)
        servers_out = self.input.param('servers_out', 0)
        if start_cluster > 1:
            rebalance = self.cluster.async_rebalance(self.servers[:1],
                                                     self.servers[1:start_cluster], [])
            rebalance.result()
        servs_in = []
        servs_out = []
        if servers_in:
            servs_in = self.servers[start_cluster:servers_in + 1]
        if servers_out:
            if start_cluster > 1:
                servs_out = self.servers[1:start_cluster]
                servs_out = servs_out[-servers_out:]
            else:
                servs_out = self.servers[-servers_out:]
        rebalance = self.cluster.async_rebalance(self.servers, servs_in, servs_out)
        self.query_and_verify_result(self.docs, self.params)
        rebalance.result()

    def test_view_queries_node_pending_state(self):
        operation = self.input.param('operation', 'add_node')
        rest = RestConnection(self.master)
        if operation == 'add_node':
            self.log.info("adding the node %s:%s" % (
                        self.servers[1].ip, self.servers[1].port))
            otpNode = rest.add_node(self.master.rest_username, self.master.rest_password,
                                    self.servers[1].ip, self.servers[1].port)
        elif operation == 'failover':
            nodes = rest.node_statuses()
            nodes = [node for node in nodes
                     if node.ip != self.master.ip or node.port != self.master.port]
            rest.fail_over(nodes[0].id)
        else:
            self.fail("There is no operation %s" % operation)
        self.query_and_verify_result(self.docs, self.params)

    def test_view_queries_failover(self):
        num_nodes = self.input.param('num-nodes', 1)
        self.cluster.failover(self.servers,
                              self.servers[1:num_nodes])
        self.cluster.rebalance(self.servers, [], self.servers[1:num_nodes])
        self.query_and_verify_result(self.docs, self.params)

    def test_views_with_warm_up(self):
        warmup_node = self.servers[-1]
        shell = RemoteMachineShellConnection(warmup_node)
        shell.stop_couchbase()
        time.sleep(20)
        shell.start_couchbase()
        shell.disconnect()
        self.query_and_verify_result(self.docs, self.params)

    def test_view_queries_during_ddoc_compaction(self):
        fragmentation_value = self.input.param("fragmentation_value", 80)
        self.disable_compaction()
        fragmentation_monitor = self.cluster.async_monitor_view_fragmentation(
            self.master, self.ddocs[0].name, fragmentation_value,
            self.default_bucket_name)
        end_time = time.time() + self.wait_timeout * 30
        while fragmentation_monitor.state != "FINISHED" and end_time > time.time():
            self.docs = self.helper.insert_docs(self.num_items, 'spatial-doc',
                                                return_docs=True)

        if end_time < time.time() and fragmentation_monitor.state != "FINISHED":
            self.fail("impossible to reach fragmentation value %s after %s sec"
                      % (fragmentation_value, self.wait_timeout * 30))
        fragmentation_monitor.result()
        compaction_task = self.cluster.async_compact_view(self.master, self.ddocs[0].name,
                                                          self.default_bucket_name)
        self.query_and_verify_result(self.docs, self.params)
        result = compaction_task.result(self.wait_timeout * 10)
        self.assertTrue(result, "Compaction didn't finished correctly. Please check diags")

    def get_query_params(self):
        current_params = {}
        for key in self.input.test_params:
            if key == 'skip' or key == 'limit':
                current_params[key] = int(self.input.test_params[key])
            elif key == 'bbox':
                current_params[key] = [int(x) for x in
                                       self.input.test_params[key][1:-1].split(",")]
            elif key == 'stale':
                current_params[key] = self.input.test_params[key]
        return current_params

    def query_and_verify_result(self, doc_inserted, params, node=None):
        try:
            rest = RestConnection(self.master)
            if node:
                rest = RestConnection(node)
            expected_ddocs = self.helper.generate_matching_docs(doc_inserted, params)
            for ddoc in self.ddocs:
                for view in ddoc.spatial_views:
                    result_ddocs = self.helper.query_view(rest, ddoc, view,
                                                          bucket=self.bucket_name,
                                                          extra_params=params,
                                                          num_expected=len(expected_ddocs),
                                                          num_tries=20)
                    self.helper.verify_matching_keys(expected_ddocs, result_ddocs)
        except Exception as ex:
            self.thread_crashed.set()
            self.log.error("****ERROR***** \n At least one of threads is crashed: %s" % (ex))
            raise ex
        finally:
            if not self.thread_stopped.is_set():
                self.thread_stopped.set()
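
get_query_params above is what turns raw string test parameters into typed view-query arguments; for example a bbox given as the string "[-180,-90,180,90]" is stripped of its brackets and split into ints. A standalone illustration of that parsing rule:

# Standalone illustration of the bbox handling in get_query_params above.
raw_bbox = "[-180,-90,180,90]"
bbox = [int(x) for x in raw_bbox[1:-1].split(",")]
assert bbox == [-180, -90, 180, 90]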
Example #19
class SpatialViewTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()


    def tearDown(self):
        self.helper.cleanup_cluster()


    def test_create_multiple_development_spatial(self):
        self.log.info("description : create multiple spatial views without "
                      "running any spatial view query")
        rest = self.helper.rest
        bucket = self.helper.bucket
        prefix = str(uuid.uuid4())
        name = "dev_test_spatial_multiple"

        design_names = ["{0}-{1}-{2}".format(name, i, prefix) \
                             for i in range(0, 5)]
        for design_name in design_names:
            self.helper.create_index_fun(design_name)
            response = rest.get_spatial(bucket, design_name)
            self.assertTrue(response)
            self.assertEquals(response["_id"],
                              "_design/{0}".format(design_name))
            self.log.info(response)


    def test_insert_x_docs(self):
        num_docs = self.helper.input.param("num-docs", 100)
        self.log.info("description : create a spatial view on {0} documents"\
                          .format(num_docs))
        design_name = "dev_test_insert_{0}_docs".format(num_docs)
        prefix = str(uuid.uuid4())[:7]

        inserted_keys = self._setup_index(design_name, num_docs, prefix)
        self.assertEqual(len(inserted_keys), num_docs)


    # Verifies the full docs, not only the keys
    def test_insert_x_docs_full_verification(self):
        num_docs = self.helper.input.param("num-docs", 100)
        self.log.info("description : create a spatial view with {0} docs"
                      " and verify the full documents".format(num_docs))
        design_name = "dev_test_insert_{0}_docs_full_verification"\
            .format(num_docs)
        prefix = str(uuid.uuid4())[:7]

        self.helper.create_index_fun(design_name)
        inserted_docs = self.helper.insert_docs(num_docs, prefix,
                                                return_docs=True)
        self.helper.query_index_for_verification(design_name, inserted_docs,
                                                 full_docs=True)


    def test_insert_x_delete_y_docs(self):
        num_docs = self.helper.input.param("num-docs", 15000)
        num_deleted_docs = self.helper.input.param("num-deleted-docs", 10000)
        self.log.info("description : create spatial view with {0} docs "
                      " and delete {1} docs".format(num_docs,
                                                    num_deleted_docs))
        design_name = "dev_test_insert_{0}_delete_{1}_docs"\
            .format(num_docs, num_deleted_docs)
        prefix = str(uuid.uuid4())[:7]

        inserted_keys = self._setup_index(design_name, num_docs, prefix)

        # Delete documents and verify that the documents got deleted
        deleted_keys = self.helper.delete_docs(num_deleted_docs, prefix)
        results = self.helper.get_results(design_name, 2*num_docs)
        result_keys = self.helper.get_keys(results)
        self.assertEqual(len(result_keys), num_docs-len(deleted_keys))
        self.helper.verify_result(inserted_keys, deleted_keys + result_keys)


    def test_insert_x_update_y_docs(self):
        num_docs = self.helper.input.param("num-docs", 15000)
        num_updated_docs = self.helper.input.param("num-updated-docs", 100)
        self.log.info("description : create spatial view with {0} docs "
                      " and update {1} docs".format(num_docs,
                                                    num_updated_docs))
        design_name = "dev_test_insert_{0}_delete_{1}_docs"\
            .format(num_docs, num_updated_docs)
        prefix = str(uuid.uuid4())[:7]

        self._setup_index(design_name, num_docs, prefix)

        # Update documents and verify that the documents got updated
        updated_keys = self.helper.insert_docs(num_updated_docs, prefix,
                                               dict(updated=True))
        results = self.helper.get_results(design_name, 2*num_docs)
        result_updated_keys = self._get_updated_docs_keys(results)
        self.assertEqual(len(updated_keys), len(result_updated_keys))
        self.helper.verify_result(updated_keys, result_updated_keys)


    def test_get_spatial_during_x_min_load_y_working_set(self):
        num_docs = self.helper.input.param("num-docs", 10000)
        duration = self.helper.input.param("load-time", 1)
        self.log.info("description : this test will continuously insert data "
                      "and get the spatial view results for {0} minutes")
        design_name = "dev_test_insert_and_get_spatial_{0}_mins"\
            .format(duration)
        prefix = str(uuid.uuid4())[:7]

        self.helper.create_index_fun(design_name)

        self.docs_inserted = []
        self.shutdown_load_data = False
        load_thread = Thread(
            target=self._insert_data_till_stopped,
            args=(num_docs, prefix))
        load_thread.start()

        self._get_results_for_x_minutes(design_name, duration)

        self.shutdown_load_data = True
        load_thread.join()

        # self.docs_inserted was set by the insertion thread
        # (_insert_data_till_stopped)
        self.helper.query_index_for_verification(design_name,
                                                 self.docs_inserted)


    # Create the index and insert documents, including verification that
    # the index contains them
    # Returns the keys of the inserted documents
    def _setup_index(self, design_name, num_docs, prefix):
        self.helper.create_index_fun(design_name)
        inserted_keys = self.helper.insert_docs(num_docs, prefix)
        self.helper.query_index_for_verification(design_name, inserted_keys)

        return inserted_keys


    # Return the keys for all docs that contain a key called "updated"
    # in the value
    def _get_updated_docs_keys(self, results):
        keys = []
        if results:
            rows = results["rows"]
            for row in rows:
                if "updated" in row["value"]:
                    keys.append(row["id"].encode("ascii", "ignore"))
            self.log.info("{0} documents to updated".format(len(keys)))
        return keys


    def _get_results_for_x_minutes(self, design_name, duration, delay=5):
        random.seed(0)
        start = time.time()
        while (time.time() - start) < duration * 60:
            limit = random.randint(1, 1000)
            self.log.info("{0} seconds has passed ....".format(
                    (time.time() - start)))
            results = self.helper.get_results(design_name, limit)
            keys = self.helper.get_keys(results)
            self.log.info("spatial view returned {0} rows".format(len(keys)))
            time.sleep(delay)

    def _insert_data_till_stopped(self, num_docs, prefix):
        while not self.shutdown_load_data:
            # Will be read after the function is terminated
            self.docs_inserted = self.helper.insert_docs(
                num_docs, prefix, wait_for_persistence=False)


    def test_x_docs_failover(self):
        num_docs = self.helper.input.param("num-docs", 10000)
        self.log.info("description : test failover with {0} documents"\
                          .format(num_docs))
        design_name = "dev_test_failover_{0}".format(num_docs)
        prefix = str(uuid.uuid4())[:7]

        fh = FailoverHelper(self.helper.servers, self)

        inserted_keys = self._setup_index(design_name, num_docs, prefix)
        failover_nodes = fh.failover(1)
        self.helper.query_index_for_verification(design_name, inserted_keys)

        # The test cleanup expects all nodes running, hence spin the
        # full cluster up again
        fh.undo_failover(failover_nodes)
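
All of the examples drive the cluster through SpatialHelper, whose definition is never shown. The stub below sketches just the surface these tests call, with signatures inferred from the call sites; argument names, defaults, and return shapes are assumptions, not the real class.

# Interface sketch of SpatialHelper, inferred from the call sites above.
class SpatialHelperSketch(object):
    def __init__(self, testcase, bucket_name):
        self.testcase = testcase
        self.bucket_name = bucket_name

    def setup_cluster(self, do_rebalance=True):
        pass  # provision nodes and the bucket; Example #23 passes False

    def cleanup_cluster(self):
        pass  # used by the tearDown methods above

    def insert_docs(self, num_docs, prefix, extra_values=None,
                    wait_for_persistence=True, return_docs=False):
        return []  # keys of inserted docs, or full docs if return_docs

    def delete_docs(self, num_docs, prefix):
        return []  # keys of the deleted docs

    def create_index_fun(self, design_name, prefix=None):
        pass  # publish a (development) spatial design document

    def get_results(self, design_name, limit=None, extra_params=None):
        return {"rows": []}  # query the spatial view

    def get_keys(self, results):
        return [row["id"] for row in results["rows"]]

    def info(self, design_name):
        return True, {"spatial_index": {"disk_size": 0}}  # status, info

    def compact(self, design_name):
        pass  # trigger manual spatial-index compaction (Example #22)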
Example #20
class SpatialViewsTests(BaseTestCase):

    def setUp(self):
        super(SpatialViewsTests, self).setUp()
        self.thread_crashed = Event()
        self.thread_stopped = Event()
        self.skip_rebalance = self.input.param("skip_rebalance", False)
        self.use_dev_views = self.input.param("use-dev-views", False)
        self.default_map = "function (doc) {emit(doc.geometry, doc.age);}"
        self.map_updated = "function (doc) {emit(doc.geometry, doc.name);}"
        self.default_ddoc_name = self.input.param("default_ddoc_name", "test-ddoc")
        self.default_view_name = self.input.param("default_view_name", "test-view")
        self.ddoc_op = self.input.param("ddoc-ops", "create") #create\update\delete
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        if not self.skip_rebalance:
            self.cluster.rebalance(self.servers[:], self.servers[1:], [])
        # load some items to verify
        self.docs = self.helper.insert_docs(self.num_items, 'spatial-doc',
                                            return_docs=True)
        self.num_ddoc = self.input.param('num-ddoc', 1)
        self.views_per_ddoc = self.input.param('views-per-ddoc', 1)
        self.non_spatial_views_per_ddoc = self.input.param('non-spatial-views-per-ddoc', 0)
        if self.ddoc_op == 'update' or self.ddoc_op == 'delete':
            ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc,
                                    self.non_spatial_views_per_ddoc)
            self.create_ddocs(ddocs)

    def tearDown(self):
        super(SpatialViewsTests, self).tearDown()

    def test_add_spatial_views(self):
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, self.non_spatial_views_per_ddoc)
        self.perform_ddoc_ops(ddocs)

    def test_add_spatial_views_case_sensative(self):
        ddoc = DesignDocument(self.default_ddoc_name, [], spatial_views=[
                                  View(self.default_view_name, self.default_map,
                                       dev_view=self.use_dev_views, is_spatial=True),
                                  View(self.default_view_name.upper(), self.default_map,
                                       dev_view=self.use_dev_views, is_spatial=True)])
        self.create_ddocs([ddoc])

    def test_add_single_spatial_view(self):
        name_lenght = self.input.param('name_lenght', None)
        view_name = self.input.param('view_name', self.default_view_name)
        if name_lenght:
            view_name = ''.join(random.choice(string.lowercase) for x in xrange(name_lenght))
        not_compilable = self.input.param('not_compilable', False)
        error = self.input.param('error', None)
        map_fn = (self.default_map, 'function (doc) {emit(doc.geometry, doc.age);')[not_compilable]

        ddoc = DesignDocument(self.default_ddoc_name, [], spatial_views=[
                                  View(view_name, map_fn,
                                  dev_view=self.use_dev_views, is_spatial=True)])
        try:
            self.create_ddocs([ddoc])
        except Exception as ex:
            if error and str(ex).find(error) != -1:
                self.log.info("Error caught as expected %s" % error)
                return
            else:
                self.fail("Unexpected error appeared during run %s" % ex)
        if error:
            self.fail("Expected error '%s' didn't appear" % error)

    def test_add_views_to_1_ddoc(self):
        same_names = self.input.param('same-name', False)
        error = self.input.param('error', None)
        num_views_per_ddoc = 10
        create_threads = []
        try:
            for i in xrange(num_views_per_ddoc):
                ddoc = DesignDocument(self.default_ddoc_name, [], spatial_views=[
                                      View(self.default_view_name + (str(i), "")[same_names],
                                           self.default_map,
                                           dev_view=self.use_dev_views, is_spatial=True)])
                create_thread = Thread(target=self.create_ddocs,
                                       name="create_thread" + str(i),
                                       args=([ddoc,],))
                create_threads.append(create_thread)
                create_thread.start()
            for create_thread in create_threads:
                create_thread.join()
        except Exception as ex:
            if error and str(ex).find(error) != -1:
                self.log.info("Error caught as expected %s" % error)
                return
            else:
                self.fail("Unexpected error appeared during run %s" % ex)
        if error:
            self.fail("Expected error '%s' didn't appear" % error)

    def test_add_spatial_views_threads(self):
        same_names = self.input.param('same-name', False)
        num_views_per_ddoc = 10
        create_threads = []
        ddocs = []
        for i in xrange(num_views_per_ddoc):
            ddoc = DesignDocument(self.default_ddoc_name + str(i), [], spatial_views=[
                                  View(self.default_view_name + (str(i), "")[same_names],
                                       self.default_map,
                                       dev_view=self.use_dev_views, is_spatial=True)])
            ddocs.append(ddoc)
        if self.ddoc_op == 'update' or self.ddoc_op == 'delete':
            self.create_ddocs(ddocs)
        i = 0
        for ddoc in ddocs:
            create_thread = Thread(target=self.perform_ddoc_ops,
                                   name="ops_thread" + str(i),
                                   args=([ddoc,],))
            i += 1
            create_threads.append(create_thread)
            create_thread.start()
        for create_thread in create_threads:
            create_thread.join()
        if self.thread_crashed.is_set():
            self.fail("Error occured during run")

    def test_create_with_other_ddoc_ops(self):
        operation = self.input.param('operation', 'create')
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0)
        other_ddocs = self.make_ddocs(self.num_ddoc, 0, self.views_per_ddoc)
        if operation == 'delete' or operation == 'update':
            self.create_ddocs(other_ddocs)
        other_ddoc_threads = []
        for ddoc in other_ddocs:
            if operation == 'create' or operation == 'update':
                other_ddoc_thread = Thread(target=self.create_ddocs,
                                           name="other_doc_thread",
                                           args=(other_ddocs,))
            else:
                other_ddoc_thread = Thread(target=self.delete_views,
                                           name="other_doc_thread",
                                           args=(other_ddocs,))
            other_ddoc_threads.append(other_ddoc_thread)
            other_ddoc_thread.start()
        self.perform_ddoc_ops(ddocs)
        for thread in other_ddoc_threads:
            thread.join()

    def test_create_views_during_rebalance(self):
        start_cluster = self.input.param('start-cluster', 1)
        servers_in = self.input.param('servers_in', 0)
        servers_out = self.input.param('servers_out', 0)
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, self.non_spatial_views_per_ddoc)
        if start_cluster > 1:
            rebalance = self.cluster.async_rebalance(self.servers[:1],
                                                     self.servers[1:start_cluster], [])
            rebalance.result()
        servs_in = []
        servs_out = []
        if servers_in:
            servs_in = self.servers[start_cluster:servers_in + 1]
        if servers_out:
            if start_cluster > 1:
                servs_out = self.servers[1:start_cluster]
                servs_out = servs_out[-servers_out:]
            else:
                servs_out = self.servers[-servers_out:]
        rebalance_thread = Thread(target=self.cluster.rebalance,
                                  name="reb_thread",
                                  args=(self.servers[:1], servs_in, servs_out))
        rebalance_thread.start()
        self.perform_ddoc_ops(ddocs)
        rebalance_thread.join()

    def test_views_node_pending_state(self):
        operation = self.input.param('operation', 'add_node')
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0)
        rest = RestConnection(self.master)
        if operation == 'add_node':
            self.log.info("adding the node %s:%s" % (
                        self.servers[1].ip, self.servers[1].port))
            otpNode = rest.add_node(self.master.rest_username, self.master.rest_password,
                                    self.servers[1].ip, self.servers[1].port)
        elif operation == 'failover':
            nodes = rest.node_statuses()
            nodes = [node for node in nodes
                     if node.ip != self.master.ip or node.port != self.master.port]
            rest.fail_over(nodes[0].id)
        else:
            self.fail("There is no operation %s" % operation)
        self.perform_ddoc_ops(ddocs)

    def test_views_failover(self):
        num_nodes = self.input.param('num-nodes', 1)
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0)
        RebalanceHelper.wait_for_persistence(self.master, self.bucket_name)
        self.cluster.failover(self.servers,
                              self.servers[1:num_nodes])
        self.cluster.rebalance(self.servers, [], self.servers[1:num_nodes])
        self.perform_ddoc_ops(ddocs)

    def test_views_with_warm_up(self):
        warmup_node = self.servers[-1]
        shell = RemoteMachineShellConnection(warmup_node)
        shell.stop_couchbase()
        time.sleep(20)
        shell.start_couchbase()
        shell.disconnect()
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0)
        self.perform_ddoc_ops(ddocs)

    def test_views_during_index(self):
        ddocs = self.make_ddocs(1, 1, 1)
        self.create_ddocs(ddocs)
        # run a stale=false query to trigger indexing
        rest = RestConnection(self.master)
        for ddoc in ddocs:
            for view in ddoc.spatial_views:
                self.helper.query_view(rest, ddoc, view, bucket=self.bucket_name, extra_params={})
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 1)
        self.perform_ddoc_ops(ddocs)

    def test_views_during_ddoc_compaction(self):
        fragmentation_value = self.input.param("fragmentation_value", 80)
        ddoc_to_compact = DesignDocument("ddoc_to_compact", [], spatial_views=[
                                  View(self.default_view_name,
                                       'function (doc) { emit(doc.age, doc.name);}',
                                       dev_view=self.use_dev_views)])
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0)
        self.disable_compaction()
        self.create_ddocs([ddoc_to_compact,])
        fragmentation_monitor = self.cluster.async_monitor_view_fragmentation(
            self.master, ddoc_to_compact.name, fragmentation_value,
            self.default_bucket_name)
        end_time = time.time() + self.wait_timeout * 30
        while fragmentation_monitor.state != "FINISHED" and end_time > time.time():
            self.helper.insert_docs(self.num_items, 'spatial-doc')

        if end_time < time.time() and fragmentation_monitor.state != "FINISHED":
            self.fail("impossible to reach fragmentation value %s after %s sec"
                      % (fragmentation_value, self.wait_timeout * 30))
        fragmentation_monitor.result()
        compaction_task = self.cluster.async_compact_view(self.master, ddoc_to_compact.name,
                                                          self.default_bucket_name)
        self.perform_ddoc_ops(ddocs)
        result = compaction_task.result(self.wait_timeout * 10)
        self.assertTrue(result, "Compaction didn't finished correctly. Please check diags")

    def make_ddocs(self, ddocs_num, views_per_ddoc, non_spatial_views_per_ddoc):
        ddocs = []
        for i in xrange(ddocs_num):
            views = []
            for k in xrange(views_per_ddoc):
                views.append(View(self.default_view_name + str(k), self.default_map,
                                  dev_view=self.use_dev_views, is_spatial=True))
            non_spatial_views = []
            if non_spatial_views_per_ddoc:
                for k in xrange(non_spatial_views_per_ddoc):
                    non_spatial_views.append(
                        View(self.default_view_name + str(k),
                             'function (doc) { emit(null, doc);}',
                             dev_view=self.use_dev_views))
            ddocs.append(DesignDocument(self.default_ddoc_name + str(i),
                                        non_spatial_views,
                                        spatial_views=views))
        return ddocs

    def create_ddocs(self, ddocs, bucket=None):
        bucket_views = bucket or self.buckets[0]
        for ddoc in ddocs:
            if not (ddoc.views or ddoc.spatial_views):
                self.cluster.create_view(self.master, ddoc.name, [], bucket=bucket_views)
            for view in ddoc.views:
                self.cluster.create_view(self.master, ddoc.name, view, bucket=bucket_views)
            for view in ddoc.spatial_views:
                self.cluster.create_view(self.master, ddoc.name, view, bucket=bucket_views)

    def delete_views(self, ddocs, views=[], spatial_views=[], bucket=None):
        bucket_views = bucket or self.buckets[0]
        for ddoc in ddocs:
            vs = views or ddoc.views
            sp_vs = spatial_views or ddoc.spatial_views
            for view in vs:
                self.cluster.delete_view(self.master, ddoc.name, view, bucket=bucket_views)
            for view in sp_vs:
                self.cluster.delete_view(self.master, ddoc.name, view, bucket=bucket_views)

    def perform_ddoc_ops(self, ddocs):
        try:
            if self.ddoc_op == 'update':
                for ddoc in ddocs:
                    for view in ddoc.spatial_views:
                        view.map_func = self.map_updated
            if self.ddoc_op == 'delete':
                self.delete_views(ddocs)
            else:
                self.create_ddocs(ddocs)
        except Exception as ex:
            self.thread_crashed.set()
            self.log.error("****ERROR***** \n At least one of threads is crashed: %s" % (ex))
            raise ex
        finally:
            if not self.thread_stopped.is_set():
                self.thread_stopped.set()
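
A side note on an idiom that recurs in these tests: (a, b)[flag] indexes a two-tuple with a boolean (False is 0, True is 1), which is how lines like self.default_view_name + (str(i), "")[same_names] collapse every view onto one name when same-name collisions are being tested. A one-line equivalence check:

# (a, b)[flag] picks b when flag is True, since True indexes as 1.
same_names = True
suffix = (str(7), "")[same_names]
assert suffix == ("" if same_names else str(7))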
Example #21
class SpatialInfoTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_spatial_info(self):
        self.log.info("description : test info for spatial indexes")
        rest = self.helper.rest
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_info"

        self.helper.create_index_fun(design_name, prefix)

        # Fill the database and add an index
        self.helper.insert_docs(2000, prefix)
        self.helper.get_results(design_name)
        status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]

        self.assertTrue(disk_size > 0)
        self.assertEqual(info["name"], design_name)

        num_vbuckets = len(rest.get_vbuckets(self.helper.bucket))
        self.assertEqual(len(info["spatial_index"]["update_seq"]),
                         num_vbuckets)
        self.assertEqual(len(info["spatial_index"]["purge_seq"]), num_vbuckets)
        self.assertFalse(info["spatial_index"]["updater_running"])
        self.assertFalse(info["spatial_index"]["waiting_clients"] > 0)
        self.assertFalse(info["spatial_index"]["compact_running"])

        # Insert a lot of new documents and return right after the index
        # build starts (without waiting until it's done) to test whether
        # the updater fields are set correctly
        self.helper.insert_docs(50000, prefix)
        self.helper.get_results(design_name,
                                extra_params={"stale": "update_after"})
        # Somehow stale=update_after doesn't really return immediately,
        # thus these assertions are commented out. There's no real reason
        # to investigate, as the indexing is changing heavily at the moment
        # anyway
        #self.assertTrue(info["spatial_index"]["updater_running"])
        #self.assertTrue(info["spatial_index"]["waiting_commit"])
        #self.assertTrue(info["spatial_index"]["waiting_clients"] > 0)
        self.assertFalse(info["spatial_index"]["compact_running"])

        # Request the index again, to make sure it is fully updated
        self.helper.get_results(design_name)
        status, info = self.helper.info(design_name)
        self.assertFalse(info["spatial_index"]["updater_running"])
        self.assertFalse(info["spatial_index"]["waiting_clients"] > 0)
        self.assertFalse(info["spatial_index"]["compact_running"])
        self.assertTrue(info["spatial_index"]["disk_size"] > disk_size)
Example #22
class SpatialCompactionTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_spatial_compaction(self):
        self.log.info(
            "description : test manual compaction for spatial indexes")
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_compaction"

        self.helper.create_index_fun(design_name, prefix)

        # Insert (resp. update, as they have the same prefix) and query
        # the spatial index several times so that the compaction makes sense
        for i in range(0, 8):
            self.helper.insert_docs(2000, prefix)
            self.helper.get_results(design_name)

        # Get the index size prior to compaction
        status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]

        # Do the compaction
        self.helper.compact(design_name)

        # Check if the index size got smaller
        status, info = self.helper.info(design_name)
        self.assertTrue(
            info["spatial_index"]["disk_size"] < disk_size,
            "The file size ({0}) isn't smaller than the "
            "pre compaction size ({1}).".format(
                info["spatial_index"]["disk_size"], disk_size))
Example #23
class SpatialCompactionTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()


    def tearDown(self):
        self.helper.cleanup_cluster()


    def test_spatial_compaction(self):
        self.log.info(
            "description : test manual compaction for spatial indexes")
        rest = self.helper.rest
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_compaction"

        self.helper.create_index_fun(design_name, prefix)

        # Insert (resp. update, as they have the same prefix) and query
        # the spatial index several times so that the compaction makes sense
        for i in range(0, 8):
            doc_names = self.helper.insert_docs(2000, prefix)
            self.helper.get_results(design_name)

        # Get the index size prior to compaction
        status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]

        # Do the compaction
        self.helper.compact(design_name)

        # Check if the index size got smaller
        status, info = self.helper.info(design_name)
        self.assertTrue(info["spatial_index"]["disk_size"] < disk_size,
                        "The file size ({0}) isn't smaller than the "
                        "pre compaction size ({1})."
                        .format(info["spatial_index"]["disk_size"],
                                disk_size))
class SpatialRebalanceTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        # Setup, but don't rebalance cluster
        self.helper.setup_cluster(False)


    def tearDown(self):
        self.log.info("tear down test")
        self.helper.cleanup_cluster()


    def test_insert_x_delete_y_docs_create_cluster(self):
        num_docs = self.helper.input.param("num-docs", 100000)
        num_deleted_docs = self.helper.input.param("num-deleted-docs", 10000)
        msg = "description : have a single node, insert {0} docs, "\
            "delete {1} docs while creating a cluster and query it"
        self.log.info(msg.format(num_docs, num_deleted_docs))
        design_name = "dev_test_delete_10k_docs_create_cluster"
        prefix = str(uuid.uuid4())[:7]

        # Make sure we are fully de-clustered
        ClusterOperationHelper.cleanup_cluster(self.helper.servers)

        self.helper.create_index_fun(design_name, prefix)
        inserted_keys = self.helper.insert_docs(num_docs, prefix)

        # Start creating the cluster and rebalancing it without waiting until
        # it's finished
        ClusterOperationHelper.add_and_rebalance(self.helper.servers, False)

        deleted_keys = self.helper.delete_docs(num_deleted_docs, prefix)
        self._wait_for_rebalance()

        # Verify that the docs got deleted and are no longer part of the
        # spatial view
        results = self.helper.get_results(design_name, num_docs)
        result_keys = self.helper.get_keys(results)
        self.assertEqual(len(result_keys), num_docs - len(deleted_keys))
        self.helper.verify_result(inserted_keys, deleted_keys + result_keys)


    def test_insert_x_delete_y_docs_destroy_cluster(self):
        num_docs = self.helper.input.param("num-docs", 100000)
        num_deleted_docs = self.helper.input.param("num-deleted-docs", 10000)
        msg = "description : have a cluster, insert {0} docs, delete "\
            "{1} docs while destroying the cluster into a single node "\
            "and query it"
        self.log.info(msg.format(num_docs, num_deleted_docs))
        design_name = "dev_test_delete_{0}_docs_destroy_cluster".format(
            num_deleted_docs)
        prefix = str(uuid.uuid4())[:7]

        # Make sure we are fully clustered
        ClusterOperationHelper.add_and_rebalance(self.helper.servers)

        self.helper.create_index_fun(design_name, prefix)
        inserted_keys = self.helper.insert_docs(num_docs, prefix)

        # Start destroying the cluster and rebalancing it without waiting
        # until it's finished
        ClusterOperationHelper.cleanup_cluster(self.helper.servers,
                                               False)

        deleted_keys = self.helper.delete_docs(num_deleted_docs, prefix)
        self._wait_for_rebalance()

        # Verify that the docs got deleted and are no longer part of the
        # spatial view
        results = self.helper.get_results(design_name, num_docs)
        result_keys = self.helper.get_keys(results)
        self.assertEqual(len(result_keys), num_docs - len(deleted_keys))
        self.helper.verify_result(inserted_keys, deleted_keys + result_keys)


    def test_insert_x_docs_during_rebalance(self):
        num_docs = self.helper.input.param("num-docs", 100000)
        msg = "description : have a single node, insert {0} docs, "\
            "query it, add another node, start rebalancing, insert {0} "\
            "docs, finish rebalancing, keep on adding nodes..."
        self.log.info(msg.format(num_docs))
        design_name = "dev_test_insert_{0}_docs_during_rebalance".format(
            num_docs)
        prefix = str(uuid.uuid4())[:7]

        # Make sure we are fully de-clustered
        ClusterOperationHelper.cleanup_cluster(self.helper.servers)

        self.helper.create_index_fun(design_name)
        inserted_keys = self.helper.insert_docs(num_docs, prefix)

        # Add all servers to the master server one by one and start
        # rebalancing
        for server in self.helper.servers[1:]:
            ClusterOperationHelper.add_and_rebalance(
                [self.helper.master, server], False)
            # Docs with the same prefix are overwritten and not newly created
            prefix = str(uuid.uuid4())[:7]
            inserted_keys.extend(self.helper.insert_docs(
                    num_docs, prefix, wait_for_persistence=False))
            self._wait_for_rebalance()

        # Make sure data is persisted
        self.helper.wait_for_persistence()

        # Verify that all documents got inserted
        self.helper.query_index_for_verification(design_name, inserted_keys)


    # Block until the rebalance is done
    def _wait_for_rebalance(self):
        self.assertTrue(self.helper.rest.monitorRebalance(),
                        "rebalance operation failed after adding nodes")
        self.log.info("rebalance finished")
Example #25
class SpatialQueryTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs", 1000)
        self.log.info("description : Make limit queries on a simple " "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs", 1000)
        self.log.info(
            "description : Make skip (and limit) queries on a " "simple dataset with {0} docs".format(num_docs)
        )

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs", 1000)
        self.log.info("description : Make bounding box queries on a simple " "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._query_test_init(data_set)

    ###
    # load the data defined for this dataset.
    # create views and query the data as it loads.
    # verification is optional, and best practice is to
    # set to False if you plan on running _query_all_views()
    # later in the test case
    ###
    def _query_test_init(self, data_set, verify_results=True):
        views = data_set.views

        # start loading data
        t = Thread(target=data_set.load, name="load_data_set", args=())
        t.start()

        # run queries while loading data
        while t.is_alive():
            self._query_all_views(views, False)
            time.sleep(5)
        t.join()

        # results will be verified if verify_results set
        if verify_results:
            self._query_all_views(views, verify_results)
        else:
            self._check_view_integrity(views)

    ##
    # run all queries for all views in parallel
    ##
    def _query_all_views(self, views, verify_results=True):

        query_threads = []
        for view in views:
            t = RunQueriesThread(view, verify_results)
            query_threads.append(t)
            t.start()

        [t.join() for t in query_threads]

        self._check_view_integrity(query_threads)

    ##
    # If an error occurred loading or querying data for a view
    # it is queued and checked here. Fail on the first one that
    # occurs.
    ##
    def _check_view_integrity(self, thread_results):
        for result in thread_results:
            if result.test_results.errors:
                self.fail(result.test_results.errors[0][1])
            if result.test_results.failures:
                self.fail(result.test_results.failures[0][1])
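
_check_view_integrity only relies on each thread exposing a test_results attribute with unittest-style errors and failures lists. A minimal sketch of a compatible thread follows; the real RunQueriesThread is defined elsewhere in the suite, so treat this as an assumption about its shape, not its actual code:

import sys
import unittest
from threading import Thread

# Hypothetical sketch of a thread compatible with _check_view_integrity():
# it records problems in a unittest.TestResult instead of raising.
class RunQueriesThread(Thread):
    def __init__(self, view, verify_results):
        super(RunQueriesThread, self).__init__()
        self.view = view
        self.verify_results = verify_results
        self.test_results = unittest.TestResult()

    def run(self):
        try:
            self.view.run_queries(self.verify_results)  # assumed view API
        except AssertionError:
            self.test_results.failures.append((self, str(sys.exc_info()[1])))
        except Exception:
            self.test_results.errors.append((self, str(sys.exc_info()[1])))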
Example #26
class SpatialViewTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()


    def tearDown(self):
        self.helper.cleanup_cluster()


    def test_create_x_design_docs(self):
        num_design_docs = self.helper.input.param("num-design-docs", 5)
        self.log.info("description : create {0} spatial views without "
                      "running any spatial view query".format(num_design_docs))
        prefix = str(uuid.uuid4())

        fun = "function (doc) {emit(doc.geometry, doc);}"
        self._insert_x_design_docs(num_design_docs, prefix, fun)


    def test_update_x_design_docs(self):
        num_design_docs = self.helper.input.param("num-design-docs", 5)
        self.log.info("description : update {0} spatial views without "
                      "running any spatial view query".format(num_design_docs))
        prefix = str(uuid.uuid4())

        fun = "function (doc) {emit(doc.geometry, doc);}"
        self._insert_x_design_docs(num_design_docs, prefix, fun)

        # Update the design docs with a different function
        fun = "function (doc) {emit(doc.geometry, null);}"
        self._insert_x_design_docs(num_design_docs, prefix, fun)


    def _insert_x_design_docs(self, num_design_docs, prefix, fun):
        rest = self.helper.rest
        bucket = self.helper.bucket
        name = "dev_test_multiple_design_docs"

        for i in range(0, num_design_docs):
            design_name = "{0}-{1}-{2}".format(name, i, prefix)
            self.helper.create_index_fun(design_name, prefix, fun)

            # Verify that the function was really stored
            response, meta = rest.get_spatial(bucket, design_name)
            self.assertTrue(response)
            self.assertEquals(meta["id"],
                              "_design/{0}".format(design_name))
            self.assertEquals(
                response["spatial"][design_name].encode("ascii",
                                                                "ignore"),
                fun)


    def test_insert_x_docs(self):
        num_docs = self.helper.input.param("num-docs", 100)
        self.log.info("description : create a spatial view on {0} documents"\
                          .format(num_docs))
        design_name = "dev_test_insert_{0}_docs".format(num_docs)
        self._insert_x_docs_and_query(num_docs, design_name)


    # Verifies the full docs and not only the keys
    def test_insert_x_docs_full_verification(self):
        num_docs = self.helper.input.param("num-docs", 100)
        self.log.info("description : create a spatial view with {0} docs"
                      " and verify the full documents".format(num_docs))
        design_name = "dev_test_insert_{0}_docs_full_verification"\
            .format(num_docs)
        prefix = str(uuid.uuid4())[:7]

        self.helper.create_index_fun(design_name, prefix)
        inserted_docs = self.helper.insert_docs(num_docs, prefix,
                                                return_docs=True)
        self.helper.query_index_for_verification(design_name, inserted_docs,
                                                 full_docs=True)


    def test_insert_x_delete_y_docs(self):
        num_docs = self.helper.input.param("num-docs", 15000)
        num_deleted_docs = self.helper.input.param("num-deleted-docs", 10000)
        self.log.info("description : create spatial view with {0} docs "
                      " and delete {1} docs".format(num_docs,
                                                    num_deleted_docs))
        design_name = "dev_test_insert_{0}_delete_{1}_docs"\
            .format(num_docs, num_deleted_docs)
        prefix = str(uuid.uuid4())[:7]

        inserted_keys = self._setup_index(design_name, num_docs, prefix)

        # Delete documents and verify that the documents got deleted
        deleted_keys = self.helper.delete_docs(num_deleted_docs, prefix)
        num_expected = num_docs - len(deleted_keys)
        results = self.helper.get_results(design_name, 2 * num_docs,
                                          num_expected=num_expected)
        result_keys = self.helper.get_keys(results)
        self.assertEqual(len(result_keys), num_expected)
        self.helper.verify_result(inserted_keys, deleted_keys + result_keys)


    def test_insert_x_update_y_docs(self):
        num_docs = self.helper.input.param("num-docs", 15000)
        num_updated_docs = self.helper.input.param("num-updated-docs", 100)
        self.log.info("description : create spatial view with {0} docs "
                      " and update {1} docs".format(num_docs,
                                                    num_updated_docs))
        design_name = "dev_test_insert_{0}_delete_{1}_docs"\
            .format(num_docs, num_updated_docs)
        prefix = str(uuid.uuid4())[:7]

        self._setup_index(design_name, num_docs, prefix)

        # Update documents and verify that the documents got updated
        updated_keys = self.helper.insert_docs(num_updated_docs, prefix,
                                               dict(updated=True))
        results = self.helper.get_results(design_name, 2 * num_docs)
        result_updated_keys = self._get_updated_docs_keys(results)
        self.assertEqual(len(updated_keys), len(result_updated_keys))
        self.helper.verify_result(updated_keys, result_updated_keys)


    def test_get_spatial_during_x_min_load_y_working_set(self):
        num_docs = self.helper.input.param("num-docs", 10000)
        duration = self.helper.input.param("load-time", 1)
        self.log.info("description : this test will continuously insert data "
                      "and get the spatial view results for {0} minutes")
        design_name = "dev_test_insert_and_get_spatial_{0}_mins"\
            .format(duration)
        prefix = str(uuid.uuid4())[:7]

        self._query_x_mins_during_loading(num_docs, duration, design_name,
                                          prefix)

    def _query_x_mins_during_loading(self, num_docs, duration, design_name, prefix):
        self.helper.create_index_fun(design_name, prefix)

        load_thread = InsertDataTillStopped(self.helper, num_docs, prefix)
        load_thread.start()

        self._get_results_for_x_minutes(design_name, duration)

        load_thread.stop_insertion()
        load_thread.join()

        self.helper.query_index_for_verification(design_name,
                                                 load_thread.inserted())

    def test_get_spatial_during_x_min_load_y_working_set_multiple_design_docs(
        self):
        num_docs = self.helper.input.param("num-docs", 10000)
        num_design_docs = self.helper.input.param("num-design-docs", 10)
        duration = self.helper.input.param("load-time", 1)
        self.log.info("description : will create {0} docs per design doc and "
                      "{1} design docs that will be queried while the data "
                      "is loaded for {2} minutes"
                      .format(num_docs, num_design_docs, duration))
        name = "dev_test_spatial_test_{0}_docs_{1}_design_docs_{2}_mins_load"\
            .format(num_docs, num_design_docs, duration)

        view_test_threads = []
        for i in range(0, num_design_docs):
            prefix = str(uuid.uuid4())[:7]
            design_name = "{0}-{1}-{2}".format(name, i, prefix)
            thread_result = []
            t = Thread(
                target=SpatialViewTests._test_multiple_design_docs_thread_wrapper,
                name="Insert documents and query multiple design docs in parallel",
                args=(self, num_docs, duration, design_name, prefix,
                      thread_result))
            t.start()
            view_test_threads.append((t, thread_result))
        for (t, failures) in view_test_threads:
            t.join()
        for (t, failures) in view_test_threads:
            if len(failures) > 0:
                self.fail("view thread failed : {0}".format(failures[0]))

    def _test_multiple_design_docs_thread_wrapper(self, num_docs, duration,
                                                  design_name, prefix,
                                                  failures):
        try:
            self._query_x_mins_during_loading(num_docs, duration, design_name,
                                              prefix)
        except Exception as ex:
            failures.append(ex)


    def test_spatial_view_on_x_docs_y_design_docs(self):
        num_docs = self.helper.input.param("num-docs", 10000)
        num_design_docs = self.helper.input.param("num-design-docs", 21)
        self.log.info("description : will create {0} docs per design doc and "
                      "{1} design docs that will be queried")
        name = "dev_test_spatial_test_{0}_docs_y_design_docs"\
            .format(num_docs, num_design_docs)
        prefix = str(uuid.uuid4())[:7]

        design_names = ["{0}-{1}-{2}".format(name, i, prefix) \
                            for i in range(0, num_design_docs)]

        view_test_threads = []
        for design_name in design_names:
            thread_result = []
            t = Thread(
                target=SpatialViewTests._test_spatial_view_thread_wrapper,
                name="Insert documents and query in parallel",
                args=(self, num_docs, design_name, thread_result))
            t.start()
            view_test_threads.append((t, thread_result))
        for (t, failures) in view_test_threads:
            t.join()
        for (t, failures) in view_test_threads:
            if len(failures) > 0:
                self.fail("view thread failed : {0}".format(failures[0]))


    def _test_spatial_view_thread_wrapper(self, num_docs, design_name,
                                          failures):
        try:
            self._insert_x_docs_and_query(num_docs, design_name)
        except Exception as ex:
            failures.append(ex)


    # Create the index and insert documents including verification that
    # the index contains them
    # Returns the keys of the inserted documents
    def _setup_index(self, design_name, num_docs, prefix):
        self.helper.create_index_fun(design_name, prefix)
        inserted_keys = self.helper.insert_docs(num_docs, prefix)
        self.helper.query_index_for_verification(design_name, inserted_keys)

        return inserted_keys


    # Return the keys for all docs that contain a key called "updated"
    # in the value
    def _get_updated_docs_keys(self, results):
        keys = []
        if results:
            rows = results["rows"]
            for row in rows:
                if "updated" in row["value"]:
                    keys.append(row["id"].encode("ascii", "ignore"))
            self.log.info("{0} documents to updated".format(len(keys)))
        return keys
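
_get_updated_docs_keys assumes the standard view-result layout, roughly the following (a sketch with illustrative values only):

# Illustrative shape of the `results` dict parsed above (values are fake):
results = {
    "rows": [
        {"id": u"spatial-doc-0", "value": {"updated": True, "age": 42}},
        {"id": u"spatial-doc-1", "value": {"age": 17}},  # not updated
    ]
}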


    def _get_results_for_x_minutes(self, design_name, duration, delay=5):
        random.seed(0)
        start = time.time()
        while (time.time() - start) < duration * 60:
            limit = random.randint(1, 1000)
            self.log.info("{0} seconds has passed ....".format(
                    (time.time() - start)))
            results = self.helper.get_results(design_name, limit)
            keys = self.helper.get_keys(results)
            self.log.info("spatial view returned {0} rows".format(len(keys)))
            time.sleep(delay)


    def _insert_x_docs_and_query(self, num_docs, design_name):
        prefix = str(uuid.uuid4())[:7]

        inserted_keys = self._setup_index(design_name, num_docs, prefix)
        self.assertEqual(len(inserted_keys), num_docs)


    def test_x_docs_failover(self):
        num_docs = self.helper.input.param("num-docs", 10000)
        self.log.info("description : test failover with {0} documents"\
                          .format(num_docs))
        design_name = "dev_test_failover_{0}".format(num_docs)
        prefix = str(uuid.uuid4())[:7]
        inserted_keys = self._setup_index(design_name, num_docs, prefix)
        fh = FailoverHelper(self.helper.servers, self)
        failover_nodes = []
        try:
            failover_nodes = fh.failover(1)
            self.helper.query_index_for_verification(
                design_name, inserted_keys, wait_for_persistence=False)

            # The test cleanup expects all nodes running, hence spin the
            # full cluster up again
            fh.undo_failover(failover_nodes)
        finally:
            fh._start_servers(failover_nodes)


    def test_update_view_x_docs(self):
        num_docs = self.helper.input.param("num-docs", 100)
        self.log.info("description : create a spatial view on {0} documents "
                      "and update the view so that it returns only a subset"\
                          .format(num_docs))
        design_name = "dev_test_update_view_{0}_docs".format(num_docs)
        prefix = str(uuid.uuid4())[:7]

        # Create an index that emits all documents
        self.helper.create_index_fun(design_name, prefix)
        keys_b = self.helper.insert_docs(num_docs / 3, prefix + "bbb")
        keys_c = self.helper.insert_docs(num_docs - (num_docs / 3), prefix + "ccc")
        self.helper.query_index_for_verification(design_name, keys_b + keys_c)

        # Update index to only a subset of the documents
        self.helper.create_index_fun(design_name, prefix + "ccc")
        self.helper.query_index_for_verification(design_name, keys_c)


    def test_compare_views_all_nodes_x_docs(self):
        num_docs = self.helper.input.param("num-docs", 100)
        self.log.info("description : creates view on {0} documents, queries "
                      "all nodes (not only the master node) and compares "
                      "if the results are all the same"\
                          .format(num_docs))
        design_name = "dev_test_compare_views_{0}_docs".format(num_docs)
        prefix = str(uuid.uuid4())[:7]

        inserted_keys = self._setup_index(design_name, num_docs, prefix)

        nodes = self.helper.rest.get_nodes()
        params = {"connection_timeout": 60000, "full_set": True}

        # Query every single node and verify
        for n in nodes:
            n_rest = RestConnection({
                    "ip": n.ip,
                    "port": n.port,
                    "username": self.helper.master.rest_username,
                    "password": self.helper.master.rest_password})
            results = n_rest.spatial_results(self.helper.bucket, design_name,
                                             params, None)
            result_keys = self.helper.get_keys(results)
            self.helper.verify_result(inserted_keys, result_keys)
Example #27
    def setUp(self):
        self.helper = SpatialHelper(self, "default")
        super(SpatialViewTests, self).setUp()
        self.log = logger.Logger.get_logger()

        self.helper.setup_cluster()
Example #28
class SpatialQueryTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs", 1000)
        self.log.info("description : Make limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs", 1000)
        self.log.info("description : Make skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs", 1000)
        self.log.info("description : Make bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._query_test_init(data_set)

    ###
    # load the data defined for this dataset.
    # create views and query the data as it loads.
    # verification is optional, and best practice is to
    # set to False if you plan on running _query_all_views()
    # later in the test case
    ###
    def _query_test_init(self, data_set, verify_results = True):
        views = data_set.views

        # start loading data
        t = Thread(target=data_set.load,
                   name="load_data_set",
                   args=())
        t.start()

        # run queries while loading data
        while t.is_alive():
            self._query_all_views(views, False)
            time.sleep(5)
        t.join()

        # results will be verified if verify_results set
        if verify_results:
            self._query_all_views(views, verify_results)
        else:
            self._check_view_integrity(views)


    ##
    # run all queries for all views in parallel
    ##
    def _query_all_views(self, views, verify_results = True):

        query_threads = []
        for view in views:
            t = RunQueriesThread(view, verify_results)
            query_threads.append(t)
            t.start()

        [t.join() for t in query_threads]

        self._check_view_integrity(query_threads)

    ##
    # If an error occurred loading or querying data for a view
    # it is queued and checked here. Fail on the first one that
    # occurs.
    ##
    def _check_view_integrity(self, thread_results):
        for result in thread_results:
            if result.test_results.errors:
                self.fail(result.test_results.errors[0][1])
            if result.test_results.failures:
                self.fail(result.test_results.failures[0][1])
class SpatialRebalanceTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        # Setup, but don't rebalance cluster
        self.helper.setup_cluster(False)

    def tearDown(self):
        self.log.info("tear down test")
        self.helper.cleanup_cluster()

    def test_insert_x_delete_y_docs_create_cluster(self):
        num_docs = self.helper.input.param("num-docs", 100000)
        num_deleted_docs = self.helper.input.param("num-deleted-docs", 10000)
        msg = "description : have a single node, insert {0} docs, "\
            "delete {1} docs while creating a cluster and query it"
        self.log.info(msg.format(num_docs, num_deleted_docs))
        design_name = "dev_test_delete_10k_docs_create_cluster"
        prefix = str(uuid.uuid4())[:7]

        # Make sure we are fully de-clustered
        ClusterOperationHelper.remove_and_rebalance(self.helper.servers)

        self.helper.create_index_fun(design_name, prefix)
        inserted_keys = self.helper.insert_docs(num_docs, prefix)

        # Start creating the cluster and rebalancing it without waiting until
        # it's finished
        ClusterOperationHelper.add_and_rebalance(self.helper.servers, False)

        deleted_keys = self.helper.delete_docs(num_deleted_docs, prefix)
        self._wait_for_rebalance()

        # Verify that the docs got deleted and are no longer part of the
        # spatial view
        results = self.helper.get_results(design_name, num_docs)
        result_keys = self.helper.get_keys(results)
        self.assertEqual(len(result_keys), num_docs - len(deleted_keys))
        self.helper.verify_result(inserted_keys, deleted_keys + result_keys)

    def test_insert_x_delete_y_docs_destroy_cluster(self):
        num_docs = self.helper.input.param("num-docs", 100000)
        num_deleted_docs = self.helper.input.param("num-deleted-docs", 10000)
        msg = "description : have a cluster, insert {0} docs, delete "\
            "{1} docs while destroying the cluster into a single node "\
            "and query it"
        self.log.info(msg.format(num_docs, num_deleted_docs))
        design_name = "dev_test_delete_{0}_docs_destroy_cluster".format(
            num_deleted_docs)
        prefix = str(uuid.uuid4())[:7]

        # Make sure we are fully clustered
        ClusterOperationHelper.add_and_rebalance(self.helper.servers)

        self.helper.create_index_fun(design_name, prefix)
        inserted_keys = self.helper.insert_docs(num_docs, prefix)

        # Start destroying the cluster and rebalancing it without waiting
        # until it's finished
        ClusterOperationHelper.remove_and_rebalance(self.helper.servers, False)

        deleted_keys = self.helper.delete_docs(num_deleted_docs, prefix)
        self._wait_for_rebalance()

        # Verify that the docs got deleted and are no longer part of the
        # spatial view
        results = self.helper.get_results(design_name, num_docs)
        result_keys = self.helper.get_keys(results)
        self.assertEqual(len(result_keys), num_docs - len(deleted_keys))
        self.helper.verify_result(inserted_keys, deleted_keys + result_keys)

    def test_insert_x_docs_during_rebalance(self):
        num_docs = self.helper.input.param("num-docs", 100000)
        msg = "description : have a single node, insert {0} docs, "\
            "query it, add another node, start rebalancing, insert {0} "\
            "docs, finish rebalancing, keep on adding nodes..."
        self.log.info(msg.format(num_docs))
        design_name = "dev_test_insert_{0}_docs_during_rebalance".format(
            num_docs)
        prefix = str(uuid.uuid4())[:7]

        # Make sure we are fully de-clustered
        ClusterOperationHelper.remove_and_rebalance(self.helper.servers)

        self.helper.create_index_fun(design_name)
        inserted_keys = self.helper.insert_docs(num_docs, prefix)

        # Add all servers to the master server one by one and start
        # rebalancing
        for server in self.helper.servers[1:]:
            ClusterOperationHelper.add_and_rebalance(
                [self.helper.master, server], False)
            # Docs with the same prefix are overwritten and not newly created
            prefix = str(uuid.uuid4())[:7]
            inserted_keys.extend(
                self.helper.insert_docs(num_docs,
                                        prefix,
                                        wait_for_persistence=False))
            self._wait_for_rebalance()

        # Make sure data is persisted
        self.helper.wait_for_persistence()

        # Verify that all documents got inserted
        self.helper.query_index_for_verification(design_name, inserted_keys)

    # Block until the rebalance is done
    def _wait_for_rebalance(self):
        self.assertTrue(self.helper.rest.monitorRebalance(),
                        "rebalance operation failed after adding nodes")
        self.log.info("rebalance finished")
Example #30
class SpatialViewsTests(BaseTestCase):

    def setUp(self):
        super(SpatialViewsTests, self).setUp()
        self.skip_rebalance = self.input.param("skip_rebalance", False)
        self.use_dev_views = self.input.param("use-dev-views", False)
        self.default_map = "function (doc) {emit(doc.geometry, doc.age);}"
        self.default_ddoc_name = self.input.param("default_ddoc_name", "test-ddoc")
        self.default_view_name = self.input.param("default_view_name", "test-view")
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        if not self.skip_rebalance:
            self.cluster.rebalance(self.servers[:], self.servers[1:], [])
        #load some items to verify
        self.docs = self.helper.insert_docs(self.num_items, 'spatial-doc',
                                            wait_for_persistence=True,
                                            return_docs=True)

    def tearDown(self):
        super(SpatialViewsTests, self).tearDown()

    def test_add_spatial_views(self):
        num_ddoc = self.input.param('num-ddoc', 1)
        views_per_ddoc = self.input.param('views-per-ddoc', 1)
        non_spatial_views_per_ddoc = self.input.param('non-spatial-views-per-ddoc', 0)
        ddocs = self.make_ddocs(num_ddoc, views_per_ddoc, non_spatial_views_per_ddoc)
        self.create_ddocs(ddocs)

    def test_add_spatial_views_case_sensitive(self):
        ddoc = DesignDocument(self.default_ddoc_name, [], spatial_views=[
                                  View(self.default_view_name, self.default_map,
                                       dev_view=self.use_dev_views, is_spatial=True),
                                  View(self.default_view_name.upper(), self.default_map,
                                       dev_view=self.use_dev_views, is_spatial=True)])
        self.create_ddocs([ddoc])

    def make_ddocs(self, ddocs_num, views_per_ddoc, non_spatial_views_per_ddoc):
        ddocs = []
        for i in xrange(ddocs_num):
            views = []
            for k in xrange(views_per_ddoc):
                views.append(View(self.default_view_name + str(k), self.default_map,
                                  dev_view=self.use_dev_views, is_spatial=True))
            non_spatial_views = []
            if non_spatial_views_per_ddoc:
                for k in xrange(non_spatial_views_per_ddoc):
                    non_spatial_views.append(View(self.default_view_name + str(k),
                                                  'function (doc) { emit(null, doc);}',
                                                  dev_view=self.use_dev_views))
            ddocs.append(DesignDocument(self.default_ddoc_name + str(i), non_spatial_views, spatial_views=views))
        return ddocs

    def create_ddocs(self, ddocs):
        for ddoc in ddocs:
            if not (ddoc.views or ddoc.spatial_views):
                self.cluster.create_view(self.master, ddoc.name, [], bucket=self.bucket_name)
            for view in ddoc.views:
                self.cluster.create_view(self.master, ddoc.name, view, bucket=self.bucket_name)
            for view in ddoc.spatial_views:
                self.cluster.create_view(self.master, ddoc.name, view, bucket=self.bucket_name)
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        # Setup, but don't rebalance cluster
        self.helper.setup_cluster(False)
Example #32
class SpatialCompactionTests(BaseTestCase):
    def setUp(self):
        super(SpatialCompactionTests, self).setUp()
        self.start_cluster = self.input.param('start-cluster', len(self.servers))
        self.servers_in = self.input.param('servers_in', 0)
        self.servers_out = self.input.param('servers_out', 0)
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        try:
            if self.start_cluster > 1:
                rebalance = self.cluster.async_rebalance(self.servers[:1],
                                                         self.servers[1:self.start_cluster], [])
                rebalance.result()
        except Exception:
            super(SpatialCompactionTests, self).tearDown()

    def tearDown(self):
        super(SpatialCompactionTests, self).tearDown()


    def test_spatial_compaction(self):
        self.log.info(
            "description : test manual compaction for spatial indexes")
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_compaction"

        self.helper.create_index_fun(design_name, prefix)

        # Insert (resp. update, as they have the same prefix) and query
        # the spatial index several times so that the compaction makes sense
        for i in range(0, 8):
            self.helper.insert_docs(2000, prefix)
            self.helper.get_results(design_name)

        # Get the index size prior to compaction
        status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]

        if self.servers_in or self.servers_out:
            servs_in = servs_out = []
            if self.servers_in:
                servs_in = self.servers[self.start_cluster:self.servers_in + 1]
            if self.servers_out:
                servs_out = self.servers[-self.servers_out:]
            rebalance = self.cluster.async_rebalance(self.servers, servs_in, servs_out)

        # Do the compaction
        self.helper.compact(design_name)

        # Check if the index size got smaller
        status, info = self.helper.info(design_name)
        self.assertTrue(info["spatial_index"]["disk_size"] < disk_size,
                        "The file size ({0}) isn't smaller than the "
                        "pre compaction size ({1})."
                        .format(info["spatial_index"]["disk_size"],
                                disk_size))
        if self.servers_in or self.servers_out:
            rebalance.result()
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        # Setup, but don't rebalance cluster
        self.helper.setup_cluster(False)
Example #34
class SpatialViewsTests(BaseTestCase):
    def setUp(self):
        super(SpatialViewsTests, self).setUp()
        self.thread_crashed = Event()
        self.thread_stopped = Event()
        self.skip_rebalance = self.input.param("skip_rebalance", False)
        self.use_dev_views = self.input.param("use-dev-views", False)
        self.default_map = "function (doc) {emit(doc.geometry, doc.age);}"
        self.map_updated = "function (doc) {emit(doc.geometry, doc.name);}"
        self.default_ddoc_name = self.input.param("default_ddoc_name",
                                                  "test-ddoc")
        self.default_view_name = self.input.param("default_view_name",
                                                  "test-view")
        self.ddoc_op = self.input.param("ddoc-ops",
                                        "create")  #create\update\delete
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        if not self.skip_rebalance:
            self.cluster.rebalance(self.servers[:], self.servers[1:], [])
        #load some items to verify
        self.docs = self.helper.insert_docs(self.num_items,
                                            'spatial-doc',
                                            return_docs=True)
        self.num_ddoc = self.input.param('num-ddoc', 1)
        self.views_per_ddoc = self.input.param('views-per-ddoc', 1)
        self.non_spatial_views_per_ddoc = self.input.param(
            'non-spatial-views-per-ddoc', 0)
        if self.ddoc_op == 'update' or self.ddoc_op == 'delete':
            ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc,
                                    self.non_spatial_views_per_ddoc)
            self.create_ddocs(ddocs)

    def suite_setUp(self):
        pass

    def tearDown(self):
        super(SpatialViewsTests, self).tearDown()

    def suite_tearDown(self):
        pass

    def test_add_spatial_views(self):
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc,
                                self.non_spatial_views_per_ddoc)
        self.perform_ddoc_ops(ddocs)

    def test_add_spatial_views_case_sensitive(self):
        ddoc = DesignDocument(self.default_ddoc_name, [],
                              spatial_views=[
                                  View(self.default_view_name,
                                       self.default_map,
                                       dev_view=self.use_dev_views,
                                       is_spatial=True),
                                  View(self.default_view_name.upper(),
                                       self.default_map,
                                       dev_view=self.use_dev_views,
                                       is_spatial=True)
                              ])
        self.create_ddocs([ddoc])

    def test_add_single_spatial_view(self):
        name_length = self.input.param('name_length', None)
        view_name = self.input.param('view_name', self.default_view_name)
        if name_length:
            view_name = ''.join(
                random.choice(string.ascii_lowercase)
                for x in range(name_length))
        not_compilable = self.input.param('not_compilable', False)
        error = self.input.param('error', None)
        map_fn = (
            self.default_map,
            'function (doc) {emit(doc.geometry, doc.age);')[not_compilable]

        ddoc = DesignDocument(self.default_ddoc_name, [],
                              spatial_views=[
                                  View(view_name,
                                       map_fn,
                                       dev_view=self.use_dev_views,
                                       is_spatial=True)
                              ])
        try:
            self.create_ddocs([ddoc])
        except Exception as ex:
            if error and str(ex).find(error) != -1:
                self.log.info("Error caught as expected %s" % error)
                return
            else:
                self.fail("Unexpected error appeared during run %s" % ex)
        if error:
            self.fail("Expected error '%s' didn't appear" % error)

    def test_add_views_to_1_ddoc(self):
        same_names = self.input.param('same-name', False)
        error = self.input.param('error', None)
        num_views_per_ddoc = 10
        create_threads = []
        try:
            for i in range(num_views_per_ddoc):
                ddoc = DesignDocument(self.default_ddoc_name, [],
                                      spatial_views=[
                                          View(self.default_view_name +
                                               (str(i), "")[same_names],
                                               self.default_map,
                                               dev_view=self.use_dev_views,
                                               is_spatial=True)
                                      ])
                create_thread = Thread(target=self.create_ddocs,
                                       name="create_thread" + str(i),
                                       args=([
                                           ddoc,
                                       ], ))
                create_threads.append(create_thread)
                create_thread.start()
            for create_thread in create_threads:
                create_thread.join()
        except Exception as ex:
            if error and str(ex).find(error) != -1:
                self.log.info("Error caught as expected %s" % error)
                return
            else:
                self.fail("Unexpected error appeared during run %s" % ex)
        if error:
            self.fail("Expected error '%s' didn't appear" % error)

    def test_add_spatial_views_threads(self):
        same_names = self.input.param('same-name', False)
        num_views_per_ddoc = 10
        create_threads = []
        ddocs = []
        for i in range(num_views_per_ddoc):
            ddoc = DesignDocument(self.default_ddoc_name + str(i), [],
                                  spatial_views=[
                                      View(self.default_view_name +
                                           (str(i), "")[same_names],
                                           self.default_map,
                                           dev_view=self.use_dev_views,
                                           is_spatial=True)
                                  ])
            ddocs.append(ddoc)
        if self.ddoc_op == 'update' or self.ddoc_op == 'delete':
            self.create_ddocs(ddocs)
        i = 0
        for ddoc in ddocs:
            create_thread = Thread(target=self.perform_ddoc_ops,
                                   name="ops_thread" + str(i),
                                   args=([
                                       ddoc,
                                   ], ))
            i += 1
            create_threads.append(create_thread)
            create_thread.start()
        for create_thread in create_threads:
            create_thread.join()
        if self.thread_crashed.is_set():
            self.fail("Error occured during run")

    def test_create_with_other_ddoc_ops(self):
        operation = self.input.param('operation', 'create')
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0)
        other_ddocs = self.make_ddocs(self.num_ddoc, 0, self.views_per_ddoc)
        if operation == 'delete' or operation == 'update':
            self.create_ddocs(other_ddocs)
        other_ddoc_threads = []
        for ddoc in other_ddocs:
            if operation == 'create' or operation == 'update':
                other_ddoc_thread = Thread(target=self.create_ddocs,
                                           name="other_doc_thread",
                                           args=(other_ddocs, ))
            else:
                other_ddoc_thread = Thread(target=self.delete_views,
                                           name="other_doc_thread",
                                           args=(other_ddocs, ))
            other_ddoc_threads.append(other_ddoc_thread)
            other_ddoc_thread.start()
        self.perform_ddoc_ops(ddocs)
        for thread in other_ddoc_threads:
            thread.join()

    def test_create_views_during_rebalance(self):
        start_cluster = self.input.param('start-cluster', 1)
        servers_in = self.input.param('servers_in', 0)
        servers_out = self.input.param('servers_out', 0)
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc,
                                self.non_spatial_views_per_ddoc)
        if start_cluster > 1:
            rebalance = self.cluster.async_rebalance(
                self.servers[:1], self.servers[1:start_cluster], [])
            rebalance.result()
        servs_in = []
        servs_out = []
        if servers_in:
            servs_in = self.servers[start_cluster:servers_in + 1]
        if servers_out:
            if start_cluster > 1:
                servs_out = self.servers[1:start_cluster]
                servs_out = servs_out[-servers_out:]
            else:
                servs_out = self.servers[-servers_out:]
        rebalance_thread = Thread(target=self.cluster.rebalance,
                                  name="reb_thread",
                                  args=(self.servers[:1], servs_in, servs_out))
        rebalance_thread.start()
        self.perform_ddoc_ops(ddocs)
        rebalance_thread.join()

    def test_views_node_pending_state(self):
        operation = self.input.param('operation', 'add_node')
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0)
        rest = RestConnection(self.master)
        if operation == 'add_node':
            self.log.info("adding the node %s:%s" %
                          (self.servers[1].ip, self.servers[1].port))
            otpNode = rest.add_node(self.master.rest_username,
                                    self.master.rest_password,
                                    self.servers[1].ip, self.servers[1].port)
        elif operation == 'failover':
            nodes = rest.node_statuses()
            nodes = [
                node for node in nodes
                if node.ip != self.master.ip or node.port != self.master.port
            ]
            rest.fail_over(nodes[0].id)
        else:
            self.fail("There is no operation %s" % operation)
        self.perform_ddoc_ops(ddocs)

    def test_views_failover(self):
        num_nodes = self.input.param('num-nodes', 1)
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0)
        RebalanceHelper.wait_for_persistence(self.master, self.bucket_name)
        self.cluster.failover(self.servers, self.servers[1:num_nodes])
        self.cluster.rebalance(self.servers, [], self.servers[1:num_nodes])
        self.perform_ddoc_ops(ddocs)

    def test_views_with_warm_up(self):
        warmup_node = self.servers[-1]
        shell = RemoteMachineShellConnection(warmup_node)
        shell.stop_couchbase()
        time.sleep(20)
        shell.start_couchbase()
        shell.disconnect()
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0)
        self.perform_ddoc_ops(ddocs)

    def test_views_during_index(self):
        ddocs = self.make_ddocs(1, 1, 1)
        self.create_ddocs(ddocs)
        #run query stale=false to start index
        rest = RestConnection(self.master)
        for ddoc in ddocs:
            for view in ddoc.spatial_views:
                self.helper.query_view(rest,
                                       ddoc,
                                       view,
                                       bucket=self.bucket_name,
                                       extra_params={})
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 1)
        self.perform_ddoc_ops(ddocs)

    def test_views_during_ddoc_compaction(self):
        fragmentation_value = self.input.param("fragmentation_value", 80)
        ddoc_to_compact = DesignDocument(
            "ddoc_to_compact", [],
            spatial_views=[
                View(self.default_view_name,
                     'function (doc) { emit(doc.age, doc.name);}',
                     dev_view=self.use_dev_views)
            ])
        ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0)
        self.disable_compaction()
        self.create_ddocs([
            ddoc_to_compact,
        ])
        fragmentation_monitor = self.cluster.async_monitor_view_fragmentation(
            self.master, ddoc_to_compact.name, fragmentation_value,
            self.default_bucket_name)
        end_time = time.time() + self.wait_timeout * 30
        while (fragmentation_monitor.state != "FINISHED"
               and end_time > time.time()):
            self.helper.insert_docs(self.num_items, 'spatial-doc')

        if (end_time < time.time()
                and fragmentation_monitor.state != "FINISHED"):
            self.fail("impossible to reach compaction value after %s sec" %
                      (self.wait_timeout * 30))
        fragmentation_monitor.result()
        compaction_task = self.cluster.async_compact_view(
            self.master, ddoc_to_compact.name, self.default_bucket_name)
        self.perform_ddoc_ops(ddocs)
        result = compaction_task.result(self.wait_timeout * 10)
        self.assertTrue(
            result, "Compaction didn't finish correctly. Please check diags")

    def make_ddocs(self, ddocs_num, views_per_ddoc,
                   non_spatial_views_per_ddoc):
        ddocs = []
        for i in range(ddocs_num):
            views = []
            for k in range(views_per_ddoc):
                views.append(
                    View(self.default_view_name + str(k),
                         self.default_map,
                         dev_view=self.use_dev_views,
                         is_spatial=True))
            non_spatial_views = []
            if non_spatial_views_per_ddoc:
                for k in range(non_spatial_views_per_ddoc):
                    non_spatial_views.append(
                        View(self.default_view_name + str(k),
                             'function (doc) { emit(null, doc);}',
                             dev_view=self.use_dev_views))
            ddocs.append(
                DesignDocument(self.default_ddoc_name + str(i),
                               non_spatial_views,
                               spatial_views=views))
        return ddocs

    def create_ddocs(self, ddocs, bucket=None):
        bucket_views = bucket or self.buckets[0]
        for ddoc in ddocs:
            if not (ddoc.views or ddoc.spatial_views):
                self.cluster.create_view(self.master,
                                         ddoc.name, [],
                                         bucket=bucket_views)
            for view in ddoc.views:
                self.cluster.create_view(self.master,
                                         ddoc.name,
                                         view,
                                         bucket=bucket_views)
            for view in ddoc.spatial_views:
                self.cluster.create_view(self.master,
                                         ddoc.name,
                                         view,
                                         bucket=bucket_views)

    def delete_views(self, ddocs, views=[], spatial_views=[], bucket=None):
        bucket_views = bucket or self.buckets[0]
        for ddoc in ddocs:
            vs = views or ddoc.views
            sp_vs = spatial_views or ddoc.spatial_views
            for view in vs:
                self.cluster.delete_view(self.master,
                                         ddoc.name,
                                         view,
                                         bucket=bucket_views)
            for view in sp_vs:
                self.cluster.delete_view(self.master,
                                         ddoc.name,
                                         view,
                                         bucket=bucket_views)

    def perform_ddoc_ops(self, ddocs):
        try:
            if self.ddoc_op == 'update':
                for ddoc in ddocs:
                    for view in ddoc.spatial_views:
                        view.map_func = self.map_updated
            if self.ddoc_op == 'delete':
                self.delete_views(ddocs)
            else:
                self.create_ddocs(ddocs)
        except Exception as ex:
            self.thread_crashed.set()
            self.log.error(
                "****ERROR***** \n At least one of the threads crashed: %s" %
                (ex))
            raise
        finally:
            if not self.thread_stopped.is_set():
                self.thread_stopped.set()
class SpatialQueryErrorsTests(BaseTestCase):
    def setUp(self):
        try:
            if 'first_case' not in TestInputSingleton.input.test_params:
                TestInputSingleton.input.test_params['default_bucket'] = False
                TestInputSingleton.input.test_params['skip_cleanup'] = True
                TestInputSingleton.input.test_params['skip_buckets_handle'] = True
            self.default_bucket_name = 'default'
            super(SpatialQueryErrorsTests, self).setUp()
            if 'first_case' in TestInputSingleton.input.test_params:
                self.cluster.rebalance(self.servers[:], self.servers[1:], [])
            # We use only one bucket in this test suite
            self.rest = RestConnection(self.master)
            self.bucket = self.rest.get_bucket(Bucket(name=self.default_bucket_name))
            # num_docs must be a multiple of the number of vbuckets
            self.num_docs = self.input.param("num_docs", 2000)
            # `testname` is used for the design document name as well as the
            # spatial function name
            self.testname = 'query-errors'
            self.helper = SpatialHelper(self, "default")
            if 'first_case' in TestInputSingleton.input.test_params:
                self.create_ddoc()
                self.helper.insert_docs(self.num_docs, self.testname)
        except Exception as ex:
            self.input.test_params["stop-on-failure"] = True
            self.log.error("SETUP WAS FAILED. ALL TESTS WILL BE SKIPPED")
            self.fail(ex)

    def tearDown(self):
        # clean-up is only performed on the last run
        if 'last_case' in TestInputSingleton.input.test_params:
            TestInputSingleton.input.test_params['skip_cleanup'] = False
            TestInputSingleton.input.test_params['skip_buckets_handle'] = False
            super(SpatialQueryErrorsTests, self).tearDown()
        else:
            self.cluster.shutdown(force=True)
            self._log_finish(self)

    def test_query_errors(self):
        all_params = ['skip', 'limit', 'stale', 'bbox', 'start_range',
                      'end_range']
        query_params = {}
        for key in self.input.test_params:
            if key in all_params:
                query_params[key] = str(self.input.test_params[key])

        try:
            self.spatial_query(query_params)
        except QueryViewException as ex:
            self.assertEqual(self.input.test_params['error'],
                             json.loads(ex.reason)['error'])
        else:
            self.fail("Query did not fail, but should have. "
                      "Query parameters were: {0}".format(query_params))

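    # The spatial function emits a multidimensional key (age, height and a
    # [bloom, shed_leaves] range), giving the error tests something to
    # query with range and bbox parameters.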
    def create_ddoc(self):
        view_fn = '''function (doc) {
    if (doc.age !== undefined || doc.height !== undefined ||
            doc.bloom !== undefined || doc.shed_leaves !== undefined) {
        emit([doc.age, doc.height, [doc.bloom, doc.shed_leaves]], doc.name);
    }}'''
        self.helper.create_index_fun(self.testname, view_fn)

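    # Run a spatial query against the view; 'stale=false' is forced by
    # default so the index is updated before results are returned.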
    def spatial_query(self, params=None, ddoc='test'):
        # use None instead of a mutable default argument: the dict is
        # modified below, so a shared default would leak state between calls
        params = params if params is not None else {}
        bucket = self.default_bucket_name
        if 'stale' not in params:
            params['stale'] = 'false'
        return self.rest.query_view(self.testname, self.testname, bucket,
                                    params, type="spatial")
Example #36
class SpatialViewQueriesTests(BaseTestCase):
    def setUp(self):
        super(SpatialViewQueriesTests, self).setUp()
        self.thread_crashed = Event()
        self.thread_stopped = Event()
        self.skip_rebalance = self.input.param("skip_rebalance", False)
        self.use_dev_views = self.input.param("use-dev-views", False)
        self.all_view_one_ddoc = self.input.param("all-view-one-ddoc", False)
        self.default_ddoc_name = "test-ddoc-query"
        self.default_view_name = "test-view-query"
        self.params = self.get_query_params()
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        # the helper needs the bucket name, so create it only after the
        # bucket name has been determined above
        self.helper = SpatialHelper(self, self.bucket_name)

        if not self.skip_rebalance:
            self.cluster.rebalance(self.servers[:], self.servers[1:], [])
        #load some items to verify
        self.docs = self.helper.insert_docs(self.num_items,
                                            'spatial-doc',
                                            return_docs=True)
        self.ddocs = self.helper.create_default_views(
            is_one_ddoc=self.all_view_one_ddoc)

    def suite_setUp(self):
        pass

    def tearDown(self):
        super(SpatialViewQueriesTests, self).tearDown()

    def suite_tearDown(self):
        pass

    def test_spatial_view_queries(self):
        error = self.input.param('error', None)
        try:
            self.query_and_verify_result(self.docs, self.params)
        except Exception as ex:
            if error and str(ex).find(error) != -1:
                self.log.info("Error caught as expected %s" % error)
                return
            else:
                self.fail("Unexpected error appeared during run %s" % ex)
        if error:
            self.fail("Expected error '%s' didn't appear" % error)

    def test_add_spatial_view_queries_threads(self):
        diff_nodes = self.input.param("diff-nodes", False)
        query_threads = []
        for i in range(len(self.servers)):
            node = self.servers[i] if diff_nodes else self.master
            self.query_and_verify_result(self.docs, self.params, node=node)
            q_thread = Thread(target=self.query_and_verify_result,
                              name="query_thread" + str(i),
                              args=(self.docs, self.params, node))
            query_threads.append(q_thread)
            q_thread.start()
        for q_thread in query_threads:
            q_thread.join()
        if self.thread_crashed.is_set():
            self.fail("Error occured during run")

    def test_view_queries_during_rebalance(self):
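        # optionally grow the cluster to 'start-cluster' nodes first, then
        # rebalance nodes in/out while the spatial queries are running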
        start_cluster = self.input.param('start-cluster', 1)
        servers_in = self.input.param('servers_in', 0)
        servers_out = self.input.param('servers_out', 0)
        if start_cluster > 1:
            rebalance = self.cluster.async_rebalance(
                self.servers[:1], self.servers[1:start_cluster], [])
            rebalance.result()
        servs_in = []
        servs_out = []
        if servers_in:
            servs_in = self.servers[start_cluster:servers_in + 1]
        if servers_out:
            if start_cluster > 1:
                servs_out = self.servers[1:start_cluster]
                servs_out = servs_out[-servers_out:]
            else:
                servs_out = self.servers[-servers_out:]
        rebalance = self.cluster.async_rebalance(self.servers, servs_in,
                                                 servs_out)
        self.query_and_verify_result(self.docs, self.params)
        rebalance.result()

    def test_view_queries_node_pending_state(self):
        operation = self.input.param('operation', 'add_node')
        rest = RestConnection(self.master)
        if operation == 'add_node':
            self.log.info("adding the node %s:%s" %
                          (self.servers[1].ip, self.servers[1].port))
            otpNode = rest.add_node(self.master.rest_username,
                                    self.master.rest_password,
                                    self.servers[1].ip, self.servers[1].port)
        elif operation == 'failover':
            nodes = rest.node_statuses()
            nodes = [
                node for node in nodes
                if node.ip != self.master.ip or node.port != self.master.port
            ]
            rest.fail_over(nodes[0].id)
        else:
            self.fail("There is no operation %s" % operation)
        self.query_and_verify_result(self.docs, self.params)

    def test_view_queries_failover(self):
        num_nodes = self.input.param('num-nodes', 1)
        self.cluster.failover(self.servers, self.servers[1:num_nodes])
        self.cluster.rebalance(self.servers, [], self.servers[1:num_nodes])
        self.query_and_verify_result(self.docs, self.params)

    def test_views_with_warm_up(self):
        warmup_node = self.servers[-1]
        shell = RemoteMachineShellConnection(warmup_node)
        shell.stop_couchbase()
        time.sleep(20)
        shell.start_couchbase()
        shell.disconnect()
        self.query_and_verify_result(self.docs, self.params)

    def test_view_queries_during_ddoc_compaction(self):
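        # disable auto-compaction, push fragmentation above the threshold by
        # re-inserting docs, then query while a manual compaction runs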
        fragmentation_value = self.input.param("fragmentation_value", 80)
        self.disable_compaction()
        fragmentation_monitor = self.cluster.async_monitor_view_fragmentation(
            self.master, self.ddocs[0].name, fragmentation_value,
            self.default_bucket_name)
        end_time = time.time() + self.wait_timeout * 30
        while (fragmentation_monitor.state != "FINISHED"
               and end_time > time.time()):
            self.docs = self.helper.insert_docs(self.num_items,
                                                'spatial-doc',
                                                return_docs=True)

        if (end_time < time.time()
                and fragmentation_monitor.state != "FINISHED"):
            self.fail("impossible to reach the compaction value after %s sec" %
                      (self.wait_timeout * 30))
        fragmentation_monitor.result()
        compaction_task = self.cluster.async_compact_view(
            self.master, self.ddocs[0].name, self.default_bucket_name)
        self.query_and_verify_result(self.docs, self.params)
        result = compaction_task.result(self.wait_timeout * 10)
        self.assertTrue(
            result, "Compaction didn't finish correctly. Please check diags")

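    # Collect the spatial query parameters from the test input: 'skip' and
    # 'limit' are cast to int, 'bbox' is parsed from its bracketed string
    # form, and 'stale' is passed through unchanged.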
    def get_query_params(self):
        current_params = {}
        for key in self.input.test_params:
            if key == 'skip' or key == 'limit':
                current_params[key] = int(self.input.test_params[key])
            elif key == 'bbox':
                current_params[key] = [
                    int(x)
                    for x in self.input.test_params[key][1:-1].split(",")
                ]
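                # e.g. (hypothetical value) "[0,0,100,100]" -> [0, 0, 100, 100]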
            elif key == 'stale':
                current_params[key] = self.input.test_params[key]
        return current_params

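    # Query every spatial view of every design doc and compare the returned
    # keys against the docs that are expected to match the query parameters.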
    def query_and_verify_result(self, doc_inserted, params, node=None):
        try:
            rest = RestConnection(self.master)
            if node:
                rest = RestConnection(node)
            expected_ddocs = self.helper.generate_matching_docs(
                doc_inserted, params)
            for ddoc in self.ddocs:
                for view in ddoc.spatial_views:
                    result_ddocs = self.helper.query_view(
                        rest,
                        ddoc,
                        view,
                        bucket=self.bucket_name,
                        extra_params=params,
                        num_expected=len(expected_ddocs),
                        num_tries=20)
                    self.helper.verify_matching_keys(expected_ddocs,
                                                     result_ddocs)
        except Exception as ex:
            self.thread_crashed.set()
            self.log.error(
                "****ERROR***** \n At least one of the threads crashed: %s" %
                (ex))
            raise ex
        finally:
            if not self.thread_stopped.is_set():
                self.thread_stopped.set()
Example #37
class SpatialQueryTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()
        self.cluster = Cluster()
        self.servers = self.helper.servers

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Make skip (and limit) queries on a "
            "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make range queries with limits on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._query_test_init(data_set)

## Rebalance In

    def test_rebalance_in_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Rebalance In and limit queries on a simple "
            "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Rebalance In and skip (and limit) queries on a "
            "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Rebalance In and bounding box queries on a simple "
            "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Rebalance In and range queries on a simple "
            "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Rebalance In and limit queries on a multidimensional "
            "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Rebalance In and skip (and limit) queries on a "
            "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Rebalance In and range queries with limits on a "
            "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._rebalance_cluster(data_set)

# Rebalance Out

    def test_rebalance_out_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Rebalance Out and limit queries on a simple "
            "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Rebalance Out and skip (and limit) queries on a "
            "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Rebalance Out and bounding box queries on a simple "
            "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Rebalance Out and range queries on a simple "
            "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Rebalance Out and limit queries on a multidimensional "
            "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Rebalance Out and skip (and limit) queries on a "
            "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Rebalance Out and range queries with limits on a "
            "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._rebalance_cluster(data_set)

# Warmup Tests

    def test_warmup_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Warmup with limit queries on a simple "
            "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Warmup with skip (and limit) queries on a "
            "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Warmup with bounding box queries on a simple "
            "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Warmup with limit queries on a multidimensional "
            "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Warmup with skip (and limit) queries on a "
            "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Warmup with range queries with limits on a "
            "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._query_test_init_integration(data_set)

# Reboot Tests

    def test_reboot_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot and limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Reboot with skip (and limit) queries on a "
            "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Reboot with bounding box queries on a simple "
            "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Reboot with limit queries on a multidimensional "
            "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Reboot with skip (and limit) queries on a "
            "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Reboot with range queries with limits on a "
            "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._query_test_init_integration(data_set)


# Failover Tests

    def test_failover_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Failover and limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._failover_cluster(data_set)

    def test_failover_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Failover and skip (and limit) queries on a "
            "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._failover_cluster(data_set)

    def test_failover_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Failover and bounding box queries on a simple "
            "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._failover_cluster(data_set)

    def test_failover_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Failover and range queries on a simple "
            "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Failover and limit queries on a multidimensional "
            "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Failover and skip (and limit) queries on a "
            "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info(
            "description : Failover and range queries with limits on a "
            "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._failover_cluster(data_set)

    ###
    # load the data defined for this dataset.
    # create views and query the data as it loads.
    # verification is optional, and best practice is to
    # set to False if you plan on running _query_all_views()
    # later in the test case
    ###
    def _query_test_init(self, data_set, verify_results=True):
        views = data_set.views

        # start loading data
        t = Thread(target=data_set.load, name="load_data_set", args=())
        t.start()

        # run queries while loading data
        while (t.is_alive()):
            self._query_all_views(views, False)
            time.sleep(5)
        t.join()

        # results will be verified if verify_results set
        if verify_results:
            self._query_all_views(views, verify_results)
        else:
            self._check_view_integrity(views)

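    # Integration variant: load the data first, disrupt the cluster
    # (reboot / warmup / rebalance, depending on the test parameters) and
    # then query all views while further rebalances are triggered.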
    def _query_test_init_integration(self, data_set, verify_results=True):
        views = data_set.views
        data_set.load()

        # disrupt the cluster after loading, depending on the test parameters
        if self.helper.num_nodes_reboot >= 1:
            self._reboot_cluster(data_set)
        elif self.helper.num_nodes_warmup >= 1:
            self._warmup_cluster(data_set)
        elif self.helper.num_nodes_to_add >= 1 or self.helper.num_nodes_to_remove >= 1:
            self._rebalance_cluster(data_set)

        # pass the callable and its arguments to Thread separately; calling
        # self._query_all_views(views, False) in the constructor would run
        # the queries inline instead of on the thread
        t = Thread(target=self._query_all_views, args=(views, False))
        t.start()
        # keep rebalancing while the queries run
        while t.is_alive():
            self._rebalance_cluster(data_set)
            time.sleep(5)
        t.join()

        # results will be verified if verify_results is set
        if verify_results:
            self._query_all_views(views, verify_results)
        else:
            self._check_view_integrity(views)

    ##
    # run all queries for all views in parallel
    ##
    def _query_all_views(self, views, verify_results=True):
        query_threads = []
        for view in views:
            t = RunQueriesThread(view, verify_results)
            query_threads.append(t)
            t.start()

        [t.join() for t in query_threads]

        self._check_view_integrity(query_threads)

    ##
    # If an error occurred while loading or querying data for a view
    # it is queued and checked here. Fail on the first one that
    # occurs.
    ##
    def _check_view_integrity(self, thread_results):
        for result in thread_results:
            if result.test_results.errors:
                self.fail(result.test_results.errors[0][1])
            if result.test_results.failures:
                self.fail(result.test_results.failures[0][1])

    ###
    # Rebalance
    ###
    def _rebalance_cluster(self, data_set):
        if self.helper.num_nodes_to_add >= 1:
            rebalance = self.cluster.async_rebalance(
                self.servers[:1],
                self.servers[1:self.helper.num_nodes_to_add + 1], [])
            self._query_test_init(data_set)
            rebalance.result()

        elif self.helper.num_nodes_to_remove >= 1:
            rebalance = self.cluster.async_rebalance(
                self.servers[:1], [],
                self.servers[1:self.helper.num_nodes_to_remove + 1])
            self._query_test_init(data_set)
            rebalance.result()

    def _failover_cluster(self, data_set):
        failover_nodes = self.servers[1:self.helper.failover_factor + 1]
        try:
            # failover and verify loaded data
            self.cluster.failover(self.servers, failover_nodes)
            self.log.info(
                "120 seconds sleep after failover before invoking rebalance..."
            )
            time.sleep(120)
            rebalance = self.cluster.async_rebalance(self.servers, [],
                                                     failover_nodes)

            self._query_test_init(data_set)

            msg = "rebalance failed while removing failover nodes {0}".format(
                failover_nodes)
            self.assertTrue(rebalance.result(), msg=msg)

            #verify queries after failover
            self._query_test_init(data_set)
        finally:
            self.log.info(
                "Completed the failover testing for spatial querying")

    ###
    # Warmup
    ###
    def _warmup_cluster(self, data_set):
        for server in self.servers[0:self.helper.num_nodes_warmup]:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            remote.start_server()
            remote.disconnect()
            self.log.info("Node {0} should be warming up ".format(server.ip))
            time.sleep(120)
        self._query_test_init(data_set)

    # REBOOT
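    # Reboot the first num_nodes_reboot nodes. On Linux the firewall rules
    # are flushed after the reboot so the node is reachable again.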
    def _reboot_cluster(self, data_set):
        try:
            for server in self.servers[0:self.helper.num_nodes_reboot]:
                shell = RemoteMachineShellConnection(server)
                if shell.extract_remote_info().type.lower() == 'windows':
                    o, r = shell.execute_command("shutdown -r -f -t 0")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(
                        server.ip))
                elif shell.extract_remote_info().type.lower() == 'linux':
                    o, r = shell.execute_command("reboot")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(
                        server.ip))

                    time.sleep(120)
                    shell = RemoteMachineShellConnection(server)
                    command = "/sbin/iptables -F"
                    o, r = shell.execute_command(command)
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} backup".format(server.ip))
        finally:
            self.log.info("Warming-up server ..".format(server.ip))
            time.sleep(100)
Example #38
class SpatialViewTests(BaseTestCase):
    def setUp(self):
        self.helper = SpatialHelper(self, "default")
        super(SpatialViewTests, self).setUp()
        self.log = logger.Logger.get_logger()

        self.helper.setup_cluster()

    def suite_setUp(self):
        pass

    def tearDown(self):
        super(SpatialViewTests, self).tearDown()

    def suite_tearDown(self):
        pass

    def test_create_x_design_docs(self):
        num_design_docs = self.helper.input.param("num-design-docs")
        self.log.info("description : create {0} spatial views without "
                      "running any spatial view query".format(num_design_docs))

        fun = "function (doc) {emit(doc.geometry, doc);}"
        self._insert_x_design_docs(num_design_docs, fun)

    def test_update_x_design_docs(self):
        num_design_docs = self.helper.input.param("num-design-docs")
        self.log.info("description : update {0} spatial views without "
                      "running any spatial view query".format(num_design_docs))

        fun = "function (doc) {emit(doc.geometry, doc);}"
        self._insert_x_design_docs(num_design_docs, fun)

        # Update the design docs with a different function
        fun = "function (doc) {emit(doc.geometry, null);}"
        self._insert_x_design_docs(num_design_docs, fun)

    def _insert_x_design_docs(self, num_design_docs, fun):
        rest = self.helper.rest
        bucket = self.helper.bucket
        name = "dev_test_multiple_design_docs"

        for i in range(0, num_design_docs):
            design_name = "{0}-{1}".format(name, i)
            self.helper.create_index_fun(design_name, fun)

            # Verify that the function was really stored
            response, meta = rest.get_spatial(bucket, design_name)
            self.assertTrue(response)
            self.assertEqual(meta["id"], "_design/{0}".format(design_name))
            self.assertEqual(response["spatial"][design_name], fun)

    def test_insert_x_docs(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : create a spatial view on {0} documents"\
                          .format(num_docs))
        design_name = "dev_test_insert_{0}_docs".format(num_docs)
        self._insert_x_docs_and_query(num_docs, design_name)

    # Verifies the full docs and not only the keys
    def test_insert_x_docs_full_verification(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : create a spatial view with {0} docs"
                      " and verify the full documents".format(num_docs))
        design_name = "dev_test_insert_{0}_docs_full_verification"\
            .format(num_docs)

        self.helper.create_index_fun(design_name)
        inserted_docs = self.helper.insert_docs(num_docs, return_docs=True)
        self.helper.query_index_for_verification(design_name,
                                                 inserted_docs,
                                                 full_docs=True)

    def test_insert_x_delete_y_docs(self):
        num_docs = self.helper.input.param("num-docs")
        num_deleted_docs = self.helper.input.param("num-deleted-docs")
        self.log.info("description : create spatial view with {0} docs "
                      " and delete {1} docs".format(num_docs,
                                                    num_deleted_docs))
        design_name = "dev_test_insert_{0}_delete_{1}_docs"\
            .format(num_docs, num_deleted_docs)

        inserted_keys = self._setup_index(design_name, num_docs)

        # Delete documents and verify that the documents got deleted
        deleted_keys = self.helper.delete_docs(num_deleted_docs)
        num_expected = num_docs - len(deleted_keys)
        results = self.helper.get_results(design_name,
                                          2 * num_docs,
                                          num_expected=num_expected)
        result_keys = self.helper.get_keys(results)
        self.assertEqual(len(result_keys), num_expected)
        self.helper.verify_result(inserted_keys, deleted_keys + result_keys)

    def test_insert_x_update_y_docs(self):
        num_docs = self.helper.input.param("num-docs")
        num_updated_docs = self.helper.input.param("num-updated-docs")
        self.log.info("description : create spatial view with {0} docs "
                      " and update {1} docs".format(num_docs,
                                                    num_updated_docs))
        design_name = "dev_test_insert_{0}_delete_{1}_docs"\
            .format(num_docs, num_updated_docs)

        self._setup_index(design_name, num_docs)

        # Update documents and verify that the documents got updated
        updated_keys = self.helper.insert_docs(num_updated_docs,
                                               extra_values=dict(updated=True))
        results = self.helper.get_results(design_name, 2 * num_docs)
        result_updated_keys = self._get_updated_docs_keys(results)
        self.assertEqual(len(updated_keys), len(result_updated_keys))
        self.helper.verify_result(updated_keys, result_updated_keys)

    def test_get_spatial_during_x_min_load_y_working_set(self):
        num_docs = self.helper.input.param("num-docs")
        duration = self.helper.input.param("load-time")
        self.log.info("description : this test will continuously insert data "
                      "and get the spatial view results for {0} minutes")
        design_name = "dev_test_insert_and_get_spatial_{0}_mins"\
            .format(duration)

        self._query_x_mins_during_loading(num_docs, duration, design_name)

    def _query_x_mins_during_loading(self, num_docs, duration, design_name):
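        # create the index, keep inserting docs on a background thread, and
        # query the view repeatedly for `duration` minutes before verifying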
        self.helper.create_index_fun(design_name)

        load_thread = InsertDataTillStopped(self.helper, num_docs)
        load_thread.start()

        self._get_results_for_x_minutes(design_name, duration)

        load_thread.stop_insertion()
        load_thread.join()

        self.helper.query_index_for_verification(design_name,
                                                 load_thread.inserted())

    def test_get_spatial_during_x_min_load_y_working_set_multiple_design_docs(
            self):
        num_docs = self.helper.input.param("num-docs")
        num_design_docs = self.helper.input.param("num-design-docs")
        duration = self.helper.input.param("load-time")
        self.log.info("description : will create {0} docs per design doc and "
                      "{1} design docs that will be queried while the data "
                      "is loaded for {2} minutes".format(
                          num_docs, num_design_docs, duration))
        name = "dev_test_spatial_test_{0}_docs_{1}_design_docs_{2}_mins_load"\
            .format(num_docs, num_design_docs, duration)

        view_test_threads = []
        for i in range(0, num_design_docs):
            design_name = "{0}-{1}".format(name, i)
            thread_result = []
            t = Thread(
                target=self._test_multiple_design_docs_thread_wrapper,
                name="Insert documents and query multiple design docs "
                     "in parallel",
                args=(num_docs, duration, design_name, thread_result))
            t.start()
            view_test_threads.append((t, thread_result))
        for (t, failures) in view_test_threads:
            t.join()
        for (t, failures) in view_test_threads:
            if len(failures) > 0:
                self.fail("view thread failed : {0}".format(failures[0]))

    def _test_multiple_design_docs_thread_wrapper(self, num_docs, duration,
                                                  design_name, failures):
        try:
            self._query_x_mins_during_loading(num_docs, duration, design_name)
        except Exception as ex:
            failures.append(ex)

    def test_spatial_view_on_x_docs_y_design_docs(self):
        num_docs = self.helper.input.param("num-docs")
        num_design_docs = self.helper.input.param("num-design-docs")
        self.log.info("description : will create {0} docs per design doc and "
                      "{1} design docs that will be queried")
        name = "dev_test_spatial_test_{0}_docs_y_design_docs"\
            .format(num_docs, num_design_docs)

        design_names = ["{0}-{1}".format(name, i) \
                            for i in range(0, num_design_docs)]

        view_test_threads = []
        for design_name in design_names:
            thread_result = []
            t = Thread(
                target=self._test_spatial_view_thread_wrapper,
                name="Insert documents and query in parallel",
                args=(num_docs, design_name, thread_result))
            t.start()
            view_test_threads.append((t, thread_result))
        for (t, failures) in view_test_threads:
            t.join()
        for (t, failures) in view_test_threads:
            if len(failures) > 0:
                self.fail("view thread failed : {0}".format(failures[0]))

    def _test_spatial_view_thread_wrapper(self, num_docs, design_name,
                                          failures):
        try:
            self._insert_x_docs_and_query(num_docs, design_name)
        except Exception as ex:
            failures.append(ex)

    # Create the index and insert documents, including verification that
    # the index contains them
    # Returns the keys of the inserted documents
    def _setup_index(self, design_name, num_docs):
        self.helper.create_index_fun(design_name)
        inserted_keys = self.helper.insert_docs(num_docs)
        self.helper.query_index_for_verification(design_name, inserted_keys)

        return inserted_keys

    # Return the keys for all docs that contain a key called "updated"
    # in the value
    def _get_updated_docs_keys(self, results):
        keys = []
        if results:
            rows = results["rows"]
            for row in rows:
                if "updated" in row["value"]:
                    keys.append(row["id"])
            self.log.info("{0} documents to updated".format(len(keys)))
        return keys

    def _get_results_for_x_minutes(self, design_name, duration, delay=5):
        random.seed(0)
        start = time.time()
        while (time.time() - start) < duration * 60:
            limit = random.randint(1, 1000)
            self.log.info("{0} seconds has passed ....".format(
                (time.time() - start)))
            results = self.helper.get_results(design_name, limit)
            keys = self.helper.get_keys(results)
            self.log.info("spatial view returned {0} rows".format(len(keys)))
            time.sleep(delay)

    def _insert_x_docs_and_query(self, num_docs, design_name):
        inserted_keys = self._setup_index(design_name, num_docs)
        self.assertEqual(len(inserted_keys), num_docs)

    def test_update_view_x_docs(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : create a spatial view on {0} documents "
                      "and update the view so that it returns only a subset"\
                          .format(num_docs))
        design_name = "dev_test_update_view_{0}_docs".format(num_docs)

        # Create an index that emits all documents
        self.helper.create_index_fun(design_name)
        keys_b = self.helper.insert_docs(num_docs // 3, "bbb")
        keys_c = self.helper.insert_docs(num_docs - (num_docs // 3), "ccc")
        self.helper.query_index_for_verification(design_name, keys_b + keys_c)

        # Update index to only a subset of the documents
        spatial_fun = ('function (doc, meta) {'
                       'if(meta.id.indexOf("ccc") != -1) {'
                       'emit(doc.geometry, doc);}}')
        self.helper.create_index_fun(design_name, spatial_fun)
        self.helper.query_index_for_verification(design_name, keys_c)

    def test_compare_views_all_nodes_x_docs(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : creates view on {0} documents, queries "
                      "all nodes (not only the master node) and compares "
                      "if the results are all the same"\
                          .format(num_docs))
        design_name = "dev_test_compare_views_{0}_docs".format(num_docs)

        inserted_keys = self._setup_index(design_name, num_docs)

        nodes = self.helper.rest.get_nodes()
        params = {"connection_timeout": 60000, "full_set": True}

        # Query every single node and verify
        for n in nodes:
            n_rest = RestConnection({
                "ip": n.ip,
                "port": n.port,
                "username": self.helper.master.rest_username,
                "password": self.helper.master.rest_password
            })
            results = n_rest.spatial_results(self.helper.bucket, design_name,
                                             params, None)
            result_keys = self.helper.get_keys(results)
            self.helper.verify_result(inserted_keys, result_keys)
Example #39
class SpatialQueryTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()
        self.cluster = Cluster()
        self.servers = self.helper.servers

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make range queries with limits on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._query_test_init(data_set)

## Rebalance In
    def test_rebalance_in_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and range queries with limits on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._rebalance_cluster(data_set)

# Rebalance Out Tests
    def test_rebalance_out_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and  limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries with limits on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._rebalance_cluster(data_set)

# Warmup Tests

    def test_warmup_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with skip and limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with  range queries with limits on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._query_test_init_integration(data_set)


# Reboot Tests
    def test_reboot_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot and limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with  range queries with limits on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._query_test_init_integration(data_set)

# Failover Tests
    def test_failover_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Failover and limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._failover_cluster(data_set)

    def test_failover_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._failover_cluster(data_set)

    def test_failover_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._failover_cluster(data_set)

    def test_failover_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries with limits on a "
                      "multidimensional with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._failover_cluster(data_set)

    ###
    # Load the data defined for this dataset.
    # Create views and query the data as it loads.
    # Verification is optional; best practice is to set
    # verify_results to False if you plan on running
    # _query_all_views() later in the test case.
    ###
    def _query_test_init(self, data_set, verify_results = True):
        views = data_set.views

        # start loading data
        t = Thread(target=data_set.load,
                   name="load_data_set",
                   args=())
        t.start()

        # run queries while loading data
        while t.is_alive():
            self._query_all_views(views, False)
            time.sleep(5)
        t.join()

        # results will be verified if verify_results set
        if verify_results:
            self._query_all_views(views, verify_results)
        else:
            self._check_view_integrity(views)

    def _query_test_init_integration(self, data_set, verify_results = True):
        views = data_set.views
        inserted_keys = data_set.load()

        # run the disruptive cluster operation for this test first
        if self.helper.num_nodes_reboot >= 1:
            self._reboot_cluster(data_set)
        elif self.helper.num_nodes_warmup >= 1:
            self._warmup_cluster(data_set)
        elif self.helper.num_nodes_to_add >= 1 or self.helper.num_nodes_to_remove >= 1:
            self._rebalance_cluster(data_set)

        # query all views in a background thread; pass the callable and its
        # arguments separately instead of calling it in place
        t = Thread(target=self._query_all_views, args=(views, False))
        t.start()
        while t.is_alive():
            time.sleep(5)
        t.join()

        # results will be verified if verify_results set
        if verify_results:
            self._query_all_views(views, verify_results)
        else:
            self._check_view_integrity(views)

    ##
    # run all queries for all views in parallel
    ##
    def _query_all_views(self, views, verify_results = True):
        query_threads = []
        for view in views:
            t = RunQueriesThread(view, verify_results)
            query_threads.append(t)
            t.start()

        [t.join() for t in query_threads]

        self._check_view_integrity(query_threads)

    ##
    # If an error occurred loading or querying data for a view,
    # it is queued and checked here. Fail on the first one that
    # occurs.
    ##
    def _check_view_integrity(self, thread_results):
        for result in thread_results:
            if result.test_results.errors:
                self.fail(result.test_results.errors[0][1])
            if result.test_results.failures:
                self.fail(result.test_results.failures[0][1])

    ###
    # Rebalance
    ###
    def _rebalance_cluster(self, data_set):
        if self.helper.num_nodes_to_add >= 1:
            rebalance = self.cluster.async_rebalance(self.servers[:1],
                self.servers[1:self.helper.num_nodes_to_add + 1],
                [])
            self._query_test_init(data_set)
            rebalance.result()

        elif self.helper.num_nodes_to_remove >= 1:
            rebalance = self.cluster.async_rebalance(self.servers[:1], [],
                self.servers[1:self.helper.num_nodes_to_remove + 1])
            self._query_test_init(data_set)
            rebalance.result()

    def _failover_cluster(self, data_set):
        failover_nodes = self.servers[1 : self.helper.failover_factor + 1]
        try:
            # failover, then verify loaded data
            self.cluster.failover(self.servers, failover_nodes)
            self.log.info("120 seconds sleep after failover before invoking rebalance...")
            time.sleep(120)
            rebalance = self.cluster.async_rebalance(self.servers,
                [], failover_nodes)

            self._query_test_init(data_set)

            msg = "rebalance failed while removing failover nodes {0}".format(failover_nodes)
            self.assertTrue(rebalance.result(), msg=msg)

            #verify queries after failover
            self._query_test_init(data_set)
        finally:
            self.log.info("Completed the failover testing for spatial querying")

    ###
    # Warmup
    ###
    def _warmup_cluster(self, data_set):
        for server in self.servers[0:self.helper.num_nodes_warmup]:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            remote.start_server()
            remote.disconnect()
            self.log.info("Node {0} should be warming up ".format(server.ip))
            time.sleep(120)
        self._query_test_init(data_set)

    ###
    # Reboot
    ###
    def _reboot_cluster(self, data_set):
        try:
            for server in self.servers[0:self.helper.num_nodes_reboot]:
                shell = RemoteMachineShellConnection(server)
                if shell.extract_remote_info().type.lower() == 'windows':
                    o, r = shell.execute_command("shutdown -r -f -t 0")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))
                elif shell.extract_remote_info().type.lower() == 'linux':
                    o, r = shell.execute_command("reboot")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))

                    time.sleep(120)
                    shell = RemoteMachineShellConnection(server)
                    command = "/sbin/iptables -F"
                    o, r = shell.execute_command(command)
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} backup".format(server.ip))
        finally:
            self.log.info("Warming-up server ..".format(server.ip))
            time.sleep(100)
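# The RunQueriesThread class used above belongs to the surrounding test
# framework and is not shown here. A minimal sketch of the contract that
# _query_all_views() and _check_view_integrity() rely on (the names and the
# call inside run() are assumptions, not the actual implementation):
import traceback
import unittest
from threading import Thread

class RunQueriesThreadSketch(Thread):
    def __init__(self, view, verify_results):
        super(RunQueriesThreadSketch, self).__init__()
        self.view = view
        self.verify_results = verify_results
        # unittest.TestResult exposes the .errors and .failures lists of
        # (test, traceback-string) tuples that _check_view_integrity reads
        self.test_results = unittest.TestResult()

    def run(self):
        try:
            # hypothetical call standing in for the real query/verify logic
            self.view.run_queries(verify=self.verify_results)
        except AssertionError:
            self.test_results.failures.append((self, traceback.format_exc()))
        except Exception:
            self.test_results.errors.append((self, traceback.format_exc()))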
Example #40
class SpatialInfoTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()


    def tearDown(self):
        self.helper.cleanup_cluster()


    def test_spatial_info(self):
        self.log.info(
            "description : test info for spatial indexes")
        rest = self.helper.rest
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_info"

        self.helper.create_index_fun(design_name, prefix)

        # Fill the database and add an index
        self.helper.insert_docs(2000, prefix)
        self.helper.get_results(design_name)
        status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]

        self.assertTrue(disk_size > 0)
        self.assertEqual(info["name"], design_name)

        num_vbuckets = len(rest.get_vbuckets(self.helper.bucket))
        self.assertEqual(len(info["spatial_index"]["update_seq"]),
                         num_vbuckets)
        self.assertEqual(len(info["spatial_index"]["purge_seq"]),
                         num_vbuckets)
        self.assertFalse(info["spatial_index"]["updater_running"])
        self.assertFalse(info["spatial_index"]["waiting_clients"] > 0)
        self.assertFalse(info["spatial_index"]["compact_running"])

        # Insert a lot of new documents, and return right after the index
        # build has started (without waiting until it's done) to test
        # whether the updater fields are set correctly
        self.helper.insert_docs(50000, prefix)
        self.helper.get_results(design_name,
                                extra_params={"stale": "update_after"})
        # Somehow stale=update_after doesn't really return immediately,
        # thus commenting this assertion out. There's no real reason
        # to investigate, as the indexing code is changing heavily at
        # the moment anyway
        #self.assertTrue(info["spatial_index"]["updater_running"])
        #self.assertTrue(info["spatial_index"]["waiting_commit"])
        #self.assertTrue(info["spatial_index"]["waiting_clients"] > 0)
        self.assertFalse(info["spatial_index"]["compact_running"])

        # Request the index again, to make sure it is fully updated
        self.helper.get_results(design_name)
        status, info = self.helper.info(design_name)
        self.assertFalse(info["spatial_index"]["updater_running"])
        self.assertFalse(info["spatial_index"]["waiting_clients"] > 0)
        self.assertFalse(info["spatial_index"]["compact_running"])
        self.assertTrue(info["spatial_index"]["disk_size"] > disk_size)
Example #41
class SpatialQueryErrorsTests(BaseTestCase):
    def setUp(self):
        try:
            if 'first_case' not in TestInputSingleton.input.test_params:
                TestInputSingleton.input.test_params['default_bucket'] = False
                TestInputSingleton.input.test_params['skip_cleanup'] = True
            self.default_bucket_name = 'default'
            super(SpatialQueryErrorsTests, self).setUp()
            if 'first_case' in TestInputSingleton.input.test_params:
                self.cluster.rebalance(self.servers[:], self.servers[1:], [])
            # We use only one bucket in this test suite
            self.rest = RestConnection(self.master)
            self.bucket = self.rest.get_bucket(
                Bucket(name=self.default_bucket_name))
            # num_docs must be a multiple of the number of vbuckets
            self.num_docs = self.input.param("num_docs", 2000)
            # `testname` is used for the design document name as well as the
            # spatial function name
            self.testname = 'query-errors'
            self.helper = SpatialHelper(self, "default")
            if 'first_case' in TestInputSingleton.input.test_params:
                self.create_ddoc()
                self.helper.insert_docs(self.num_docs, self.testname)
        except Exception as ex:
            self.input.test_params["stop-on-failure"] = True
            self.log.error("SETUP WAS FAILED. ALL TESTS WILL BE SKIPPED")
            self.fail(ex)

    def tearDown(self):
        # clean-up is only performed on the last run
        if 'last_case' in TestInputSingleton.input.test_params:
            TestInputSingleton.input.test_params['skip_cleanup'] = False
            super(SpatialQueryErrorsTests, self).tearDown()
        else:
            self.cluster.shutdown(force=True)
            self._log_finish(self)

    def test_query_errors(self):
        all_params = [
            'skip', 'limit', 'stale', 'bbox', 'start_range', 'end_range'
        ]
        query_params = {}
        for key in self.input.test_params:
            if key in all_params:
                query_params[key] = str(self.input.test_params[key])

        try:
            self.spatial_query(query_params)
        except QueryViewException as ex:
            self.assertEqual(self.input.test_params['error'],
                              json.loads(ex.reason)['error'])
        else:
            self.fail("Query did not fail, but should have. "
                      "Query parameters were: {0}".format(query_params))

    def create_ddoc(self):
        view_fn = '''function (doc) {
    if (doc.age !== undefined || doc.height !== undefined ||
            doc.bloom !== undefined || doc.shed_leaves !== undefined) {
        emit([doc.age, doc.height, [doc.bloom, doc.shed_leaves]], doc.name);
    }}'''
        self.helper.create_index_fun(self.testname, view_fn)

    def spatial_query(self, params=None, ddoc='test'):
        # avoid a mutable default argument; copy so the caller's dict
        # is not modified
        params = dict(params or {})
        bucket = self.default_bucket_name
        if 'stale' not in params:
            params['stale'] = 'false'
        return self.rest.query_view(self.testname,
                                    self.testname,
                                    bucket,
                                    params,
                                    type="spatial")
Example #42
 def setUp(self):
     self.log = logger.Logger.get_logger()
     self.helper = SpatialHelper(self, "default")
     self.helper.setup_cluster()
Example #43
class SpatialViewsTests(BaseTestCase):
    def setUp(self):
        super(SpatialViewsTests, self).setUp()
        self.skip_rebalance = self.input.param("skip_rebalance", False)
        self.use_dev_views = self.input.param("use-dev-views", False)
        self.default_map = "function (doc) {emit(doc.geometry, doc.age);}"
        self.default_ddoc_name = self.input.param("default_ddoc_name",
                                                  "test-ddoc")
        self.default_view_name = self.input.param("default_view_name",
                                                  "test-view")
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        if not self.skip_rebalance:
            self.cluster.rebalance(self.servers[:], self.servers[1:], [])
        #load some items to verify
        self.docs = self.helper.insert_docs(self.num_items,
                                            'spatial-doc',
                                            wait_for_persistence=True,
                                            return_docs=True)

    def tearDown(self):
        super(SpatialViewsTests, self).tearDown()

    def test_add_spatial_views(self):
        num_ddoc = self.input.param('num-ddoc', 1)
        views_per_ddoc = self.input.param('views-per-ddoc', 1)
        non_spatial_views_per_ddoc = self.input.param(
            'non-spatial-views-per-ddoc', 0)
        ddocs = self.make_ddocs(num_ddoc, views_per_ddoc,
                                non_spatial_views_per_ddoc)
        self.create_ddocs(ddocs)

    def test_add_spatial_views_case_sensitive(self):
        ddoc = DesignDocument(self.default_ddoc_name, [],
                              spatial_views=[
                                  View(self.default_view_name,
                                       self.default_map,
                                       dev_view=self.use_dev_views,
                                       is_spatial=True),
                                  View(self.default_view_name.upper(),
                                       self.default_map,
                                       dev_view=self.use_dev_views,
                                       is_spatial=True)
                              ])
        self.create_ddocs([ddoc])

    def make_ddocs(self, ddocs_num, views_per_ddoc,
                   non_spatial_views_per_ddoc):
        ddocs = []
        for i in xrange(ddocs_num):
            views = []
            for k in xrange(views_per_ddoc):
                views.append(
                    View(self.default_view_name + str(k),
                         self.default_map,
                         dev_view=self.use_dev_views,
                         is_spatial=True))
            non_spatial_views = []
            if non_spatial_views_per_ddoc:
                for k in xrange(non_spatial_views_per_ddoc):
                    # append to the non-spatial list, not the spatial one
                    non_spatial_views.append(
                        View(self.default_view_name + str(k),
                             'function (doc) { emit(null, doc);}',
                             dev_view=self.use_dev_views))
            ddocs.append(
                DesignDocument(self.default_ddoc_name + str(i),
                               non_spatial_views,
                               spatial_views=views))
        return ddocs

    def create_ddocs(self, ddocs):
        for ddoc in ddocs:
            if not (ddoc.views or ddoc.spatial_views):
                self.cluster.create_view(self.master,
                                         ddoc.name, [],
                                         bucket=self.bucket_name)
            for view in ddoc.views:
                self.cluster.create_view(self.master,
                                         ddoc.name,
                                         view,
                                         bucket=self.bucket_name)
            for view in ddoc.spatial_views:
                self.cluster.create_view(self.master,
                                         ddoc.name,
                                         view,
                                         bucket=self.bucket_name)
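# Illustrative (assumed) wire format of a design document produced by
# make_ddocs(1, 1, 0) above: spatial views are assumed to live under the
# "spatial" key, regular map/reduce views under "views". The view name
# follows from default_view_name + str(0), the map function from
# self.default_map.
example_ddoc_body = {
    "views": {},
    "spatial": {
        "test-view0": "function (doc) {emit(doc.geometry, doc.age);}",
    },
}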