class SpatialInfoTests(unittest.TestCase):
    """Tests for the _info endpoint of spatial (geo) view indexes."""

    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_spatial_info(self):
        """Check the index info fields before and after a large update.

        Builds a small index, validates the reported sizes/sequences,
        then inserts many more docs and verifies the info reflects the
        grown index once it is fully updated.
        """
        self.log.info("description : test info for spatial indexes")
        rest = self.helper.rest
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_info"
        self.helper.create_index_fun(design_name, prefix)

        # Fill the database and build the index.
        self.helper.insert_docs(2000, prefix)
        self.helper.get_results(design_name)
        _status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]
        self.assertTrue(disk_size > 0)
        self.assertEqual(info["name"], design_name)
        num_vbuckets = len(rest.get_vbuckets(self.helper.bucket))
        self.assertEqual(len(info["spatial_index"]["update_seq"]),
                         num_vbuckets)
        self.assertEqual(len(info["spatial_index"]["purge_seq"]),
                         num_vbuckets)
        self.assertFalse(info["spatial_index"]["updater_running"])
        self.assertFalse(info["spatial_index"]["waiting_clients"] > 0)
        self.assertFalse(info["spatial_index"]["compact_running"])

        # Insert a lot of new documents, and return after starting to
        # build up (not waiting until it's done) the index to test
        # if the updater fields are set correctly.
        self.helper.insert_docs(50000, prefix)
        self.helper.get_results(design_name,
                                extra_params={"stale": "update_after"})
        # BUG FIX: refresh the info *after* the stale query. The previous
        # code asserted on the `info` dict fetched before the 50k inserts,
        # so the assertion below checked stale data.
        _status, info = self.helper.info(design_name)
        # Somehow stale=update_after doesn't really return immediately,
        # thus these assertions stay commented out. There's no real reason
        # to investigate, as the indexing changes heavily at the moment
        # anyway.
        #self.assertTrue(info["spatial_index"]["updater_running"])
        #self.assertTrue(info["spatial_index"]["waiting_commit"])
        #self.assertTrue(info["spatial_index"]["waiting_clients"] > 0)
        self.assertFalse(info["spatial_index"]["compact_running"])

        # Request the index again, to make sure it is fully updated.
        self.helper.get_results(design_name)
        _status, info = self.helper.info(design_name)
        self.assertFalse(info["spatial_index"]["updater_running"])
        self.assertFalse(info["spatial_index"]["waiting_clients"] > 0)
        self.assertFalse(info["spatial_index"]["compact_running"])
        # The grown index must occupy more disk than the initial build.
        self.assertTrue(info["spatial_index"]["disk_size"] > disk_size)
class SpatialCompactionTests(BaseTestCase):
    """Manual compaction tests for spatial indexes, optionally while a
    rebalance (servers in and/or out) is in flight."""

    def setUp(self):
        super(SpatialCompactionTests, self).setUp()
        self.start_cluster = self.input.param('start-cluster',
                                              len(self.servers))
        self.servers_in = self.input.param('servers_in', 0)
        self.servers_out = self.input.param('servers_out', 0)
        # Pick the bucket the test operates on, depending on the
        # configured bucket flavor.
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        if self.start_cluster > 1:
            # BUG FIX: the slice bound must be self.start_cluster; the
            # bare name start_cluster is undefined here and raised a
            # NameError whenever start-cluster > 1.
            rebalance = self.cluster.async_rebalance(
                self.servers[:1], self.servers[1:self.start_cluster], [])
            rebalance.result()

    def tearDown(self):
        super(SpatialCompactionTests, self).tearDown()

    def test_spatial_compaction(self):
        """Compact a spatial index and verify the file shrinks.

        If servers_in/servers_out are configured, the compaction happens
        while a rebalance is running.
        """
        self.log.info(
            "description : test manual compaction for spatial indexes")
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_compaction"
        self.helper.create_index_fun(design_name, prefix)

        # Insert (resp. update, as they have the same prefix) and query
        # the spatial index several times so that compaction makes sense.
        for i in range(0, 8):
            self.helper.insert_docs(2000, prefix)
            self.helper.get_results(design_name)

        # Get the index size prior to compaction.
        status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]

        # Optionally kick off a rebalance so compaction runs concurrently.
        if self.servers_in or self.servers_out:
            servs_in = servs_out = []
            if self.servers_in:
                servs_in = self.servers[self.start_cluster:
                                        self.servers_in + 1]
            if self.servers_out:
                servs_out = self.servers[-self.servers_out:]
            rebalance = self.cluster.async_rebalance(self.servers,
                                                     servs_in, servs_out)

        # Do the compaction.
        self.helper.compact(design_name)

        # Check if the index size got smaller.
        status, info = self.helper.info(design_name)
        self.assertTrue(info["spatial_index"]["disk_size"] < disk_size,
                        "The file size ({0}) isn't smaller than the "
                        "pre compaction size ({1})."
                        .format(info["spatial_index"]["disk_size"],
                                disk_size))

        if self.servers_in or self.servers_out:
            rebalance.result()
class SpatialInfoTests(unittest.TestCase):
    """Tests the _info endpoint of spatial view indexes."""

    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_spatial_info(self):
        """Validate index info fields on a fresh build and after growth."""
        self.log.info("description : test info for spatial indexes")
        rest = self.helper.rest
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_info"
        self.helper.create_index_fun(design_name, prefix)

        # Fill the database and build the index.
        self.helper.insert_docs(2000, prefix)
        self.helper.get_results(design_name)
        _status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]
        self.assertTrue(disk_size > 0)
        self.assertEqual(info["name"], design_name)
        num_vbuckets = len(rest.get_vbuckets(self.helper.bucket))
        self.assertEqual(len(info["spatial_index"]["update_seq"]),
                         num_vbuckets)
        self.assertEqual(len(info["spatial_index"]["purge_seq"]),
                         num_vbuckets)
        self.assertFalse(info["spatial_index"]["updater_running"])
        self.assertFalse(info["spatial_index"]["waiting_clients"] > 0)
        self.assertFalse(info["spatial_index"]["compact_running"])

        # Insert a lot of new documents, and return after starting to
        # build up (not waiting until it's done) the index to test
        # if the updater fields are set correctly.
        self.helper.insert_docs(50000, prefix)
        self.helper.get_results(design_name,
                                extra_params={"stale": "update_after"})
        # BUG FIX: fetch fresh info here — the old code asserted against
        # the info retrieved before the bulk insert, i.e. stale data.
        _status, info = self.helper.info(design_name)
        # Somehow stale=update_after doesn't really return immediately,
        # thus these assertions stay commented out. There's no real reason
        # to investigate, as the indexing changes heavily at the moment
        # anyway.
        #self.assertTrue(info["spatial_index"]["updater_running"])
        #self.assertTrue(info["spatial_index"]["waiting_commit"])
        #self.assertTrue(info["spatial_index"]["waiting_clients"] > 0)
        self.assertFalse(info["spatial_index"]["compact_running"])

        # Request the index again, to make sure it is fully updated.
        self.helper.get_results(design_name)
        _status, info = self.helper.info(design_name)
        self.assertFalse(info["spatial_index"]["updater_running"])
        self.assertFalse(info["spatial_index"]["waiting_clients"] > 0)
        self.assertFalse(info["spatial_index"]["compact_running"])
        # The index must have grown on disk after indexing 50k more docs.
        self.assertTrue(info["spatial_index"]["disk_size"] > disk_size)
class SpatialCompactionTests(unittest.TestCase):
    """Manual compaction test for spatial indexes."""

    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_spatial_compaction(self):
        """Grow an index through repeated updates, compact it, and verify
        that the on-disk size shrinks."""
        self.log.info(
            "description : test manual compaction for spatial indexes")
        # CLEANUP: removed the unused locals `rest` (never read) and
        # `doc_names` (return value of insert_docs was never used).
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_compaction"
        self.helper.create_index_fun(design_name, prefix)

        # Insert (resp. update, as they have the same prefix) and query
        # the spatial index several times so that compaction makes sense.
        for i in range(0, 8):
            self.helper.insert_docs(2000, prefix)
            self.helper.get_results(design_name)

        # Get the index size prior to compaction.
        status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]

        # Do the compaction.
        self.helper.compact(design_name)

        # Check if the index size got smaller.
        status, info = self.helper.info(design_name)
        self.assertTrue(info["spatial_index"]["disk_size"] < disk_size,
                        "The file size ({0}) isn't smaller than the "
                        "pre compaction size ({1})."
                        .format(info["spatial_index"]["disk_size"],
                                disk_size))
class SpatialCompactionTests(unittest.TestCase):
    """Exercises manual compaction of a spatial index and checks that the
    index file shrinks afterwards."""

    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_spatial_compaction(self):
        """Repeatedly update and query an index, compact it, then assert
        the post-compaction disk size is strictly smaller."""
        self.log.info(
            "description : test manual compaction for spatial indexes")
        doc_prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_compaction"
        self.helper.create_index_fun(design_name, doc_prefix)

        # Re-insert the same documents (same prefix => updates) and query
        # the index several times, so the index file accumulates garbage
        # and compaction has something to reclaim.
        for _ in range(8):
            self.helper.insert_docs(2000, doc_prefix)
            self.helper.get_results(design_name)

        # Record the index file size before compacting.
        _, pre_info = self.helper.info(design_name)
        size_before = pre_info["spatial_index"]["disk_size"]

        # Trigger manual compaction.
        self.helper.compact(design_name)

        # The compacted file must be smaller than before.
        _, post_info = self.helper.info(design_name)
        size_after = post_info["spatial_index"]["disk_size"]
        failure_msg = (
            "The file size ({0}) isn't smaller than the "
            "pre compaction size ({1}).".format(size_after, size_before))
        self.assertTrue(size_after < size_before, failure_msg)
class SpatialCompactionTests(BaseTestCase):
    """Manual compaction tests for spatial indexes, optionally run while a
    rebalance (servers in and/or out) is in progress."""

    def setUp(self):
        super(SpatialCompactionTests, self).setUp()
        self.start_cluster = self.input.param('start-cluster',
                                              len(self.servers))
        self.servers_in = self.input.param('servers_in', 0)
        self.servers_out = self.input.param('servers_out', 0)
        # Pick the bucket under test depending on the bucket flavor.
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        try:
            if self.start_cluster > 1:
                rebalance = self.cluster.async_rebalance(
                    self.servers[:1], self.servers[1:self.start_cluster],
                    [])
                rebalance.result()
        except Exception:
            # BUG FIX: the original bare `except:` swallowed every setup
            # failure (including KeyboardInterrupt) and let the test run
            # against a half-initialized cluster. Clean up, then re-raise
            # so the failure is reported.
            super(SpatialCompactionTests, self).tearDown()
            raise

    def tearDown(self):
        super(SpatialCompactionTests, self).tearDown()

    def test_spatial_compaction(self):
        """Compact a spatial index and verify the file shrinks; when
        servers_in/servers_out are set, do so during a rebalance."""
        self.log.info(
            "description : test manual compaction for spatial indexes")
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_compaction"
        self.helper.create_index_fun(design_name, prefix)

        # Insert (resp. update, as they have the same prefix) and query
        # the spatial index several times so that compaction makes sense.
        for i in range(0, 8):
            self.helper.insert_docs(2000, prefix)
            self.helper.get_results(design_name)

        # Get the index size prior to compaction.
        status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]

        # Optionally start a rebalance so compaction runs concurrently.
        if self.servers_in or self.servers_out:
            servs_in = servs_out = []
            if self.servers_in:
                servs_in = self.servers[self.start_cluster:
                                        self.servers_in + 1]
            if self.servers_out:
                servs_out = self.servers[-self.servers_out:]
            rebalance = self.cluster.async_rebalance(self.servers,
                                                     servs_in, servs_out)

        # Do the compaction.
        self.helper.compact(design_name)

        # Check if the index size got smaller.
        status, info = self.helper.info(design_name)
        self.assertTrue(info["spatial_index"]["disk_size"] < disk_size,
                        "The file size ({0}) isn't smaller than the "
                        "pre compaction size ({1})."
                        .format(info["spatial_index"]["disk_size"],
                                disk_size))

        if self.servers_in or self.servers_out:
            rebalance.result()