import json
import random
import time
import unittest
import uuid
from threading import Thread

import logger
# Project-local helpers; the import paths below assume the usual
# testrunner layout and may need adjusting for other checkouts.
from TestInput import TestInputSingleton
from basetestcase import BaseTestCase
from membase.api.exception import QueryViewException
from membase.api.rest_client import RestConnection, Bucket
from membase.helper.cluster_helper import ClusterOperationHelper
from membase.helper.failover_helper import FailoverHelper
from membase.helper.spatial_helper import SpatialHelper


class SpatialInfoTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_spatial_info(self):
        self.log.info("description : test info for spatial indexes")
        rest = self.helper.rest
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_info"
        self.helper.create_index_fun(design_name, prefix)

        # Fill the database and build the index
        self.helper.insert_docs(2000, prefix)
        self.helper.get_results(design_name)
        status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]
        self.assertTrue(disk_size > 0)
        self.assertEqual(info["name"], design_name)
        num_vbuckets = len(rest.get_vbuckets(self.helper.bucket))
        self.assertEqual(len(info["spatial_index"]["update_seq"]),
                         num_vbuckets)
        self.assertEqual(len(info["spatial_index"]["purge_seq"]),
                         num_vbuckets)
        self.assertFalse(info["spatial_index"]["updater_running"])
        self.assertFalse(info["spatial_index"]["waiting_clients"] > 0)
        self.assertFalse(info["spatial_index"]["compact_running"])

        # Insert many new documents and return right after the index
        # update has started (not waiting until it is done) to test
        # whether the updater fields are set correctly
        self.helper.insert_docs(50000, prefix)
        self.helper.get_results(design_name,
                                extra_params={"stale": "update_after"})
        # stale=update_after doesn't actually return immediately, hence
        # these assertions are commented out. There's no real need to
        # investigate, as the indexing code is changing heavily at the
        # moment anyway.
        #self.assertTrue(info["spatial_index"]["updater_running"])
        #self.assertTrue(info["spatial_index"]["waiting_commit"])
        #self.assertTrue(info["spatial_index"]["waiting_clients"] > 0)
        self.assertFalse(info["spatial_index"]["compact_running"])

        # Request the index again to make sure it is fully updated
        self.helper.get_results(design_name)
        status, info = self.helper.info(design_name)
        self.assertFalse(info["spatial_index"]["updater_running"])
        self.assertFalse(info["spatial_index"]["waiting_clients"] > 0)
        self.assertFalse(info["spatial_index"]["compact_running"])
        self.assertTrue(info["spatial_index"]["disk_size"] > disk_size)
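
# For reference: the assertions above read fields from the result of
# SpatialHelper.info(). The response shape is assumed to look roughly
# like the sketch below (illustrative only, reconstructed from the
# fields the test touches; the real server response has more fields):
#
#   {
#       "name": "dev_test_spatial_info",
#       "spatial_index": {
#           "disk_size": 12345,         # bytes on disk, grows with inserts
#           "update_seq": [0] * 1024,   # one sequence number per vbucket
#           "purge_seq": [0] * 1024,    # one purge sequence per vbucket
#           "updater_running": False,
#           "compact_running": False,
#           "waiting_clients": 0,
#       },
#   }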
class SpatialCompactionTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_spatial_compaction(self):
        self.log.info(
            "description : test manual compaction for spatial indexes")
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_compaction"
        self.helper.create_index_fun(design_name, prefix)

        # Insert (resp. update, as they have the same prefix) and query
        # the spatial index several times so that the compaction makes sense
        for i in range(0, 8):
            self.helper.insert_docs(2000, prefix)
            self.helper.get_results(design_name)

        # Get the index size prior to compaction
        status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]

        # Do the compaction
        self.helper.compact(design_name)

        # Check that the index got smaller
        status, info = self.helper.info(design_name)
        self.assertTrue(
            info["spatial_index"]["disk_size"] < disk_size,
            "The file size ({0}) isn't smaller than the "
            "pre-compaction size ({1}).".format(
                info["spatial_index"]["disk_size"], disk_size))
class SpatialViewTests(BaseTestCase):
    def setUp(self):
        self.helper = SpatialHelper(self, "default")
        super(SpatialViewTests, self).setUp()
        self.log = logger.Logger.get_logger()
        self.helper.setup_cluster()

    def suite_setUp(self):
        pass

    def tearDown(self):
        super(SpatialViewTests, self).tearDown()

    def suite_tearDown(self):
        pass

    def test_create_x_design_docs(self):
        num_design_docs = self.helper.input.param("num-design-docs")
        self.log.info("description : create {0} spatial views without "
                      "running any spatial view query"
                      .format(num_design_docs))
        fun = "function (doc) {emit(doc.geometry, doc);}"
        self._insert_x_design_docs(num_design_docs, fun)

    def test_update_x_design_docs(self):
        num_design_docs = self.helper.input.param("num-design-docs")
        self.log.info("description : update {0} spatial views without "
                      "running any spatial view query"
                      .format(num_design_docs))
        fun = "function (doc) {emit(doc.geometry, doc);}"
        self._insert_x_design_docs(num_design_docs, fun)
        # Update the design docs with a different function
        fun = "function (doc) {emit(doc.geometry, null);}"
        self._insert_x_design_docs(num_design_docs, fun)

    def _insert_x_design_docs(self, num_design_docs, fun):
        rest = self.helper.rest
        bucket = self.helper.bucket
        name = "dev_test_multiple_design_docs"
        for i in range(0, num_design_docs):
            design_name = "{0}-{1}".format(name, i)
            self.helper.create_index_fun(design_name, fun)

            # Verify that the function was really stored
            response, meta = rest.get_spatial(bucket, design_name)
            self.assertTrue(response)
            self.assertEqual(meta["id"], "_design/{0}".format(design_name))
            self.assertEqual(response["spatial"][design_name], fun)

    def test_insert_x_docs(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : create a spatial view on {0} documents"
                      .format(num_docs))
        design_name = "dev_test_insert_{0}_docs".format(num_docs)
        self._insert_x_docs_and_query(num_docs, design_name)

    # Verifies the full docs and not only the keys
    def test_insert_x_docs_full_verification(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : create a spatial view with {0} docs"
                      " and verify the full documents".format(num_docs))
        design_name = "dev_test_insert_{0}_docs_full_verification"\
            .format(num_docs)
        self.helper.create_index_fun(design_name)
        inserted_docs = self.helper.insert_docs(num_docs, return_docs=True)
        self.helper.query_index_for_verification(design_name, inserted_docs,
                                                 full_docs=True)

    def test_insert_x_delete_y_docs(self):
        num_docs = self.helper.input.param("num-docs")
        num_deleted_docs = self.helper.input.param("num-deleted-docs")
        self.log.info("description : create spatial view with {0} docs "
                      "and delete {1} docs".format(num_docs,
                                                   num_deleted_docs))
        design_name = "dev_test_insert_{0}_delete_{1}_docs"\
            .format(num_docs, num_deleted_docs)
        inserted_keys = self._setup_index(design_name, num_docs)

        # Delete documents and verify that the documents got deleted
        deleted_keys = self.helper.delete_docs(num_deleted_docs)
        num_expected = num_docs - len(deleted_keys)
        results = self.helper.get_results(design_name, 2 * num_docs,
                                          num_expected=num_expected)
        result_keys = self.helper.get_keys(results)
        self.assertEqual(len(result_keys), num_expected)
        self.helper.verify_result(inserted_keys, deleted_keys + result_keys)

    def test_insert_x_update_y_docs(self):
        num_docs = self.helper.input.param("num-docs")
        num_updated_docs = self.helper.input.param("num-updated-docs")
        self.log.info("description : create spatial view with {0} docs "
                      "and update {1} docs".format(num_docs,
                                                   num_updated_docs))
        design_name = "dev_test_insert_{0}_update_{1}_docs"\
            .format(num_docs, num_updated_docs)
        self._setup_index(design_name, num_docs)

        # Update documents and verify that the documents got updated
        updated_keys = self.helper.insert_docs(
            num_updated_docs, extra_values=dict(updated=True))
        results = self.helper.get_results(design_name, 2 * num_docs)
        result_updated_keys = self._get_updated_docs_keys(results)
        self.assertEqual(len(updated_keys), len(result_updated_keys))
        self.helper.verify_result(updated_keys, result_updated_keys)

    def test_get_spatial_during_x_min_load_y_working_set(self):
        num_docs = self.helper.input.param("num-docs")
        duration = self.helper.input.param("load-time")
        self.log.info("description : this test will continuously insert data "
                      "and get the spatial view results for {0} minutes"
                      .format(duration))
        design_name = "dev_test_insert_and_get_spatial_{0}_mins"\
            .format(duration)
        self._query_x_mins_during_loading(num_docs, duration, design_name)

    def _query_x_mins_during_loading(self, num_docs, duration, design_name):
        self.helper.create_index_fun(design_name)
        load_thread = InsertDataTillStopped(self.helper, num_docs)
        load_thread.start()
        self._get_results_for_x_minutes(design_name, duration)
        load_thread.stop_insertion()
        load_thread.join()
        self.helper.query_index_for_verification(design_name,
                                                 load_thread.inserted())

    def test_get_spatial_during_x_min_load_y_working_set_multiple_design_docs(
            self):
        num_docs = self.helper.input.param("num-docs")
        num_design_docs = self.helper.input.param("num-design-docs")
        duration = self.helper.input.param("load-time")
        self.log.info("description : will create {0} docs per design doc and "
                      "{1} design docs that will be queried while the data "
                      "is loaded for {2} minutes".format(
                          num_docs, num_design_docs, duration))
        name = "dev_test_spatial_test_{0}_docs_{1}_design_docs_{2}_mins_load"\
            .format(num_docs, num_design_docs, duration)
        view_test_threads = []
        for i in range(0, num_design_docs):
            design_name = "{0}-{1}".format(name, i)
            thread_result = []
            t = Thread(
                target=SpatialViewTests._test_multiple_design_docs_thread_wrapper,
                name="Insert documents and query multiple design docs "
                     "in parallel",
                args=(self, num_docs, duration, design_name, thread_result))
            t.start()
            view_test_threads.append((t, thread_result))
        for (t, failures) in view_test_threads:
            t.join()
        for (t, failures) in view_test_threads:
            if len(failures) > 0:
                self.fail("view thread failed : {0}".format(failures[0]))

    def _test_multiple_design_docs_thread_wrapper(self, num_docs, duration,
                                                  design_name, failures):
        try:
            self._query_x_mins_during_loading(num_docs, duration, design_name)
        except Exception as ex:
            failures.append(ex)

    def test_spatial_view_on_x_docs_y_design_docs(self):
        num_docs = self.helper.input.param("num-docs")
        num_design_docs = self.helper.input.param("num-design-docs")
        self.log.info("description : will create {0} docs per design doc and "
                      "{1} design docs that will be queried".format(
                          num_docs, num_design_docs))
        name = "dev_test_spatial_test_{0}_docs_y_design_docs"\
            .format(num_docs, num_design_docs)
        design_names = ["{0}-{1}".format(name, i)
                        for i in range(0, num_design_docs)]
        view_test_threads = []
        for design_name in design_names:
            thread_result = []
            t = Thread(
                target=SpatialViewTests._test_spatial_view_thread_wrapper,
                name="Insert documents and query in parallel",
                args=(self, num_docs, design_name, thread_result))
            t.start()
            view_test_threads.append((t, thread_result))
        for (t, failures) in view_test_threads:
            t.join()
        for (t, failures) in view_test_threads:
            if len(failures) > 0:
                self.fail("view thread failed : {0}".format(failures[0]))

    def _test_spatial_view_thread_wrapper(self, num_docs, design_name,
                                          failures):
        try:
            self._insert_x_docs_and_query(num_docs, design_name)
        except Exception as ex:
            failures.append(ex)

    # Create the index and insert documents, including verification that
    # the index contains them. Returns the keys of the inserted documents.
    def _setup_index(self, design_name, num_docs):
        self.helper.create_index_fun(design_name)
        inserted_keys = self.helper.insert_docs(num_docs)
        self.helper.query_index_for_verification(design_name, inserted_keys)
        return inserted_keys

    # Return the keys of all docs whose value contains a key called
    # "updated"
    def _get_updated_docs_keys(self, results):
        keys = []
        if results:
            rows = results["rows"]
            for row in rows:
                if "updated" in row["value"]:
                    keys.append(row["id"])
            self.log.info("found {0} updated documents".format(len(keys)))
        return keys

    def _get_results_for_x_minutes(self, design_name, duration, delay=5):
        random.seed(0)
        start = time.time()
        while (time.time() - start) < duration * 60:
            limit = random.randint(1, 1000)
            self.log.info("{0} seconds have passed ....".format(
                (time.time() - start)))
            results = self.helper.get_results(design_name, limit)
            keys = self.helper.get_keys(results)
            self.log.info("spatial view returned {0} rows".format(len(keys)))
            time.sleep(delay)

    def _insert_x_docs_and_query(self, num_docs, design_name):
        inserted_keys = self._setup_index(design_name, num_docs)
        self.assertEqual(len(inserted_keys), num_docs)

    def test_update_view_x_docs(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : create a spatial view on {0} documents "
                      "and update the view so that it returns only a subset"
                      .format(num_docs))
        design_name = "dev_test_update_view_{0}_docs".format(num_docs)

        # Create an index that emits all documents
        self.helper.create_index_fun(design_name)
        keys_b = self.helper.insert_docs(num_docs // 3, "bbb")
        keys_c = self.helper.insert_docs(num_docs - (num_docs // 3), "ccc")
        self.helper.query_index_for_verification(design_name,
                                                 keys_b + keys_c)

        # Update the index so that it matches only a subset of the documents
        spatial_fun = ('function (doc, meta) {'
                       'if(meta.id.indexOf("ccc") != -1) {'
                       'emit(doc.geometry, doc);}}')
        self.helper.create_index_fun(design_name, spatial_fun)
        self.helper.query_index_for_verification(design_name, keys_c)

    def test_compare_views_all_nodes_x_docs(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : creates a view on {0} documents, "
                      "queries all nodes (not only the master node) and "
                      "compares if the results are all the same"
                      .format(num_docs))
        design_name = "dev_test_compare_views_{0}_docs".format(num_docs)
        inserted_keys = self._setup_index(design_name, num_docs)

        nodes = self.helper.rest.get_nodes()
        params = {"connection_timeout": 60000, "full_set": True}
        # Query every single node and verify
        for n in nodes:
            n_rest = RestConnection({
                "ip": n.ip,
                "port": n.port,
                "username": self.helper.master.rest_username,
                "password": self.helper.master.rest_password})
            results = n_rest.spatial_results(self.helper.bucket, design_name,
                                             params, None)
            result_keys = self.helper.get_keys(results)
            self.helper.verify_result(inserted_keys, result_keys)
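
# `InsertDataTillStopped` is used by the load tests above and below but
# is not defined in this file. The following is a minimal sketch of what
# such a helper thread is assumed to look like (the real helper may
# differ): it keeps re-inserting the same batch of documents until told
# to stop, remembering the keys of the last completed batch. It accepts
# an optional prefix so both call styles in this file work.
class InsertDataTillStopped(Thread):
    def __init__(self, helper, num_docs, prefix=None):
        Thread.__init__(self)
        self._helper = helper
        self._num_docs = num_docs
        self._prefix = prefix if prefix is not None \
            else str(uuid.uuid4())[:7]
        self._stop = False
        self._inserted = []

    def run(self):
        # Keep overwriting the same keys (same prefix) so the document
        # count stays constant while the index is continuously updated
        while not self._stop:
            self._inserted = self._helper.insert_docs(
                self._num_docs, self._prefix, wait_for_persistence=False)

    def stop_insertion(self):
        self._stop = True

    # Keys of the most recently completed batch, read after join()
    def inserted(self):
        return self._inserted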
class SpatialViewTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_create_x_design_docs(self):
        num_design_docs = self.helper.input.param("num-design-docs", 5)
        self.log.info("description : create {0} spatial views without "
                      "running any spatial view query"
                      .format(num_design_docs))
        prefix = str(uuid.uuid4())
        fun = "function (doc) {emit(doc.geometry, doc);}"
        self._insert_x_design_docs(num_design_docs, prefix, fun)

    def test_update_x_design_docs(self):
        num_design_docs = self.helper.input.param("num-design-docs", 5)
        self.log.info("description : update {0} spatial views without "
                      "running any spatial view query"
                      .format(num_design_docs))
        prefix = str(uuid.uuid4())
        fun = "function (doc) {emit(doc.geometry, doc);}"
        self._insert_x_design_docs(num_design_docs, prefix, fun)
        # Update the design docs with a different function
        fun = "function (doc) {emit(doc.geometry, null);}"
        self._insert_x_design_docs(num_design_docs, prefix, fun)

    def _insert_x_design_docs(self, num_design_docs, prefix, fun):
        rest = self.helper.rest
        bucket = self.helper.bucket
        name = "dev_test_multiple_design_docs"
        for i in range(0, num_design_docs):
            design_name = "{0}-{1}-{2}".format(name, i, prefix)
            self.helper.create_index_fun(design_name, prefix, fun)

            # Verify that the function was really stored
            response, meta = rest.get_spatial(bucket, design_name)
            self.assertTrue(response)
            self.assertEqual(meta["id"], "_design/{0}".format(design_name))
            self.assertEqual(response["spatial"][design_name], fun)

    def test_insert_x_docs(self):
        num_docs = self.helper.input.param("num-docs", 100)
        self.log.info("description : create a spatial view on {0} documents"
                      .format(num_docs))
        design_name = "dev_test_insert_{0}_docs".format(num_docs)
        self._insert_x_docs_and_query(num_docs, design_name)

    # Verifies the full docs and not only the keys
    def test_insert_x_docs_full_verification(self):
        num_docs = self.helper.input.param("num-docs", 100)
        self.log.info("description : create a spatial view with {0} docs"
                      " and verify the full documents".format(num_docs))
        design_name = "dev_test_insert_{0}_docs_full_verification"\
            .format(num_docs)
        prefix = str(uuid.uuid4())[:7]
        self.helper.create_index_fun(design_name, prefix)
        inserted_docs = self.helper.insert_docs(num_docs, prefix,
                                                return_docs=True)
        self.helper.query_index_for_verification(design_name, inserted_docs,
                                                 full_docs=True)

    def test_insert_x_delete_y_docs(self):
        num_docs = self.helper.input.param("num-docs", 15000)
        num_deleted_docs = self.helper.input.param("num-deleted-docs", 10000)
        self.log.info("description : create spatial view with {0} docs "
                      "and delete {1} docs".format(num_docs,
                                                   num_deleted_docs))
        design_name = "dev_test_insert_{0}_delete_{1}_docs"\
            .format(num_docs, num_deleted_docs)
        prefix = str(uuid.uuid4())[:7]
        inserted_keys = self._setup_index(design_name, num_docs, prefix)

        # Delete documents and verify that the documents got deleted
        deleted_keys = self.helper.delete_docs(num_deleted_docs, prefix)
        num_expected = num_docs - len(deleted_keys)
        results = self.helper.get_results(design_name, 2 * num_docs,
                                          num_expected=num_expected)
        result_keys = self.helper.get_keys(results)
        self.assertEqual(len(result_keys), num_expected)
        self.helper.verify_result(inserted_keys, deleted_keys + result_keys)

    def test_insert_x_update_y_docs(self):
        num_docs = self.helper.input.param("num-docs", 15000)
        num_updated_docs = self.helper.input.param("num-updated-docs", 100)
        self.log.info("description : create spatial view with {0} docs "
                      "and update {1} docs".format(num_docs,
                                                   num_updated_docs))
        design_name = "dev_test_insert_{0}_update_{1}_docs"\
            .format(num_docs, num_updated_docs)
        prefix = str(uuid.uuid4())[:7]
        self._setup_index(design_name, num_docs, prefix)

        # Update documents and verify that the documents got updated
        updated_keys = self.helper.insert_docs(num_updated_docs, prefix,
                                               dict(updated=True))
        results = self.helper.get_results(design_name, 2 * num_docs)
        result_updated_keys = self._get_updated_docs_keys(results)
        self.assertEqual(len(updated_keys), len(result_updated_keys))
        self.helper.verify_result(updated_keys, result_updated_keys)

    def test_get_spatial_during_x_min_load_y_working_set(self):
        num_docs = self.helper.input.param("num-docs", 10000)
        duration = self.helper.input.param("load-time", 1)
        self.log.info("description : this test will continuously insert data "
                      "and get the spatial view results for {0} minutes"
                      .format(duration))
        design_name = "dev_test_insert_and_get_spatial_{0}_mins"\
            .format(duration)
        prefix = str(uuid.uuid4())[:7]
        self._query_x_mins_during_loading(num_docs, duration, design_name,
                                          prefix)

    def _query_x_mins_during_loading(self, num_docs, duration, design_name,
                                     prefix):
        self.helper.create_index_fun(design_name, prefix)
        load_thread = InsertDataTillStopped(self.helper, num_docs, prefix)
        load_thread.start()
        self._get_results_for_x_minutes(design_name, duration)
        load_thread.stop_insertion()
        load_thread.join()
        self.helper.query_index_for_verification(design_name,
                                                 load_thread.inserted())

    def test_get_spatial_during_x_min_load_y_working_set_multiple_design_docs(
            self):
        num_docs = self.helper.input.param("num-docs", 10000)
        num_design_docs = self.helper.input.param("num-design-docs", 10)
        duration = self.helper.input.param("load-time", 1)
        self.log.info("description : will create {0} docs per design doc and "
                      "{1} design docs that will be queried while the data "
                      "is loaded for {2} minutes"
                      .format(num_docs, num_design_docs, duration))
        name = "dev_test_spatial_test_{0}_docs_{1}_design_docs_{2}_mins_load"\
            .format(num_docs, num_design_docs, duration)
        view_test_threads = []
        for i in range(0, num_design_docs):
            prefix = str(uuid.uuid4())[:7]
            design_name = "{0}-{1}-{2}".format(name, i, prefix)
            thread_result = []
            t = Thread(
                target=SpatialViewTests._test_multiple_design_docs_thread_wrapper,
                name="Insert documents and query multiple design docs "
                     "in parallel",
                args=(self, num_docs, duration, design_name, prefix,
                      thread_result))
            t.start()
            view_test_threads.append((t, thread_result))
        for (t, failures) in view_test_threads:
            t.join()
        for (t, failures) in view_test_threads:
            if len(failures) > 0:
                self.fail("view thread failed : {0}".format(failures[0]))

    def _test_multiple_design_docs_thread_wrapper(self, num_docs, duration,
                                                  design_name, prefix,
                                                  failures):
        try:
            self._query_x_mins_during_loading(num_docs, duration,
                                              design_name, prefix)
        except Exception as ex:
            failures.append(ex)

    def test_spatial_view_on_x_docs_y_design_docs(self):
        num_docs = self.helper.input.param("num-docs", 10000)
        num_design_docs = self.helper.input.param("num-design-docs", 21)
        self.log.info("description : will create {0} docs per design doc and "
                      "{1} design docs that will be queried".format(
                          num_docs, num_design_docs))
        name = "dev_test_spatial_test_{0}_docs_y_design_docs"\
            .format(num_docs, num_design_docs)
        prefix = str(uuid.uuid4())[:7]
        design_names = ["{0}-{1}-{2}".format(name, i, prefix)
                        for i in range(0, num_design_docs)]
        view_test_threads = []
        for design_name in design_names:
            thread_result = []
            t = Thread(
                target=SpatialViewTests._test_spatial_view_thread_wrapper,
                name="Insert documents and query in parallel",
                args=(self, num_docs, design_name, thread_result))
            t.start()
            view_test_threads.append((t, thread_result))
        for (t, failures) in view_test_threads:
            t.join()
        for (t, failures) in view_test_threads:
            if len(failures) > 0:
                self.fail("view thread failed : {0}".format(failures[0]))

    def _test_spatial_view_thread_wrapper(self, num_docs, design_name,
                                          failures):
        try:
            self._insert_x_docs_and_query(num_docs, design_name)
        except Exception as ex:
            failures.append(ex)

    # Create the index and insert documents, including verification that
    # the index contains them. Returns the keys of the inserted documents.
    def _setup_index(self, design_name, num_docs, prefix):
        self.helper.create_index_fun(design_name, prefix)
        inserted_keys = self.helper.insert_docs(num_docs, prefix)
        self.helper.query_index_for_verification(design_name, inserted_keys)
        return inserted_keys

    # Return the keys of all docs whose value contains a key called
    # "updated"
    def _get_updated_docs_keys(self, results):
        keys = []
        if results:
            rows = results["rows"]
            for row in rows:
                if "updated" in row["value"]:
                    keys.append(row["id"])
            self.log.info("found {0} updated documents".format(len(keys)))
        return keys

    def _get_results_for_x_minutes(self, design_name, duration, delay=5):
        random.seed(0)
        start = time.time()
        while (time.time() - start) < duration * 60:
            limit = random.randint(1, 1000)
            self.log.info("{0} seconds have passed ....".format(
                (time.time() - start)))
            results = self.helper.get_results(design_name, limit)
            keys = self.helper.get_keys(results)
            self.log.info("spatial view returned {0} rows".format(len(keys)))
            time.sleep(delay)

    def _insert_x_docs_and_query(self, num_docs, design_name):
        prefix = str(uuid.uuid4())[:7]
        inserted_keys = self._setup_index(design_name, num_docs, prefix)
        self.assertEqual(len(inserted_keys), num_docs)

    def test_x_docs_failover(self):
        num_docs = self.helper.input.param("num-docs", 10000)
        self.log.info("description : test failover with {0} documents"
                      .format(num_docs))
        design_name = "dev_test_failover_{0}".format(num_docs)
        prefix = str(uuid.uuid4())[:7]
        inserted_keys = self._setup_index(design_name, num_docs, prefix)
        fh = FailoverHelper(self.helper.servers, self)
        failover_nodes = []
        try:
            failover_nodes = fh.failover(1)
            self.helper.query_index_for_verification(
                design_name, inserted_keys, wait_for_persistence=False)
            # The test cleanup expects all nodes running, hence spin the
            # full cluster up again
            fh.undo_failover(failover_nodes)
        finally:
            fh._start_servers(failover_nodes)

    def test_update_view_x_docs(self):
        num_docs = self.helper.input.param("num-docs", 100)
        self.log.info("description : create a spatial view on {0} documents "
                      "and update the view so that it returns only a subset"
                      .format(num_docs))
        design_name = "dev_test_update_view_{0}_docs".format(num_docs)
        prefix = str(uuid.uuid4())[:7]

        # Create an index that emits all documents
        self.helper.create_index_fun(design_name, prefix)
        keys_b = self.helper.insert_docs(num_docs // 3, prefix + "bbb")
        keys_c = self.helper.insert_docs(num_docs - (num_docs // 3),
                                         prefix + "ccc")
        self.helper.query_index_for_verification(design_name,
                                                 keys_b + keys_c)

        # Update the index so that it matches only a subset of the documents
        self.helper.create_index_fun(design_name, prefix + "ccc")
        self.helper.query_index_for_verification(design_name, keys_c)

    def test_compare_views_all_nodes_x_docs(self):
        num_docs = self.helper.input.param("num-docs", 100)
        self.log.info("description : creates a view on {0} documents, "
                      "queries all nodes (not only the master node) and "
                      "compares if the results are all the same"
                      .format(num_docs))
        design_name = "dev_test_compare_views_{0}_docs".format(num_docs)
        prefix = str(uuid.uuid4())[:7]
        inserted_keys = self._setup_index(design_name, num_docs, prefix)

        nodes = self.helper.rest.get_nodes()
        params = {"connection_timeout": 60000, "full_set": True}
        # Query every single node and verify
        for n in nodes:
            n_rest = RestConnection({
                "ip": n.ip,
                "port": n.port,
                "username": self.helper.master.rest_username,
                "password": self.helper.master.rest_password})
            results = n_rest.spatial_results(self.helper.bucket, design_name,
                                             params, None)
            result_keys = self.helper.get_keys(results)
            self.helper.verify_result(inserted_keys, result_keys)
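
# The view functions above emit `doc.geometry`, so the documents that
# SpatialHelper.insert_docs creates are assumed to carry a GeoJSON-style
# geometry, roughly like this (illustrative only):
#
#   {"name": "<prefix>-0",
#    "geometry": {"type": "Point", "coordinates": [11.05, 49.46]}}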
class SpatialCompactionTests(BaseTestCase):
    def setUp(self):
        super(SpatialCompactionTests, self).setUp()
        self.start_cluster = self.input.param('start-cluster',
                                              len(self.servers))
        self.servers_in = self.input.param('servers_in', 0)
        self.servers_out = self.input.param('servers_out', 0)
        self.bucket_name = "default"
        if self.standard_buckets:
            self.bucket_name = "standard_bucket0"
        if self.sasl_buckets:
            self.bucket_name = "bucket0"
        self.helper = SpatialHelper(self, self.bucket_name)
        try:
            if self.start_cluster > 1:
                rebalance = self.cluster.async_rebalance(
                    self.servers[:1], self.servers[1:self.start_cluster], [])
                rebalance.result()
        except Exception:
            # Clean up the half-initialized cluster, then let the failure
            # propagate so the setup error is reported
            super(SpatialCompactionTests, self).tearDown()
            raise

    def tearDown(self):
        super(SpatialCompactionTests, self).tearDown()

    def test_spatial_compaction(self):
        self.log.info(
            "description : test manual compaction for spatial indexes")
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_compaction"
        self.helper.create_index_fun(design_name, prefix)

        # Insert (resp. update, as they have the same prefix) and query
        # the spatial index several times so that the compaction makes sense
        for i in range(0, 8):
            self.helper.insert_docs(2000, prefix)
            self.helper.get_results(design_name)

        # Get the index size prior to compaction
        status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]

        if self.servers_in or self.servers_out:
            servs_in = servs_out = []
            if self.servers_in:
                servs_in = self.servers[
                    self.start_cluster:self.servers_in + 1]
            if self.servers_out:
                servs_out = self.servers[-self.servers_out:]
            rebalance = self.cluster.async_rebalance(self.servers, servs_in,
                                                     servs_out)

        # Do the compaction
        self.helper.compact(design_name)

        # Check that the index got smaller
        status, info = self.helper.info(design_name)
        self.assertTrue(info["spatial_index"]["disk_size"] < disk_size,
                        "The file size ({0}) isn't smaller than the "
                        "pre-compaction size ({1})."
                        .format(info["spatial_index"]["disk_size"],
                                disk_size))
        if self.servers_in or self.servers_out:
            rebalance.result()
class SpatialRebalanceTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        # Set up, but don't rebalance the cluster
        self.helper.setup_cluster(False)

    def tearDown(self):
        self.log.info("tear down test")
        self.helper.cleanup_cluster()

    def test_insert_x_delete_y_docs_create_cluster(self):
        num_docs = self.helper.input.param("num-docs", 100000)
        num_deleted_docs = self.helper.input.param("num-deleted-docs", 10000)
        msg = "description : have a single node, insert {0} docs, "\
              "delete {1} docs while creating a cluster and query it"
        self.log.info(msg.format(num_docs, num_deleted_docs))
        design_name = "dev_test_delete_10k_docs_create_cluster"
        prefix = str(uuid.uuid4())[:7]

        # Make sure we are fully de-clustered
        ClusterOperationHelper.remove_and_rebalance(self.helper.servers)

        self.helper.create_index_fun(design_name, prefix)
        inserted_keys = self.helper.insert_docs(num_docs, prefix)

        # Start creating the cluster and rebalancing it without waiting
        # until it's finished
        ClusterOperationHelper.add_and_rebalance(self.helper.servers, False)

        deleted_keys = self.helper.delete_docs(num_deleted_docs, prefix)
        self._wait_for_rebalance()

        # Verify that the docs got deleted and are no longer part of the
        # spatial view
        results = self.helper.get_results(design_name, num_docs)
        result_keys = self.helper.get_keys(results)
        self.assertEqual(len(result_keys), num_docs - len(deleted_keys))
        self.helper.verify_result(inserted_keys, deleted_keys + result_keys)

    def test_insert_x_delete_y_docs_destroy_cluster(self):
        num_docs = self.helper.input.param("num-docs", 100000)
        num_deleted_docs = self.helper.input.param("num-deleted-docs", 10000)
        msg = "description : have a cluster, insert {0} docs, delete "\
              "{1} docs while destroying the cluster into a single node "\
              "and query it"
        self.log.info(msg.format(num_docs, num_deleted_docs))
        design_name = "dev_test_delete_{0}_docs_destroy_cluster".format(
            num_deleted_docs)
        prefix = str(uuid.uuid4())[:7]

        # Make sure we are fully clustered
        ClusterOperationHelper.add_and_rebalance(self.helper.servers)

        self.helper.create_index_fun(design_name, prefix)
        inserted_keys = self.helper.insert_docs(num_docs, prefix)

        # Start destroying the cluster and rebalancing it without waiting
        # until it's finished
        ClusterOperationHelper.remove_and_rebalance(self.helper.servers,
                                                    False)

        deleted_keys = self.helper.delete_docs(num_deleted_docs, prefix)
        self._wait_for_rebalance()

        # Verify that the docs got deleted and are no longer part of the
        # spatial view
        results = self.helper.get_results(design_name, num_docs)
        result_keys = self.helper.get_keys(results)
        self.assertEqual(len(result_keys), num_docs - len(deleted_keys))
        self.helper.verify_result(inserted_keys, deleted_keys + result_keys)

    def test_insert_x_docs_during_rebalance(self):
        num_docs = self.helper.input.param("num-docs", 100000)
        msg = "description : have a single node, insert {0} docs, "\
              "query it, add another node, start rebalancing, insert {0} "\
              "docs, finish rebalancing, keep on adding nodes..."
        self.log.info(msg.format(num_docs))
        design_name = "dev_test_insert_{0}_docs_during_rebalance".format(
            num_docs)
        prefix = str(uuid.uuid4())[:7]

        # Make sure we are fully de-clustered
        ClusterOperationHelper.remove_and_rebalance(self.helper.servers)

        self.helper.create_index_fun(design_name)
        inserted_keys = self.helper.insert_docs(num_docs, prefix)

        # Add all servers to the master server one by one and start
        # rebalancing
        for server in self.helper.servers[1:]:
            ClusterOperationHelper.add_and_rebalance(
                [self.helper.master, server], False)
            # Docs with the same prefix are overwritten and not newly created
            prefix = str(uuid.uuid4())[:7]
            inserted_keys.extend(self.helper.insert_docs(
                num_docs, prefix, wait_for_persistence=False))
            self._wait_for_rebalance()

        # Make sure data is persisted
        self.helper.wait_for_persistence()

        # Verify that all documents got inserted
        self.helper.query_index_for_verification(design_name, inserted_keys)

    # Block until the rebalance is done
    def _wait_for_rebalance(self):
        self.assertTrue(self.helper.rest.monitorRebalance(),
                        "rebalance operation failed after adding nodes")
        self.log.info("rebalance finished")
class SpatialQueryErrorsTests(BaseTestCase):
    def setUp(self):
        try:
            if 'first_case' not in TestInputSingleton.input.test_params:
                TestInputSingleton.input.test_params['default_bucket'] = False
                TestInputSingleton.input.test_params['skip_cleanup'] = True
                TestInputSingleton.input.test_params['skip_buckets_handle'] = True
            self.default_bucket_name = 'default'
            super(SpatialQueryErrorsTests, self).setUp()
            if 'first_case' in TestInputSingleton.input.test_params:
                self.cluster.rebalance(self.servers[:], self.servers[1:], [])
            # We use only one bucket in this test suite
            self.rest = RestConnection(self.master)
            self.bucket = self.rest.get_bucket(
                Bucket(name=self.default_bucket_name))
            # num_docs must be a multiple of the number of vbuckets
            self.num_docs = self.input.param("num_docs", 2000)
            # `testname` is used for the design document name as well as
            # the spatial function name
            self.testname = 'query-errors'
            self.helper = SpatialHelper(self, "default")
            if 'first_case' in TestInputSingleton.input.test_params:
                self.create_ddoc()
                self.helper.insert_docs(self.num_docs, self.testname)
        except Exception as ex:
            self.input.test_params["stop-on-failure"] = True
            self.log.error("SETUP FAILED. ALL TESTS WILL BE SKIPPED")
            self.fail(ex)

    def tearDown(self):
        # Clean up is only performed on the last run
        if 'last_case' in TestInputSingleton.input.test_params:
            TestInputSingleton.input.test_params['skip_cleanup'] = False
            TestInputSingleton.input.test_params['skip_buckets_handle'] = False
            super(SpatialQueryErrorsTests, self).tearDown()
        else:
            self.cluster.shutdown(force=True)
            self._log_finish(self)

    def test_query_errors(self):
        all_params = ['skip', 'limit', 'stale', 'bbox', 'start_range',
                      'end_range']
        query_params = {}
        for key in self.input.test_params:
            if key in all_params:
                query_params[key] = str(self.input.test_params[key])
        try:
            self.spatial_query(query_params)
        except QueryViewException as ex:
            self.assertEqual(self.input.test_params['error'],
                             json.loads(ex.reason)['error'])
        else:
            self.fail("Query did not fail, but should have. "
                      "Query parameters were: {0}".format(query_params))

    def create_ddoc(self):
        view_fn = '''function (doc) {
            if (doc.age !== undefined || doc.height !== undefined ||
                    doc.bloom !== undefined ||
                    doc.shed_leaves !== undefined) {
                emit([doc.age, doc.height, [doc.bloom, doc.shed_leaves]],
                     doc.name);
            }}'''
        self.helper.create_index_fun(self.testname, view_fn)

    def spatial_query(self, params=None, ddoc='test'):
        # Avoid a mutable default argument
        if params is None:
            params = {}
        bucket = self.default_bucket_name
        if 'stale' not in params:
            params['stale'] = 'false'
        return self.rest.query_view(self.testname, self.testname, bucket,
                                    params, type="spatial")
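
# Example invocation (an assumed testrunner-style command line; the
# exact entry point, .ini layout, and parameter names depend on your
# checkout and environment):
#
#   ./testrunner -i resources/dev-4-nodes.ini \
#       -t spatialviewtests.SpatialViewTests.test_insert_x_docs,num-docs=1000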