def test_employee_dataset_all_queries(self):
    """Load the employee dataset and execute every query set against it.

    The cluster is first brought to a stable, fully rebalanced state so
    query verification is not racing an in-flight rebalance.
    """
    # settle the cluster before loading/querying
    ViewBaseTests._begin_rebalance_in(self)
    ViewBaseTests._end_rebalance(self)

    daily_doc_count = self.input.param('docs-per-day', 200)
    employee_ds = EmployeeDataSet(self._rconn(), daily_doc_count)
    employee_ds.add_all_query_sets()
    self._query_test_init(employee_ds)
def run_queries(self, tc, verify_results = False):
    """Run every query registered on this view.

    Parameters:
        tc: the TestCase instance providing the REST connection
            (``tc._rconn()``) and assertion/failure recording.
        verify_results: when True and a query declares an expected
            document count, poll the view (up to 40 attempts, 15 s
            apart) until the count matches, recording a failure on
            ``self.results`` if it never converges.
    """
    rest = tc._rconn()
    if not self.queries:
        self.log.info("No queries to run for this view")
        return
    view_name = self.name
    for query in self.queries:
        params = query.params
        params["debug"] = "true"
        expected_num_docs = query.expected_num_docs
        num_keys = -1
        if expected_num_docs is not None and verify_results:
            attempt = 0
            delay = 15
            results = None
            # first verify all doc_names get reported in the view
            # for windows, we need more than 20+ times
            while attempt < 40 and num_keys != expected_num_docs:
                self.log.info("Quering view {0} with params: {1}".format(view_name, params))
                results = ViewBaseTests._get_view_results(tc, rest, "default", view_name,
                                                          limit=None, extra_params=params)
                # check if this is a reduced query using _count
                # (fixed: original compared strings with 'is', which only
                # works by accident of CPython string interning)
                if self.reduce_fn == '_count':
                    num_keys = self._verify_count_reduce_helper(query, results)
                    self.log.info("{0}: attempt {1} reduced {2} group(s) to value {3} expected: {4}"
                                  .format(view_name, attempt, query.expected_num_groups,
                                          num_keys, expected_num_docs))
                else:
                    num_keys = len(ViewBaseTests._get_keys(self, results))
                    self.log.info("{0}: attempt {1} retrieved value {2} expected: {3}"
                                  .format(view_name, attempt, num_keys, expected_num_docs))
                attempt += 1
                # only wait when another retry is actually coming; the
                # original slept unconditionally, wasting a full delay
                # after the final (or successful) attempt
                if num_keys != expected_num_docs and attempt < 40:
                    time.sleep(delay)
            if num_keys != expected_num_docs:
                msg = "Query failed: {0} Documents Retrieved, expected {1}"
                val = msg.format(num_keys, expected_num_docs)
                try:
                    tc.assertEquals(num_keys, expected_num_docs, val)
                except Exception:
                    # record the failure on the suite result instead of
                    # aborting the whole query loop
                    self.log.error(val)
                    self.log.error("Last query result:\n\n%s\n\n" %
                                   (json.dumps(results, sort_keys=True, indent=4)))
                    self.results.addFailure(tc, sys.exc_info())
        else:
            # query without verification
            self.log.info("Quering view {0} with params: {1}".format(view_name, params))
            results = ViewBaseTests._get_view_results(tc, rest, "default", view_name,
                                                      limit=None, extra_params=params)
def test_employee_dataset_startkey_endkey_queries_rebalance_in(self):
    """Run startkey/endkey queries while a rebalance-in is in progress.

    Data is loaded and indexed first (without verification), then the
    views are re-queried during and after the rebalance to confirm the
    loaded data survives the topology change.
    """
    daily_doc_count = self.input.param('docs-per-day', 200)
    employee_ds = EmployeeDataSet(self._rconn(), daily_doc_count)

    employee_ds.add_startkey_endkey_queries()
    self._query_test_init(employee_ds, False)

    # rebalance_in and verify loaded data
    ViewBaseTests._begin_rebalance_in(self)
    self._query_all_views(employee_ds.views)
    ViewBaseTests._end_rebalance(self)
def test_all_datasets_all_queries(self):
    """Load and query every dataset concurrently, then verify after a
    rebalance-out.

    Each dataset gets its own thread running ``_query_test_init``;
    after all threads finish, the cluster is rebalanced out and every
    view is re-queried to verify the data survived.
    """
    # settle the cluster before starting
    ViewBaseTests._begin_rebalance_in(self)
    ViewBaseTests._end_rebalance(self)

    ds1 = EmployeeDataSet(self._rconn())
    ds2 = SimpleDataSet(self._rconn(), self.num_docs)
    data_sets = [ds1, ds2]

    # load and query all views and datasets
    test_threads = []
    for ds in data_sets:
        ds.add_all_query_sets()
        t = Thread(target=self._query_test_init,
                   name=ds.name,
                   args=(ds, False))
        test_threads.append(t)
        t.start()

    # fixed: plain loops instead of side-effect-only list comprehensions
    for t in test_threads:
        t.join()

    ViewBaseTests._begin_rebalance_out(self)
    ViewBaseTests._end_rebalance(self)

    # verify
    for ds in data_sets:
        self._query_all_views(ds.views)
def load(self, tc, view, verify_docs_loaded = True):
    """Load ``self.num_docs`` documents keyed with the view's prefix.

    Returns the list of generated document names.
    """
    return ViewBaseTests._load_docs(tc, self.num_docs, view.prefix,
                                    verify_docs_loaded)
def tearDown(self):
    """Delegate per-test cleanup to the shared view-test teardown."""
    ViewBaseTests.common_tearDown(self)
def setUp(self):
    """Delegate per-test setup to the shared view-test setup."""
    ViewBaseTests.common_setUp(self)
def _set_view_fn_from_attrs(self, rest):
    """Create (or update) this view on the server from its stored
    map/reduce function strings, returning whatever the helper returns."""
    created = ViewBaseTests._create_function(self, rest, self.bucket,
                                             self.name, self.fn_str,
                                             self.reduce_fn)
    return created