def run_db_tests(self, suite):
    """Set up the test databases, run ``suite`` against them, then tear down.

    Disposes of the SQLAlchemy session and engine before the databases are
    dropped so no stale connections outlive the teardown.
    """
    print("Running {0} tests with database".format(suite.countTestCases()))
    db_state = self.setup_databases()
    outcome = self.run_suite(suite)
    # Release SQL resources before teardown_databases drops the test DBs.
    from corehq.db import _engine, Session
    Session.remove()
    _engine.dispose()
    self.teardown_databases(db_state)
    return self.suite_result(suite, outcome)
def run_db_tests(self, suite):
    """Set up the test databases, run ``suite`` against them, then tear down.

    Disposes of the scoped session and all managed connections before the
    databases are dropped so no stale connections outlive the teardown.
    """
    print("Running {0} tests with database".format(suite.countTestCases()))
    db_state = self.setup_databases()
    outcome = self.run_suite(suite)
    # Release SQL resources before teardown_databases drops the test DBs.
    from corehq.db import Session, connection_manager
    Session.remove()
    connection_manager.dispose_all()
    self.teardown_databases(db_state)
    return self.suite_result(suite, outcome)
def _get_data(self, slugs=None):
    """Resolve the report query, optionally restricted to the columns in ``slugs``.

    Raises SqlReportException when keys are supplied without a group_by clause.
    """
    if self.keys is not None and not self.group_by:
        raise SqlReportException('Keys supplied without group_by.')
    context = self.query_context
    wanted = [col for col in self.columns if not slugs or col.slug in slugs]
    for col in wanted:
        context.append_column(col.view)
    session = Session()
    try:
        return context.resolve(session.connection(), self.filter_values)
    except:  # deliberately bare: roll back on ANY failure, then re-raise
        session.rollback()
        raise
def test_column_uniqueness_when_truncated(self):
    """A choice-list column whose generated sub-column names would collide
    after truncation must still build a usable table: rows can be saved and
    queried back."""
    spec = {
        "display_name": "practicing_lessons",
        "property_name": "long_column",
        "choices": [
            "duplicate_choice_1",
            "duplicate_choice_2",
        ],
        "select_style": "multiple",
        "column_id": "a_very_long_base_selection_column_name_with_limited_room",
        "type": "choice_list",
    }
    config = DataSourceConfiguration(
        domain='test',
        display_name='foo',
        referenced_doc_type='CommCareCase',
        table_id=uuid.uuid4().hex,
        configured_filter={},
        configured_indicators=[spec],
    )
    adapter = IndicatorSqlAdapter(config)
    adapter.rebuild_table()
    # Saving a doc proves the truncated column names did not collide.
    adapter.save({
        '_id': uuid.uuid4().hex,
        'domain': 'test',
        'doc_type': 'CommCareCase',
        'long_column': 'duplicate_choice_1',
    })
    # ...and the saved row should come back when queried.
    row_count = Session.query(adapter.get_table()).count()
    self.assertEqual(1, row_count)
def loadtest(request):
    """Render the HQ load-test dashboard.

    Pulls multi-mechanize results out of its datastore and charts per-script
    and total test counts over time, with deployment markers overlaid.

    The multimech results api is kinda all over the place.
    The docs are here: http://testutils.org/multi-mechanize/datastore.html
    """
    scripts = ["submit_form.py", "ota_restore.py"]

    tests = []
    # datetime info seems to be buried in GlobalConfig.results[0].run_id,
    # which makes ORM-level sorting problematic
    for gc in Session.query(GlobalConfig).all()[::-1]:
        gc.scripts = dict((uc.script, uc) for uc in gc.user_group_configs)
        if gc.results:
            for script, uc in gc.scripts.items():
                # BUGFIX: was `filter(...)`, which is a lazy iterator on
                # Python 3 and breaks the `len(...)` calls below; a list
                # comprehension behaves identically and supports len().
                uc.results = [
                    res for res in gc.results
                    if res.user_group_name == uc.user_group
                ]
            test = {
                "datetime": gc.results[0].run_id,
                "run_time": gc.run_time,
                "results": gc.results,
            }
            for script in scripts:
                test[script.split(".")[0]] = gc.scripts.get(script)
            tests.append(test)

    context = get_hqadmin_base_context(request)
    context.update({"tests": tests, "hide_filters": True})

    date_axis = Axis(label="Date", dateFormat="%m/%d/%Y")
    tests_axis = Axis(label="Number of Tests in 30s")
    chart = LineChart("HQ Load Test Performance", date_axis, tests_axis)
    submit_data = []
    ota_data = []
    total_data = []
    # Track the value/date extremes so the deployment markers span the chart.
    max_val = 0
    max_date = None
    min_date = None
    for test in tests:
        date = test["datetime"]
        total = len(test["results"])
        max_val = total if total > max_val else max_val
        max_date = date if not max_date or date > max_date else max_date
        min_date = date if not min_date or date < min_date else min_date
        submit_data.append({"x": date, "y": len(test["submit_form"].results)})
        ota_data.append({"x": date, "y": len(test["ota_restore"].results)})
        total_data.append({"x": date, "y": total})

    deployments = [row["key"][1] for row in HqDeploy.get_list(settings.SERVER_ENVIRONMENT, min_date, max_date)]
    # Draw each deployment as a vertical spike (0 -> max -> 0).
    deploy_data = [{"x": min_date, "y": 0}]
    for date in deployments:
        deploy_data.extend([{"x": date, "y": 0}, {"x": date, "y": max_val}, {"x": date, "y": 0}])
    deploy_data.append({"x": max_date, "y": 0})

    chart.add_dataset("Deployments", deploy_data)
    chart.add_dataset("Form Submission Count", submit_data)
    chart.add_dataset("OTA Restore Count", ota_data)
    chart.add_dataset("Total Count", total_data)

    context["charts"] = [chart]

    template = "hqadmin/loadtest.html"
    return render(request, template, context)
def _get_distinct_values(data_source_configuration, column_config, expansion_limit=10):
    """
    Return a tuple ``(values, too_many)``.

    ``values`` is a list of distinct values in the given ReportColumn, no
    longer than ``expansion_limit``.  ``too_many`` is True when the column
    holds more distinct values than the limit.

    :param data_source_configuration:
    :param column_config:
    :param expansion_limit:
    :return:
    """
    hit_limit = False
    distinct_values = []
    session = Session()
    try:
        connection = session.connection()
        table = get_indicator_table(data_source_configuration)
        if not table.exists(bind=connection):
            return [], False

        column = table.c[column_config.field]
        # Fetch one row beyond the limit so we can tell whether it was exceeded.
        query = sqlalchemy.select([column], limit=expansion_limit + 1).distinct()
        rows = connection.execute(query).fetchall()
        distinct_values = [row[0] for row in rows]
        if len(distinct_values) > expansion_limit:
            distinct_values = distinct_values[:expansion_limit]
            hit_limit = True
    except:  # deliberately bare: roll back on ANY failure, then re-raise
        session.rollback()
        raise
    finally:
        session.close()
    return distinct_values, hit_limit
def _get_all_tables():
    """Return the indicator tables, for every DataSourceConfiguration, that
    actually exist in the database."""
    session = Session()
    try:
        connection = session.connection()
        candidates = [
            sql.get_indicator_table(config)
            for config in userreports_models.DataSourceConfiguration.all()
        ]
        return [table for table in candidates if table.exists(bind=connection)]
    except:  # deliberately bare: roll back on ANY failure, then re-raise
        session.rollback()
        raise
    finally:
        session.close()
def _alter_tables_helper(get_tables_func, column_checker_func, column_alter_func):
    """Apply ``column_alter_func`` (via an alembic Operations object) to every
    column, of every table from ``get_tables_func``, for which
    ``column_checker_func`` returns True."""
    _sync_couch()
    # Resolve the table list before opening the session (it may use its own).
    tables = get_tables_func()
    session = Session()
    try:
        connection = session.connection()
        migration_ctx = alembic.migration.MigrationContext.configure(connection)
        operations = alembic.operations.Operations(migration_ctx)
        for table in tables:
            logger.info("Checking table {}".format(table.name))
            for column in table.columns:
                if not column_checker_func(column):
                    logger.info("Skipping {}".format(column))
                    continue
                logger.info("Altering {}".format(column))
                column_alter_func(operations, table, column)
        session.commit()
    except:  # deliberately bare: roll back on ANY failure, then re-raise
        session.rollback()
        raise
    finally:
        session.close()
def _get_distinct_values(data_source_configuration, column_config, expansion_limit=DEFAULT_MAXIMUM_EXPANSION):
    """
    Return a tuple ``(values, too_many)``.

    ``values`` is a list of distinct values in the given ExpandedColumn, no
    longer than ``expansion_limit``.  ``too_many`` is True when the column
    holds more distinct values than the limit.

    Raises ColumnNotFoundError when the configured field is missing from the
    report source table.

    :param data_source_configuration:
    :param column_config:
    :param expansion_limit:
    :return:
    """
    from corehq.apps.userreports.sql.adapter import get_indicator_table
    hit_limit = False
    distinct_values = []
    session = Session()
    try:
        connection = session.connection()
        table = get_indicator_table(data_source_configuration)
        if not table.exists(bind=connection):
            return [], False

        if column_config.field not in table.c:
            raise ColumnNotFoundError(_(
                'The column "{}" does not exist in the report source! '
                'Please double check your report configuration.').format(column_config.field)
            )
        column = table.c[column_config.field]
        # Fetch one row beyond the limit so we can tell whether it was exceeded.
        query = sqlalchemy.select([column], limit=expansion_limit + 1).distinct()
        rows = connection.execute(query).fetchall()
        distinct_values = [row[0] for row in rows]
        if len(distinct_values) > expansion_limit:
            distinct_values = distinct_values[:expansion_limit]
            hit_limit = True
    except:  # deliberately bare: roll back on ANY failure, then re-raise
        session.rollback()
        raise
    finally:
        session.close()
    return distinct_values, hit_limit
def tearDownClass(cls):
    """Class-level cleanup: delete the test couch user and reset the SQL session."""
    cls.couch_user.delete()
    # Drop the scoped SQLAlchemy session so state does not leak into later test classes.
    Session.remove()
def tearDownClass(cls): cls._delete_everything() # todo: understand why this is necessary. the view call uses the session and the # signal doesn't fire to kill it. Session.remove()
def loadtest(request):
    """Render the HQ load-test dashboard.

    Pulls multi-mechanize results out of its datastore and charts per-script
    and total test counts over time, with deployment markers overlaid.

    The multimech results api is kinda all over the place.
    The docs are here: http://testutils.org/multi-mechanize/datastore.html
    """
    scripts = ['submit_form.py', 'ota_restore.py']

    tests = []
    # datetime info seems to be buried in GlobalConfig.results[0].run_id,
    # which makes ORM-level sorting problematic
    for gc in Session.query(GlobalConfig).all()[::-1]:
        gc.scripts = dict((uc.script, uc) for uc in gc.user_group_configs)
        if gc.results:
            for script, uc in gc.scripts.items():
                # BUGFIX: was `filter(...)`, which is a lazy iterator on
                # Python 3 and breaks the `len(...)` calls below; a list
                # comprehension behaves identically and supports len().
                uc.results = [
                    res for res in gc.results
                    if res.user_group_name == uc.user_group
                ]
            test = {
                'datetime': gc.results[0].run_id,
                'run_time': gc.run_time,
                'results': gc.results,
            }
            for script in scripts:
                test[script.split('.')[0]] = gc.scripts.get(script)
            tests.append(test)

    context = get_hqadmin_base_context(request)
    context.update({
        "tests": tests,
        "hide_filters": True,
    })

    date_axis = Axis(label="Date", dateFormat="%m/%d/%Y")
    tests_axis = Axis(label="Number of Tests in 30s")
    chart = LineChart("HQ Load Test Performance", date_axis, tests_axis)
    submit_data = []
    ota_data = []
    total_data = []
    # Track the value/date extremes so the deployment markers span the chart.
    max_val = 0
    max_date = None
    min_date = None
    for test in tests:
        date = test['datetime']
        total = len(test['results'])
        max_val = total if total > max_val else max_val
        max_date = date if not max_date or date > max_date else max_date
        min_date = date if not min_date or date < min_date else min_date
        submit_data.append({'x': date, 'y': len(test['submit_form'].results)})
        ota_data.append({'x': date, 'y': len(test['ota_restore'].results)})
        total_data.append({'x': date, 'y': total})

    deployments = [row['key'][1] for row in HqDeploy.get_list(settings.SERVER_ENVIRONMENT, min_date, max_date)]
    # Draw each deployment as a vertical spike (0 -> max -> 0).
    deploy_data = [{'x': min_date, 'y': 0}]
    for date in deployments:
        deploy_data.extend([{'x': date, 'y': 0}, {'x': date, 'y': max_val}, {'x': date, 'y': 0}])
    deploy_data.append({'x': max_date, 'y': 0})

    chart.add_dataset("Deployments", deploy_data)
    chart.add_dataset("Form Submission Count", submit_data)
    chart.add_dataset("OTA Restore Count", ota_data)
    chart.add_dataset("Total Count", total_data)

    context['charts'] = [chart]

    template = "hqadmin/loadtest.html"
    return render(request, template, context)
def tearDownClass(cls):
    """Class-level cleanup: reset the scoped SQLAlchemy session between test classes."""
    Session.remove()