class print_all_queries(object):
    """Context manager that prints the SQL of every query run inside it."""

    def __init__(self, conn=None):
        # Fall back to Django's default connection when none is supplied.
        self.conn = connection if conn is None else conn

    def __enter__(self):
        self.capturer = CaptureQueriesContext(self.conn)
        self.capturer.__enter__()
        return self

    def __exit__(self, a, b, c):
        # Close the capture first, then dump everything it recorded.
        self.capturer.__exit__(a, b, c)
        for captured in self.capturer.captured_queries:
            print(captured['sql'])
class CaptureLastQuery(object):
    """Context manager exposing the SQL of the most recent captured query."""

    def __init__(self, conn=None):
        # Default to Django's default connection unless one is given.
        self.conn = connection if conn is None else conn

    def __enter__(self):
        self.capturer = CaptureQueriesContext(self.conn)
        self.capturer.__enter__()
        return self

    def __exit__(self, a, b, c):
        self.capturer.__exit__(a, b, c)

    @property
    def query(self):
        # NOTE: raises IndexError when no query ran inside the block.
        return self.capturer.captured_queries[-1]['sql']
def setUp(self):
    """Reset the index, build default data, and start capturing queries."""
    sphinx_conn = connections[settings.SPHINX_DATABASE_NAME]
    # Servers older than 2.2.7 presumably cannot compare string attributes;
    # tests consult this flag. TODO confirm the exact version cutoff.
    self.no_string_compare = sphinx_conn.mysql_version < (2, 2, 7)
    self.truncate_model()
    self.now = datetime.now().replace(microsecond=0)
    self.defaults = self.get_model_defaults()
    # Open a query capture here; tearDown is expected to close it.
    self.spx_queries = CaptureQueriesContext(sphinx_conn)
    self.spx_queries.__enter__()
    self.obj = self.model.objects.create(**self.defaults)
def test_failure(self):
    """An exception raised inside the capture context must propagate."""
    with self.assertRaises(TypeError), CaptureQueriesContext(connection):
        raise TypeError
def test_it(self):
    """Saving an Item inside a capture context works end to end."""
    from django.db import connections
    from django.test.utils import CaptureQueriesContext

    with CaptureQueriesContext(connections["default"]):
        Item(name="foo").save()
#!/usr/bin/env python
"""Print the SQL of every query issued while fetching all MyModel rows."""
from django.db import connection
from django.test.utils import CaptureQueriesContext

from .models import MyModel

with CaptureQueriesContext(connection) as queries:
    # list(...) forces evaluation: .all() alone builds a lazy queryset and
    # would issue no SQL, leaving nothing to capture.
    list(MyModel.objects.all())

# Read from the capture context, not connection.queries — the latter is
# only populated when settings.DEBUG is True.
print("\n".join(q['sql'] for q in queries.captured_queries))
def __enter__(self):
    """Start a query capture on the stored connection and return self."""
    capturer = CaptureQueriesContext(self.conn)
    capturer.__enter__()
    self.capturer = capturer
    return self
def test_lbheartbeat_makes_no_db_queries(client):
    """The load-balancer heartbeat endpoint must not touch the database."""
    with CaptureQueriesContext(connection) as captured:
        response = client.get('/__lbheartbeat__')
    assert response.status_code == 200
    assert len(captured) == 0
def test_count_join_optimization(self):
    """count() on a plain M2M avoids a JOIN; an empty relation counts 0."""
    with CaptureQueriesContext(connection) as captured:
        self.article.publications.count()
    self.assertNotIn('JOIN', captured[0]['sql'])
    self.assertEqual(self.nullable_target_article.publications.count(), 0)
class SphinxModelTestCaseBase(TransactionTestCase):
    """Shared fixture for tests that run against a Sphinx RT index."""

    # Monotonically increasing id handed out by newid(); shared class-wide.
    _id = 0
    model = models.TestModel

    def _fixture_teardown(self):
        # Prevent SHOW FULL TABLES call
        pass

    def truncate_model(self):
        """Empty the RT index backing the model under test."""
        cursor = connections[settings.SPHINX_DATABASE_NAME].cursor()
        cursor.execute("TRUNCATE RTINDEX %s" % self.model._meta.db_table)
        cursor.close()

    def setUp(self):
        """Reset the index, create a default object, start a query capture."""
        sphinx = connections[settings.SPHINX_DATABASE_NAME]
        # Servers below 2.2.7 presumably cannot compare string attributes;
        # tests consult this flag. TODO confirm the exact version cutoff.
        self.no_string_compare = sphinx.mysql_version < (2, 2, 7)
        self.truncate_model()
        self.now = datetime.now().replace(microsecond=0)
        self.defaults = self.get_model_defaults()
        # tearDown closes this capture and prints what was recorded.
        self.spx_queries = CaptureQueriesContext(sphinx)
        self.spx_queries.__enter__()
        self.obj = self.model.objects.create(**self.defaults)

    def get_model_defaults(self):
        """Return a fresh kwargs dict covering every attribute type."""
        return {
            'id': self.newid(),
            'sphinx_field': "hello sphinx field",
            'attr_uint': 100500,
            'attr_bool': True,
            'attr_bigint': 2 ** 33,
            'attr_float': 1.2345,
            'attr_multi': [1, 2, 3],
            'attr_multi_64': [2 ** 33, 2 ** 34],
            'attr_timestamp': self.now,
            'attr_string': "hello sphinx attr",
            "attr_json": {"json": "test"},
        }

    @classmethod
    def newid(cls):
        """Hand out the next unique primary-key value."""
        cls._id += 1
        return cls._id

    def reload_object(self, obj):
        """Fetch a fresh copy of *obj* from the database by primary key."""
        return obj._meta.model.objects.get(pk=obj.pk)

    def assertObjectEqualsToDefaults(self, other, defaults=None):
        """Assert that *other* carries the (given or stored) default values.

        The full-text 'sphinx_field' is skipped — it is not compared.
        """
        defaults = defaults or self.defaults
        checked = {name: getattr(other, name)
                   for name in defaults if name != 'sphinx_field'}
        for name in defaults:
            if name == 'sphinx_field':
                continue
            self.assertEqual(checked[name], defaults[name])

    def tearDown(self):
        # Close the capture opened in setUp, forwarding any active exception,
        # then dump every captured statement for debugging.
        self.spx_queries.__exit__(*sys.exc_info())
        for captured in self.spx_queries.captured_queries:
            print(captured['sql'])
def test_makes_no_db_queries(self, client):
    """Classifying a client must be served without any database query."""
    with CaptureQueriesContext(connection) as captured:
        response = client.get('/api/v1/classify_client/')
    assert response.status_code == 200
    assert len(captured) == 0
def test_lbheartbeat(dockerflow_middleware, rf, dockerflow_enabled):
    """The lbheartbeat middleware answers 200 for its endpoint."""
    request = rf.get("/__lbheartbeat__")
    # Queries are captured but not inspected here; the context is kept so
    # this test mirrors the behavior of the original version.
    with CaptureQueriesContext(connection):
        response = dockerflow_middleware.process_request(request)
    assert response.status_code == 200
def test_exists_join_optimization_disabled(self):
    """Without foreign-key support, exists() falls back to a JOIN query."""
    no_fk_support = mock.patch.object(
        connection.features, 'supports_foreign_keys', False)
    with no_fk_support:
        with CaptureQueriesContext(connection) as captured:
            self.article.publications.exists()
    self.assertIn('JOIN', captured[0]['sql'])
def test_exists_join_optimization(self):
    """exists() on a plain M2M avoids a JOIN; empty relation is False."""
    with CaptureQueriesContext(connection) as captured:
        self.article.publications.exists()
    self.assertNotIn('JOIN', captured[0]['sql'])
    self.assertIs(self.nullable_target_article.publications.exists(), False)
def test_within(self):
    """A primary-key lookup issues exactly one query mentioning that pk."""
    with CaptureQueriesContext(connection) as ctx:
        Person.objects.get(pk=self.person_pk)
    self.assertEqual(len(ctx), 1)
    self.assertIn(self.person_pk, ctx[0]['sql'])
def responsediff_website_crawl(self, url=None, client=None, covered=None,
                               diffs=None, created=None, selector=None):
    """
    Crawl the site starting at *url* and diff every page against fixtures.

    Recursively follows local ``href`` links found in each HTML response,
    recording per-page diffs and newly created fixtures. Fails when
    obsolete fixture files remain, so stale files must be removed for the
    test to succeed again.

    :param url: start URL; defaults to ``'/'``.
    :param client: test client to reuse; a fresh ``test.Client()`` otherwise.
    :param covered: accumulator of already-visited URLs, shared down the
        recursion.
    :param diffs: accumulator mapping of fixture diffs found so far.
    :param created: accumulator mapping of fixtures created on this run.
    :param selector: forwarded to ``make_diff``; skipped on the first page
        so the full layout is checked once.
    :return: the ``(covered, diffs, created)`` accumulators.
    """
    url = url or '/'
    client = client or test.Client()
    # Explicit None-checks (not plain `or`) so caller-supplied empty
    # containers are kept and mutated in place across recursive calls.
    covered = covered if covered is not None else []
    diffs = diffs if diffs is not None else {}
    created = created if created is not None else {}
    conn = connections['default']
    with CaptureQueriesContext(conn) as queries:
        response = client.get(url)
    self.process_response(response)
    # Record how many queries the page needed alongside the content diff.
    metadata = {'query_count': len(queries)}
    _diffs, _created = Response.for_test(self, url).make_diff(
        response, metadata=metadata,
        # Don't apply selector on first url, so we do the layout once
        selector=selector if covered else None,
    )
    covered.append(url)
    created.update(_created)
    diffs.update(_diffs)
    # Streaming responses expose no .content to scrape for links.
    if hasattr(response, 'streaming_content'):
        return covered, diffs, created
    # findall returns tuples: (full href, optional testserver prefix).
    results = re.findall('href="((http://testserver)?/[^"]*)',
                         response.content.decode('utf8'))
    for result in results:
        sub_url = re.sub('http://testserver', '', result[0])
        if sub_url in covered:
            continue
        if self.skip_url(sub_url):
            continue
        self.responsediff_website_crawl(
            sub_url,
            client=client,
            covered=covered,
            diffs=diffs,
            created=created,
            selector=selector,
        )
    return covered, diffs, created
def test_mysql_text_to_traditional(self):
    """On MySQL, explain(format='text') is translated to FORMAT=TRADITIONAL."""
    with CaptureQueriesContext(connection) as ctx:
        Tag.objects.filter(name='test').explain(format='text')
    self.assertEqual(len(ctx), 1)
    self.assertIn('FORMAT=TRADITIONAL', ctx[0]['sql'])