import os
from django import template

from djangobench.utils import run_benchmark


def benchmark():
    """Compile and render a one-variable template from a string."""
    ctx = template.Context({'stuff': 'something'})
    tpl = template.Template('{{ stuff }}')
    tpl.render(ctx)


run_benchmark(
    benchmark,
    syncdb=False,
    meta={
        'description': 'Render an extremely simple template (from string)',
    }
)
from django.core.urlresolvers import Resolver404, resolve

from djangobench.utils import run_benchmark


def benchmark():
    """Resolve a mix of matching and non-matching URLs, 100 passes.

    '/missing' is deliberately unresolvable; only the resolver's own
    miss is swallowed. The original used a bare ``except:``, which
    also hid real failures (ImportError in a view module, even
    KeyboardInterrupt) and could silently benchmark nothing.
    """
    for i in range(0, 100):
        for path in (
                '/user/repo/feature19',
                '/section0/feature0',
                '/en/feature10',
                '/ru/feature10',
                '/missing'):
            try:
                resolve(path)
            except Resolver404:
                pass


run_benchmark(
    benchmark,
    meta={
        'description': 'URL resolution with long-flat list of patterns. With USE_I18N=False.',
    }
)
from djangobench.utils import run_benchmark
from query_all_multifield.models import MultiField


def benchmark():
    """Exhaust an iterator() over every MultiField row."""
    list(MultiField.objects.iterator())


def setup():
    """Insert 3000 rows, each with ten populated char fields."""
    for row in range(0, 3000):
        values = dict(
            ('field%s' % col, 'foobar_%s_%s' % (row, col))
            for col in range(1, 11)
        )
        MultiField(**values).save()


run_benchmark(
    benchmark,
    setup=setup,
    meta={
        'description': 'A simple Model.objects.iterator() call for large number of objects and large number of fields.',
    })
class SimpleForm(forms.Form):
    """Minimal two-field form used as the rendering fixture."""
    one = forms.CharField()
    two = forms.CharField(widget=forms.Textarea)


# Pick the template syntax matching the form-rendering mode under test.
if settings.NEW_FORM_RENDERING:
    tpl = "{% form myform %}"
else:
    tpl = "{{ form.as_p }}"


def benchmark():
    """Compile the form template and render it once."""
    ctx = template.Context({"myform": SimpleForm()})
    template.Template(tpl).render(ctx)


run_benchmark(
    benchmark,
    syncdb=False,
    meta={"description": "Render a simple two-field form."}
)
from djangobench.utils import run_benchmark


def setup():
    """Late-import the model so Django settings are configured first."""
    global Book
    from query_filter.models import Book


def benchmark():
    """Evaluate one filtered queryset."""
    global Book
    list(Book.objects.filter(id=1))


run_benchmark(benchmark, setup=setup,
              meta={"description": "A simple Model.objects.filter() call."})
from django.db.models import Count
from djangobench.utils import run_benchmark
from query_annotate.models import Book


def benchmark():
    """Evaluate a values()/annotate() aggregation queryset."""
    qs = Book.objects.values('title').annotate(books_total=Count('id'))
    list(qs)


run_benchmark(benchmark, meta={
    'description': 'A simple Model.objects.annotate() call.',
})
from djangobench.utils import run_benchmark
from django.db.models import Manager


def setup():
    """Bind the Book model lazily, once settings exist."""
    global Book
    from query_exists.models import Book


def benchmark():
    """exists() for a present row (id=1) and an absent one (id=11)."""
    global Book
    Book.objects.filter(id=1).exists()
    Book.objects.filter(id=11).exists()


# QuerySet.exists() only arrived in Django 1.2.
if hasattr(Manager, 'exists'):
    run_benchmark(
        benchmark,
        setup=setup,
        meta={
            'description': 'A Model.objects.exists() call for both existing and non-existing objects.'
        })
else:
    print("SKIP: Django before 1.2 doesn't have QuerySet.exists()")
from djangobench.utils import run_benchmark
from raw_sql.models import OneField
from django.db import connection


def benchmark():
    """Run a raw SELECT through a fresh cursor and fetch every row."""
    cur = connection.cursor()
    cur.execute("select field1 from raw_sql_onefield")
    list(cur.fetchall())


def setup():
    """Insert ten OneField rows to query against."""
    for value in range(0, 10):
        OneField(field1=value).save()


run_benchmark(
    benchmark,
    setup=setup,
    meta={
        'description': 'A test for stressing direct SQL performance',
    }
)
import datetime
import time

from djangobench.utils import run_benchmark


def setup():
    """Late-import the model so Django settings are configured first."""
    global Book
    from qs_filter_chaining.models import Book


def benchmark():
    """Chain seven .filter() calls without ever evaluating the queryset."""
    global Book
    Book.objects.filter(title='Talent')\
        .filter(description__icontains='top performers')\
        .filter(author_name__startswith='Geoff')\
        .filter(date_created__lt=datetime.datetime(year=2010, month=1, day=1))\
        .filter(date_created__gte=datetime.date(year=2007, month=1, day=1))\
        .filter(date_published=datetime.datetime.now())\
        .filter(enabled=True)


run_benchmark(benchmark, setup=setup, meta={
    # Fixed typo in the reported description: 'mutliple' -> 'multiple'.
    'description': 'Filter (but do not execute) a queryset multiple times.',
})
'zh', # multiple preferences 'en-us,en', 'nl-be,nl' # some invalid 'INVALID', 'UNKNOWN', 'NONE', 'KLINGON', ) def benchmark(): for lang in LANGUAGES: request.META = {'HTTP_ACCEPT_LANGUAGE': lang} get_language_from_request(request) def setup(): global request rf = RequestFactory() request = rf.get('/') request.COOKIES = {} run_benchmark(benchmark, setup=setup, meta={ 'description': 'Raw speed of locale detecting', })
# XXX FIXME - has to spawn a new process to measure load time
from djangobench.utils import run_benchmark
from django import VERSION


def benchmark():
    """Force model and settings loading.

    Calling get_models() is enough: it makes sure settings get loaded
    as a side effect, which is the work being measured.
    """
    from django.db import models
    models.get_models()


if VERSION < (1, 9):
    run_benchmark(
        benchmark,
        migrate=False,
        trials=1,
        meta={
            'description': 'Startup time for a simple app.',
        }
    )
else:
    print("SKIP: Django 1.9 and later has changed app loading. This benchmark needs fixing anyway.")
'en-us', # one non-strict language (zh -> zh-?) 'zh', # multiple preferences 'en-us,en', 'nl-be,nl' # some invalid 'INVALID', 'UNKNOWN', 'NONE', 'KLINGON', ) def benchmark(): for lang in LANGUAGES: request.META = {'HTTP_ACCEPT_LANGUAGE': lang} get_language_from_request(request) def setup(): global request rf = RequestFactory() request = rf.get('/') request.COOKIES = {} run_benchmark( benchmark, setup=setup, meta={ 'description': 'Raw speed of locale detecting', } )
import datetime
import time

from djangobench.utils import run_benchmark


def setup():
    """Late-import the model so Django settings are configured first."""
    global Book
    from qs_filter_chaining.models import Book


def benchmark():
    """Chain seven .filter() calls without ever evaluating the queryset."""
    global Book
    Book.objects.filter(title='Talent')\
        .filter(description__icontains='top performers')\
        .filter(author_name__startswith='Geoff')\
        .filter(date_created__lt=datetime.datetime(year=2010, month=1, day=1))\
        .filter(date_created__gte=datetime.date(year=2007, month=1, day=1))\
        .filter(date_published=datetime.datetime.now())\
        .filter(enabled=True)


run_benchmark(
    benchmark,
    setup=setup,
    meta={
        # Fixed typo in the reported description: 'mutliple' -> 'multiple'.
        'description': 'Filter (but do not execute) a queryset multiple times.',
    }
)
from djangobench.utils import run_benchmark


def setup():
    """Import the model and create the single row benchmark() re-saves."""
    global Book
    from model_save_existing.models import Book
    Book.objects.create(id=1, title='Foo')


def benchmark():
    """Fetch the existing row once, then time 30 UPDATE-style saves.

    The original re-imported Book inside this function on every call,
    which shadowed the module-level name already bound by setup() and
    added import-machinery overhead to the timed section; the import
    was redundant and has been removed.
    """
    global Book
    b = Book.objects.get(id=1)
    for i in range(0, 30):
        b.save()


run_benchmark(
    benchmark,
    setup=setup,
    meta={
        'description': 'A simple Model.save() call, instance exists in DB.',
    },
)
try:
    from django.urls import resolve
except ImportError:
    # Django < 1.10 keeps resolve() in the old module.
    from django.core.urlresolvers import resolve

from djangobench.utils import run_benchmark


def benchmark():
    """Resolve three known URLs, repeated 100 times."""
    for _ in range(0, 100):
        resolve('/basic/')
        resolve('/fallthroughview/')
        resolve('/replace/1')


run_benchmark(
    benchmark,
    meta={
        'description': 'URL resolution.',
    }
)
'SCRIPT_CONTENT_URL': SCRIPT_CONTENT_URL, 'WEBSITE_DOMAIN': WEBSITE_DOMAIN, 'SHOW_ALT_HEADER': SHOW_ALT_HEADER } render_to_response('permalink_django_lte_13.html', context) def benchmark_django_gt_13(): context = { 'objects1': objects1, 'objects2': objects2, 'object1': object1, 'object2': object2, 'object3': object3, 'num1' : num1, 'num2' : num2, 'boolean1': boolean1, 'SCRIPT_CONTENT_URL': SCRIPT_CONTENT_URL, 'WEBSITE_DOMAIN': WEBSITE_DOMAIN, 'SHOW_ALT_HEADER': SHOW_ALT_HEADER } render_to_response('permalink.html', context) run_benchmark( benchmark_django_gt_13 if VERSION > (1, 3) else benchmark_django_lte_13, syncdb = False, meta = { 'description': ('Render a somewhat complex, fairly typical template ' '(including inheritance, reverse URL resolution, etc.).'), } )
import time

from djangobench.utils import run_benchmark


def setup():
    """Late-import the model once Django settings are ready."""
    global Book
    from model_creation.models import Book


def benchmark():
    """Time a single INSERT via Model.objects.create()."""
    global Book
    Book.objects.create(title='hi!')


run_benchmark(benchmark, setup=setup,
              meta={'description': 'Time of a Model.objects.create() call.'})
from djangobench.utils import run_benchmark


def setup():
    """Import the model and seed ten rows for the bulk delete."""
    global Book
    from query_delete.models import Book
    for n in range(0, 10):
        Book.objects.create(title='hi')


def benchmark():
    """Bulk-delete every Book via QuerySet.delete()."""
    global Book
    Book.objects.all().delete()


run_benchmark(benchmark, setup=setup, meta={
    'description': 'Delete an object via QuerySet.delete().',
})
from djangobench.utils import run_benchmark
from query_values_list.models import Book


def benchmark():
    """Evaluate a values_list('title') queryset."""
    titles = Book.objects.values_list('title')
    list(titles)


run_benchmark(
    benchmark,
    meta={'description': 'A simple Model.objects.values_list() call.'},
)
# Fixture values fed into the template context below; the object/list
# fixtures (objects1, objects2, object1..object3, num1) are defined
# earlier in this file, outside the visible chunk.
num2 = 2
boolean1 = True
SCRIPT_CONTENT_URL = '/some/prefix'
WEBSITE_DOMAIN = 'http://www.somedomain.com'
SHOW_ALT_HEADER = 'True'


def benchmark():
    # Assemble the full context and render the permalink template once
    # per benchmark iteration.
    context = {
        'objects1': objects1,
        'objects2': objects2,
        'object1': object1,
        'object2': object2,
        'object3': object3,
        'num1': num1,
        'num2': num2,
        'boolean1': boolean1,
        'SCRIPT_CONTENT_URL': SCRIPT_CONTENT_URL,
        'WEBSITE_DOMAIN': WEBSITE_DOMAIN,
        'SHOW_ALT_HEADER': SHOW_ALT_HEADER
    }
    render_to_response('permalink.html', context)


run_benchmark(
    benchmark,
    syncdb=False,
    meta={
        'description': ('Render a somewhat complex, fairly typical template '
                        '(including inheritance, reverse URL resolution, etc.).'),
    }
)
from django.db.models import Manager
from djangobench.utils import run_benchmark


def setup():
    """Late-import the model so Django settings are configured first."""
    global Book
    from query_exists.models import Book


def benchmark():
    """Call .exists() for a row that is present and one that is not."""
    global Book
    # id=1 exists after setup; id=11 never does.
    Book.objects.filter(id=1).exists()
    Book.objects.filter(id=11).exists()


if hasattr(Manager, 'exists'):
    run_benchmark(benchmark, setup=setup, meta={
        'description': 'A Model.objects.exists() call for both existing and non-existing objects.'
    })
else:
    print("SKIP: Django before 1.2 doesn't have QuerySet.exists()")
from djangobench.utils import run_benchmark
from query_select_related.models import Book


def benchmark():
    """Evaluate a select_related('author') queryset 20 times.

    The original used ``xrange``, which exists only on Python 2 and
    raises NameError on Python 3; ``range`` is behaviorally equivalent
    here (the index is never used beyond iteration count).
    """
    for i in range(20):
        list(Book.objects.select_related('author'))


run_benchmark(benchmark, meta={
    'description': 'A simple Model.objects.select_related() call.',
})
from djangobench.utils import run_benchmark
from query_update.models import Book


def benchmark():
    """Issue one bulk UPDATE over every row."""
    Book.objects.all().update(title='z')


run_benchmark(
    benchmark,
    meta={'description': 'A simple QuerySet.update().'},
)
from djangobench.utils import run_benchmark
from query_get.models import Book


def benchmark():
    """Fetch a single row by primary key with .get()."""
    Book.objects.get(id=1)


run_benchmark(benchmark, meta={
    'description': 'A simple Model.objects.get() call.',
})
from djangobench.utils import run_benchmark


def benchmark():
    """Evaluate a raw SQL queryset over 1000 ten-field rows."""
    global MultiField
    list(MultiField.objects.raw('select id from query_evaluating_multifield'))


def setup():
    """Insert 1000 rows with ten populated char fields each."""
    global MultiField
    from query_evaluating.models import MultiField
    for i in range(0, 1000):
        kwargs = {}
        for j in range(1, 11):
            kwargs['field%s' % j] = 'foobar_%s_%s' % (i, j)
        MultiField(**kwargs).save()


run_benchmark(
    benchmark,
    setup=setup,
    meta={
        # Every other benchmark in this suite uses the lower-case
        # 'description' key; the capitalized 'Description' here was a
        # typo that left the text silently unused by the reporting.
        'description': 'Evaluating the overall performance of the system.',
    }
)
from djangobench.utils import run_benchmark


def benchmark():
    """Evaluate a raw() queryset selecting only ids from 1000 wide rows."""
    global MultiField
    list(MultiField.objects.raw('select id from query_raw_deferred_multifield'))


def setup():
    """Insert 1000 rows with ten populated char fields each."""
    global MultiField
    from query_raw_deferred.models import MultiField
    for row in range(0, 1000):
        values = {}
        for col in range(1, 11):
            values['field%s' % col] = 'foobar_%s_%s' % (row, col)
        MultiField(**values).save()


run_benchmark(benchmark, setup=setup, meta={
    'description': 'A test for fetching large number of objects by Model.objects.all() with deferred fields.',
})
from django.db.models import Count
from djangobench.utils import run_benchmark


def setup():
    """Late-import the model so Django settings are configured first."""
    global Book
    from query_aggregate.models import Book


def benchmark():
    """Aggregate a Count over the title column."""
    global Book
    Book.objects.all().aggregate(Count('title'))


run_benchmark(
    benchmark,
    setup=setup,
    meta={'description': 'A simple Model.objects.aggregate() call.'},
)
from djangobench.utils import run_benchmark


def benchmark():
    """Walk an iterator() over all 3000 rows."""
    global Book
    list(Book.objects.iterator())


def setup():
    """Insert 3000 Book rows with explicit primary keys."""
    global Book
    from query_all.models import Book
    for n in range(0, 3000):
        Book(pk=n, title='foobar_%s' % n).save()


run_benchmark(benchmark, setup=setup, meta={
    'description': 'A simple Model.objects.iterator() call for large number of objects.',
})
from django.db import connection

from djangobench.utils import run_benchmark


def benchmark():
    """Execute a raw SELECT and materialise the full result set."""
    cur = connection.cursor()
    cur.execute("select field1 from raw_sql_onefield")
    list(cur.fetchall())


def setup():
    """Insert ten OneField rows to query against."""
    from raw_sql.models import OneField
    for value in range(0, 10):
        OneField(field1=value).save()


run_benchmark(benchmark, setup=setup, meta={
    'description': 'A test for stressing direct SQL performance',
})
environ = { 'PATH_INFO': '/', 'QUERY_STRING': '', 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'SERVER_NAME': 'testserver', 'SERVER_PORT': 80, 'SERVER_PROTOCOL': 'HTTP/1.1', "wsgi.input": sys.stdin } return WSGIRequest(environ) req_object = make_request() def benchmark(): render_to_response('list.html', {'numbers': range(0, 200)}, context_instance=RequestContext(req_object)) run_benchmark( benchmark, migrate = False, meta = { 'description': 'Render a l10n intensive template.', } )
import itertools

from djangobench.utils import run_benchmark
from query_get_or_create.models import Book

# Monotonic id source so each benchmark iteration uses a fresh row.
counter = itertools.count(1)


def benchmark():
    """get_or_create() twice with one id: first creates, second gets.

    The original called ``counter.next()``, which is Python 2-only and
    raises AttributeError on Python 3; the ``next()`` builtin works on
    both (the sibling benchmark in this suite already uses it).
    """
    nextid = next(counter)
    # This will do a create ...
    Book.objects.get_or_create(id=nextid, defaults={'title': 'hi'})
    # ... and this a get.
    Book.objects.get_or_create(id=nextid, defaults={'title': 'hi'})


run_benchmark(
    benchmark,
    meta={
        'description': 'A Model.objects.get_or_create() call, both for existing and non-existing objects.',
    }
)
from djangobench.utils import run_benchmark
from query_in_bulk.models import Book


def benchmark():
    """Look up a single id through in_bulk()."""
    Book.objects.in_bulk([1])


run_benchmark(
    benchmark,
    meta={'description': 'A simple Model.objects.in_bulk() call.'},
)
from django import forms

from djangobench.utils import run_benchmark


class BookForm(forms.Form):
    """Single-field form whose full_clean is the benchmarked callable."""
    title = forms.CharField(max_length=100)


# Bind once; run_benchmark times repeated full_clean() calls directly.
form = BookForm({'title': 'hi'})

run_benchmark(
    form.full_clean,
    syncdb=False,
    meta={
        'description': 'Speed of a Form.clean call.',
    }
)
from django import forms

from djangobench.utils import run_benchmark


class BookForm(forms.Form):
    """One-field form used to measure instantiation/binding cost."""
    title = forms.CharField(max_length=100)


def benchmark():
    """Instantiate the form bound to a minimal data dict."""
    BookForm({'title': 'a'})


run_benchmark(benchmark, migrate=False, meta={
    'description': 'Time required to instantiate and bind a form.',
})
def make_request():
    """Build a minimal GET WSGIRequest for '/' without a test client."""
    env = {
        'PATH_INFO': '/',
        'QUERY_STRING': '',
        'REQUEST_METHOD': 'GET',
        'SCRIPT_NAME': '',
        'SERVER_NAME': 'testserver',
        'SERVER_PORT': 80,
        'SERVER_PROTOCOL': 'HTTP/1.1',
        "wsgi.input": sys.stdin,
    }
    return WSGIRequest(env)


# One request object shared by every benchmark iteration.
req_object = make_request()


def benchmark():
    """Render the l10n-heavy list template with a RequestContext."""
    render_to_response('list.html', {'numbers': range(0, 200)},
                       context_instance=RequestContext(req_object))


run_benchmark(benchmark, migrate=False, meta={
    'description': 'Render a l10n intensive template.',
})
from djangobench.utils import run_benchmark
from query_delete.models import Book


def _seed():
    """Create ten rows for the bulk delete to operate on."""
    for n in range(0, 10):
        Book.objects.create(title='hi')


def benchmark():
    """Bulk-delete every Book via QuerySet.delete()."""
    Book.objects.all().delete()


run_benchmark(
    benchmark,
    meta={"description": "Delete an object via QuerySet.delete()."},
    setup=_seed,
)
import itertools

from djangobench.utils import run_benchmark

# Monotonic id source so each iteration works on a fresh row.
counter = itertools.count(1)


def setup():
    """Late-import the model so Django settings are configured first."""
    global Book
    from query_get_or_create.models import Book


def benchmark():
    """First call creates the row, second call fetches it."""
    global Book
    new_id = next(counter)
    # This will do a create ...
    Book.objects.get_or_create(id=new_id, defaults={'title': 'hi'})
    # ... and this a get.
    Book.objects.get_or_create(id=new_id, defaults={'title': 'hi'})


run_benchmark(benchmark, setup=setup, meta={
    'description': 'A Model.objects.get_or_create() call, both for '
                   'existing and non-existing objects.',
})
from djangobench.utils import run_benchmark


def setup():
    """Late-import the model so Django settings are configured first."""
    global Book
    from query_count.models import Book


def benchmark():
    """Issue a COUNT(*) via Model.objects.count()."""
    global Book
    Book.objects.count()


run_benchmark(benchmark, setup=setup, meta={
    'description': 'A simple Model.objects.count() call.',
})
from djangobench.utils import run_benchmark


def setup():
    """Late-import the model so Django settings are configured first."""
    global Book
    from query_update.models import Book


def benchmark():
    """Bulk-update every row's title in one UPDATE."""
    global Book
    Book.objects.all().update(title='z')


run_benchmark(
    benchmark,
    setup=setup,
    meta={'description': 'A simple QuerySet.update().'},
)
from django.core.urlresolvers import resolve
from djangobench.utils import run_benchmark


def benchmark():
    """Resolve three fixed paths, repeated 100 times."""
    for _ in range(0, 100):
        resolve('/basic/')
        resolve('/fallthroughview/')
        resolve('/replace/1')


run_benchmark(benchmark, meta={
    'description': 'URL resolution.',
})
from djangobench.utils import run_benchmark
from django import VERSION
from query_prefetch_related.models import Book, Author


def benchmark():
    """Evaluate Author.objects.prefetch_related('books') ten times.

    The original used ``xrange``, which exists only on Python 2 and
    raises NameError on Python 3; ``range`` is equivalent here.
    """
    for i in range(10):
        for a in Author.objects.prefetch_related('books'):
            list(a.books.all())


def setup():
    """Create 20 authors with 3 books each."""
    for i in range(0, 20):
        a = Author.objects.create(author="Author %s" % i)
        bset = set()
        for j in range(0, 3):
            b = Book.objects.create(title="Title %s" % j)
            bset.add(b)
        a.books = bset


if VERSION < (1, 4):
    print("SKIP: prefetch_related not supported before Django 1.4")
else:
    run_benchmark(
        benchmark,
        setup=setup,
        meta={
            # The description previously said 'select_related' — a
            # copy/paste error from the sibling benchmark.
            'description': 'A simple Model.objects.prefetch_related() call.',
        }
    )
from djangobench.utils import run_benchmark


def benchmark():
    """Evaluate a raw id-only query over the wide table."""
    global MultiField
    qs = MultiField.objects.raw('select id from query_raw_deferred_multifield')
    list(qs)


def setup():
    """Insert 1000 rows with ten populated char fields each."""
    global MultiField
    from query_raw_deferred.models import MultiField
    for i in range(0, 1000):
        fields = dict(
            ('field%s' % j, 'foobar_%s_%s' % (i, j)) for j in range(1, 11)
        )
        MultiField(**fields).save()


run_benchmark(
    benchmark,
    setup=setup,
    meta={
        'description': 'A test for fetching large number of objects by Model.objects.all() with deferred fields.',
    })
from djangobench.utils import run_benchmark


def setup():
    """Late-import the model so Django settings are configured first."""
    global Book
    from model_save_new.models import Book


def benchmark():
    """Insert 30 new Book rows via individual save() calls."""
    global Book
    for n in range(0, 30):
        book = Book(id=n, title='Foo')
        book.save()


run_benchmark(benchmark, setup=setup, meta={
    'description': 'A simple Model.save() call, instance not in DB.',
})
import time

from djangobench.utils import run_benchmark
from query_delete.models import Book


def benchmark():
    """Create a row, then time only the Model.delete() call.

    Returns the elapsed seconds so the harness measures just the
    delete, excluding the setup create.
    """
    book = Book.objects.create(title='hi')
    started = time.time()
    book.delete()
    return time.time() - started


run_benchmark(benchmark, meta={
    'description': 'Delete an object via Model.delete().',
})
import time

from djangobench.utils import run_benchmark


def setup():
    """Import the model after Django settings exist."""
    global Book
    from model_creation.models import Book


def benchmark():
    """One INSERT via Model.objects.create()."""
    global Book
    Book.objects.create(title='hi!')


run_benchmark(
    benchmark,
    setup=setup,
    meta={'description': 'Time of a Model.objects.create() call.'},
)
from djangobench.utils import run_benchmark


def benchmark():
    """Cascade-delete every book (chapters go with them)."""
    global Book
    Book.objects.all().delete()


def setup():
    """Create three books, each with five chapters."""
    global Book
    from query_delete_related.models import Book, Chapter
    books = [Book.objects.create(title='hi') for _ in range(3)]
    for i in range(0, 5):
        for book in books:
            Chapter.objects.create(book=book, title='chapter%d' % i)


run_benchmark(
    benchmark,
    meta={
        'description': 'Delete an object via QuerySet.delete(), '
                       'objects deleted have related objects.',
    },
    setup=setup
)
'WEBSITE_DOMAIN': WEBSITE_DOMAIN, 'SHOW_ALT_HEADER': SHOW_ALT_HEADER } render_to_response('permalink_django_lte_13.html', context) def benchmark_django_gt_13(): context = { 'objects1': objects1, 'objects2': objects2, 'object1': object1, 'object2': object2, 'object3': object3, 'num1' : num1, 'num2' : num2, 'boolean1': boolean1, 'SCRIPT_CONTENT_URL': SCRIPT_CONTENT_URL, 'WEBSITE_DOMAIN': WEBSITE_DOMAIN, 'SHOW_ALT_HEADER': SHOW_ALT_HEADER, 'base_template': 'base.html' if VERSION > (1, 5) else 'base_django_lte_15.html', } render_to_response('permalink.html', context) run_benchmark( benchmark_django_gt_13 if VERSION > (1, 3) else benchmark_django_lte_13, migrate=False, meta={ 'description': ('Render a somewhat complex, fairly typical template ' '(including inheritance, reverse URL resolution, etc.).'), } )
def benchmark():
    # Just compile the template, no rendering
    # NOTE(review): this body came from whitespace-collapsed source, so
    # the exact line breaks inside the template literal are reconstructed.
    # Since the template is only compiled (render() is never called), the
    # literal's whitespace cannot affect the measured work — only the
    # token count (one {% for %} block containing fifteen {{ v }} nodes).
    t = Template("""
    {% for v in vals %}
    {{ v }} {{ v }} {{ v }} {{ v }} {{ v }}
    {{ v }} {{ v }} {{ v }} {{ v }} {{ v }}
    {{ v }} {{ v }} {{ v }} {{ v }} {{ v }}
    {% endfor %}
    """)


run_benchmark(
    benchmark,
    migrate=False,
    meta={
        'description': 'Template compilation time.',
    }
)
from djangobench.utils import run_benchmark


def benchmark():
    """Stream all rows through iterator()."""
    global MultiField
    list(MultiField.objects.iterator())


def setup():
    """Insert 3000 rows with ten populated char fields each."""
    global MultiField
    from query_all_multifield.models import MultiField
    for i in range(0, 3000):
        row = {}
        for j in range(1, 11):
            row['field%s' % j] = 'foobar_%s_%s' % (i, j)
        MultiField(**row).save()


run_benchmark(benchmark, setup=setup, meta={
    'description': 'A simple Model.objects.iterator() for a large number of objects and large number of fields.',
})
from djangobench.utils import run_benchmark


def setup():
    """Late-import the model so Django settings are configured first."""
    global Book
    from query_distinct.models import Book


def benchmark():
    """Evaluate a .distinct() queryset."""
    global Book
    list(Book.objects.distinct())


run_benchmark(
    benchmark,
    setup=setup,
    meta={'description': 'A simple Model.objects.distinct() call.'},
)
from djangobench.utils import run_benchmark
from query_complex_filter.models import Book


def benchmark():
    """Build (but do not evaluate) a complex_filter() queryset."""
    Book.objects.complex_filter({'pk': 1})


run_benchmark(benchmark, meta={
    'description': 'A simple Model.objects.complex_filter() call.',
})