def __call__(self, environ, start_response): from django.conf import settings # Set up middleware if needed. We couldn't do this earlier, because # settings weren't available. if self._request_middleware is None: self.load_middleware() dispatcher.send(signal=signals.request_started) try: request = WSGIRequest(environ) response = self.get_response(request) # Apply response middleware for middleware_method in self._response_middleware: response = middleware_method(request, response) finally: dispatcher.send(signal=signals.request_finished) try: status_text = STATUS_CODE_TEXT[response.status_code] except KeyError: status_text = 'UNKNOWN STATUS CODE' status = '%s %s' % (response.status_code, status_text) response_headers = response.headers.items() for c in response.cookies.values(): response_headers.append(('Set-Cookie', c.output(header=''))) start_response(status, response_headers) return response
def send_create_signal(self, app_label, model_names):
    """
    Emit post_syncdb for the named models of ``app_label``.

    Models that can no longer be resolved (deleted, perhaps?) are skipped;
    if the app or every model is missing, no signal is sent.

    TODO: The behavior of django.contrib.* apps seems flawed in that
    they don't respect created_models -- they blindly execute over all
    models within the app when signalled. Worth patching upstream; for
    now, this should work.
    """
    app = models.get_app(app_label)
    if not app:
        return

    created_models = [m for m in
                      (models.get_model(app_label, name) for name in model_names)
                      if m]
    if not created_models:
        return

    # syncdb defaults -- perhaps take these as options?
    verbosity = 1
    interactive = True
    if hasattr(dispatcher, "send"):
        dispatcher.send(signal=models.signals.post_syncdb, sender=app,
                        app=app, created_models=created_models,
                        verbosity=verbosity, interactive=interactive)
    else:
        models.signals.post_syncdb.send(sender=app, app=app,
                                        created_models=created_models,
                                        verbosity=verbosity,
                                        interactive=interactive)
def instrumented_test_render(self, context):
    """
    Instrumented Template.render used by the test system Client: emit the
    template_rendered signal, then delegate rendering to the node list.
    """
    dispatcher.send(signal=signals.template_rendered,
                    sender=self, template=self, context=context)
    return self.nodelist.render(context)
def _prepare(cls):
    """
    Creates some methods once self._meta has been populated.
    """
    opts = cls._meta
    opts._prepare(cls)

    if opts.order_with_respect_to:
        # Navigation helpers within the ordered set.
        cls.get_next_in_order = curry(cls._get_next_or_previous_in_order,
                                      is_next=True)
        cls.get_previous_in_order = curry(
            cls._get_next_or_previous_in_order, is_next=False)
        # Install get_<model>_order / set_<model>_order on the related model.
        setattr(opts.order_with_respect_to.rel.to,
                'get_%s_order' % cls.__name__.lower(),
                curry(method_get_order, cls))
        setattr(opts.order_with_respect_to.rel.to,
                'set_%s_order' % cls.__name__.lower(),
                curry(method_set_order, cls))

    # Give the class a docstring -- its definition.
    if cls.__doc__ is None:
        cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(
            [f.attname for f in opts.fields]))

    if hasattr(cls, 'get_absolute_url'):
        cls.get_absolute_url = curry(get_absolute_url, opts,
                                     cls.get_absolute_url)

    dispatcher.send(signal=signals.class_prepared, sender=cls)
def really_send_create_signal(self, app_label, model_names):
    """
    Sends a post_syncdb signal for the model specified.

    If the model is not found (perhaps it's been deleted?), no signal is
    sent.

    TODO: The behavior of django.contrib.* apps seems flawed in that
    they don't respect created_models.  Rather, they blindly execute
    over all models within the app sending the signal.  This is a patch
    we should push Django to make.  For now, this should work.
    """
    if self.debug:
        print " - Sending post_syncdb signal for %s: %s" % (app_label, model_names)

    app = models.get_app(app_label)
    if not app:
        return

    # Resolve model names to model classes; unresolvable names are skipped.
    created_models = []
    for model_name in model_names:
        model = models.get_model(app_label, model_name)
        if model:
            created_models.append(model)

    if created_models:
        # syncdb defaults -- perhaps take these as options?
        verbosity = 1
        interactive = True
        if hasattr(dispatcher, "send"):
            # Older djangos
            dispatcher.send(signal=models.signals.post_syncdb, sender=app,
                            app=app, created_models=created_models,
                            verbosity=verbosity, interactive=interactive)
        else:
            if self._is_multidb():
                # Django 1.2+
                models.signals.post_syncdb.send(
                    sender=app,
                    app=app,
                    created_models=created_models,
                    verbosity=verbosity,
                    interactive=interactive,
                    db=self.db_alias,
                )
            else:
                # Django 1.1 - 1.0
                models.signals.post_syncdb.send(
                    sender=app,
                    app=app,
                    created_models=created_models,
                    verbosity=verbosity,
                    interactive=interactive,
                )
def instrumented_test_iter_render(self, context):
    """
    Instrumented Template.iter_render for the test system Client: stream
    the node list's chunks through unchanged, then emit the
    template_rendered signal.
    """
    for piece in self.nodelist.iter_render(context):
        yield piece
    # The signal fires only once the stream has been exhausted.
    dispatcher.send(signal=signals.template_rendered, sender=self,
                    template=self, context=context)
def delete_objects(seen_objs):
    """
    Iterate through a list of seen classes, and remove any instances that
    are referred to.
    """
    try:
        ordered_classes = seen_objs.keys()
    except CyclicDependency:
        # if there is a cyclic dependency, we cannot in general delete
        # the objects. However, if an appropriate transaction is set
        # up, or if the database is lax enough, it will succeed.
        # So for now, we go ahead and try anyway.
        ordered_classes = seen_objs.unordered_keys()

    obj_pairs = {}
    for cls in ordered_classes:
        # Sort by primary key for deterministic processing order.
        items = seen_objs[cls].items()
        items.sort()
        obj_pairs[cls] = items

        # Pre notify all instances to be deleted
        for pk_val, instance in items:
            dispatcher.send(signal=signals.pre_delete, sender=cls,
                            instance=instance)

        pk_list = [pk for pk, instance in items]
        del_query = sql.DeleteQuery(cls, connection)
        del_query.delete_batch_related(pk_list)

        # Null out nullable FKs that point into the delete set, so the row
        # deletes below don't hit constraint violations.
        update_query = sql.UpdateQuery(cls, connection)
        for field in cls._meta.fields:
            if field.rel and field.null and field.rel.to in seen_objs:
                update_query.clear_related(field, pk_list)

    # Now delete the actual data
    for cls in ordered_classes:
        items = obj_pairs[cls]
        items.reverse()

        pk_list = [pk for pk, instance in items]
        del_query = sql.DeleteQuery(cls, connection)
        del_query.delete_batch(pk_list)

        # Last cleanup; set NULLs where there once was a reference to the
        # object, NULL the primary key of the found objects, and perform
        # post-notification.
        for pk_val, instance in items:
            for field in cls._meta.fields:
                if field.rel and field.null and field.rel.to in seen_objs:
                    setattr(instance, field.attname, None)
            dispatcher.send(signal=signals.post_delete, sender=cls,
                            instance=instance)
            setattr(instance, cls._meta.pk.attname, None)

    transaction.commit_unless_managed()
def delete_objects(seen_objs):
    """
    Iterate through a list of seen classes, and remove any instances that
    are referred to.
    """
    try:
        ordered_classes = seen_objs.keys()
    except CyclicDependency:
        # if there is a cyclic dependency, we cannot in general delete
        # the objects. However, if an appropriate transaction is set
        # up, or if the database is lax enough, it will succeed.
        # So for now, we go ahead and try anyway.
        ordered_classes = seen_objs.unordered_keys()

    obj_pairs = {}
    for cls in ordered_classes:
        # Sort by primary key for deterministic processing order.
        items = seen_objs[cls].items()
        items.sort()
        obj_pairs[cls] = items

        # Pre notify all instances to be deleted
        for pk_val, instance in items:
            dispatcher.send(signal=signals.pre_delete, sender=cls,
                            instance=instance)

        pk_list = [pk for pk,instance in items]
        del_query = sql.DeleteQuery(cls, connection)
        del_query.delete_batch_related(pk_list)

        # Null out nullable FKs that point into the delete set, so the row
        # deletes below don't hit constraint violations.
        update_query = sql.UpdateQuery(cls, connection)
        for field in cls._meta.fields:
            if field.rel and field.null and field.rel.to in seen_objs:
                update_query.clear_related(field, pk_list)

    # Now delete the actual data
    for cls in ordered_classes:
        items = obj_pairs[cls]
        items.reverse()

        pk_list = [pk for pk,instance in items]
        del_query = sql.DeleteQuery(cls, connection)
        del_query.delete_batch(pk_list)

        # Last cleanup; set NULLs where there once was a reference to the
        # object, NULL the primary key of the found objects, and perform
        # post-notification.
        for pk_val, instance in items:
            for field in cls._meta.fields:
                if field.rel and field.null and field.rel.to in seen_objs:
                    setattr(instance, field.attname, None)
            dispatcher.send(signal=signals.post_delete, sender=cls,
                            instance=instance)
            setattr(instance, cls._meta.pk.attname, None)

    transaction.commit_unless_managed()
def save(self):
    """
    Save the current instance: UPDATE if a row with this primary key
    exists, otherwise INSERT.  Emits pre_save before and post_save after.
    """
    dispatcher.send(signal=signals.pre_save, sender=self.__class__,
                    instance=self)

    non_pks = [f for f in self._meta.fields if not f.primary_key]
    cursor = connection.cursor()

    # First, try an UPDATE. If that doesn't update anything, do an INSERT.
    pk_val = self._get_pk_val()
    pk_set = bool(pk_val)
    record_exists = True
    if pk_set:
        # Determine whether a record with the primary key already exists.
        cursor.execute("SELECT 1 FROM %s WHERE %s=%%s LIMIT 1" % \
            (backend.quote_name(self._meta.db_table),
             backend.quote_name(self._meta.pk.column)), [pk_val])
        # If it does already exist, do an UPDATE.
        if cursor.fetchone():
            db_values = [f.get_db_prep_save(f.pre_save(self, False))
                         for f in non_pks]
            if db_values:
                cursor.execute("UPDATE %s SET %s WHERE %s=%%s" % \
                    (backend.quote_name(self._meta.db_table),
                     ','.join(['%s=%%s' % backend.quote_name(f.column)
                               for f in non_pks]),
                     backend.quote_name(self._meta.pk.column)),
                    db_values + [pk_val])
        else:
            record_exists = False
    if not pk_set or not record_exists:
        # INSERT path: auto fields are normally omitted so the database
        # assigns them.
        field_names = [backend.quote_name(f.column)
                       for f in self._meta.fields
                       if not isinstance(f, AutoField)]
        db_values = [f.get_db_prep_save(f.pre_save(self, True))
                     for f in self._meta.fields
                     if not isinstance(f, AutoField)]
        # If the PK has been manually set, respect that.
        if pk_set:
            field_names += [f.column for f in self._meta.fields
                            if isinstance(f, AutoField)]
            db_values += [f.get_db_prep_save(f.pre_save(self, True))
                          for f in self._meta.fields
                          if isinstance(f, AutoField)]
        placeholders = ['%s'] * len(field_names)
        if self._meta.order_with_respect_to:
            # _order is computed in-database as the count of rows sharing
            # the same "order with respect to" value.
            field_names.append(backend.quote_name('_order'))
            # TODO: This assumes the database supports subqueries.
            placeholders.append('(SELECT COUNT(*) FROM %s WHERE %s = %%s)' % \
                (backend.quote_name(self._meta.db_table),
                 backend.quote_name(self._meta.order_with_respect_to.column)))
            db_values.append(getattr(self,
                self._meta.order_with_respect_to.attname))
        if db_values:
            cursor.execute("INSERT INTO %s (%s) VALUES (%s)" % \
                (backend.quote_name(self._meta.db_table),
                 ','.join(field_names),
                 ','.join(placeholders)), db_values)
        else:
            # Create a new record with defaults for everything.
            cursor.execute("INSERT INTO %s (%s) VALUES (%s)" %
                (backend.quote_name(self._meta.db_table),
                 backend.quote_name(self._meta.pk.column),
                 backend.get_pk_default_value()))
        if self._meta.has_auto_field and not pk_set:
            # Pull the database-assigned id back onto the instance.
            setattr(self, self._meta.pk.attname,
                    backend.get_last_insert_id(cursor, self._meta.db_table,
                                               self._meta.pk.column))
    transaction.commit_unless_managed()

    # Run any post-save hooks.
    dispatcher.send(signal=signals.post_save, sender=self.__class__,
                    instance=self)
def emit_post_sync_signal(created_models, verbosity, interactive): from django.db import models from django.dispatch import dispatcher # Emit the post_sync signal for every application. for app in models.get_apps(): app_name = app.__name__.split('.')[-2] if verbosity >= 2: print "Running post-sync handlers for application", app_name dispatcher.send(signal=models.signals.post_syncdb, sender=app, app=app, created_models=created_models, verbosity=verbosity, interactive=interactive)
def really_send_create_signal(self, app_label, model_names, verbosity=0,
                              interactive=False):
    """
    Sends a post_syncdb signal for the model specified.

    If the model is not found (perhaps it's been deleted?), no signal is
    sent.

    TODO: The behavior of django.contrib.* apps seems flawed in that
    they don't respect created_models -- they blindly execute over all
    models within the app when signalled.  Worth patching upstream; for
    now, this should work.
    """
    if self.debug:
        print(" - Sending post_syncdb signal for %s: %s" % (app_label, model_names))

    app = models.get_app(app_label)
    if not app:
        return

    created_models = [m for m in
                      (models.get_model(app_label, name) for name in model_names)
                      if m]
    if not created_models:
        return

    if hasattr(dispatcher, "send"):
        # Older djangos
        dispatcher.send(signal=models.signals.post_syncdb, sender=app,
                        app=app, created_models=created_models,
                        verbosity=verbosity, interactive=interactive)
        return

    kwargs = dict(sender=app, app=app, created_models=created_models,
                  verbosity=verbosity, interactive=interactive)
    if self._is_multidb():
        # Django 1.2+ carries the database alias; Django 1.0/1.1 does not.
        kwargs['db'] = self.db_alias
    models.signals.post_syncdb.send(**kwargs)
def save(self):
    """
    Validate that ``model_name`` names a real installed model, then save
    and broadcast this instance's post_save signal.
    """
    # Check that the model name refers to a real model.
    model_exists = any(
        u'.'.join((m.__module__, m.__name__)) == self.model_name
        for m in get_models())
    assert model_exists, \
        'type must be associated with a real model, not %s' % self.model_name
    # NOTE(review): super(self.__class__, self) recurses infinitely if this
    # class is ever subclassed -- consider naming the class explicitly.
    super(self.__class__, self).save()
    dispatcher.send(signal=self.post_save, sender=self.__class__,
                    instance=self)
def refresh_dirty_statistics(request):
    """
    Trigger a refresh of the dirty statistics, then redirect back to the
    referring page (or "/" when no referer is available).
    """
    redirect_to = request.META.get("HTTP_REFERER", "/")
    # Only users holding the reload permission may trigger the refresh;
    # everyone else is simply bounced back.
    if request.user.has_perm("statistics.reload_statistics"):
        dispatcher.send(signal=signals.refresh_dirty_statistics_thread)
    return HttpResponseRedirect(redirect_to)
def runjobs_by_dispatcher(self, when, options): """ Run jobs from the dispatcher """ # Thanks for Ian Holsman for the idea and code from extensions.management import signals from django.db import models from django.dispatch import dispatcher from django.conf import settings for app_name in settings.INSTALLED_APPS: try: __import__(app_name + '.management', '', '', ['']) except ImportError: pass for app in models.get_apps(): if options.get('verbose', False): app_name = '.'.join(app.__name__.rsplit('.')[:-1]) print "Dispatching %s job signal for: %s" % (when, app_name) if when == 'hourly': dispatcher.send(signal=signals.run_hourly_jobs, sender=app, app=app) elif when == 'daily': dispatcher.send(signal=signals.run_daily_jobs, sender=app, app=app) elif when == 'weekly': dispatcher.send(signal=signals.run_weekly_jobs, sender=app, app=app) elif when == 'monthly': dispatcher.send(signal=signals.run_monthly_jobs, sender=app, app=app)
def delete_objects(seen_objs):
    """
    Iterate through a list of seen classes, and remove any instances that
    are referred to.
    """
    ordered_classes = seen_objs.keys()
    ordered_classes.reverse()

    for cls in ordered_classes:
        # Replace each class's mapping with its sorted (pk, instance) pairs.
        seen_objs[cls] = seen_objs[cls].items()
        seen_objs[cls].sort()

        # Pre notify all instances to be deleted
        for pk_val, instance in seen_objs[cls]:
            dispatcher.send(signal=signals.pre_delete, sender=cls,
                            instance=instance)

        pk_list = [pk for pk,instance in seen_objs[cls]]
        del_query = sql.DeleteQuery(cls, connection)
        del_query.delete_batch_related(pk_list)

        # Null out nullable FKs that point into the delete set, so the row
        # deletes below don't hit constraint violations.
        update_query = sql.UpdateQuery(cls, connection)
        for field in cls._meta.fields:
            if field.rel and field.null and field.rel.to in seen_objs:
                update_query.clear_related(field, pk_list)

    # Now delete the actual data
    for cls in ordered_classes:
        seen_objs[cls].reverse()
        pk_list = [pk for pk,instance in seen_objs[cls]]
        del_query = sql.DeleteQuery(cls, connection)
        del_query.delete_batch(pk_list)

        # Last cleanup; set NULLs where there once was a reference to the
        # object, NULL the primary key of the found objects, and perform
        # post-notification.
        for pk_val, instance in seen_objs[cls]:
            for field in cls._meta.fields:
                if field.rel and field.null and field.rel.to in seen_objs:
                    setattr(instance, field.attname, None)
            dispatcher.send(signal=signals.post_delete, sender=cls,
                            instance=instance)
            setattr(instance, cls._meta.pk.attname, None)

    transaction.commit_unless_managed()
def recursive_reload(request, language_code=None, project_code=None, catalog_code=None):
    """
    Ask listeners to reload statistics recursively for the given scope,
    then redirect back to the referring page (or "/").
    """
    redirect_to = request.META.get("HTTP_REFERER", "/")
    # Permission gate: only reload_statistics holders trigger the signal.
    if request.user.has_perm("statistics.reload_statistics"):
        dispatcher.send(signal=signals.recursive_reload,
                        request=request,
                        language_code=language_code,
                        project_code=project_code,
                        catalog_code=catalog_code)
    return HttpResponseRedirect(redirect_to)
def __call__(self, req):
    """
    mod_python entry point: handle one request and write the response
    back into ``req``.  Returns 0 (mod_python.apache.OK).
    """
    # mod_python fakes the environ, and thus doesn't process SetEnv.
    # This fixes that.
    os.environ.update(req.subprocess_env)

    # now that the environ works we can see the correct settings, so imports
    # that use settings now can work
    from django.conf import settings

    # if we need to set up middleware, now that settings works we can do it now.
    if self._request_middleware is None:
        self.load_middleware()

    set_script_prefix(req.get_options().get('django.root', ''))
    dispatcher.send(signal=signals.request_started)
    try:
        try:
            request = self.request_class(req)
        except UnicodeDecodeError:
            # Undecodable request data -- answer 400 instead of crashing.
            response = http.HttpResponseBadRequest()
        else:
            response = self.get_response(request)

            # Apply response middleware
            for middleware_method in self._response_middleware:
                response = middleware_method(request, response)
            response = self.apply_response_fixes(request, response)
    finally:
        # request_finished fires even if request handling raised.
        dispatcher.send(signal=signals.request_finished)

    # Convert our custom HttpResponse object back into the mod_python req.
    req.content_type = response['Content-Type']
    for key, value in list(response.items()):
        if key != 'content-type':
            req.headers_out[str(key)] = str(value)
    for c in list(response.cookies.values()):
        req.headers_out.add('Set-Cookie', c.output(header=''))
    req.status = response.status_code
    try:
        for chunk in response:
            req.write(chunk)
    finally:
        response.close()
    return 0 # mod_python.apache.OK
def __call__(self, req):
    """
    mod_python entry point: handle one request and write the response
    back into ``req``.  Returns 0 (mod_python.apache.OK).
    """
    # mod_python fakes the environ, and thus doesn't process SetEnv.
    # This fixes that.
    os.environ.update(req.subprocess_env)

    # now that the environ works we can see the correct settings, so imports
    # that use settings now can work
    from django.conf import settings

    # if we need to set up middleware, now that settings works we can do it now.
    if self._request_middleware is None:
        self.load_middleware()

    set_script_prefix(req.get_options().get('django.root', ''))
    dispatcher.send(signal=signals.request_started)
    try:
        try:
            request = self.request_class(req)
        except UnicodeDecodeError:
            # Undecodable request data -- answer 400 instead of crashing.
            response = http.HttpResponseBadRequest()
        else:
            response = self.get_response(request)

            # Apply response middleware
            for middleware_method in self._response_middleware:
                response = middleware_method(request, response)
            response = self.apply_response_fixes(request, response)
    finally:
        # request_finished fires even if request handling raised.
        dispatcher.send(signal=signals.request_finished)

    # Convert our custom HttpResponse object back into the mod_python req.
    req.content_type = response['Content-Type']
    for key, value in response.items():
        if key != 'content-type':
            req.headers_out[str(key)] = str(value)
    for c in response.cookies.values():
        req.headers_out.add('Set-Cookie', c.output(header=''))
    req.status = response.status_code
    try:
        for chunk in response:
            req.write(chunk)
    finally:
        response.close()
    return 0 # mod_python.apache.OK
def _prepare(cls):
    # Creates some methods once self._meta has been populated.
    opts = cls._meta
    opts._prepare(cls)

    if opts.order_with_respect_to:
        # Navigation helpers within the ordered set.
        cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
        cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
        # Install get_<model>_order / set_<model>_order on the related model.
        setattr(opts.order_with_respect_to.rel.to, 'get_%s_order' % cls.__name__.lower(), curry(method_get_order, cls))
        setattr(opts.order_with_respect_to.rel.to, 'set_%s_order' % cls.__name__.lower(), curry(method_set_order, cls))

    # Give the class a docstring -- its definition.
    if cls.__doc__ is None:
        cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))

    if hasattr(cls, 'get_absolute_url'):
        cls.get_absolute_url = curry(get_absolute_url, opts, cls.get_absolute_url)

    dispatcher.send(signal=signals.class_prepared, sender=cls)
def __store_hit(self, url, type, meta):
    """
    Record a hit for this ad against ``url`` and return the created Log.

    Listeners of get_meta_info may enrich ``meta`` in place before the
    configured subset of its keys is serialised onto the log entry.
    """
    # Get URL from database
    website_url, new = URL.objects.get_or_create(url=url)
    # Dispatches signal to get IP info for location and others informations
    if signals_refactored:
        app_signals.get_meta_info.send(sender=self, meta=meta)# or {}
    else:
        dispatcher.send(signal=app_signals.get_meta_info, sender=self, meta=meta)# or {}
    # Keep only the whitelisted meta keys.
    stored = {}
    for key in app_settings.ADS_STORED_META_KEYS:
        if key in meta:
            stored[key] = meta[key]
    # Returns the log created
    return Log.objects.create(
        ad=self,
        type=type,
        url=url,
        website_url=website_url,
        meta=simplejson.dumps(stored),
    )
def __call__(This):
    """
    PyISAPIe entry point: lazily initialise middleware, run the request
    through get_response and response middleware, and hand the result to
    process_response, bracketed by request_started/request_finished.
    """
    from django.conf import settings
    if This._request_middleware is None:
        This.load_middleware()
    dispatcher.send(signal=signals.request_started)
    try:
        req = PyISAPIeRequest()
        resp = This.get_response(req)
        # Apply response middleware
        for mw_method in This._response_middleware:
            resp = mw_method(req, resp)
    finally:
        dispatcher.send(signal=signals.request_finished)
    process_response(resp)
def recursive_delete(request, language_code=None, project_code=None, catalog_code=None):
    """
    Ask listeners to recursively delete the translation units in the given
    scope, then redirect back to the referring page.
    """
    redirect_to = request.META.get("HTTP_REFERER", "/")
    # Never redirect back onto this very view.
    if redirect_to.endswith(request.path):
        redirect_to = "/"
    if not request.user.has_perm("transcat.delete_transunit"):
        request.user.alerts.create(
            type="deny",
            message=gettext("You are not allowed to remove the translation units."))
        return HttpResponseRedirect(redirect_to)
    dispatcher.send(signal=signals.recursive_delete,
                    request=request,
                    language_code=language_code,
                    project_code=project_code,
                    catalog_code=catalog_code)
    return HttpResponseRedirect(redirect_to)
def __call__(self, environ, start_response):
    """
    Thread-safe WSGI entry point: handle one request described by
    ``environ`` and return the response object.
    """
    from django.conf import settings

    # Set up middleware if needed. We couldn't do this earlier, because
    # settings weren't available.
    if self._request_middleware is None:
        self.initLock.acquire()
        try:
            # Check that middleware is still uninitialised -- another
            # thread may have won the race while we waited for the lock.
            if self._request_middleware is None:
                self.load_middleware()
        finally:
            # BUGFIX: release the lock even if load_middleware() raises;
            # previously an exception here left the lock held forever and
            # every subsequent request deadlocked on acquire().
            self.initLock.release()

    set_script_prefix(base.get_script_name(environ))
    dispatcher.send(signal=signals.request_started)
    try:
        try:
            request = self.request_class(environ)
        except UnicodeDecodeError:
            # Undecodable request data -- answer 400 instead of crashing.
            response = http.HttpResponseBadRequest()
        else:
            response = self.get_response(request)

            # Apply response middleware
            for middleware_method in self._response_middleware:
                response = middleware_method(request, response)
            response = self.apply_response_fixes(request, response)
    finally:
        # request_finished fires even if request handling raised.
        dispatcher.send(signal=signals.request_finished)

    # Translate the numeric status into a "200 OK"-style status line.
    try:
        status_text = STATUS_CODE_TEXT[response.status_code]
    except KeyError:
        status_text = 'UNKNOWN STATUS CODE'
    status = '%s %s' % (response.status_code, status_text)
    response_headers = [(str(k), str(v)) for k, v in list(response.items())]
    for c in list(response.cookies.values()):
        response_headers.append(('Set-Cookie', str(c.output(header=''))))
    start_response(status, response_headers)
    return response
def __call__(self, environ):
    """
    Handle one request described by ``environ`` and return the response.

    Middleware loads lazily on first call because settings are not
    available at construction time; request_started/request_finished
    signals bracket the exchange.
    """
    from django.conf import settings
    from django.core import signals
    if self._request_middleware is None:
        self.load_middleware()
    dispatcher.send(signal=signals.request_started)
    try:
        request = WSGIRequest(environ)
        response = self.get_response(request)
        # Apply response middleware
        for mw_method in self._response_middleware:
            response = mw_method(request, response)
    finally:
        dispatcher.send(signal=signals.request_finished)
    return response
def __call__(self, environ):
    """
    Handle one request described by ``environ`` and return the response,
    with response middleware and response fixes applied.

    Middleware loads lazily on first call because settings are not
    available at construction time.
    """
    from django.conf import settings
    from django.core import signals
    if self._request_middleware is None:
        self.load_middleware()
    dispatcher.send(signal=signals.request_started)
    try:
        request = WSGIRequest(environ)
        response = self.get_response(request)
        # Apply response middleware.
        for mw_method in self._response_middleware:
            response = mw_method(request, response)
        response = self.apply_response_fixes(request, response)
    finally:
        dispatcher.send(signal=signals.request_finished)
    return response
def from_sequence(cls, values):
    """
    Alternate class constructor, primarily for internal use.

    Builds a model instance from a sequence of values matching the
    non-many-to-many fields in creation order.  When fewer values than
    fields are given, the remaining (final) fields get their defaults.
    ForeignKey fields can only be initialised with id values here, not
    instances.
    """
    dispatcher.send(signal=signals.pre_init, sender=cls, args=values,
                    kwargs={})
    # Build a bare object and rebrand it, bypassing __init__.
    instance = Empty()
    instance.__class__ = cls
    remaining_fields = iter(instance._meta.fields)
    for value, field in izip(values, remaining_fields):
        setattr(instance, field.attname, value)
    # Whatever fields the values didn't cover fall back to defaults.
    for field in remaining_fields:
        setattr(instance, field.attname, field.get_default())
    dispatcher.send(signal=signals.post_init, sender=cls, instance=instance)
    return instance
def _prepare(cls):
    # TODO: How do we extend the parent classes classmethod properly?
    # super(CachedModel, cls)._prepare() errors
    opts = cls._meta
    opts._prepare(cls)

    if opts.order_with_respect_to:
        # Navigation helpers within the ordered set.
        cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
        cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
        # Install get_<model>_order / set_<model>_order on the related model.
        setattr(opts.order_with_respect_to.rel.to, 'get_%s_order' % cls.__name__.lower(), curry(method_get_order, cls))
        setattr(opts.order_with_respect_to.rel.to, 'set_%s_order' % cls.__name__.lower(), curry(method_set_order, cls))

    # Give the class a docstring -- its definition.
    if cls.__doc__ is None:
        cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))

    if hasattr(cls, 'get_absolute_url'):
        cls.get_absolute_url = curry(get_absolute_url, opts, cls.get_absolute_url)

    # Caching managers: 'objects' is cached, 'nocache' hits the database,
    # and the default manager is the uncached one.
    cls.add_to_class('objects', CacheManager())
    cls.add_to_class('nocache', Manager())
    cls.add_to_class('_default_manager', cls.nocache)

    dispatcher.send(signal=signals.class_prepared, sender=cls)
def __store_hit(self, url, type, meta):
    """
    Record a hit for this ad against ``url`` and return the created Log.

    Listeners of get_meta_info may enrich ``meta`` in place before the
    configured subset of its keys is serialised onto the log entry.
    """
    # Get URL from database
    website_url, new = URL.objects.get_or_create(url=url)
    # Dispatches signal to get IP info for location and others informations
    if signals_refactored:
        app_signals.get_meta_info.send(sender=self, meta=meta)  # or {}
    else:
        dispatcher.send(signal=app_signals.get_meta_info, sender=self, meta=meta)  # or {}
    # Keep only the whitelisted meta keys.
    stored = {}
    for key in app_settings.ADS_STORED_META_KEYS:
        if key in meta:
            stored[key] = meta[key]
    # Returns the log created
    return Log.objects.create(
        ad=self,
        type=type,
        url=url,
        website_url=website_url,
        meta=simplejson.dumps(stored),
    )
def save(self, raw=False):
    """
    Save the current instance: UPDATE if a row with this primary key
    exists, otherwise INSERT.  When ``raw`` is true, field values are
    taken straight from the instance attributes instead of pre_save().
    Emits pre_save before and post_save (with ``created``) after.
    """
    dispatcher.send(signal=signals.pre_save, sender=self.__class__,
                    instance=self, raw=raw)

    non_pks = [f for f in self._meta.fields if not f.primary_key]
    cursor = connection.cursor()
    qn = connection.ops.quote_name

    # First, try an UPDATE. If that doesn't update anything, do an INSERT.
    pk_val = self._get_pk_val()
    # Note: the comparison with '' is required for compatibility with
    # oldforms-style model creation.
    pk_set = pk_val is not None and smart_unicode(pk_val) != u''
    record_exists = True
    if pk_set:
        # Determine whether a record with the primary key already exists.
        cursor.execute("SELECT 1 FROM %s WHERE %s=%%s" % \
            (qn(self._meta.db_table), qn(self._meta.pk.column)),
            self._meta.pk.get_db_prep_lookup('exact', pk_val))
        # If it does already exist, do an UPDATE.
        if cursor.fetchone():
            db_values = [f.get_db_prep_save(raw and getattr(self, f.attname)
                         or f.pre_save(self, False)) for f in non_pks]
            if db_values:
                cursor.execute("UPDATE %s SET %s WHERE %s=%%s" % \
                    (qn(self._meta.db_table),
                     ','.join(['%s=%%s' % qn(f.column) for f in non_pks]),
                     qn(self._meta.pk.column)),
                    db_values + self._meta.pk.get_db_prep_lookup('exact', pk_val))
        else:
            record_exists = False
    if not pk_set or not record_exists:
        # INSERT path: auto fields are normally omitted so the database
        # assigns them.
        field_names = [qn(f.column) for f in self._meta.fields
                       if not isinstance(f, AutoField)]
        db_values = [f.get_db_prep_save(raw and getattr(self, f.attname)
                     or f.pre_save(self, True)) for f in self._meta.fields
                     if not isinstance(f, AutoField)]
        # If the PK has been manually set, respect that.
        if pk_set:
            field_names += [f.column for f in self._meta.fields
                            if isinstance(f, AutoField)]
            db_values += [f.get_db_prep_save(raw and getattr(self, f.attname)
                          or f.pre_save(self, True)) for f in self._meta.fields
                          if isinstance(f, AutoField)]
        placeholders = ['%s'] * len(field_names)
        if self._meta.order_with_respect_to:
            # _order is the count of existing rows sharing the same
            # "order with respect to" value, fetched up front.
            field_names.append(qn('_order'))
            placeholders.append('%s')
            subsel = 'SELECT COUNT(*) FROM %s WHERE %s = %%s' % (
                qn(self._meta.db_table),
                qn(self._meta.order_with_respect_to.column))
            cursor.execute(subsel,
                (getattr(self, self._meta.order_with_respect_to.attname),))
            db_values.append(cursor.fetchone()[0])
        # We are inserting, so post_save will report created=True.
        record_exists = False
        if db_values:
            cursor.execute("INSERT INTO %s (%s) VALUES (%s)" % \
                (qn(self._meta.db_table), ','.join(field_names),
                 ','.join(placeholders)), db_values)
        else:
            # Create a new record with defaults for everything.
            cursor.execute("INSERT INTO %s (%s) VALUES (%s)" %
                (qn(self._meta.db_table), qn(self._meta.pk.column),
                 connection.ops.pk_default_value()))
        if self._meta.has_auto_field and not pk_set:
            # Pull the database-assigned id back onto the instance.
            setattr(self, self._meta.pk.attname,
                    connection.ops.last_insert_id(cursor, self._meta.db_table,
                                                  self._meta.pk.column))
    transaction.commit_unless_managed()

    # Run any post-save hooks.
    dispatcher.send(signal=signals.post_save, sender=self.__class__,
                    instance=self, created=(not record_exists), raw=raw)
return response except http.Http404, e: if settings.DEBUG: from django.views import debug return debug.technical_404_response(request, e) else: callback, param_dict = resolver.resolve404() return callback(request, **param_dict) except exceptions.PermissionDenied: return http.HttpResponseForbidden('<h1>Permission denied</h1>') except SystemExit: pass # See http://code.djangoproject.com/ticket/1023 except: # Handle everything else, including SuspiciousOperation, etc. # Get the exception info now, in case another exception is thrown later. exc_info = sys.exc_info() receivers = dispatcher.send(signal=signals.got_request_exception, request=request) if settings.DEBUG: from django.views import debug return debug.technical_500_response(request, *exc_info) else: # When DEBUG is False, send an error message to the admins. subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path) try: request_repr = repr(request) except: request_repr = "Request repr() unavailable" message = "%s\n\n%s" % (self._get_traceback(exc_info), request_repr) mail_admins(subject, message, fail_silently=True) # Return an HttpResponse that displays a friendly error message. callback, param_dict = resolver.resolve500() return callback(request, **param_dict)
def user_logout(request):
    """Broadcast the user_logout signal, then terminate the session."""
    dispatcher.send(signal=apps.users.signals.user_logout, request=request)
    logout(request)
def delete_objects(seen_objs):
    "Iterate through a list of seen classes, and remove any instances that are referred to"
    # seen_objs maps model class -> {pk: instance}.  Deletion happens in two
    # passes: first sever references (m2m rows, nullable FKs), then delete
    # the rows themselves, so FK constraints are never violated mid-way.
    qn = backend.quote_name
    # Process classes in reverse discovery order (dependents before parents).
    ordered_classes = seen_objs.keys()
    ordered_classes.reverse()
    cursor = connection.cursor()
    for cls in ordered_classes:
        # Flatten {pk: instance} into a deterministic, pk-sorted pair list.
        seen_objs[cls] = seen_objs[cls].items()
        seen_objs[cls].sort()
        # Pre notify all instances to be deleted
        for pk_val, instance in seen_objs[cls]:
            dispatcher.send(signal=signals.pre_delete, sender=cls, instance=instance)
        pk_list = [pk for pk,instance in seen_objs[cls]]
        # Remove reverse m2m rows pointing at these objects, in chunks so the
        # IN (...) clause never exceeds GET_ITERATOR_CHUNK_SIZE placeholders.
        for related in cls._meta.get_all_related_many_to_many_objects():
            if not isinstance(related.field, GenericRelation):
                for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
                    cursor.execute("DELETE FROM %s WHERE %s IN (%s)" % \
                        (qn(related.field.m2m_db_table()),
                         qn(related.field.m2m_reverse_name()),
                         ','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]])),
                        pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE])
        # Remove this model's own m2m rows; generic relations also need the
        # content-type filter appended to the WHERE clause.
        for f in cls._meta.many_to_many:
            if isinstance(f, GenericRelation):
                from django.contrib.contenttypes.models import ContentType
                query_extra = 'AND %s=%%s' % f.rel.to._meta.get_field(f.content_type_field_name).column
                args_extra = [ContentType.objects.get_for_model(cls).id]
            else:
                query_extra = ''
                args_extra = []
            for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
                cursor.execute(("DELETE FROM %s WHERE %s IN (%s)" % \
                    (qn(f.m2m_db_table()), qn(f.m2m_column_name()),
                     ','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]]))) + query_extra,
                    pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE] + args_extra)
        # Null out nullable FKs from other to-be-deleted classes so the later
        # row deletes cannot trip referential integrity.
        for field in cls._meta.fields:
            if field.rel and field.null and field.rel.to in seen_objs:
                for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
                    cursor.execute("UPDATE %s SET %s=NULL WHERE %s IN (%s)" % \
                        (qn(cls._meta.db_table), qn(field.column), qn(cls._meta.pk.column),
                         ','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]])),
                        pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE])
    # Now delete the actual data
    for cls in ordered_classes:
        # Delete in reverse pk order within each class.
        seen_objs[cls].reverse()
        pk_list = [pk for pk,instance in seen_objs[cls]]
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            cursor.execute("DELETE FROM %s WHERE %s IN (%s)" % \
                (qn(cls._meta.db_table), qn(cls._meta.pk.column),
                 ','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]])),
                pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE])
        # Last cleanup; set NULLs where there once was a reference to the object,
        # NULL the primary key of the found objects, and perform post-notification.
        for pk_val, instance in seen_objs[cls]:
            for field in cls._meta.fields:
                if field.rel and field.null and field.rel.to in seen_objs:
                    setattr(instance, field.attname, None)
            setattr(instance, cls._meta.pk.attname, None)
            dispatcher.send(signal=signals.post_delete, sender=cls, instance=instance)
    transaction.commit_unless_managed()
def save_base(self, raw=False, cls=None):
    """
    Does the heavy-lifting involved in saving. Subclasses shouldn't need to
    override this method. It's separate from save() in order to hide the
    need for overrides of save() to pass around internal-only parameters
    ('raw' and 'cls').

    raw -- if True, store attribute values exactly as given (no pre_save()
           massaging, no parent-table handling).
    cls -- internal: the model class being written on recursive calls for
           multi-table inheritance; None on the outermost call.
    """
    if not cls:
        # Outermost call: operate on this instance's own model and emit the
        # pre/post_save signals (exactly once per save).
        cls = self.__class__
        meta = self._meta
        signal = True
        dispatcher.send(signal=signals.pre_save, sender=self.__class__, instance=self, raw=raw)
    else:
        # Recursive call for a parent table: no signals from this level.
        meta = cls._meta
        signal = False
    # If we are in a raw save, save the object exactly as presented.
    # That means that we don't try to be smart about saving attributes
    # that might have come from the parent class - we just save the
    # attributes we have been given to the class we have been given.
    if not raw:
        for parent, field in list(meta.parents.items()):
            # At this point, parent's primary key field may be unknown
            # (for example, from administration form which doesn't fill
            # this field). If so, fill it.
            if getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
                setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
            # Save the parent row first, then copy its pk into our link field.
            self.save_base(raw, parent)
            setattr(self, field.attname, self._get_pk_val(parent._meta))
    non_pks = [f for f in meta.local_fields if not f.primary_key]
    # First, try an UPDATE. If that doesn't update anything, do an INSERT.
    pk_val = self._get_pk_val(meta)
    # Note: the comparison with '' is required for compatibility with
    # oldforms-style model creation.
    pk_set = pk_val is not None and smart_unicode(pk_val) != ''
    record_exists = True
    manager = cls._default_manager
    if pk_set:
        # Determine whether a record with the primary key already exists.
        if manager.filter(pk=pk_val).extra(select={'a': 1}).values('a').order_by():
            # It does already exist, so do an UPDATE.
            if non_pks:
                values = [(f, None, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
                manager.filter(pk=pk_val)._update(values)
        else:
            record_exists = False
    if not pk_set or not record_exists:
        if not pk_set:
            # Omit AutoFields so the database assigns the id.
            values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True))) for f in meta.local_fields if not isinstance(f, AutoField)]
        else:
            values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True))) for f in meta.local_fields]
        if meta.order_with_respect_to:
            # Append the hidden _order column: the new row is placed after
            # all existing rows that share the same related object.
            field = meta.order_with_respect_to
            values.append((meta.get_field_by_name('_order')[0], manager.filter(**{field.name: getattr(self, field.attname)}).count()))
        record_exists = False
        update_pk = bool(meta.has_auto_field and not pk_set)
        if values:
            # Create a new record.
            result = manager._insert(values, return_id=update_pk)
        else:
            # Create a new record with defaults for everything.
            result = manager._insert([(meta.pk, connection.ops.pk_default_value())], return_id=update_pk, raw_values=True)
        if update_pk:
            # _insert returned the database-assigned id; keep it locally.
            setattr(self, meta.pk.attname, result)
    transaction.commit_unless_managed()
    if signal:
        dispatcher.send(signal=signals.post_save, sender=self.__class__, instance=self, created=(not record_exists), raw=raw)
def __init__(self, *args, **kwargs):
    """
    Populate field attributes from positional args (in field-declaration
    order) and/or keyword args, sending pre_init before any assignment and
    post_init once the instance is fully populated.
    """
    dispatcher.send(signal=signals.pre_init, sender=self.__class__, args=args, kwargs=kwargs)
    # There is a rather weird disparity here; if kwargs, it's set, then args
    # overrides it. It should be one or the other; don't duplicate the work
    # The reason for the kwargs check is that standard iterator passes in by
    # args, and instantiation for iteration is 33% faster.
    args_len = len(args)
    if args_len > len(self._meta.fields):
        # Daft, but matches old exception sans the err msg.
        raise IndexError("Number of args exceeds number of fields")
    fields_iter = iter(self._meta.fields)
    if not kwargs:
        # The ordering of the izip calls matter - izip throws StopIteration
        # when an iter throws it. So if the first iter throws it, the second
        # is *not* consumed. We rely on this, so don't change the order
        # without changing the logic.
        for val, field in zip(args, fields_iter):
            setattr(self, field.attname, val)
    else:
        # Slower, kwargs-ready version.
        for val, field in zip(args, fields_iter):
            setattr(self, field.attname, val)
            # The positional value wins; drop any duplicate keyword for it.
            kwargs.pop(field.name, None)
            # Maintain compatibility with existing calls.
            if isinstance(field.rel, ManyToOneRel):
                kwargs.pop(field.attname, None)
    # Now we're left with the unprocessed fields that *must* come from
    # keywords, or default.  fields_iter continues where the loop above
    # stopped, so positionally-filled fields are not revisited.
    for field in fields_iter:
        if kwargs:
            if isinstance(field.rel, ManyToOneRel):
                try:
                    # Assume object instance was passed in.
                    rel_obj = kwargs.pop(field.name)
                except KeyError:
                    try:
                        # Object instance wasn't passed in -- must be an ID.
                        val = kwargs.pop(field.attname)
                    except KeyError:
                        val = field.get_default()
                else:
                    # Object instance was passed in. Special case: You can
                    # pass in "None" for related objects if it's allowed.
                    if rel_obj is None and field.null:
                        val = None
                    else:
                        try:
                            # Store the related row's pk value, not the object.
                            val = getattr(rel_obj, field.rel.get_related_field().attname)
                        except AttributeError:
                            raise TypeError("Invalid value: %r should be a %s instance, not a %s" % (field.name, field.rel.to, type(rel_obj)))
            else:
                val = kwargs.pop(field.attname, field.get_default())
        else:
            val = field.get_default()
        setattr(self, field.attname, val)
    if kwargs:
        # Leftover keywords may be settable properties on the class...
        for prop in list(kwargs.keys()):
            try:
                if isinstance(getattr(self.__class__, prop), property):
                    setattr(self, prop, kwargs.pop(prop))
            except AttributeError:
                pass
        # ...anything still unconsumed is a genuine caller error.
        if kwargs:
            raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs.keys())[0])
    dispatcher.send(signal=signals.post_init, sender=self.__class__, instance=self)
except http.Http404, e: if settings.DEBUG: from django.views import debug return debug.technical_404_response(request, e) else: callback, param_dict = resolver.resolve404() return callback(request, **param_dict) except exceptions.PermissionDenied: return http.HttpResponseForbidden('<h1>Permission denied</h1>') except SystemExit: # Allow sys.exit() to actually exit. See tickets #1023 and #4701 raise except: # Handle everything else, including SuspiciousOperation, etc. # Get the exception info now, in case another exception is thrown later. exc_info = sys.exc_info() receivers = dispatcher.send(signal=signals.got_request_exception, request=request) if settings.DEBUG_PROPAGATE_EXCEPTIONS: raise elif settings.DEBUG: from django.views import debug return debug.technical_500_response(request, *exc_info) else: # When DEBUG is False, send an error message to the admins. subject = 'Error (%s IP): %s' % ( (request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path) try: request_repr = repr(request) except: request_repr = "Request repr() unavailable"
def save_base(self, raw=False, cls=None):
    """
    Does the heavy-lifting involved in saving. Subclasses shouldn't need to
    override this method. It's separate from save() in order to hide the
    need for overrides of save() to pass around internal-only parameters
    ('raw' and 'cls').

    raw -- when True, write field values verbatim (skips pre_save() and the
           parent-class bookkeeping below).
    cls -- internal recursion parameter naming the (parent) model class to
           write; None for the top-level call.
    """
    if not cls:
        # Top-level call: use this instance's model and fire the signals.
        cls = self.__class__
        meta = self._meta
        signal = True
        dispatcher.send(signal=signals.pre_save, sender=self.__class__, instance=self, raw=raw)
    else:
        # Recursing into a parent table; signals are the top level's job.
        meta = cls._meta
        signal = False
    # If we are in a raw save, save the object exactly as presented.
    # That means that we don't try to be smart about saving attributes
    # that might have come from the parent class - we just save the
    # attributes we have been given to the class we have been given.
    if not raw:
        for parent, field in list(meta.parents.items()):
            # At this point, parent's primary key field may be unknown
            # (for example, from administration form which doesn't fill
            # this field). If so, fill it.
            if getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
                setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
            # Persist the parent row, then record its pk on the link field.
            self.save_base(raw, parent)
            setattr(self, field.attname, self._get_pk_val(parent._meta))
    non_pks = [f for f in meta.local_fields if not f.primary_key]
    # First, try an UPDATE. If that doesn't update anything, do an INSERT.
    pk_val = self._get_pk_val(meta)
    # Note: the comparison with '' is required for compatibility with
    # oldforms-style model creation.
    pk_set = pk_val is not None and smart_unicode(pk_val) != ''
    record_exists = True
    manager = cls._default_manager
    if pk_set:
        # Determine whether a record with the primary key already exists.
        if manager.filter(pk=pk_val).extra(select={'a': 1}).values('a').order_by():
            # It does already exist, so do an UPDATE.
            if non_pks:
                values = [(f, None, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
                manager.filter(pk=pk_val)._update(values)
        else:
            record_exists = False
    if not pk_set or not record_exists:
        if not pk_set:
            # AutoFields are excluded so the database picks the id.
            values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True))) for f in meta.local_fields if not isinstance(f, AutoField)]
        else:
            values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True))) for f in meta.local_fields]
        if meta.order_with_respect_to:
            # Hidden _order column: new row sorts after existing peers
            # (count of rows sharing the same related object).
            field = meta.order_with_respect_to
            values.append((meta.get_field_by_name('_order')[0], manager.filter(**{field.name: getattr(self, field.attname)}).count()))
        record_exists = False
        update_pk = bool(meta.has_auto_field and not pk_set)
        if values:
            # Create a new record.
            result = manager._insert(values, return_id=update_pk)
        else:
            # Create a new record with defaults for everything.
            result = manager._insert([(meta.pk, connection.ops.pk_default_value())], return_id=update_pk, raw_values=True)
        if update_pk:
            # Store the id the database just assigned.
            setattr(self, meta.pk.attname, result)
    transaction.commit_unless_managed()
    if signal:
        dispatcher.send(signal=signals.post_save, sender=self.__class__, instance=self, created=(not record_exists), raw=raw)
def __init__(self, *args, **kwargs):
    """
    Build an instance from positional values (matched to fields in
    declaration order) and/or keywords, bracketed by the pre_init and
    post_init signals.
    """
    dispatcher.send(signal=signals.pre_init, sender=self.__class__, args=args, kwargs=kwargs)
    # There is a rather weird disparity here; if kwargs, it's set, then args
    # overrides it. It should be one or the other; don't duplicate the work
    # The reason for the kwargs check is that standard iterator passes in by
    # args, and instantiation for iteration is 33% faster.
    args_len = len(args)
    if args_len > len(self._meta.fields):
        # Daft, but matches old exception sans the err msg.
        raise IndexError("Number of args exceeds number of fields")
    fields_iter = iter(self._meta.fields)
    if not kwargs:
        # The ordering of the izip calls matter - izip throws StopIteration
        # when an iter throws it. So if the first iter throws it, the second
        # is *not* consumed. We rely on this, so don't change the order
        # without changing the logic.
        for val, field in zip(args, fields_iter):
            setattr(self, field.attname, val)
    else:
        # Slower, kwargs-ready version.
        for val, field in zip(args, fields_iter):
            setattr(self, field.attname, val)
            # Positional values take precedence over duplicate keywords.
            kwargs.pop(field.name, None)
            # Maintain compatibility with existing calls.
            if isinstance(field.rel, ManyToOneRel):
                kwargs.pop(field.attname, None)
    # Now we're left with the unprocessed fields that *must* come from
    # keywords, or default.  Note fields_iter resumes after the fields
    # consumed by the positional loop above.
    for field in fields_iter:
        if kwargs:
            if isinstance(field.rel, ManyToOneRel):
                try:
                    # Assume object instance was passed in.
                    rel_obj = kwargs.pop(field.name)
                except KeyError:
                    try:
                        # Object instance wasn't passed in -- must be an ID.
                        val = kwargs.pop(field.attname)
                    except KeyError:
                        val = field.get_default()
                else:
                    # Object instance was passed in. Special case: You can
                    # pass in "None" for related objects if it's allowed.
                    if rel_obj is None and field.null:
                        val = None
                    else:
                        try:
                            # Save the related object's pk, not the object itself.
                            val = getattr(rel_obj, field.rel.get_related_field().attname)
                        except AttributeError:
                            raise TypeError("Invalid value: %r should be a %s instance, not a %s" % (field.name, field.rel.to, type(rel_obj)))
            else:
                val = kwargs.pop(field.attname, field.get_default())
        else:
            val = field.get_default()
        setattr(self, field.attname, val)
    if kwargs:
        # Remaining keywords may name settable properties...
        for prop in list(kwargs.keys()):
            try:
                if isinstance(getattr(self.__class__, prop), property):
                    setattr(self, prop, kwargs.pop(prop))
            except AttributeError:
                pass
        # ...whatever is still left is an invalid argument.
        if kwargs:
            raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs.keys())[0])
    dispatcher.send(signal=signals.post_init, sender=self.__class__, instance=self)
def sendsignal(signal=dispatcher.Any, *arg, **kw):
    """Relay a signal through the global dispatcher.

    All extra positional and keyword arguments are forwarded untouched to
    dispatcher.send(); when no signal is given, dispatcher.Any is used.
    """
    send = dispatcher.send
    send(signal=signal, *arg, **kw)
def save(self, epls=None, desls=None):
    """
    Write this instance to the database: UPDATE when a row with this primary
    key exists, otherwise INSERT.  Rather than letting the database assign
    the AutoField, a random id is generated locally and the insert retried
    (up to 10 attempts) if it fails.

    epls/desls -- table-labeling inputs passed through get_write_labeling()
                  and into connection.cursor(); semantics live in that
                  helper.  # TODO confirm
    """
    dispatcher.send(signal=signals.pre_save, sender=self.__class__, instance=self)
    non_pks = [f for f in self._meta.fields if not f.primary_key]
    # Setup connection according to table labels
    epls, desls = self.get_write_labeling (epls, desls)
    cursor = connection.cursor (epls, desls)
    # First, try an UPDATE. If that doesn't update anything, do an INSERT.
    pk_val = self._get_pk_val()
    pk_set = bool(pk_val)
    record_exists = True
    pk_random = False
    if pk_set:
        # Determine whether a record with the primary key already exists.
        cursor.execute ("SELECT 1 FROM %s WHERE %s=%%s LIMIT 1" % (backend.quote_name(self._meta.db_table), backend.quote_name(self._meta.pk.column)), [pk_val])
        # If it does already exist, do an UPDATE.
        if cursor.fetchone():
            db_values = [f.get_db_prep_save(f.pre_save(self, False)) for f in non_pks]
            if db_values:
                cursor.execute("UPDATE %s SET %s WHERE %s=%%s" % (backend.quote_name(self._meta.db_table), ','.join(['%s=%%s' % backend.quote_name(f.column) for f in non_pks]), backend.quote_name(self._meta.pk.column)), db_values + [pk_val])
        else:
            record_exists = False
    if not pk_set or not record_exists:
        # Retry loop: re-roll the random pk and re-attempt the INSERT on
        # failure (e.g. a pk collision), up to 10 times.
        tries = 0
        saved = False
        while tries < 10 and not saved:
            tries += 1
            saved = True
            try:
                # If primary key is not set, set it randomly. It
                # appears that this code assumes there is only one
                # AutoField, and it is the primary key.
                if not pk_set:
                    pk_random = True
                    autofields = [f for f in self._meta.fields if isinstance(f, AutoField)]
                    if len (autofields) != 1:
                        raise AssertionError, "I thought there was only one AutoField, not %d!" % len (autofields)
                    import random
                    # NOTE(review): 0x7ffffff (seven f's, ~134M) looks like it
                    # may be missing a digit vs 0x7fffffff -- confirm intent.
                    setattr (self, autofields[0].attname, random.randint(1, 0x7ffffff))
                field_names = [backend.quote_name(f.column) for f in self._meta.fields if not isinstance(f, AutoField)]
                db_values = [f.get_db_prep_save(f.pre_save(self, True)) for f in self._meta.fields if not isinstance(f, AutoField)]
                # If the PK has been manually set, respect that.
                if pk_set or pk_random:
                    field_names += [f.column for f in self._meta.fields if isinstance(f, AutoField)]
                    db_values += [f.get_db_prep_save(f.pre_save(self, True)) for f in self._meta.fields if isinstance(f, AutoField)]
                placeholders = ['%s'] * len(field_names)
                if self._meta.order_with_respect_to:
                    field_names.append(backend.quote_name('_order'))
                    # TODO: This assumes the database supports subqueries.
                    placeholders.append('(SELECT COUNT(*) FROM %s WHERE %s = %%s)' % \
                        (backend.quote_name(self._meta.db_table), backend.quote_name(self._meta.order_with_respect_to.column)))
                    db_values.append(getattr(self, self._meta.order_with_respect_to.attname))
                if db_values:
                    cursor.execute("INSERT INTO %s (%s) VALUES (%s)" % (backend.quote_name(self._meta.db_table), ','.join(field_names), ','.join(placeholders)), db_values)
                else:
                    # Create a new record with defaults for everything.
                    cursor.execute("INSERT INTO %s (%s) VALUES (%s)" % (backend.quote_name(self._meta.db_table), backend.quote_name(self._meta.pk.column), backend.get_pk_default_value()))
                # We already set the field to a random number, don't get the last insert_id from the DB
                #if self._meta.has_auto_field and not pk_set:
                #    setattr(self, self._meta.pk.attname, backend.get_last_insert_id(cursor, self._meta.db_table, self._meta.pk.column))
            except Exception, e:
                saved = False
                # NOTE(review): re-raising here means the 10-try retry loop
                # never actually retries after a failure -- confirm intent.
                raise
        if not saved:
            raise AssertionError, "Could not save object!"
def save(self):
    """
    Persist the current instance: UPDATE when a row with this primary key
    already exists, otherwise INSERT a new row.  Sends pre_save before any
    database work and post_save after the commit.
    """
    dispatcher.send(signal=signals.pre_save, sender=self.__class__, instance=self)
    non_pks = [f for f in self._meta.fields if not f.primary_key]
    cursor = connection.cursor()
    # First, try an UPDATE. If that doesn't update anything, do an INSERT.
    pk_val = self._get_pk_val()
    pk_set = bool(pk_val)
    record_exists = True
    if pk_set:
        # Determine whether a record with the primary key already exists.
        cursor.execute("SELECT 1 FROM %s WHERE %s=%%s LIMIT 1" % \
            (backend.quote_name(self._meta.db_table), backend.quote_name(self._meta.pk.column)), [pk_val])
        # If it does already exist, do an UPDATE.
        if cursor.fetchone():
            db_values = [f.get_db_prep_save(f.pre_save(self, False)) for f in non_pks]
            if db_values:
                cursor.execute("UPDATE %s SET %s WHERE %s=%%s" % \
                    (backend.quote_name(self._meta.db_table),
                     ','.join(['%s=%%s' % backend.quote_name(f.column) for f in non_pks]),
                     backend.quote_name(self._meta.pk.column)), db_values + [pk_val])
        else:
            record_exists = False
    if not pk_set or not record_exists:
        # INSERT path: AutoFields are omitted so the database assigns them.
        field_names = [backend.quote_name(f.column) for f in self._meta.fields if not isinstance(f, AutoField)]
        db_values = [f.get_db_prep_save(f.pre_save(self, True)) for f in self._meta.fields if not isinstance(f, AutoField)]
        # If the PK has been manually set, respect that.
        if pk_set:
            field_names += [f.column for f in self._meta.fields if isinstance(f, AutoField)]
            db_values += [f.get_db_prep_save(f.pre_save(self, True)) for f in self._meta.fields if isinstance(f, AutoField)]
        placeholders = ['%s'] * len(field_names)
        if self._meta.order_with_respect_to:
            # _order places the new row after its peers via a COUNT subquery.
            field_names.append(backend.quote_name('_order'))
            # TODO: This assumes the database supports subqueries.
            placeholders.append('(SELECT COUNT(*) FROM %s WHERE %s = %%s)' % \
                (backend.quote_name(self._meta.db_table), backend.quote_name(self._meta.order_with_respect_to.column)))
            db_values.append(getattr(self, self._meta.order_with_respect_to.attname))
        if db_values:
            cursor.execute("INSERT INTO %s (%s) VALUES (%s)" % \
                (backend.quote_name(self._meta.db_table), ','.join(field_names),
                 ','.join(placeholders)), db_values)
        else:
            # Create a new record with defaults for everything.
            cursor.execute("INSERT INTO %s (%s) VALUES (%s)" % \
                (backend.quote_name(self._meta.db_table), backend.quote_name(self._meta.pk.column), backend.get_pk_default_value()))
        if self._meta.has_auto_field and not pk_set:
            # Read back the id the database just assigned to the AutoField.
            setattr(self, self._meta.pk.attname, backend.get_last_insert_id(cursor, self._meta.db_table, self._meta.pk.column))
    transaction.commit_unless_managed()
    # Run any post-save hooks.
    dispatcher.send(signal=signals.post_save, sender=self.__class__, instance=self)
def delete_objects(seen_objs):
    "Iterate through a list of seen classes, and remove any instances that are referred to"
    # seen_objs maps model class -> {pk: instance}.  Two passes: first sever
    # references (m2m rows, nullable FKs), then delete the rows themselves,
    # so foreign-key constraints hold throughout.
    qn = backend.quote_name
    # Walk classes in reverse discovery order (dependents before parents).
    ordered_classes = seen_objs.keys()
    ordered_classes.reverse()
    cursor = connection.cursor()
    for cls in ordered_classes:
        # Flatten {pk: instance} into a pk-sorted list of pairs.
        seen_objs[cls] = seen_objs[cls].items()
        seen_objs[cls].sort()
        # Pre notify all instances to be deleted
        for pk_val, instance in seen_objs[cls]:
            dispatcher.send(signal=signals.pre_delete, sender=cls, instance=instance)
        pk_list = [pk for pk, instance in seen_objs[cls]]
        # Drop reverse m2m rows pointing at these objects; chunked so the
        # IN (...) list never exceeds GET_ITERATOR_CHUNK_SIZE entries.
        for related in cls._meta.get_all_related_many_to_many_objects():
            if not isinstance(related.field, generic.GenericRelation):
                for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
                    cursor.execute("DELETE FROM %s WHERE %s IN (%s)" % \
                        (qn(related.field.m2m_db_table()),
                         qn(related.field.m2m_reverse_name()),
                         ','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]])),
                        pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE])
        # Drop this model's own m2m rows; generic relations additionally
        # filter on the content type column.
        for f in cls._meta.many_to_many:
            if isinstance(f, generic.GenericRelation):
                from django.contrib.contenttypes.models import ContentType
                query_extra = 'AND %s=%%s' % f.rel.to._meta.get_field(f.content_type_field_name).column
                args_extra = [ContentType.objects.get_for_model(cls).id]
            else:
                query_extra = ''
                args_extra = []
            for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
                cursor.execute(("DELETE FROM %s WHERE %s IN (%s)" % \
                    (qn(f.m2m_db_table()), qn(f.m2m_column_name()),
                     ','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]]))) + query_extra,
                    pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE] + args_extra)
        # Null out nullable FKs to other doomed classes before row deletion.
        for field in cls._meta.fields:
            if field.rel and field.null and field.rel.to in seen_objs:
                for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
                    cursor.execute("UPDATE %s SET %s=NULL WHERE %s IN (%s)" % \
                        (qn(cls._meta.db_table), qn(field.column), qn(cls._meta.pk.column),
                         ','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]])),
                        pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE])
    # Now delete the actual data
    for cls in ordered_classes:
        # Delete in reverse pk order within each class.
        seen_objs[cls].reverse()
        pk_list = [pk for pk, instance in seen_objs[cls]]
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            cursor.execute("DELETE FROM %s WHERE %s IN (%s)" % \
                (qn(cls._meta.db_table), qn(cls._meta.pk.column),
                 ','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]])),
                pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE])
        # Last cleanup; set NULLs where there once was a reference to the object,
        # NULL the primary key of the found objects, and perform post-notification.
        for pk_val, instance in seen_objs[cls]:
            for field in cls._meta.fields:
                if field.rel and field.null and field.rel.to in seen_objs:
                    setattr(instance, field.attname, None)
            setattr(instance, cls._meta.pk.attname, None)
            dispatcher.send(signal=signals.post_delete, sender=cls, instance=instance)
    transaction.commit_unless_managed()
def get_response(self, request):
    "Returns an HttpResponse object for the given HttpRequest"
    # Pipeline: request middleware -> URL resolution -> view middleware ->
    # the view itself, with exception middleware and handler fallbacks for
    # 404 / PermissionDenied / everything else.
    from django.core import exceptions, urlresolvers
    from django.conf import settings
    # Apply request middleware
    for middleware_method in self._request_middleware:
        response = middleware_method(request)
        if response:
            # A request middleware may short-circuit the whole pipeline.
            return response
    # Get urlconf from request object, if available. Otherwise use default.
    urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
    resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
    try:
        callback, callback_args, callback_kwargs = resolver.resolve(request.path_info)
        # Apply view middleware
        for middleware_method in self._view_middleware:
            response = middleware_method(request, callback, callback_args, callback_kwargs)
            if response:
                return response
        try:
            response = callback(request, *callback_args, **callback_kwargs)
        except Exception as e:
            # If the view raised an exception, run it through exception
            # middleware, and if the exception middleware returns a
            # response, use that. Otherwise, reraise the exception.
            for middleware_method in self._exception_middleware:
                response = middleware_method(request, e)
                if response:
                    return response
            raise
        # Complain if the view returned None (a common error).
        if response is None:
            try:
                view_name = callback.__name__  # If it's a function
            except AttributeError:
                view_name = callback.__class__.__name__ + '.__call__'  # If it's a class
            raise ValueError("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name))
        return response
    except http.Http404 as e:
        if settings.DEBUG:
            from django.views import debug
            return debug.technical_404_response(request, e)
        else:
            try:
                callback, param_dict = resolver.resolve404()
                return callback(request, **param_dict)
            except:
                # The 404 handler itself failed; fall back to the 500 path.
                return self.handle_uncaught_exception(request, resolver, sys.exc_info())
    except exceptions.PermissionDenied:
        return http.HttpResponseForbidden('<h1>Permission denied</h1>')
    except SystemExit:
        # Allow sys.exit() to actually exit. See tickets #1023 and #4701
        raise
    except:  # Handle everything else, including SuspiciousOperation, etc.
        # Get the exception info now, in case another exception is thrown later.
        exc_info = sys.exc_info()
        receivers = dispatcher.send(signal=signals.got_request_exception, request=request)
        return self.handle_uncaught_exception(request, resolver, exc_info)