def unregister(self, model):
    """Remove *model* from version control.

    Raises RegistrationError if the model was never registered.
    """
    if not self.is_registered(model):
        # Parenthesized raise form works on both Python 2 and 3
        # (the original `raise E, "msg"` comma form is a SyntaxError on 3).
        raise RegistrationError("%r has not been registered with django-reversion" % model)
    del self._registered_models[model]
    # Stop tracking saves and deletions for this model.
    post_save.disconnect(self._post_save_receiver, model)
    pre_delete.disconnect(self._pre_delete_receiver, model)
def manage_bookmarks(cls, enabled=True):
    """Connects handlers for bookmarks management.

    This handler could be used to automatically create a related
    bookmark list on given model class instance creation. i.e.:

    >> manage_bookmarks(User)

    It will auto generate a bookmark list associated to each new
    User's instance.

    To disconnect:

    >> manage_bookmarks(User, False)
    """
    model = get_model(cls)
    name = model.__name__.lower()
    create_uid = "create_%s_bookmarks" % name
    delete_uid = "delete_%s_bookmarks" % name
    # Attach or detach the creation/deletion receivers depending on the flag.
    if enabled:
        post_save.connect(_create_bookmarks, model, dispatch_uid=create_uid)
        pre_delete.connect(_delete_bookmarks, model, dispatch_uid=delete_uid)
    else:
        post_save.disconnect(_create_bookmarks, model, dispatch_uid=create_uid)
        pre_delete.disconnect(_delete_bookmarks, model, dispatch_uid=delete_uid)
def __call__(self, request: HttpRequest) -> HttpResponse:
    """Install per-request audit-log signal handlers, run the view, then remove them.

    Handlers are only installed for authenticated users. The request id is
    used as dispatch_uid so each request's handlers are distinct, and
    weak=False keeps the partials alive for the duration of the request.
    """
    # Connect signal for automatic logging
    if hasattr(request, "user") and getattr(request.user, "is_authenticated", False):
        post_save_handler = partial(self.post_save_handler, user=request.user, request=request)
        pre_delete_handler = partial(self.pre_delete_handler, user=request.user, request=request)
        post_save.connect(
            post_save_handler,
            dispatch_uid=LOCAL.authentik["request_id"],
            weak=False,
        )
        pre_delete.connect(
            pre_delete_handler,
            dispatch_uid=LOCAL.authentik["request_id"],
            weak=False,
        )
    try:
        response = self.get_response(request)
    finally:
        # Always disconnect, even when the view raises; otherwise the
        # strongly-referenced (weak=False) handlers would stay attached and
        # keep firing for unrelated requests.
        post_save.disconnect(dispatch_uid=LOCAL.authentik["request_id"])
        pre_delete.disconnect(dispatch_uid=LOCAL.authentik["request_id"])
    return response
def change_logging(request):
    """
    Enable change logging by connecting the appropriate signals to their
    receivers before code is run, and disconnecting them afterward.

    :param request: WSGIRequest object with a unique `id` set
    """
    # Curry signals receivers to pass the current request
    handle_changed_object = curry(_handle_changed_object, request)
    handle_deleted_object = curry(_handle_deleted_object, request)

    # Connect our receivers to the post_save and post_delete signals.
    post_save.connect(handle_changed_object, dispatch_uid="handle_changed_object")
    m2m_changed.connect(handle_changed_object, dispatch_uid="handle_changed_object")
    pre_delete.connect(handle_deleted_object, dispatch_uid="handle_deleted_object")

    try:
        yield
    finally:
        # Disconnect change logging signals even when the wrapped block raises.
        # This is necessary to avoid recording any errant changes during test
        # cleanup (and to avoid leaking request-bound receivers on errors).
        post_save.disconnect(handle_changed_object, dispatch_uid="handle_changed_object")
        m2m_changed.disconnect(handle_changed_object, dispatch_uid="handle_changed_object")
        pre_delete.disconnect(handle_deleted_object, dispatch_uid="handle_deleted_object")
def unregister(cls, sender):
    """Detach the callback registered for *sender*; silently no-op if absent."""
    callback = cls._registry.pop(sender, None)
    if callback is None:
        return
    # The same receiver was attached to all three signals at register time.
    for signal in (post_save, pre_delete, m2m_changed):
        signal.disconnect(receiver=callback, sender=sender)
def load():
    """Synchronise users, projects and tasks from the remote client into the
    local database, suspending the Project/Task signal receivers so the sync
    itself does not trigger them. The receivers are reconnected in ``finally``.
    """
    try:
        # Disable the signals
        pre_save.disconnect(receiver=project_pre_save, sender=Project)
        pre_delete.disconnect(receiver=project_pre_delete, sender=Project)
        pre_save.disconnect(receiver=task_pre_save, sender=Task)
        pre_delete.disconnect(receiver=task_pre_delete, sender=Task)

        # Load the data
        ds = client.load()

        # Process users
        for user in ds.users:
            obj, _ = User.objects.update_or_create(
                defaults={"name": user.name}, gid=user.gid)

        # Process projects
        for prj in ds.projects:
            obj, _ = Project.objects.update_or_create(
                defaults={"name": prj.name}, gid=prj.gid)

        # Process tasks (users/projects must already exist — loaded above)
        for task in ds.tasks:
            obj, _ = Task.objects.update_or_create(defaults={
                "name": task.name,
                "assignee": User.objects.get(gid=task.assignee),
                "project": Project.objects.get(gid=task.project)
            }, gid=task.gid)
    finally:
        # Reconnect the signals
        pre_save.connect(receiver=project_pre_save, sender=Project)
        pre_delete.connect(receiver=project_pre_delete, sender=Project)
        pre_save.connect(receiver=task_pre_save, sender=Task)
        pre_delete.connect(receiver=task_pre_delete, sender=Task)
def _disconnect_signals():
    """Used in testing: detach every TestPlan watcher receiver."""
    receivers = (
        (post_save, plan_watchers.notify_on_plan_is_updated),
        (pre_delete, plan_watchers.load_email_settings_for_later_deletion),
        (post_delete, plan_watchers.notify_deletion_of_plan),
        (pre_save, plan_watchers.pre_save_clean),
    )
    for signal, receiver in receivers:
        signal.disconnect(receiver, TestPlan)
def _disconnect_signals():
    """ used in testing """
    # Detach each watcher from its signal for the TestPlan sender.
    for signal, handler in [
            (post_save, plan_watchers.notify_on_plan_is_updated),
            (pre_delete, plan_watchers.load_email_settings_for_later_deletion),
            (post_delete, plan_watchers.notify_deletion_of_plan),
            (pre_save, plan_watchers.pre_save_clean)]:
        signal.disconnect(handler, TestPlan)
def change_logging(request):
    """
    Enables change logging by connecting the appropriate signals to their
    receivers before code is run, and disconnecting them afterward.
    """
    # Bind the request to thread-local storage and start an empty webhook queue.
    set_request(request)
    thread_locals.webhook_queue = []

    # Connect our receivers to the post_save and post_delete signals
    post_save.connect(handle_changed_object, dispatch_uid="handle_changed_object")
    m2m_changed.connect(handle_changed_object, dispatch_uid="handle_changed_object")
    pre_delete.connect(handle_deleted_object, dispatch_uid="handle_deleted_object")
    clear_webhooks.connect(clear_webhook_queue, dispatch_uid="clear_webhook_queue")

    # Hand control to the wrapped block (generator-based context manager).
    # NOTE(review): if the wrapped block raises, everything below is skipped —
    # confirm whether leaking the receivers/queue on error is acceptable.
    yield

    # Disconnect change logging signals. This is necessary to avoid recording any
    # errant changes during test cleanup
    post_save.disconnect(handle_changed_object, dispatch_uid="handle_changed_object")
    m2m_changed.disconnect(handle_changed_object, dispatch_uid="handle_changed_object")
    pre_delete.disconnect(handle_deleted_object, dispatch_uid="handle_deleted_object")
    clear_webhooks.disconnect(clear_webhook_queue, dispatch_uid="clear_webhook_queue")

    # Flush queued webhooks to RQ
    flush_webhooks(thread_locals.webhook_queue)
    del thread_locals.webhook_queue

    # Clear the request from thread-local storage
    set_request(None)
def teardown(self):  # pragma: no cover
    """Detach the index-update receivers for Status and _concept models."""
    from aristotle_mdr.models import Status, _concept
    for signal, handler, model in (
            (post_save, self.handle_status_change, Status),
            (post_delete, self.handle_status_change, Status),
            (post_save, self.handle_concept_save, _concept),
            (pre_delete, self.handle_concept_delete, _concept)):
        signal.disconnect(handler, sender=model)
    super(AristotleSignalProcessor, self).teardown()
def unregister(model):
    "Unregisters a model from versioning."
    if model not in registry:
        raise NotRegistered(u'The model {0} is not registered'.format(
            model.__name__))
    # Both receivers were registered under the same dispatch_uid.
    uid = registry.pop(model)['dispatch_uid']
    pre_delete.disconnect(sender=model, dispatch_uid=uid)
    post_save.disconnect(sender=model, dispatch_uid=uid)
def __enter__(self):
    """Temporarily detach Algolia's save/delete receivers for each model."""
    # Name-mangled private receivers of AlgoliaEngine, accessed from outside.
    save_receiver = algolia_engine._AlgoliaEngine__post_save_receiver
    delete_receiver = algolia_engine._AlgoliaEngine__pre_delete_receiver
    for model in self.models:
        post_save.disconnect(save_receiver, sender=model)
        pre_delete.disconnect(delete_receiver, sender=model)
def unregister(self, model_class):
    """Removes a model from version control.

    Raises RegistrationError if *model_class* was never registered.
    """
    try:
        self._registry.pop(model_class)
    except KeyError:
        # Parenthesized raise is valid on both Python 2 and 3; the original
        # `raise E, "msg"` comma form is a SyntaxError on Python 3.
        raise RegistrationError("%r has not been registered with Reversion." % model_class)
    else:
        post_save.disconnect(self.post_save_receiver, model_class)
        pre_delete.disconnect(self.pre_delete_receiver, model_class)
def disconnect_signals():
    """Detach the autogroups configuration and profile receivers."""
    for signal, receiver, sender in (
            (pre_save, signals.pre_save_config, AutogroupsConfig),
            (pre_delete, signals.pre_delete_config, AutogroupsConfig),
            (post_save, signals.check_groups_on_profile_update, UserProfile),
            (m2m_changed, signals.autogroups_states_changed, AutogroupsConfig.states.through)):
        signal.disconnect(receiver=receiver, sender=sender)
def __enter__(self):
    """Detach the Algolia engine's receivers from every tracked model."""
    for model in self.models:
        # The receivers are name-mangled privates of AlgoliaEngine.
        for signal, receiver in (
                (post_save, algolia_engine._AlgoliaEngine__post_save_receiver),
                (pre_delete, algolia_engine._AlgoliaEngine__pre_delete_receiver)):
            signal.disconnect(receiver, sender=model)
def disconnect(self, model):
    """Detach the CRUD and m2m receivers previously attached for *model*."""
    if model not in self.models:
        return
    # dispatch_uids were built from the class and model at connect time.
    prefix = str(self.__class__) + str(model)
    post_save.disconnect(dispatch_uid=prefix + "_create")
    pre_save.disconnect(dispatch_uid=prefix + "_update")
    pre_delete.disconnect(dispatch_uid=prefix + "_delete")
    self.models.pop(model)
    # Also detach the m2m receiver attached per through-model.
    for m2mfield in model._meta.many_to_many:
        m2m_attr = getattr(model, m2mfield.name)
        m2m_changed.disconnect(
            dispatch_uid=str(self.__class__) + str(m2m_attr.through) + "_associate")
def tearDown(self):
    """Drop the redis client and detach all listing/publishable receivers."""
    redis.client = None
    listing_receivers = (
        (pre_save, redis.listing_pre_save),
        (post_save, redis.listing_post_save),
        (pre_delete, redis.listing_pre_delete),
        (post_delete, redis.listing_post_delete),
    )
    for signal, receiver in listing_receivers:
        signal.disconnect(receiver, sender=Listing)
    content_published.disconnect(redis.publishable_published)
    content_unpublished.disconnect(redis.publishable_unpublished)
    super(TestRedisListings, self).tearDown()
    self.redis.flushdb()
def disconnect():
    """ disconnect signals """
    # All three receivers are registered with sender=Node and a notify uid.
    for signal, handler, uid in (
            (post_save, node_created_handler, 'notify_node_created'),
            (node_status_changed, node_status_changed_handler, 'notify_node_status_changed'),
            (pre_delete, node_deleted_handler, 'notify_node_deleted')):
        signal.disconnect(handler, sender=Node, dispatch_uid=uid)
def unregister(self, model_class):
    """Removes a model from version control.

    Restores any wrapped file-field storages and detaches the signal
    receivers. Raises RegistrationError for unregistered models.
    """
    try:
        registration_info = self._registry.pop(model_class)
    except KeyError:
        # Parenthesized raise is valid on both Python 2 and 3; the original
        # `raise E, "msg"` comma form is a SyntaxError on Python 3.
        raise RegistrationError("%r has not been registered with Reversion." % model_class)
    else:
        # Undo the storage wrapping applied at registration time.
        for field in registration_info.file_fields:
            field.storage = field.storage.wrapped_storage
        post_save.disconnect(self.post_save_receiver, model_class)
        pre_delete.disconnect(self.pre_delete_receiver, model_class)
def unregister(model):
    """Unregisters a model from automatic indexing.

    Return value: the tuple of fields which were being indexed
    """
    for signal, receiver in (
            (pre_delete, unindex_instance),
            (post_save, index_instance),
            (pre_save, unindex_old_instance)):
        signal.disconnect(receiver, sender=model)
    # pop() removes the registration and yields the indexed fields.
    return registry.pop(model)
def unregister(self, model):
    """Removes a model from version control."""
    # A dotted "app.Model" string may be passed instead of a model class.
    if isinstance(model, (str, unicode)):
        model = get_model(*model.split("."))
    if not self.is_registered(model):
        raise RegistrationError("{model} has not been registered with django-reversion".format(model=model))
    del self._registered_models[model]
    # Detach every receiver attached at registration time.
    for signal, receiver in (
            (post_save, self._post_save_receiver),
            (pre_delete, self._pre_delete_receiver),
            (pre_save, self.pre_save_smart_handler),
            (post_delete, self.post_delete_smart_handler)):
        signal.disconnect(receiver, model)
def tearDown(self):
    """Reset the redis client and detach all listing and publish receivers."""
    redis.client = None
    for signal, handler in [
            (pre_save, redis.listing_pre_save),
            (post_save, redis.listing_post_save),
            (pre_delete, redis.listing_pre_delete),
            (post_delete, redis.listing_post_delete)]:
        signal.disconnect(handler, sender=Listing)
    content_published.disconnect(redis.publishable_published)
    content_unpublished.disconnect(redis.publishable_unpublished)
    super(TestAuthorLH, self).tearDown()
    self.redis.flushdb()
def unregister(model):
    """Unregister a model to the audit code.

    :param model: Model to unregister.
    :type model: object
    """
    try:
        uid = str(model)
        pre_save.disconnect(_pre_save, sender=model, dispatch_uid=uid)
        post_save.disconnect(_post_save, sender=model, dispatch_uid=uid)
        pre_delete.disconnect(_pre_delete, sender=model, dispatch_uid=uid)
    except Exception as e:
        # BaseException.message was removed in Python 3; logging the exception
        # object itself formats the same text via %s.
        logger.error("<Unregister> %s", e)
def disconnect_signals():
    """Detach the search-index receivers for Document and Category."""
    for signal, receiver, sender, uid in (
            (document_form_saved, update_index, Document, 'update_index'),
            (post_save, update_index, Document, 'update_index'),
            (pre_delete, remove_from_index, Document, 'remove_from_index'),
            (post_save, save_mapping, Category, 'put_category_mapping')):
        signal.disconnect(receiver, sender=sender, dispatch_uid=uid)
def handle(self, *args, **keywordargs):
    """Delete Layer rows whose store or resource no longer exists in GeoServer.

    The geoserver_pre_delete receiver is suspended for the duration so the
    cleanup does not try to remove already-missing GeoServer resources; it is
    always reconnected in ``finally``.
    """
    try:
        pre_delete.disconnect(geoserver_pre_delete, sender=Layer)
        cat = Layer.objects.gs_catalog
        storenames = [s.name for s in cat.get_stores()]
        layernames = [l.name for l in cat.get_resources()]
        for l in Layer.objects.all():
            if l.store not in storenames or l.name not in layernames:
                l.delete()
                # print() with one argument works on Python 2 and 3 alike;
                # the original statement form is a SyntaxError on Python 3.
                print('[cleared] Layer %s' % l)
    except URLError:
        print("Couldn't connect to GeoServer; is it running? Make sure the GEOSERVER_BASE_URL setting is set correctly.")
    finally:
        pre_delete.connect(geoserver_pre_delete, sender=Layer)
def __call__(self, request):
    """Track object changes made during the request, then record/log them."""
    # Prepare to collect objects that have been changed
    local_thread.changed_objects = []

    # Assign an ID to the given request in case we have to handle multiple objects
    request.id = uuid.uuid4()

    # Listen for objects being saved (created/updated) and deleted
    post_save.connect(cache_changed_object, dispatch_uid="log_object_being_changed")
    pre_delete.connect(
        cache_deleted_object, dispatch_uid="log_object_being_deleted"
    )

    try:
        # Process the request
        response = self.get_response(request)
    finally:
        # Stop listening for object changes. The original code disconnected
        # only after the "no changes" early return, so on change-free requests
        # (and on view exceptions) the receivers were never detached.
        post_save.disconnect(
            cache_changed_object, dispatch_uid="log_object_being_changed"
        )
        pre_delete.disconnect(
            cache_deleted_object, dispatch_uid="log_object_being_deleted"
        )

    # Nothing to do as there are no changes to process
    if not local_thread.changed_objects:
        return response

    # Record change for each object that need to be tracked
    has_redis_failed = False
    for changed_object, action in local_thread.changed_objects:
        if hasattr(changed_object, "log_change"):
            changed_object.log_change(request.user, request.id, action)
        try:
            enqueue_webhooks(changed_object, request.user, request.id, action)
        except RedisError as e:
            # Warn the user only once per request, however many objects fail.
            if not has_redis_failed:
                messages.error(
                    request,
                    f"An error has occured while processing webhooks for this request. Check that the Redis service is running and reachable. The full error details were: {e}",
                )
                has_redis_failed = True

    # Cleanup object changes that are too old (based on changelog retention)
    if local_thread.changed_objects and settings.CHANGELOG_RETENTION:
        date_limit = timezone.now() - timedelta(days=settings.CHANGELOG_RETENTION)
        ObjectChange.objects.filter(time__lt=date_limit).delete()

    return response
def remove_xform(xform):
    """Delete *xform*, its related models and its mongo instances.

    The ParsedInstance pre_delete receiver is suspended during the bulk
    delete and reconnected afterwards, even if the delete raises.
    """
    # disconnect parsed instance pre delete signal
    pre_delete.disconnect(_remove_from_mongo, sender=ParsedInstance)
    try:
        # delete instances from mongo db
        query = {
            ParsedInstance.USERFORM_ID:
            "%s_%s" % (xform.user.username, xform.id_string)}
        xform_instances.remove(query, j=True)

        # delete xform, and all related models
        xform.delete()
    finally:
        # Reconnect the parsed instance pre delete signal even when deletion
        # fails, so later deletes still clean up mongo.
        pre_delete.connect(_remove_from_mongo, sender=ParsedInstance)
def test_delete_object_is_deleted(self):
    """Deleting a Book must also remove its graph node.

    The production pre_delete handler (registered under the chemtrails
    dispatch_uid) is swapped for a test-scoped registration of the very same
    handler, and the original registration is restored in the outer finally.
    """
    pre_delete.disconnect(pre_delete_handler, dispatch_uid='chemtrails.signals.handlers.pre_delete_handler')
    pre_delete.connect(pre_delete_handler, dispatch_uid='pre_delete_handler.test')
    try:
        book = BookFixture(Book).create_one()
        klass = get_node_class_for_model(Book)
        pk = book.pk
        try:
            book.delete()
            # Fetching the node after deletion must raise DoesNotExist.
            klass.nodes.get(pk=pk)
            self.fail('Did not raise when trying to get non-existent book node.')
        except klass.DoesNotExist as e:
            self.assertEqual(str(e), "{'pk': %d}" % pk)
    finally:
        # Restore the original registration and drop the test-scoped one.
        pre_delete.connect(pre_delete_handler, dispatch_uid='chemtrails.signals.handlers.pre_delete_handler')
        pre_delete.disconnect(pre_delete_handler, dispatch_uid='pre_delete_handler.test')
def silenced_email_pre_delete():
    """Disable the handling of pre_delete signal for Email.

    This is required because when trying to delete the entire thread, it
    doesn't make sense to keep rebalancing the parent of the thread. This is
    what the `on_pre_delete` signal handler does.

    We use this only when we are absolutely sure that we are going to delete
    the thread because we know it can land us a weird state where there are
    no parents or references to non-existent rows.
    """
    pre_delete.disconnect(Email_on_pre_delete, sender=Email)
    post_delete.disconnect(Email_on_post_delete, sender=Email)
    try:
        yield
    finally:
        # Reconnect even when the wrapped deletion raises, so Email signal
        # handling is never left permanently disabled.
        pre_delete.connect(Email_on_pre_delete, sender=Email)
        post_delete.connect(Email_on_post_delete, sender=Email)
def delete_objects_for_missing_videos(youtube_ids_in_filesystem, videos_marked_at_all):
    """Delete VideoFile models that claim availability but have no file on disk.

    Returns the list of deleted youtube ids. The cache-invalidation
    pre_delete receiver is suspended during the bulk delete and reconnected
    in ``finally`` — the original placed the reconnect after the ``return``,
    making it unreachable, so the receiver stayed disconnected.
    """
    # VideoFile objects say they're available, but that don't actually exist.
    deleted_video_ids = []
    videos_flagged_for_download = set([video.youtube_id for video in VideoFile.objects.filter(flagged_for_download=True)])
    videos_needing_model_deletion_chunked = break_into_chunks(videos_marked_at_all - youtube_ids_in_filesystem - videos_flagged_for_download)

    # Disconnect cache-invalidation listener to prevent it from being called multiple times
    pre_delete.disconnect(receiver=updates.invalidate_on_video_delete, sender=VideoFile)
    try:
        for chunk in videos_needing_model_deletion_chunked:
            video_files_needing_model_deletion = VideoFile.objects.filter(youtube_id__in=chunk)
            deleted_video_ids += [video_file.youtube_id for video_file in video_files_needing_model_deletion]
            video_files_needing_model_deletion.delete()
    finally:
        pre_delete.connect(receiver=updates.invalidate_on_video_delete, sender=VideoFile)

    if deleted_video_ids:
        caching.invalidate_all_caches()
        # NOTE(review): `self` is not a parameter of this function — confirm it
        # is bound in the enclosing scope (e.g. a management-command closure).
        self.stdout.write("Deleted %d VideoFile models (because the videos didn't exist in the filesystem)\n" % len(deleted_video_ids))

    return deleted_video_ids
def setUp(self):
    """Detach the save/delete receivers for every indexed model."""
    model_handlers = (
        (Person, person_save_handler, person_delete_handler),
        (Organization, organization_save_handler, organization_delete_handler),
        (Membership, membership_save_handler, membership_delete_handler),
        (Post, post_save_handler, post_delete_handler),
    )
    for model, save_handler, delete_handler in model_handlers:
        post_save.disconnect(save_handler, model)
        pre_delete.disconnect(delete_handler, model)
def unregister(self, model):
    """
    Unregisters the given model with Algolia engine.

    If the given model is not registered with Algolia engine, a
    RegistrationError will be raised.
    """
    if self.is_registered(model):
        # Forget the model, then detach its signal receivers.
        del self.__registered_models[model]
        post_save.disconnect(self.__post_save_receiver, model)
        pre_delete.disconnect(self.__pre_delete_receiver, model)
        logger.info('UNREGISTER %s', model)
    else:
        raise RegistrationError(
            '{} is not registered with Algolia engine'.format(model))
def unregister(self, model):
    """
    Unregisters the given model with Algolia engine.

    If the given model is not registered with Algolia engine, a
    RegistrationError will be raised.
    """
    if not self.is_registered(model):
        raise RegistrationError(
            '{} is not registered with Algolia engine'.format(model))

    # Drop the registration entry, then detach both receivers.
    del self.__registered_models[model]
    for signal, receiver in ((post_save, self.__post_save_receiver),
                             (pre_delete, self.__pre_delete_receiver)):
        signal.disconnect(receiver, model)
    logger.info('UNREGISTER %s', model)
def disconnect_discussion_signals():
    """
    Disconnect all the signals on Comment model provided by Zinnia.
    """
    for signal, uid in (
            (post_save, COMMENT_PS_COUNT_DISCUSSIONS),
            (pre_delete, COMMENT_PD_COUNT_DISCUSSIONS),
            (comment_was_flagged, COMMENT_WF_COUNT_DISCUSSIONS),
            (comment_was_posted, COMMENT_WP_COUNT_COMMENTS),
            (pingback_was_posted, PINGBACK_WP_COUNT_PINGBACKS),
            (trackback_was_posted, TRACKBACK_WP_COUNT_TRACKBACKS)):
        signal.disconnect(sender=comment_model, dispatch_uid=uid)
def unregister(self, model):
    """
    Unregisters the given model with this search engine.

    If the given model is not registered with this search engine, a
    RegistrationError will be raised.
    """
    # A QuerySet may stand in for its model (custom live filters).
    if isinstance(model, QuerySet):
        model = model.model
    if not self.is_registered(model):
        raise RegistrationError("{model!r} is not registered with this search engine".format(model=model))
    # Drop the registration, then detach both receivers.
    del self._registered_models[model]
    for signal, receiver in ((post_save, self._post_save_receiver),
                             (pre_delete, self._pre_delete_receiver)):
        signal.disconnect(receiver, model)
def log_delete(sender, instance, **kwargs):
    """Record a deletion in Django's admin LogEntry (best effort).

    The receiver disconnects itself while writing the entry so creating the
    LogEntry cannot re-trigger it, then reconnects.
    """
    try:
        pre_delete.disconnect(log_delete)
        flag = DELETION
        usuario, path, request = obtener_request_atributos()
        # Skip actions performed through the admin, which logs on its own.
        if not "admin" in path:
            try:
                LogEntry.objects.log_action(
                    user_id=usuario.pk,
                    content_type_id=get_content_type_for_model(instance).pk,
                    object_id=instance.pk,
                    object_repr=force_text(instance),
                    action_flag=flag
                )
            except Exception as e:
                # print() works on Python 2 and 3; the original `print e`
                # statement form is a SyntaxError on Python 3.
                print(e)
        pre_delete.connect(log_delete)
    except:
        # Deliberate best-effort: logging must never break the deletion itself.
        pass
def unregister(self, model):
    """
    Unregisters the given model with this search engine.

    If the given model is not registered with this search engine, a
    RegistrationError will be raised.
    """
    # A QuerySet may be passed instead of a model (custom live filters).
    model = model.model if isinstance(model, QuerySet) else model
    if not self.is_registered(model):
        raise RegistrationError(
            "{model!r} is not registered with this search engine".format(
                model=model,
            ))
    # Remove the registration entry and detach the signal receivers.
    del self._registered_models[model]
    post_save.disconnect(self._post_save_receiver, model)
    pre_delete.disconnect(self._pre_delete_receiver, model)
def change_logging(request):
    """
    Enable change logging by connecting the appropriate signals to their
    receivers before code is run, and disconnecting them afterward.

    :param request: WSGIRequest object with a unique `id` set
    """
    webhook_queue = []

    # Curry signals receivers to pass the current request
    handle_changed_object = curry(_handle_changed_object, request, webhook_queue)
    handle_deleted_object = curry(_handle_deleted_object, request, webhook_queue)
    clear_webhook_queue = curry(_clear_webhook_queue, webhook_queue)

    # Connect our receivers to the post_save and post_delete signals.
    post_save.connect(handle_changed_object, dispatch_uid='handle_changed_object')
    m2m_changed.connect(handle_changed_object, dispatch_uid='handle_changed_object')
    pre_delete.connect(handle_deleted_object, dispatch_uid='handle_deleted_object')
    clear_webhooks.connect(clear_webhook_queue, dispatch_uid='clear_webhook_queue')

    # Hand control to the wrapped block (generator-based context manager).
    # NOTE(review): if the wrapped block raises, the disconnects and the
    # webhook flush below are skipped — confirm this is intended.
    yield

    # Disconnect change logging signals. This is necessary to avoid recording any errant
    # changes during test cleanup.
    post_save.disconnect(handle_changed_object, dispatch_uid='handle_changed_object')
    m2m_changed.disconnect(handle_changed_object, dispatch_uid='handle_changed_object')
    pre_delete.disconnect(handle_deleted_object, dispatch_uid='handle_deleted_object')
    clear_webhooks.disconnect(clear_webhook_queue, dispatch_uid='clear_webhook_queue')

    # Flush queued webhooks to RQ
    flush_webhooks(webhook_queue)
    del webhook_queue
def _disconnect(cls):
    """
    Disconnect signal from current model
    """
    # Each receiver was connected with a per-model dispatch_uid built from
    # the same templates used here.
    handlers = (
        (pre_save, notify_items_pre_save, 'knocker_pre_save_{0}'),
        (post_save, notify_items_post_save, 'knocker_post_save_{0}'),
        (pre_delete, notify_items_pre_delete, 'knocker_pre_delete_{0}'),
        (post_delete, notify_items_post_delete, 'knocker_post_delete_{0}'),
    )
    for signal, receiver, uid_template in handlers:
        signal.disconnect(receiver, sender=cls,
                          dispatch_uid=uid_template.format(cls.__name__))
def disconnect_signals(self):
    """Disconnect django signals that update aggregate counts when items are
    modified. We re-compute these counts as a separate step"""
    receivers = (
        (post_save, forum.models.update_num_threads_on_thread_insert, forum.models.Thread),
        (pre_save, forum.models.update_num_threads_on_thread_update, forum.models.Thread),
        (post_delete, forum.models.update_last_post_on_thread_delete, forum.models.Thread),
        (pre_save, forum.models.update_num_posts_on_save_if_moderation_changes, forum.models.Post),
        (post_save, forum.models.update_num_posts_on_post_insert, forum.models.Post),
        (post_delete, forum.models.update_last_post_on_post_delete, forum.models.Post),
        (post_delete, ratings.models.post_delete_rating, ratings.models.SoundRating),
        (post_save, ratings.models.update_num_ratings_on_post_save, ratings.models.SoundRating),
        (post_delete, sounds.models.update_num_downloads_on_delete, sounds.models.Download),
        (post_save, sounds.models.update_num_downloads_on_insert, sounds.models.Download),
        (post_delete, sounds.models.update_num_downloads_on_delete_pack, sounds.models.PackDownload),
        (post_save, sounds.models.update_num_downloads_on_insert_pack, sounds.models.PackDownload),
        (pre_delete, sounds.models.on_delete_sound, sounds.models.Sound),
        (post_delete, sounds.models.post_delete_sound, sounds.models.Sound),
        (post_delete, comments.models.on_delete_comment, comments.models.Comment),
        (post_save, tickets.models.create_ticket_message, tickets.models.TicketComment),
    )
    for signal, receiver, sender in receivers:
        signal.disconnect(receiver, sender=sender)
def disconnect_discussion_signals():
    """
    Disconnect all the signals on Comment model provided by Zinnia.
    """
    uid_by_signal = [
        (post_save, COMMENT_PS_COUNT_DISCUSSIONS),
        (pre_delete, COMMENT_PD_COUNT_DISCUSSIONS),
        (comment_was_flagged, COMMENT_WF_COUNT_DISCUSSIONS),
        (comment_was_posted, COMMENT_WP_COUNT_COMMENTS),
        (pingback_was_posted, PINGBACK_WP_COUNT_PINGBACKS),
        (trackback_was_posted, TRACKBACK_WP_COUNT_TRACKBACKS),
    ]
    for signal, dispatch_uid in uid_by_signal:
        signal.disconnect(sender=comment_model, dispatch_uid=dispatch_uid)
def disconnect_signals():
    """Detach the profile-reassessment and autogroups receivers."""
    for signal, receiver, sender in (
            (post_save, reassess_on_profile_save, UserProfile),
            (pre_save, signals.pre_save_config, AutogroupsConfig),
            (pre_delete, signals.pre_delete_config, AutogroupsConfig),
            (post_save, signals.check_groups_on_profile_update, UserProfile),
            (m2m_changed, signals.autogroups_states_changed, AutogroupsConfig.states.through)):
        signal.disconnect(receiver=receiver, sender=sender)
def disconnect():
    """ disconnect signals """
    # node_status_changed is a custom signal registered without a sender.
    disconnect_args = [
        (post_save, node_created_handler,
         {'sender': Node, 'dispatch_uid': 'notify_node_created'}),
        (node_status_changed, node_status_changed_handler,
         {'dispatch_uid': 'notify_node_status_changed'}),
        (pre_delete, node_deleted_handler,
         {'sender': Node, 'dispatch_uid': 'notify_node_deleted'}),
    ]
    for signal, receiver, kwargs in disconnect_args:
        signal.disconnect(receiver, **kwargs)
def _disconnect_signals():
    """ used in testing """
    pairs = (
        (post_save, plan_watchers.on_plan_save),
        (pre_delete, plan_watchers.on_plan_delete),
        (pre_save, plan_watchers.pre_save_clean),
    )
    for signal, watcher in pairs:
        signal.disconnect(watcher, TestPlan)
def test01(self):
    """We create a member, and three GFK-related objects whose `owner`
    fields point to that member. And then we try to delete that member.
    """
    Member = rt.modules.gfktest.Member
    Note = rt.modules.gfktest.Note
    Memo = rt.modules.gfktest.Memo
    Comment = rt.modules.gfktest.Comment
    BrokenGFKs = rt.modules.gfks.BrokenGFKs

    def check_status(*args):
        # Assert the row counts of (Member, Comment, Note, Memo), in order.
        for i, m in enumerate((Member, Comment, Note, Memo)):
            n = m.objects.all().count()
            if n != args[i]:
                msg = "Expected %d objects in %s but found %d"
                msg %= (args[i], m.__name__, n)
                self.fail(msg)

    # Sanity-check the kernel's registered GFK list before exercising it.
    gfklist = [
        (f.model, f.fk_field, f.ct_field)
        for f in settings.SITE.kernel.GFK_LIST]
    self.assertEqual(gfklist, [
        (Comment, 'owner_id', 'owner_type'),
        (Note, 'owner_id', 'owner_type'),
        (Memo, 'owner_id', 'owner_type')])

    def create_objects():
        # One member plus one object of each GFK-owning model pointing at it.
        mbr = Member(name="John",id=1)
        mbr.save()
        self.assertEqual(mbr.name, "John")
        Comment(owner=mbr, text="Just a comment...").save()
        Note(owner=mbr, text="John owes us 100€").save()
        Memo(owner=mbr, text="More about John and his friends").save()
        return mbr

    mbr = create_objects()
    check_status(1, 1, 1, 1)
    try:
        mbr.delete()
    except Warning as e:
        self.assertEqual(
            unicode(e),
            "Cannot delete member John because 1 notes refer to it.")
    else:
        self.fail("Expected an exception")
    # they are all still there:
    check_status(1, 1, 1, 1)
    # delete the note manually
    Note.objects.all().delete()
    check_status(1, 1, 0, 1)
    mbr.delete()
    # the memo remains:
    check_status(0, 0, 0, 1)
    Memo.objects.all().delete()

    # The above behaviour is thanks to a `pre_delete_handler`
    # which Lino adds automatically. Theoretically it is no longer
    # possible to produce broken GFKs. But now we disable this
    # `pre_delete_handler` and use Django's raw `delete` method in
    # order to produce some broken GFKs:
    from django.db.models.signals import pre_delete
    from lino.core.model import pre_delete_handler
    pre_delete.disconnect(pre_delete_handler)

    check_status(0, 0, 0, 0)
    mbr = create_objects()
    check_status(1, 1, 1, 1)
    models.Model.delete(mbr)

    pre_delete.connect(pre_delete_handler)

    # The member has been deleted, but all generic related objects
    # are still there:
    check_status(0, 1, 1, 1)

    # That's what the BrokenGFKs table is supposed to show:
    # rst = BrokenGFKs.request().table2rst()
    rst = BrokenGFKs.request().to_rst()
    # print rst
    # NOTE(review): expected table reconstructed from a collapsed source line —
    # confirm exact column padding against the original file.
    self.assertEqual(rst, """\
================ ================== ======================================================== ========
 Database model   Database object    Message                                                  Action
---------------- ------------------ -------------------------------------------------------- --------
 *comment*        *Comment object*   Invalid primary key 1 for gfktest.Member in `owner_id`   delete
 *note*           *Note object*      Invalid primary key 1 for gfktest.Member in `owner_id`   manual
 *memo*           *Memo object*      Invalid primary key 1 for gfktest.Member in `owner_id`   clear
================ ================== ======================================================== ========
""")
def disconnect():
    """ disconnect signals """
    # Node-scoped receivers first, then the sender-less status signal.
    for signal, handler in ((post_save, node_created_handler),
                            (pre_delete, node_deleted_handler)):
        signal.disconnect(handler, sender=Node)
    node_status_changed.disconnect(node_status_changed_handler)
def teardown(self):  # pragma: no cover
    """Detach the _concept receivers and defer to the base processor."""
    from aristotle_mdr.models import _concept
    for signal, receiver in ((post_save, self.handle_concept_save),
                             (pre_delete, self.handle_concept_delete)):
        signal.disconnect(receiver, sender=_concept)
    # post_revision_commit.disconnect(self.handle_concept_revision)
    super(AristotleSignalProcessor, self).teardown()
def tearDown(self, model):
    """Detach the callbacks attached for *model* during the test."""
    for signal, callback in ((post_save, self._post_save_callback),
                             (pre_delete, self._pre_delete_callback)):
        signal.disconnect(callback, sender=model)
def unwatch_model(self, model):
    """Stop auditing *model*: detach every receiver attached while watching."""
    receivers = [(pre_save, self.pre_save),
                 (post_save, self.post_save),
                 (pre_delete, self.pre_delete)]
    # m2m_changed may be unavailable (None) on old Django versions.
    if m2m_changed is not None:
        receivers.append((m2m_changed, self.m2m_changed))
    for signal, receiver in receivers:
        signal.disconnect(receiver, sender=model)
def teardown(self):
    """Detach the indexing enqueue receivers from the global signals."""
    for signal, receiver in ((post_save, self.enqueue_save),
                             (pre_delete, self.enqueue_delete)):
        signal.disconnect(receiver)