def change_logging(request):
    """
    Enable change logging by connecting the appropriate signals to their
    receivers before code is run, and disconnecting them afterward.

    Intended to be used as a context manager (the body yields exactly once).

    :param request: The current request; stashed in thread-local storage so
        the signal receivers can associate recorded changes with it.
    """
    set_request(request)
    thread_locals.webhook_queue = []

    # Connect our receivers to the post_save and post_delete signals
    post_save.connect(handle_changed_object, dispatch_uid="handle_changed_object")
    m2m_changed.connect(handle_changed_object, dispatch_uid="handle_changed_object")
    pre_delete.connect(handle_deleted_object, dispatch_uid="handle_deleted_object")
    clear_webhooks.connect(clear_webhook_queue, dispatch_uid="clear_webhook_queue")

    try:
        yield
    finally:
        # Disconnect change logging signals. Wrapped in try/finally so the
        # receivers are detached (and thread-local state cleaned up) even if
        # the wrapped code raises; otherwise errant changes could be recorded
        # during test cleanup.
        post_save.disconnect(handle_changed_object, dispatch_uid="handle_changed_object")
        m2m_changed.disconnect(handle_changed_object, dispatch_uid="handle_changed_object")
        pre_delete.disconnect(handle_deleted_object, dispatch_uid="handle_deleted_object")
        clear_webhooks.disconnect(clear_webhook_queue, dispatch_uid="clear_webhook_queue")

        # Flush queued webhooks to RQ
        flush_webhooks(thread_locals.webhook_queue)
        del thread_locals.webhook_queue

        # Clear the request from thread-local storage
        set_request(None)
def connect_signals(cls, connected=True):
    """
    Attach (or, when ``connected`` is False, detach) the class's signal
    receivers for its configured model, including m2m_changed receivers for
    every ManyToManyField on the model.
    """
    def _toggle(signal, receiver, sender):
        # Single dispatch point for connect vs. disconnect.
        if connected:
            signal.connect(receiver, sender=sender)
        else:
            signal.disconnect(receiver, sender=sender)

    if cls.model and getattr(cls, 'post_save_connect', False) and hasattr(cls, 'post_save'):
        _toggle(post_save, cls.post_save, cls.model)
        # Wire up every m2m relation's auto-created through model as well.
        for field in cls.model._meta.get_fields():
            if isinstance(field, ManyToManyField):
                _toggle(m2m_changed, cls.m2m_changed, getattr(cls.model, field.name).through)

    if cls.model and getattr(cls, 'post_delete_connect', False) and hasattr(cls, 'post_delete'):
        _toggle(post_delete, cls.post_delete, cls.model)
def unregister(cls, sender):
    """
    Remove *sender* from the registry and disconnect its callback from the
    post_save, pre_delete and m2m_changed signals. No-op if unregistered.
    """
    callback = cls._registry.pop(sender, None)
    if callback is None:
        return
    for signal in (post_save, pre_delete, m2m_changed):
        signal.disconnect(receiver=callback, sender=sender)
def test_admin_form_should_stop_user_demoting_themselves(self):
    """
    The admin change form must refuse edits through which the current user
    would demote themselves: leaving an administrators group, losing the
    superuser flag, or joining the Deleted group.
    """
    self.login()
    from models import IntranetGroup
    manager = IntranetGroup.objects.get(name="Manager")
    # Sanity checks: the fixture must mark Manager as an administrators
    # group and the logged-in user must currently be a superuser member.
    self.assertTrue(manager.administrators,
        """This test will not work unless the Manager group's administrators flag is set""")
    self.assertTrue(self.current_user.is_superuser)
    self.assertIn(manager.group, self.current_user.groups.all())
    url = reverse('admin:binder_intranetuser_change',
        args=[self.current_user.id])
    response = self.client.get(url)
    # POST without changing anything should be fine
    form = self.assertInDict('adminform', response.context).form
    new_values = self.update_form_values(form)
    response = self.client.post(url, new_values, follow=True)
    self.assert_changelist_not_admin_form_with_errors(response)
    # but changing the group should result in an error
    user = IntranetGroup.objects.get(name="User")
    new_values = self.update_form_values(form, groups=[user.pk])
    response = self.client.post(url, new_values)
    self.assert_admin_form_with_errors_not_changelist(response,
        {'groups': ['You cannot demote yourself from the %s group'
            % manager.name]})
    # shouldn't be allowed to do anything that removes our superuser flag
    # remove us from manager group, but keep superuser flag.
    # temporarily disable the signal listener so that it doesn't
    # automatically demote us from superuser
    from django.db.models.signals import m2m_changed
    from django.dispatch import receiver
    m2m_changed.disconnect(sender=User.groups.through,
        receiver=IntranetUser.groups_changed,
        dispatch_uid="User_groups_changed")
    self.current_user.groups = [user]
    m2m_changed.connect(sender=User.groups.through,
        receiver=IntranetUser.groups_changed,
        dispatch_uid="User_groups_changed")
    self.current_user = self.current_user.reload()
    self.assertItemsEqual([user.group], self.current_user.groups.all())
    self.assertTrue(self.current_user.is_superuser)
    # now we're not removing ourselves from any groups, but saving
    # would still demote us automatically from being a superuser.
    response = self.client.post(url, new_values)
    self.assert_admin_form_with_errors_not_changelist(response,
        {'groups': ['You cannot demote yourself from being a superuser. ' +
            'You must put yourself in one of the Administrators groups: ' +
            '%s' % IntranetGroup.objects.filter(administrators=True)]})
    # we shouldn't be allowed to delete ourselves either
    deleted = IntranetGroup.objects.get(name="Deleted")
    user = IntranetGroup.objects.get(name="User")
    new_values = self.update_form_values(form,
        groups=[manager.pk, deleted.pk])
    # import pdb; pdb.set_trace()
    response = self.client.post(url, new_values)
    self.assert_admin_form_with_errors_not_changelist(response,
        {'groups': ['You cannot place yourself in the %s group'
            % deleted.name]})
def change_logging(request):
    """
    Enable change logging by connecting the appropriate signals to their
    receivers before code is run, and disconnecting them afterward.

    Intended to be used as a context manager (the body yields exactly once).

    :param request: WSGIRequest object with a unique `id` set
    """
    # Curry signals receivers to pass the current request
    handle_changed_object = curry(_handle_changed_object, request)
    handle_deleted_object = curry(_handle_deleted_object, request)

    # Connect our receivers to the post_save and post_delete signals.
    post_save.connect(handle_changed_object, dispatch_uid="handle_changed_object")
    m2m_changed.connect(handle_changed_object, dispatch_uid="handle_changed_object")
    pre_delete.connect(handle_deleted_object, dispatch_uid="handle_deleted_object")

    try:
        yield
    finally:
        # Disconnect change logging signals. Done in a finally block so the
        # receivers are detached even when the wrapped code raises; otherwise
        # errant changes could be recorded during test cleanup.
        post_save.disconnect(handle_changed_object, dispatch_uid="handle_changed_object")
        m2m_changed.disconnect(handle_changed_object, dispatch_uid="handle_changed_object")
        pre_delete.disconnect(handle_deleted_object, dispatch_uid="handle_deleted_object")
def setUpClass(cls):
    """
    Disconnect the m2m signal handler before the fixture is loaded, because
    it messes up when loading a fixture.
    """
    # NOTE(review): on modern Django, TestCase fixtures load inside
    # super().setUpClass(), which runs *before* this disconnect — verify the
    # ordering still matches the docstring on the Django version in use.
    super(DeleteModelsTests, cls).setUpClass()
    # Receiver disconnected globally (no sender filter).
    m2m_changed.disconnect(clean_choices)
def disable():
    """
    Disable tracking by disconnecting every tracking receiver from its
    signal, looked up by dispatch uid.
    """
    signal_receivers = (
        (post_init, post_init_receiver),
        (post_save, post_save_receiver),
        (m2m_changed, m2m_changed_receiver),
        (post_delete, post_delete_receiver),
    )
    for signal, receiver in signal_receivers:
        signal.disconnect(dispatch_uid=_dispatch_uid(receiver))
def disconnect_signals():
    """Detach every autogroups-related signal receiver."""
    wiring = (
        (pre_save, signals.pre_save_config, AutogroupsConfig),
        (pre_delete, signals.pre_delete_config, AutogroupsConfig),
        (post_save, signals.check_groups_on_profile_update, UserProfile),
        (m2m_changed, signals.autogroups_states_changed, AutogroupsConfig.states.through),
    )
    for signal, receiver, sender in wiring:
        signal.disconnect(receiver=receiver, sender=sender)
def disable_tile_serialization_signals():
    """Disconnect all assets signals that trigger tile serialization."""
    # Imported lazily to break the circular dependency
    # (signals.py -> models.py -> utils.py -> signals.py).
    import apps.assets.signals as signals
    import apps.assets.models as models

    post_save.disconnect(receiver=signals.tile_saved, sender=models.Tile)
    for m2m_receiver in (signals.tile_m2m_changed, signals.content_m2m_changed):
        m2m_changed.disconnect(m2m_receiver)
def stop(self):
    """
    Stop queuing tiles for serialization: detach this recorder's receivers
    and restore the normal serialization signal wiring.
    """
    # Lazy import mirrors the module's circular-import workaround.
    import apps.assets.models as models

    post_save.disconnect(self._record_tile_save, sender=models.Tile)
    m2m_changed.disconnect(self._record_m2m_changed)
    enable_tile_serialization_signals()
def disconnect(self, model):
    """
    Detach the create/update/delete receivers registered for *model* (by
    dispatch uid) plus one m2m receiver per many-to-many field, then drop
    the model from the watch registry. No-op for unwatched models.
    """
    if model not in self.models:
        return
    # All uids share the same class+model prefix; build it once.
    uid_prefix = str(self.__class__) + str(model)
    post_save.disconnect(dispatch_uid=uid_prefix + "_create")
    pre_save.disconnect(dispatch_uid=uid_prefix + "_update")
    pre_delete.disconnect(dispatch_uid=uid_prefix + "_delete")
    self.models.pop(model)
    for m2mfield in model._meta.many_to_many:
        through = getattr(model, m2mfield.name).through
        m2m_changed.disconnect(dispatch_uid=str(self.__class__) + str(through) + "_associate")
def deregister_signal_handlers():
    # Disconnects the signal handlers for easy access in importers
    m2m_changed.disconnect(product_category_m2m_changed_signal_handler,
                           sender=Product.categories.through)
    for signal, handler, sender in (
        (post_save, product_post_save_signal_handler, Product),
        (post_delete, product_post_delete_signal_handler, Product),
        (post_save, category_change_handler, Category),
        (post_delete, category_change_handler, Category),
    ):
        signal.disconnect(handler, sender=sender)
    # Stock record receivers are only wired when the feature flag is on.
    if settings.HANDLE_STOCKRECORD_CHANGES:
        post_save.disconnect(stockrecord_change_handler, sender=StockRecord)
        post_delete.disconnect(stockrecord_post_delete_handler, sender=StockRecord)
    request_finished.disconnect(update_index.synchronize_searchindex)
def test_m2m_changed_post_clear(self):
    """Clearing an m2m relation removes graph edges in both directions."""
    # Swap the globally registered handler for one registered under a
    # test-local dispatch uid, so the test controls (and can restore) the wiring.
    m2m_changed.disconnect(m2m_changed_handler,
        dispatch_uid='chemtrails.signals.handlers.m2m_changed_handler')
    m2m_changed.connect(m2m_changed_handler,
        dispatch_uid='m2m_changed_handler.test')
    try:
        book = BookFixture(Book, generate_m2m={'authors': (1, 1)}).create_one()
        self.assertEqual(1, len(get_node_class_for_model(Book).nodes.has(authors=True)))
        book.authors.clear()
        # After clear(), neither side of the relation should have edges.
        self.assertEqual(0, len(get_node_class_for_model(Book).nodes.has(authors=True)))
        self.assertEqual(0, len(get_node_class_for_model(Author).nodes.has(book_set=True)))
    finally:
        # Restore the original registration and drop the test-local one.
        m2m_changed.connect(m2m_changed_handler,
            dispatch_uid='chemtrails.signals.handlers.m2m_changed_handler')
        m2m_changed.disconnect(m2m_changed_handler,
            dispatch_uid='m2m_changed_handler.test')
def test_m2m_changed_post_add_reverse(self):
    """Adding via the reverse m2m accessor creates the graph edge."""
    # Re-register the handler under a test-local dispatch uid (restored in
    # the finally block) so this test owns the signal wiring.
    m2m_changed.disconnect(m2m_changed_handler,
        dispatch_uid='chemtrails.signals.handlers.m2m_changed_handler')
    m2m_changed.connect(m2m_changed_handler,
        dispatch_uid='m2m_changed_handler.test')
    try:
        author = AuthorFixture(Author).create_one()
        self.assertEqual(0, len(get_node_class_for_model(Author).nodes.has(book_set=True)))
        book = BookFixture(Book, follow_m2m=False, field_values={'authors': []}).create_one()
        # Reverse-side add: author.book_set rather than book.authors.
        author.book_set.add(book)
        self.assertEqual(1, len(get_node_class_for_model(Author).nodes.has(book_set=True)))
    finally:
        m2m_changed.connect(m2m_changed_handler,
            dispatch_uid='chemtrails.signals.handlers.m2m_changed_handler')
        m2m_changed.disconnect(m2m_changed_handler,
            dispatch_uid='m2m_changed_handler.test')
def disable_computed_fields():
    """
    Temporarily disconnect the receivers that recompute inventory computed
    fields, yield control to the caller, then reconnect them.

    Intended to be used as a context manager (the body yields exactly once).
    """
    for sender in (Host, Group):
        post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=sender)
        post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=sender)
    for through in (Group.hosts.through, Group.parents.through,
                    Host.inventory_sources.through, Group.inventory_sources.through):
        m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=through)
    for sender in (InventorySource, Job):
        post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=sender)
        post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=sender)
    try:
        yield
    finally:
        # Reconnect in a finally block so an exception in the caller's body
        # cannot leave the computed-field signals permanently disconnected.
        connect_computed_field_signals()
def turn_off_syncing(for_post_save=True, for_post_delete=True, for_m2m_changed=True,
                     for_post_bulk_operation=True):
    """
    Disables all of the signals for syncing entities. By default, everything
    is turned off. If the user wants to turn off everything but one signal,
    for example the post_save signal, they would do:

    turn_off_sync(for_post_save=False)
    """
    toggles = (
        (for_post_save, post_save, save_entity_signal_handler,
         'save_entity_signal_handler'),
        (for_post_delete, post_delete, delete_entity_signal_handler,
         'delete_entity_signal_handler'),
        (for_m2m_changed, m2m_changed, m2m_changed_entity_signal_handler,
         'm2m_changed_entity_signal_handler'),
        (for_post_bulk_operation, post_bulk_operation, bulk_operation_signal_handler,
         'bulk_operation_signal_handler'),
    )
    for enabled, signal, handler, uid in toggles:
        if enabled:
            signal.disconnect(handler, dispatch_uid=uid)
def _disconnect_signals(self, model):
    """
    Disconnect every signal previously connected for *model*: the plain
    model signals plus one m2m_changed receiver per tracked m2m field,
    then forget the model's m2m bookkeeping.
    """
    for signal, receiver in self._signals.items():
        uid = self._dispatch_uid(signal, receiver)
        signal.disconnect(sender=model, dispatch_uid=uid)
    for field_name, receiver in self._m2m_signals[model].items():
        # Each m2m receiver was connected against the auto-created through model.
        through = getattr(getattr(model, field_name), "through")
        m2m_changed.disconnect(
            sender=through,
            dispatch_uid=self._dispatch_uid(m2m_changed, receiver),
        )
    del self._m2m_signals[model]
def trigger_group_syncer(sender, instance, created=False, **kwargs):
    """
    :param sender: The model that triggered this hook
    :param instance: The model instance triggering this hook
    :param created: True if the instance was created, False if the instance
        was updated

    Calls the SynchronizeGroups Task if a group is updated. (Not if it's the
    initial creation of a group)
    """
    # Module-level uuid used as the dispatch_uid of the currently connected
    # m2m_changed hook; rotated on every reconnect.
    global sync_uuid
    if created:
        # If a new instance is created, we do not need to trigger group sync.
        pass
    else:
        # If sync is triggered by adding a user to group or a group to a user
        # then we need to detach the signal hook listening to m2m changes on
        # those models as they will trigger a recursive call to this method.
        if sender == User.groups.through:
            logger.debug(
                'Disconnect m2m_changed signal hook with uuid %s before synchronizing groups' % sync_uuid)
            # NOTE(review): nesting reconstructed from flattened source; it
            # assumes only the success log is guarded by the disconnect result
            # and the sync/reconnect always run — confirm against history.
            if m2m_changed.disconnect(sender=sender, dispatch_uid=sync_uuid):
                logger.debug('Signal with uuid %s disconnected' % sync_uuid)
            run_group_syncer(instance)
            # Fresh uid so stale receivers can never be double-connected.
            sync_uuid = uuid.uuid1()
            logger.debug('m2m_changed signal hook reconnected with uuid: %s' % sync_uuid)
            m2m_changed.connect(receiver=trigger_group_syncer,
                dispatch_uid=sync_uuid, sender=User.groups.through)
        else:
            run_group_syncer(instance)
def activate(sync_user, sync_groups, sync_profile):
    """
    Enable or disable LDAP synchronisation receivers. Each flag connects
    the matching handler when true and disconnects it when false.
    """
    def _wire(signal, handler, enabled, message):
        # Connect + warn, or silently disconnect.
        if enabled:
            logger.warning(message)
            signal.connect(handler)
        else:
            signal.disconnect(handler)

    _wire(m2m_changed, group_sync_handler, sync_groups,
          "Group changes will be synchronised to LDAP")
    _wire(post_save, user_sync_handler, sync_user,
          "User changes will be synchronised to LDAP")
    _wire(post_save, profile_sync_handler, sync_profile,
          "User profile changes will be synchronised to LDAP")
def change_logging(request):
    """
    Enable change logging by connecting the appropriate signals to their
    receivers before code is run, and disconnecting them afterward.

    Intended to be used as a context manager (the body yields exactly once).

    :param request: WSGIRequest object with a unique `id` set
    """
    webhook_queue = []

    # Curry signals receivers to pass the current request and queue
    handle_changed_object = curry(_handle_changed_object, request, webhook_queue)
    handle_deleted_object = curry(_handle_deleted_object, request, webhook_queue)
    clear_webhook_queue = curry(_clear_webhook_queue, webhook_queue)

    # Connect our receivers to the post_save and post_delete signals.
    post_save.connect(handle_changed_object, dispatch_uid='handle_changed_object')
    m2m_changed.connect(handle_changed_object, dispatch_uid='handle_changed_object')
    pre_delete.connect(handle_deleted_object, dispatch_uid='handle_deleted_object')
    clear_webhooks.connect(clear_webhook_queue, dispatch_uid='clear_webhook_queue')

    try:
        yield
    finally:
        # Disconnect change logging signals. Done in a finally block so the
        # receivers are detached even when the wrapped code raises; otherwise
        # errant changes could be recorded during test cleanup.
        post_save.disconnect(handle_changed_object, dispatch_uid='handle_changed_object')
        m2m_changed.disconnect(handle_changed_object, dispatch_uid='handle_changed_object')
        pre_delete.disconnect(handle_deleted_object, dispatch_uid='handle_deleted_object')
        clear_webhooks.disconnect(clear_webhook_queue, dispatch_uid='clear_webhook_queue')

    # Flush queued webhooks to RQ (only on successful completion; the local
    # queue is garbage-collected automatically, so no explicit `del` needed).
    flush_webhooks(webhook_queue)
def test_apply_new_approve_definition(self):
    """
    Applying a new ApprovementMeta to an existing workflow object should
    create one additional Approvement for that object.
    """
    from river.models.factories import ApprovementMetaObjectFactory, TransitionObjectFactory
    ct = self.approvement_meta.transition.content_type
    # self.assertEqual(0, Approvement.objects.filter(workflow_object=self.object).count())
    # ObjectService.register_object(self.object, self.field)
    self.assertEqual(
        1,
        Approvement.objects.filter(workflow_object=self.object).count())
    transition = TransitionObjectFactory(content_type=ct,
        field=self.field,
        source_state=self.state2,
        destination_state=self.state3)
    # Detach the group/permission m2m hooks so the factory's m2m writes
    # don't trigger side effects during setup.
    m2m_changed.disconnect(post_group_change, ApprovementMeta.groups.through)
    m2m_changed.disconnect(post_permissions_change, ApprovementMeta.permissions.through)
    approvement_meta = ApprovementMetaObjectFactory(
        transition=transition,
        permissions__in=self.approvement_meta.permissions.all())
    self.assertEqual(
        1,
        Approvement.objects.filter(workflow_object=self.object,
            field=self.field).count())
    ApprovementMetaService.apply_new_approvement_meta(approvement_meta)
    self.assertEqual(
        2,
        Approvement.objects.filter(workflow_object=self.object,
            field=self.field).count())
    # Re-saving the meta must be idempotent: still exactly two approvements.
    approvement_meta.save()
    self.assertEqual(
        2,
        Approvement.objects.filter(workflow_object=self.object,
            field=self.field).count())
def test_apply_new_proceed_definition(self):
    """
    Applying a new ProceedingMeta to an existing workflow object should
    create one additional Proceeding for that object.
    """
    from river.models.factories import ProceedingMetaObjectFactory, TransitionObjectFactory
    ct = self.proceeding_meta.content_type
    # self.assertEqual(0, Proceeding.objects.filter(workflow_object=self.object).count())
    # ObjectService.register_object(self.object, self.field)
    self.assertEqual(
        1,
        Proceeding.objects.filter(workflow_object=self.object).count())
    transition = TransitionObjectFactory(source_state=self.state2,
        destination_state=self.state3)
    # Detach the group/permission m2m hooks so the factory's m2m writes
    # don't trigger side effects during setup.
    m2m_changed.disconnect(post_group_change, ProceedingMeta.groups.through)
    m2m_changed.disconnect(post_permissions_change, ProceedingMeta.permissions.through)
    proceeding_meta = ProceedingMetaObjectFactory(
        content_type=ct,
        transition=transition,
        permissions__in=self.proceeding_meta.permissions.all())
    self.assertEqual(
        1,
        Proceeding.objects.filter(workflow_object=self.object).count())
    ProceedingMetaService.apply_new_proceeding_meta(proceeding_meta)
    self.assertEqual(
        2,
        Proceeding.objects.filter(workflow_object=self.object).count())
    # Re-saving the meta must be idempotent: still exactly two proceedings.
    proceeding_meta.save()
    self.assertEqual(
        2,
        Proceeding.objects.filter(workflow_object=self.object).count())
def disconnect_signals(cls):
    """Detach all auth-related receivers registered by this service."""
    for receiver, through in (
        (m2m_changed_user_groups, User.groups.through),
        (m2m_changed_group_permissions, Group.permissions.through),
        (m2m_changed_user_permissions, User.user_permissions.through),
    ):
        m2m_changed.disconnect(receiver, sender=through)
    pre_save.disconnect(pre_save_user, sender=User)
    pre_save.disconnect(pre_save_auth_state, sender=AuthServicesInfo)
def turn_off_syncing(for_post_save=True, for_post_delete=True, for_m2m_changed=True,
                     for_post_bulk_operation=True):
    """
    Disables all of the signals for syncing entities. By default, everything
    is turned off. If the user wants to turn off everything but one signal,
    for example the post_save signal, they would do:

    turn_off_sync(for_post_save=False)
    """
    if for_post_save:
        post_save.disconnect(save_entity_signal_handler,
                             dispatch_uid='save_entity_signal_handler')
    if for_post_delete:
        post_delete.disconnect(delete_entity_signal_handler,
                               dispatch_uid='delete_entity_signal_handler')
    if for_m2m_changed:
        m2m_changed.disconnect(m2m_changed_entity_signal_handler,
                               dispatch_uid='m2m_changed_entity_signal_handler')
    if for_post_bulk_operation:
        post_bulk_operation.disconnect(bulk_operation_signal_handler,
                                       dispatch_uid='bulk_operation_signal_handler')
def test_apply_new_proceed_definition(self):
    """
    Variant of the proceed-definition test scoped to a specific field:
    applying a new ProceedingMeta adds exactly one Proceeding for
    (workflow_object, field), and re-saving the meta is idempotent.
    """
    from river.models.factories import ProceedingMetaObjectFactory, TransitionObjectFactory
    ct = self.proceeding_meta.content_type
    # self.assertEqual(0, Proceeding.objects.filter(workflow_object=self.object).count())
    # ObjectService.register_object(self.object, self.field)
    self.assertEqual(1, Proceeding.objects.filter(workflow_object=self.object).count())
    transition = TransitionObjectFactory(source_state=self.state2,
        destination_state=self.state3)
    # Detach m2m hooks so the factory's permission/group writes are inert.
    m2m_changed.disconnect(post_group_change, ProceedingMeta.groups.through)
    m2m_changed.disconnect(post_permissions_change, ProceedingMeta.permissions.through)
    proceeding_meta = ProceedingMetaObjectFactory(content_type=ct,
        field=self.field,
        transition=transition,
        permissions__in=self.proceeding_meta.permissions.all())
    self.assertEqual(1, Proceeding.objects.filter(workflow_object=self.object,
        field=self.field).count())
    ProceedingMetaService.apply_new_proceeding_meta(proceeding_meta)
    self.assertEqual(2, Proceeding.objects.filter(workflow_object=self.object,
        field=self.field).count())
    proceeding_meta.save()
    self.assertEqual(2, Proceeding.objects.filter(workflow_object=self.object,
        field=self.field).count())
def disconnect(self):
    """
    Detach this object's pre/post-migrate and m2m_changed receivers (by
    dispatch uid), tracing each uid that was actually disconnected.
    """
    for signal_name in ('pre_migrate', 'post_migrate'):
        signal = globals()[signal_name]
        dispatch_uid = self.get_dispatch_uid(signal_name)
        # disconnect() returns True only if a receiver was actually removed.
        if signal.disconnect(dispatch_uid=dispatch_uid):
            log_trace('%r: disconnect %s', self, dispatch_uid)
    dispatch_uid = self.get_dispatch_uid('m2m_changed')
    if m2m_changed.disconnect(dispatch_uid=dispatch_uid):
        log_trace('%r: disconnect %s', self, dispatch_uid)
def test_signals(self):
    """
    Test signals emitted by various M2M operations.
    """
    # Create some sample data
    category1 = TestCategory(title='test cat 1')
    category1.save()
    category2 = TestCategory(title='test cat 2')
    category2.save()
    tag1 = TestTag(name='test tag 1')
    tag1.save()
    tag2 = TestTag(name='test tag 2')
    tag2.save()
    article = TestArticle(title='test article 1', text='test article 1 text',
        main_category=category1)
    # Test pre_add / post_add
    self.on_add_called = 0

    def on_add(sender, instance, action, reverse, model, pk_set, *args, **kwargs):
        # Fires twice per add(): once for pre_add, once for post_add.
        self.on_add_called += 1
        self.assertEqual(
            sender, TestArticle.categories.through
        )  # sender is always the autocreated through model
        self.assertEqual(instance, article)
        self.assertEqual(
            model, TestCategory)  # model is always the to-side of the relation
        self.assertIn(action, ('pre_add', 'post_add'))
        self.assertEqual(reverse, False)
        self.assertEqual(set(pk_set), set([category1.id]))
        # before add, the current categories should be empty
        if action == 'pre_add':
            self.assertEqual(article.categories.count(), 0)
        # after add, the current categories should be 1
        else:
            self.assertEqual(article.categories.count(), 1)

    m2m_changed.connect(on_add)
    article.categories.add(category1)
    self.assertEqual(self.on_add_called, 2)
    m2m_changed.disconnect(on_add)
    # Test pre_remove / post_remove
    self.on_remove_called = 0

    def on_remove(sender, instance, action, reverse, model, pk_set, *args, **kwargs):
        self.on_remove_called += 1
        self.assertEqual(
            sender, TestArticle.categories.through
        )  # sender is always the autocreated through model
        self.assertEqual(instance, article)
        self.assertEqual(
            model, TestCategory)  # model is always the to-side of the relation
        self.assertIn(action, ('pre_remove', 'post_remove'))
        self.assertEqual(reverse, False)
        self.assertEqual(set(pk_set), set([category1.id]))
        # before remove, the current categories should be 1
        if action == 'pre_remove':
            self.assertEqual(article.categories.count(), 1)
        # after remove, the current categories should be empty
        else:
            self.assertEqual(article.categories.count(), 0)

    m2m_changed.connect(on_remove)
    article.categories.remove(category1)
    self.assertEqual(self.on_remove_called, 2)
    m2m_changed.disconnect(on_remove)
    # Test pre_clear / post_clear
    article.categories.add(category1)
    self.on_clear_called = 0

    def on_clear(sender, instance, action, reverse, model, pk_set, *args, **kwargs):
        self.on_clear_called += 1
        self.assertEqual(
            sender, TestArticle.categories.through
        )  # sender is always the autocreated through model
        self.assertEqual(instance, article)
        self.assertEqual(
            model, TestCategory)  # model is always the to-side of the relation
        self.assertIn(action, ('pre_clear', 'post_clear'))
        self.assertEqual(reverse, False)
        # NOTE(review): stock Django sends pk_set=None for clear() —
        # this assertion implies a customized m2m implementation; confirm.
        self.assertEqual(set(pk_set), set([category1.id]))
        # before remove, the current categories should be 1
        if action == 'pre_clear':
            self.assertEqual(article.categories.count(), 1)
        # after remove, the current categories should be empty
        else:
            self.assertEqual(article.categories.count(), 0)

    m2m_changed.connect(on_clear)
    article.categories.clear()
    self.assertEqual(self.on_clear_called, 2)
    m2m_changed.disconnect(on_clear)
def disconnect_signals(cls):
    """Detach every auth/state receiver registered by this service."""
    m2m_wiring = (
        (m2m_changed_user_groups, User.groups.through),
        (m2m_changed_group_permissions, Group.permissions.through),
        (m2m_changed_user_permissions, User.user_permissions.through),
        (m2m_changed_state_permissions, State.permissions.through),
    )
    for receiver, through in m2m_wiring:
        m2m_changed.disconnect(receiver, sender=through)
    pre_save.disconnect(disable_services_on_inactive, sender=User)
    # State membership changes (corporations / characters / alliances).
    for receiver, through in (
        (state_member_corporations_changed, State.member_corporations.through),
        (state_member_characters_changed, State.member_characters.through),
        (state_member_alliances_changed, State.member_alliances.through),
    ):
        m2m_changed.disconnect(receiver, sender=through)
    post_save.disconnect(state_saved, sender=State)
def tearDown(self, model):
    """Detach the ownership m2m receiver and the pre-delete receiver for *model*."""
    m2m_changed.disconnect(self._m2m_changed_callback,
                           sender=model.owners.through)
    pre_delete.disconnect(self._pre_delete_callback,
                          sender=model)
def unwatch_model(self, model):
    """Stop observing *model*: disconnect every receiver this watcher attached."""
    for signal, receiver in (
        (pre_save, self.pre_save),
        (post_save, self.post_save),
        (pre_delete, self.pre_delete),
    ):
        signal.disconnect(receiver, sender=model)
    # m2m_changed may be None in environments where it is unavailable.
    if m2m_changed is not None:
        m2m_changed.disconnect(self.m2m_changed, sender=model)
def synchro_disconnect():
    """Detach the synchro receivers, identified by their dispatch uids."""
    for signal, uid in (
        (post_save, 'synchro_add_chg'),
        (post_delete, 'synchro_del'),
        (m2m_changed, 'synchro_m2m'),
    ):
        signal.disconnect(dispatch_uid=uid)
def setUp(self):
    """
    Build the Discord fixture graph (client, group, users, token) with the
    outbound-sync signal receivers detached so no network calls fire.
    """
    print(self._testMethodName)
    from django_discord_connector.signals import (
        user_group_change_sync_discord_groups,
        remove_discord_user_on_discord_token_removal,
        sync_discord_groups_on_client_save)
    from django.db.models.signals import (
        m2m_changed, post_delete, post_save)
    from django.contrib.auth.models import User
    from django_discord_connector.models import DiscordToken, DiscordClient
    # Disconnect the receivers that would call out to Discord during setup.
    m2m_changed.disconnect(
        user_group_change_sync_discord_groups, sender=User.groups.through)
    post_delete.disconnect(
        remove_discord_user_on_discord_token_removal,
        sender=DiscordToken
    )
    post_save.disconnect(
        sync_discord_groups_on_client_save,
        sender=DiscordClient
    )
    DiscordClient.objects.create(
        callback_url="https://localhost:8000",
        server_id="1",
        client_id="1",
        client_secret="null",
        bot_token="null",
        invite_link="https://localhost",
    )
    self.group = Group.objects.create(
        name="Group"
    )
    self.user = User.objects.create(
        username="******"
    )
    self.discord_user = DiscordUser.objects.create(
        username="******",
        nickname="test",
        external_id=1,
    )
    # A second Discord user with no token, for token-less code paths.
    self.no_token_discord_user = DiscordUser.objects.create(
        username="******",
        nickname="notoken",
        external_id=2,
    )
    self.discord_token = DiscordToken.objects.create(
        access_token="null",
        refresh_token="null",
        discord_user=self.discord_user,
        user=self.user
    )
    self.discord_group = DiscordGroup.objects.create(
        name="DiscordGroup",
        external_id=1,
        group=self.group
    )
def test_recursive_connect(self):
    """
    recursive_connect(depth) should materialize graph relationships only up
    to the requested depth: none at 0, direct relations at 1, and
    second-hop relations (author->user, tag->content type) at 2.
    """
    # Detach the global handlers so node creation is fully manual here;
    # restored in the finally block.
    post_save.disconnect(
        post_save_handler,
        dispatch_uid='chemtrails.signals.handlers.post_save_handler')
    m2m_changed.disconnect(
        m2m_changed_handler,
        dispatch_uid='chemtrails.signals.handlers.m2m_changed_handler')
    try:
        book = BookFixture(Book, generate_m2m={
            'authors': (1, 1)
        }).create_one()
        for depth in range(3):
            db.cypher_query(
                'MATCH (n)-[r]-() WHERE n.type = "ModelNode" DELETE r'
            )  # Delete all relationships
            book_node = get_node_for_object(book).save()
            book_node.recursive_connect(depth)
            if depth == 0:
                # Max depth 0 means that no recursion should occur, and no connections
                # can be made, because the connected objects might not exist.
                for prop in book_node.defined_properties(
                        aliases=False, properties=False).keys():
                    relation = getattr(book_node, prop)
                    try:
                        self.assertEqual(len(relation.all()), 0)
                    except CardinalityViolation:
                        # Will raise CardinalityViolation for nodes which has a single
                        # required relationship
                        continue
            elif depth == 1:
                # Direct (depth-1) relations exist; anything two hops out does not.
                self.assertEqual(
                    0,
                    len(
                        get_node_class_for_model(Book).nodes.has(
                            store_set=True)))
                self.assertEqual(
                    0,
                    len(
                        get_node_class_for_model(Store).nodes.has(
                            books=True)))
                self.assertEqual(
                    0,
                    len(
                        get_node_class_for_model(Book).nodes.has(
                            bestseller_stores=True)))
                self.assertEqual(
                    0,
                    len(
                        get_node_class_for_model(Store).nodes.has(
                            bestseller=True)))
                self.assertEqual(
                    1,
                    len(
                        get_node_class_for_model(Book).nodes.has(
                            publisher=True)))
                self.assertEqual(
                    1,
                    len(
                        get_node_class_for_model(Publisher).nodes.has(
                            book_set=True)))
                self.assertEqual(
                    1,
                    len(
                        get_node_class_for_model(Book).nodes.has(
                            authors=True)))
                self.assertEqual(
                    1,
                    len(
                        get_node_class_for_model(Author).nodes.has(
                            book_set=True)))
                self.assertEqual(
                    0,
                    len(
                        get_node_class_for_model(Author).nodes.has(
                            user=True)))
                self.assertEqual(
                    0,
                    len(
                        get_node_class_for_model(User).nodes.has(
                            author=True)))
                self.assertEqual(
                    1,
                    len(
                        get_node_class_for_model(Book).nodes.has(
                            tags=True)))
                self.assertEqual(
                    0,
                    len(
                        get_node_class_for_model(Tag).nodes.has(
                            content_type=True)))
            elif depth == 2:
                # Second-hop relations now materialize.
                self.assertEqual(
                    1,
                    len(
                        get_node_class_for_model(Author).nodes.has(
                            user=True)))
                self.assertEqual(
                    1,
                    len(
                        get_node_class_for_model(User).nodes.has(
                            author=True)))
                self.assertEqual(
                    1,
                    len(
                        get_node_class_for_model(Tag).nodes.has(
                            content_type=True)))
                self.assertEqual(
                    1,
                    len(
                        get_node_class_for_model(ContentType).nodes.has(
                            content_type_set_for_tag=True)))
    finally:
        # Restore the global signal wiring for subsequent tests.
        post_save.connect(
            post_save_handler,
            dispatch_uid='chemtrails.signals.handlers.post_save_handler')
        m2m_changed.connect(
            m2m_changed_handler,
            dispatch_uid='chemtrails.signals.handlers.m2m_changed_handler')
def import_from_extapi(self):
    """
    Pull resources from the configured external API endpoint and upsert them
    into the local model, resolving FK (dict-valued) and m2m (list-valued)
    sub-resources, while the extapi sync signals are temporarily detached to
    avoid echoing the import back out.

    NOTE(review): Python 2 era code (``xrange``, ``r.json`` as a property);
    on requests >= 1.0 ``r.json`` is a method — verify before reuse.
    """
    # Get external resourses; the random cache_buster defeats HTTP caching.
    cache_buster = [random.choice(string.ascii_letters + string.digits)
                    for n in xrange(30)]
    cache_buster = "".join(cache_buster)
    params = {'username': self.user_name,
              'api_key': self.api_key,
              'format': self.format,
              'cache_buster': cache_buster}
    r = requests.get(self.endpoint, params=params)
    resourses = r.json['objects']
    # Get model
    model = self.internal_model.model_class()
    # Get all relevant fields
    fields = model._meta.get_all_field_names()
    # Find unique fields (used later to look up existing rows)
    query_fields = []
    for model_field in model._meta.fields:
        if model_field.unique and not model_field.primary_key:
            query_fields += [model_field.name]
    for resourse in resourses:
        # Remove all unwanted fields
        data = resourse.copy()
        for key in resourse:
            if not key in fields:
                del data[key]
        # Special case!
        # If some key have a dict or array as value
        # it means it's an FK-like field (dict) or an m2m (list)
        f_resources = []
        l_resources = []
        for key in resourse:
            if type(resourse[key]) == dict:
                f_resources += [{"key_name": key, "data": data.pop(key)}]
            if type(resourse[key]) == list:
                l_resources += [{"key_name": key, "data": data.pop(key)}]
        for f_resource in f_resources:
            # Field name
            field_name = f_resource['key_name']
            # Model class
            n_resource_model = getattr(model, field_name).field.rel.to
            # Get relevant fields
            n_fields = n_resource_model._meta.get_all_field_names()
            n_data = f_resource['data'].copy()
            for key in f_resource['data']:
                if not key in n_fields:
                    del n_data[key]
            m_objects = n_resource_model.objects
            # NOTE(review): get_or_create returns (obj, created_flag); the
            # names here suggest (old_obj, new_obj), and when created the
            # boolean flag — not an instance — ends up in data[field_name].
            # Looks like a latent bug; confirm intent before touching.
            n_old_obj, n_obj = m_objects.get_or_create(**n_data)
            if n_old_obj:
                data[field_name] = n_old_obj
            if n_obj:
                data[field_name] = n_obj
        m2m_data = []
        for l_resource in l_resources:
            # Field name
            field_name = l_resource['key_name']
            # Model class
            n_resource_model = getattr(model, field_name).field.rel.to
            # Array of data
            data_array = l_resource['data']
            # Find unique fields
            n_query_fields = []
            for model_field in n_resource_model._meta.fields:
                if model_field.unique and not model_field.primary_key:
                    n_query_fields += [model_field.name]
            # Get relevant fields
            m_fields = n_resource_model._meta.fields
            n_fields = [field.name for field in m_fields]
            for data_obj in data_array:
                temp = data_obj.copy()
                for key in data_obj:
                    if not key in n_fields:
                        del temp[key]
                data_obj = temp.copy()
                n_query = {}
                for field in n_query_fields:
                    n_query[field] = data_obj[field]
                # Upsert by unique fields.
                try:
                    obj = n_resource_model.objects.get(**n_query)
                    obj.__dict__.update(**data_obj)
                except n_resource_model.DoesNotExist:
                    obj = n_resource_model(**data_obj)
                obj.save()
                # Save m2m data to apply this later on the main model
                m2m_data += [{"field": field_name, "data": obj}]
        # create a query on unique fields and
        # try to get the obj. If it not exist,
        # then create it.
        query = {}
        for field in query_fields:
            query[field] = data[field]
        try:
            obj = model.objects.get(**query)
            obj.__dict__.update(**data)
        except model.DoesNotExist:
            obj = model(**data)
        # Avoid syncing to extapi when saving
        # Disconnect save signal
        post_save.disconnect(account_lead_post_save_handler, sender=AccountLead)
        obj.save()
        # Reconnect save signal
        post_save.connect(account_lead_post_save_handler, sender=AccountLead)
        # Disconnect m2m signal while linking this external API
        m2m_changed.disconnect(account_lead_m2m_changed_handler,
                               sender=AccountLead.external_api.through)
        obj.external_api.add(self)
        m2m_changed.connect(account_lead_m2m_changed_handler,
                            sender=AccountLead.external_api.through)
        # Apply m2m data
        for d in m2m_data:
            getattr(obj, d['field']).add(d['data'])
def _disconnect_signal_handlers(self):
    """Detach the save/delete receivers previously registered for this model.

    Mirrors the connect step: ``post_save`` and ``pre_delete`` are always
    unhooked; the ``m2m_changed`` receiver is unhooked only when a through
    table was recorded on ``self._through``.
    """
    tracked_model = self.model
    post_save.disconnect(self.handle_object_update, sender=tracked_model)
    pre_delete.disconnect(self.handle_object_deletion, sender=tracked_model)
    # ``_through`` exists only when an m2m receiver was connected; it is
    # passed positionally as the sender, matching how it was connected.
    if hasattr(self, '_through'):
        m2m_changed.disconnect(self.handle_object_update, self._through)
def disconnect_signals(sender=None):
    """Detach all logging receivers, either globally or for one *sender*.

    With ``sender=None`` the receivers are removed for every model; passing
    a model class removes only the registrations made for that model.
    """
    # (signal, receiver) pairs, in the same order they were connected.
    wiring = (
        (pre_save, log_pre_save_delete),
        (post_save, log_post_save),
        (pre_delete, log_pre_save_delete),
        (post_delete, log_post_delete),
        (m2m_changed, log_m2m_change),
    )
    for signal, receiver in wiring:
        signal.disconnect(receiver=receiver, sender=sender)
def disconnect_signals():
    """Detach every autogroups/profile receiver this app wires up.

    Used to stop group reassessment and config bookkeeping from firing,
    e.g. during tests or teardown.
    """
    # (signal, receiver, sender) triples, one per registration.
    wiring = (
        (post_save, reassess_on_profile_save, UserProfile),
        (pre_save, signals.pre_save_config, AutogroupsConfig),
        (pre_delete, signals.pre_delete_config, AutogroupsConfig),
        (post_save, signals.check_groups_on_profile_update, UserProfile),
        (m2m_changed, signals.autogroups_states_changed,
         AutogroupsConfig.states.through),
    )
    for signal, receiver, sender in wiring:
        signal.disconnect(receiver=receiver, sender=sender)
def disconnect_couchdb_signals():
    """Unhook the receivers that keep CouchDB in sync with local models."""
    # (signal, receiver, sender) — one entry per registration.
    for signal, receiver, sender in (
        (post_save, create_auth_token, KartenUser),
        (post_save, update_couchdb_password, Token),
        (m2m_changed, update_allowed_users_on_couchdb,
         KartenStack.allowed_users.through),
    ):
        signal.disconnect(receiver, sender=sender)
from os import path

from django.db.models.signals import post_save, m2m_changed
from django.dispatch import Signal
from django.test import TestCase

from frisbeer.models import Player, Game
from frisbeer.signals import calculate_ranks, update_elo
from frisbeer.signals import create_auth_token, update_statistics

# Detach the statistics receiver at import time so that fixture loading and
# any saves inside the tests below do not trigger statistics recalculation.
post_save.disconnect(update_statistics)
m2m_changed.disconnect(update_statistics)


class RankingTestCase(TestCase):
    """Tests for player rank assignment driven by Elo recalculation."""

    # Fixture lives next to this module; presumably it provides 6 players
    # plus the games needed for ranking — TODO confirm against testdata.json.
    fixtures = [
        path.join(path.dirname(path.abspath(__file__)), 'testdata.json')
    ]

    def test_calculate_ranks(self):
        """After update_elo(), exactly 2 players get a non-empty rank."""
        update_elo()
        # calculate_ranks(Game.objects.all()[0])
        self.assertEqual(4, Player.objects.filter(rank="").count())
        self.assertEqual(2, Player.objects.filter(rank__regex=".+").count())
        # Debug output: one "name - rank" line per player.
        for player in Player.objects.all():
            print("{} - {}".format(player.name, player.rank))
def disconnect_signals(cls):
    """Unhook every auth/state receiver registered by this app.

    The table below lists one ``(signal, receiver, sender)`` entry per
    registration, in the same order the original code disconnected them.
    """
    wiring = (
        (m2m_changed, m2m_changed_user_groups, User.groups.through),
        (m2m_changed, m2m_changed_group_permissions, Group.permissions.through),
        (m2m_changed, m2m_changed_user_permissions, User.user_permissions.through),
        (m2m_changed, m2m_changed_state_permissions, State.permissions.through),
        (pre_save, pre_save_user, User),
        (m2m_changed, state_member_corporations_changed, State.member_corporations.through),
        (m2m_changed, state_member_characters_changed, State.member_characters.through),
        (m2m_changed, state_member_alliances_changed, State.member_alliances.through),
        (post_save, state_saved, State),
    )
    for signal, receiver, sender in wiring:
        signal.disconnect(receiver, sender=sender)