def test_is_expired_after_config_deleted(self):
    """Testing IntegrationManager.is_expired after config deleted"""
    manager = IntegrationManager(IntegrationConfig)

    # Fresh manager state must not be expired.
    self.assertFalse(manager.is_expired())

    # Deleting a config must flip the manager into the expired state.
    post_delete.send(sender=IntegrationConfig)
    self.assertTrue(manager.is_expired())
def delete(self, using=None, keep_parents=False):
    """
    Model.delete() override that also mark model as deleted and by whom.

    Requires the `deleted_by` field to be set by the caller, if
    SMARTMODELS_DEFAULT_REQUIRED=True.

    Owner of the model is changed to the sentinel owner by the ORM behind
    the scene through `on_delete=models.SET(get_sentinel_user)`.
    Cf the owner field definition.
    """
    # This is a guard to ensure `deleted_by` is set so that we know who
    # deletes an instance.
    if get_setting('DEFAULT_REQUIRED'):
        # FIX: the concatenated message literals were missing separating
        # spaces, producing "attribute.To use ... =False`in Django settings".
        assert self.deleted_by, (
            '{model_class}(SmartModel) instance missing "deleted_by" attribute. '
            'To use the builtin defaults, set `SMARTMODELS_DEFAULT_REQUIRED=False` '
            'in Django settings'.format(
                model_class=self.__class__.__name__))
    self.deleted_at = timezone.now()
    self.owner = get_sentinel_user()
    # Calling save instead of regular `delete()` method will route to the
    # `smartmodels.models.prepare_smart_fields()` pre_save signal handler.
    # Also, manually fire the `post_delete` signal to leave a chance to
    # listeners to cope with the deletion.
    post_delete.send(sender=self.__class__, instance=self,
                     deleted_by=self.deleted_by)
    self.save()
def delete(self, using=None, force=False):
    """Soft-delete by default; pass ``force=True`` for a real row delete."""
    if force:
        super().delete(using)
        return
    # Soft path: flag the row and emit both delete signals by hand.
    pre_delete.send(sender=self.__class__, instance=self)
    self.deleted = True
    self.save()
    post_delete.send(sender=self.__class__, instance=self)
def on_model_post_delete(self, model, instance):
    """Re-emit post_delete for the mapped model class; log failures instead of raising."""
    try:
        sender = self.model_classes.get(model)
        post_delete.send(sender, using=self,
                         instance=self.pre_delete_django_instance)
    except Exception as exc:
        # Best-effort: a failing receiver must not break the caller.
        logger.warning('[!] on_model_post_delete signal failed: {}'.format(
            str(exc)))
def delete(self, **kwargs):
    """Soft-delete unless ``force=True`` is supplied in ``kwargs``."""
    if kwargs.pop('force', False):
        super(DeletableTimeStampedModel, self).delete(**kwargs)
    else:
        pre_delete.send(self.__class__, instance=self)
        self.deleted = True
        self.save()
        post_delete.send(self.__class__, instance=self)
def delete(self, using=None, force=False):
    """Soft-delete (timestamp ``deleted``) unless ``force`` is True."""
    if force:
        super(BaseModel, self).delete(using=using)
    else:
        sender = type(self)
        db = self._state.db
        pre_delete.send(sender=sender, instance=self, using=db)
        self.deleted = now()
        self.save()
        post_delete.send(sender=sender, instance=self, using=db)
def delete(self):
    """Soft-delete: stamp ``deleted_at`` with the current UTC time and emit both delete signals."""
    sender = self.__class__
    db = self._state.db
    pre_delete.send(sender=sender, instance=self, using=db)
    self.deleted_at = datetime.datetime.now(pytz.UTC)
    self.save()
    post_delete.send(sender=sender, instance=self, using=db)
def delete(self):
    """Hard-delete the row, emitting signals and clearing the DRF per-instance cache."""
    sender = self.__class__
    pre_delete.send(sender=sender, instance=self)
    super().delete()
    post_delete.send(sender=sender, instance=self)
    # We also clean the DRF cache
    clear_for_instance(self)
def update_instrument(self, instance, old_value):
    """Propagate save/delete signals after a performance's instrument changed."""
    if instance is not None and instance.instrument is not None:
        post_save.send(sender=Instrument, instance=instance.instrument,
                       in_real_signal=True)
    if old_value is None:
        return
    if isinstance(old_value, Instrument):
        previous = old_value
    else:
        previous = Instrument.objects.get(id=old_value)
    # An instrument left with no performances is reported as deleted.
    if previous.performances.count() == 0:
        post_delete.send(sender=Instrument, instance=previous)
def update_alias(self, instance, old_value):
    """Propagate save/delete signals after a performance's alias changed."""
    if instance is not None and instance.alias is not None:
        post_save.send(sender=InstrumentAlias, instance=instance.alias,
                       in_real_signal=True)
    if old_value is None:
        return
    if isinstance(old_value, InstrumentAlias):
        previous = old_value
    else:
        previous = InstrumentAlias.objects.get(id=old_value)
    # An alias left with no performances is reported as deleted.
    if previous.performances.count() == 0:
        post_delete.send(sender=InstrumentAlias, instance=previous)
def delete(self, obj):
    """
    deletes an object

    :param obj: object to be deleted
    """
    # Nothing to do for unsaved objects (no id yet).
    if not obj.id:
        return
    signal_kwargs = dict(sender=obj.__class__, instance=obj,
                         using='repository')
    pre_delete.send(**signal_kwargs)
    self.connection.delete(obj.id)
    post_delete.send(**signal_kwargs)
def delete(self, *args, **kwargs):
    """
    Cascade-delete this provider inside its customer's tenant schema.

    Falls back to a plain ``Model.delete()`` when no customer is attached.
    ``post_delete`` is emitted manually because ``cascade_delete`` bypasses
    the ORM's per-instance delete path.
    """
    if self.customer:
        # FIX: the router hint is named `instance`, not `isinstance` — the
        # misspelled kwarg meant database routers never received the
        # instance hint for write routing.
        using = router.db_for_write(self.__class__, instance=self)
        with schema_context(self.customer.schema_name):
            LOG.info(
                f"PROVIDER {self.name} ({self.pk}) CASCADE DELETE -- SCHEMA {self.customer.schema_name}"
            )
            cascade_delete(self.__class__, self.__class__,
                           self.__class__.objects.filter(pk=self.pk))
            post_delete.send(sender=self.__class__, instance=self, using=using)
    else:
        super().delete()
def delete(self, force=False):
    """Queryset delete: soft (timestamp ``deleted``) by default, hard when ``force``."""
    if force:
        return super(DataQuerySet, self).delete()
    # Snapshot the rows first — the queryset would evaluate differently
    # once update() has stamped them. :)
    targets = list(self)
    for item in targets:
        pre_delete.send(sender=self.model, instance=item, using=self._db)
    updated = self.update(deleted=now())
    for item in targets:
        post_delete.send(sender=self.model, instance=item, using=self._db)
    return updated
def delete(self, **kwargs):  # pylint: disable=arguments-differ
    """
    Override delete method to allow for "soft" deleting.

    If `force` is True delete from the database, otherwise set
    model.deleted = True

    :param kwargs: dict - add force=True to delete from the database
    :return:
    """
    if kwargs.pop('force', False):
        super().delete(**kwargs)
        return
    pre_delete.send(self.__class__, instance=self)
    self.deleted = True
    self.save()
    post_delete.send(self.__class__, instance=self)
def delete(self, force=False):
    """
    Add support for the queryset.delete method.

    Use select_for_update and set deleted to True. Manually send
    post_delete signals on delete.

    :param force: when True, perform a real database delete instead of
        the soft path.
    :return: the result of the underlying queryset ``delete()`` call.
    """
    if force:
        return super().delete()
    else:
        # Lock the rows for the duration of the transaction.
        with transaction.atomic(savepoint=False):
            instances = super().select_for_update()
            # NOTE(review): the first loop evaluates the queryset; the
            # post-delete loop below appears to rely on the queryset's
            # result cache surviving the delete() call — confirm against
            # the Django version in use.
            for instance in instances:
                pre_delete.send(sender=instance.__class__, instance=instance)
            delete_result = instances.delete()
            for instance in instances:
                post_delete.send(sender=instance.__class__, instance=instance)
            return delete_result
def update_location(self, instance, old_value):
    """Fan out save/delete signals for a changed location, its related locations and aliases."""
    if instance is not None and instance.location is not None:
        related = Location.objects.filter(
            Q(current_location=instance.location) |
            Q(descendant_relations__location=instance.location))
        aliases = LocationAlias.objects.filter(location=instance.location)
        post_save.send(sender=Location, instance=instance.location,
                       in_real_signal=True)
        for related_loc in related:
            post_save.send(sender=Location, instance=related_loc,
                           in_real_signal=True)
        for alias in aliases:
            post_save.send(sender=LocationAlias, instance=alias,
                           in_real_signal=True)
    if old_value is not None:
        if isinstance(old_value, Location):
            location = old_value
        else:
            location = Location.objects.get(id=old_value)
        related = Location.objects.filter(
            Q(current_location=location) |
            Q(descendant_relations__location=location))
        # The first receiver's return value decides whether the aliases
        # are deleted as well.
        can_delete_alias = post_delete.send(
            sender=Location, instance=location, in_real_signal=True)[0][1]
        for related_loc in related:
            post_delete.send(sender=Location, instance=related_loc,
                             in_real_signal=True)
        if can_delete_alias:
            for alias in LocationAlias.objects.filter(location=location):
                post_delete.send(sender=LocationAlias, instance=alias,
                                 in_real_signal=True)
def delete(self, *args, **kwargs):
    """Soft-delete: stamp ``deleted_at`` via a direct row UPDATE, wrapped in delete signals."""
    sender = self.__class__
    pre_delete.send(sender=sender, instance=self)
    self.deleted_at = now()
    # Write the timestamp with update() rather than save(), touching only
    # this one column.
    sender.objects.filter(id=self.id).update(deleted_at=self.deleted_at)
    post_delete.send(sender=sender, instance=self)
def redirect_post_delete(sender, signal=None, *args, **kwargs):
    # Forward the received post_delete signal, replacing the original sender
    # with BaseGroup so receivers registered on the base class fire too.
    post_delete.send(BaseGroup, *args, **kwargs)
def merge_into(self, other, callback=lambda x: x, using='default'):
    """
    Collects objects related to ``self`` and updates their foreign keys to
    point to ``other``.

    If ``callback`` is specified, it will be executed on each collected chunk
    before any changes are made, and should return a modified list of results
    that still need updated.

    NOTE: Duplicates (unique constraints) which exist and are bound to
    ``other`` are preserved, and relations on ``self`` are discarded.
    """
    # TODO: proper support for database routing
    s_model = type(self)

    # Find all the objects than need to be deleted.
    collector = EverythingCollector(using=using)
    collector.collect([self])
    # NOTE(review): Python 2 dict API (`iteritems`) — this block predates
    # Python 3 and will not run unmodified on it.
    for model, objects in collector.data.iteritems():
        # find all potential keys which match our type
        fields = set(
            f.name for f in model._meta.fields
            if isinstance(f, ForeignKey)
            and f.rel.to == s_model
            if f.rel.to
        )
        if not fields:
            # the collector pulls in the self reference, so if it's our model
            # we actually assume it's probably not related to itself, and its
            # perfectly ok
            if model == s_model:
                continue
            raise TypeError('Unable to determine related keys on %r' % model)
        for obj in objects:
            # Auto-created (through) models get no user-facing signals.
            send_signals = not model._meta.auto_created
            # find fields which need changed
            update_kwargs = {}
            for f_name in fields:
                if getattr(obj, f_name) == self:
                    update_kwargs[f_name] = other
            if not update_kwargs:
                # as before, if we're referencing ourself, this is ok
                if obj == self:
                    continue
                raise ValueError('Mismatched row present in related results')
            signal_kwargs = {
                'sender': model,
                'instance': obj,
                'using': using,
                'migrated': True,
            }
            # The relation is conceptually deleted from `self` and re-created
            # on `other`, hence the delete/save signal pairs.
            if send_signals:
                pre_delete.send(**signal_kwargs)
                post_delete.send(**signal_kwargs)
            for k, v in update_kwargs.iteritems():
                setattr(obj, k, v)
            if send_signals:
                pre_save.send(created=True, **signal_kwargs)
            try:
                with transaction.atomic():
                    model.objects.using(using).filter(pk=obj.pk).update(**update_kwargs)
            except IntegrityError:
                # duplicate key exists, destroy the relations
                model.objects.using(using).filter(pk=obj.pk).delete()
            if send_signals:
                post_save.send(created=True, **signal_kwargs)
def redirect_post_delete(sender, signal=None, *args, **kwargs):
    # Forward the received post_delete signal, replacing the original sender
    # with BaseUser so receivers registered on the base class fire too.
    post_delete.send(BaseUser, *args, **kwargs)
def on_model_post_delete(self, model, instance):
    """Re-emit post_delete for the Django class mapped to ``model``."""
    sender = self.model_classes.get(model)
    post_delete.send(sender, using=self,
                     instance=self.pre_delete_django_instance)
def merge_into(self, other, callback=lambda x: x, using='default'):
    """
    Collects objects related to ``self`` and updates their foreign keys to
    point to ``other``.

    If ``callback`` is specified, it will be executed on each collected chunk
    before any changes are made, and should return a modified list of results
    that still need updated.

    NOTE: Duplicates (unique constraints) which exist and are bound to
    ``other`` are preserved, and relations on ``self`` are discarded.
    """
    # TODO: proper support for database routing
    s_model = type(self)

    # Find all the objects than need to be deleted.
    collector = EverythingCollector(using=using)
    collector.collect([self])
    for model, objects in six.iteritems(collector.data):
        # find all potential keys which match our type
        fields = set(f.name for f in model._meta.fields
                     if isinstance(f, ForeignKey)
                     and f.rel.to == s_model
                     if f.rel.to)
        if not fields:
            # the collector pulls in the self reference, so if it's our model
            # we actually assume it's probably not related to itself, and its
            # perfectly ok
            if model == s_model:
                continue
            raise TypeError('Unable to determine related keys on %r' % model)
        for obj in objects:
            # Auto-created (through) models get no user-facing signals.
            send_signals = not model._meta.auto_created
            # find fields which need changed
            update_kwargs = {}
            for f_name in fields:
                if getattr(obj, f_name) == self:
                    update_kwargs[f_name] = other
            if not update_kwargs:
                # as before, if we're referencing ourself, this is ok
                if obj == self:
                    continue
                raise ValueError('Mismatched row present in related results')
            signal_kwargs = {
                'sender': model,
                'instance': obj,
                'using': using,
                'migrated': True,
            }
            # The relation is conceptually deleted from `self` and re-created
            # on `other`, hence the delete/save signal pairs.
            if send_signals:
                pre_delete.send(**signal_kwargs)
                post_delete.send(**signal_kwargs)
            for k, v in six.iteritems(update_kwargs):
                setattr(obj, k, v)
            if send_signals:
                pre_save.send(created=True, **signal_kwargs)
            try:
                with transaction.atomic(using=using):
                    model.objects.using(using).filter(pk=obj.pk).update(
                        **update_kwargs)
            except IntegrityError:
                # duplicate key exists, destroy the relations
                model.objects.using(using).filter(pk=obj.pk).delete()
            if send_signals:
                post_save.send(created=True, **signal_kwargs)
def delete(self, using=None):
    """Stubbed delete that only emits the post_delete signal (no row removal)."""
    post_delete.send(sender=MockedWithFieldsPostDelete,
                     instance=self, using=using)
def delete(self):
    """Soft-delete: flag ``is_deleted`` and fire both delete signals manually."""
    sender = self.__class__
    pre_delete.send(sender=sender, instance=self)
    self.is_deleted = True
    self.save()
    post_delete.send(sender=sender, instance=self)