Example #1
    def allocate(self, quantity):
        """
        Record a stock allocation.

        This normally happens when a product is bought at checkout.  When the
        product is actually shipped, then we 'consume' the allocation.

        """
        # Send the pre-save signal
        signals.pre_save.send(
            sender=self.__class__,
            instance=self,
            created=False,
            raw=False,
            using=router.db_for_write(self.__class__, instance=self))

        # Atomic update
        (self.__class__.objects
            .filter(pk=self.pk)
            .update(num_allocated=(
                Coalesce(F('num_allocated'), Value(0)) + quantity)))

        # Make sure the current object is up-to-date
        if self.num_allocated is None:
            self.num_allocated = 0
        self.num_allocated += quantity

        # Send the post-save signal
        signals.post_save.send(
            sender=self.__class__,
            instance=self,
            created=False,
            raw=False,
            using=router.db_for_write(self.__class__, instance=self))
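The docstring above notes that an allocation is later 'consumed' when the product ships. A minimal sketch of what such a counterpart method could look like, reusing the same race-free F()/Coalesce update; the consume() name is an assumption, not part of the original model:

    def consume(self, quantity):
        """Hypothetical counterpart to allocate(): called when stock ships."""
        # Atomic decrement at the database level, mirroring allocate().
        (self.__class__.objects
            .filter(pk=self.pk)
            .update(num_allocated=(
                Coalesce(F('num_allocated'), Value(0)) - quantity)))

        # Keep the in-memory instance consistent with the database row.
        if self.num_allocated is None:
            self.num_allocated = 0
        self.num_allocated -= quantity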
Example #2
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError("%s must be accessed via instance" % self.related.opts.object_name)

        # The similarity of the code below to the code in
        # ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
        # of small differences that would make a common base class convoluted.

        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.related.field.null is False:
            raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
                                (instance._meta.object_name, self.related.get_accessor_name()))
        elif value is not None and not isinstance(value, self.related.model):
            raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
                                (value, instance._meta.object_name,
                                 self.related.get_accessor_name(), self.related.opts.object_name))
        elif value is not None:
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": instance is on database "%s", value is is on database "%s"' %
                                        (value, instance._state.db, value._state.db))

        # Set the value of the related field to the value of the related object's related field
        setattr(value, self.related.field.attname, getattr(instance, self.related.field.rel.get_related_field().attname))

        # Since we already know what the related object is, seed the related
        # object caches now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.cache_name, value)
        setattr(value, self.related.field.get_cache_name(), instance)
Example #3
    def test_partial_router(self):
        "A router can choose to implement a subset of methods"
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))

        # First check the baseline behaviour

        self.assertEqual(router.db_for_read(User), 'other')
        self.assertEqual(router.db_for_read(Book), 'other')

        self.assertEqual(router.db_for_write(User), 'default')
        self.assertEqual(router.db_for_write(Book), 'default')

        self.assertTrue(router.allow_relation(dive, dive))

        self.assertTrue(router.allow_syncdb('default', User))
        self.assertTrue(router.allow_syncdb('default', Book))

        router.routers = [WriteRouter(), AuthRouter(), TestRouter()]

        self.assertEqual(router.db_for_read(User), 'default')
        self.assertEqual(router.db_for_read(Book), 'other')

        self.assertEqual(router.db_for_write(User), 'writer')
        self.assertEqual(router.db_for_write(Book), 'writer')

        self.assertTrue(router.allow_relation(dive, dive))

        self.assertFalse(router.allow_syncdb('default', User))
        self.assertTrue(router.allow_syncdb('default', Book))
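For context, a partial router only has to define the methods it cares about; a hedged sketch of what the WriteRouter used above might look like, assuming anything it does not answer falls through to the next router in DATABASE_ROUTERS:

    class WriteRouter(object):
        # Routes every write to the 'writer' alias and stays silent on reads,
        # relations and syncdb, leaving those decisions to later routers.
        def db_for_write(self, model, **hints):
            return 'writer'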
Example #4
    def __set__(self, instance, value):
        """
        Set the related instance through the reverse relation.

        With the example above, when setting ``place.restaurant = restaurant``:

        - ``self`` is the descriptor managing the ``restaurant`` attribute
        - ``instance`` is the ``place`` instance
        - ``value`` is the ``restaurant`` instance on the right of the equal sign

        Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
        """
        # The similarity of the code below to the code in
        # ForwardManyToOneDescriptor is annoying, but there's a bunch
        # of small differences that would make a common base class convoluted.

        if value is None:
            # Update the cached related instance (if any) & clear the cache.
            # Following the example above, this would be the cached
            # ``restaurant`` instance (if any).
            rel_obj = self.related.get_cached_value(instance, default=None)
            if rel_obj is not None:
                # Remove the ``restaurant`` instance from the ``place``
                # instance cache.
                self.related.delete_cached_value(instance)
                # Set the ``place`` field on the ``restaurant``
                # instance to None.
                setattr(rel_obj, self.related.field.name, None)
        elif not isinstance(value, self.related.related_model):
            # An object must be an instance of the related class.
            raise ValueError(
                'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
                    value,
                    instance._meta.object_name,
                    self.related.get_accessor_name(),
                    self.related.related_model._meta.object_name,
                )
            )
        else:
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)

            related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
            # Set the value of the related field to the value of the related object's related field
            for index, field in enumerate(self.related.field.local_related_fields):
                setattr(value, field.attname, related_pk[index])

            # Set the related instance cache used by __get__ to avoid an SQL query
            # when accessing the attribute we just set.
            self.related.set_cached_value(instance, value)

            # Set the forward accessor cache on the related object to the current
            # instance to avoid an extra SQL query if it's accessed later on.
            self.related.field.set_cached_value(value, instance)
Example #5
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError("%s must be accessed via instance" % self._field.name)

        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.field.null is False:
            raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
                                (instance._meta.object_name, self.field.name))
        elif value is not None and not isinstance(value, self.field.rel.to):
            raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
                                (value, instance._meta.object_name,
                                 self.field.name, self.field.rel.to._meta.object_name))
        elif value is not None:
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": instance is on database "%s", value is on database "%s"' %
                                        (value, instance._state.db, value._state.db))

        # If we're setting the value of a OneToOneField to None, we need to clear
        # out the cache on any old related object. Otherwise, deleting the
        # previously-related object will also cause this object to be deleted,
        # which is wrong.
        if value is None:
            # Look up the previously-related object, which may still be available
            # since we've not yet cleared out the related field.
            # Use the cache directly, instead of the accessor; if we haven't
            # populated the cache, then we don't care - we're only accessing
            # the object to invalidate the accessor cache, so there's no
            # need to populate the cache just to expire it again.
            related = getattr(instance, self.field.get_cache_name(), None)

            # If we've got an old related object, we need to clear out its
            # cache. This cache also might not exist if the related object
            # hasn't been accessed yet.
            if related:
                cache_name = self.field.related.get_cache_name()
                try:
                    delattr(related, cache_name)
                except AttributeError:
                    pass

        # Set the value of the related field
        try:
            val = getattr(value, self.field.rel.get_related_field().attname)
        except AttributeError:
            val = None
        setattr(instance, self.field.attname, val)

        # Since we already know what the related object is, seed the related
        # object cache now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.field.get_cache_name(), value)
Example #6
File: merge.py Project: 280185386/sentry
def merge_objects(models, group, new_group, limit=1000,
                  logger=None):
    from sentry.models import GroupTagKey, GroupTagValue

    has_more = False
    for model in models:
        if logger is not None:
            logger.info('Merging %r objects where %r into %r', model, group,
                        new_group)
        all_fields = model._meta.get_all_field_names()
        has_group = 'group' in all_fields
        if has_group:
            queryset = model.objects.filter(group=group)
        else:
            queryset = model.objects.filter(group_id=group.id)
        for obj in queryset[:limit]:
            try:
                with transaction.atomic(using=router.db_for_write(model)):
                    if has_group:
                        model.objects.filter(
                            id=obj.id
                        ).update(group=new_group)
                    else:
                        model.objects.filter(
                            id=obj.id
                        ).update(group_id=new_group.id)
            except IntegrityError:
                delete = True
            else:
                delete = False

            if delete:
                # Before deleting, we want to merge in counts
                try:
                    if model == GroupTagKey:
                        with transaction.atomic(using=router.db_for_write(model)):
                            model.objects.filter(
                                group=new_group,
                                key=obj.key,
                            ).update(values_seen=F('values_seen') + obj.values_seen)
                    elif model == GroupTagValue:
                        with transaction.atomic(using=router.db_for_write(model)):
                            model.objects.filter(
                                group=new_group,
                                key=obj.key,
                                value=obj.value,
                            ).update(times_seen=F('times_seen') + obj.times_seen)
                except DataError:
                    # it's possible to hit an out of range value for counters
                    pass
                obj.delete()
            has_more = True

        if has_more:
            return True
    return has_more
Example #7
    def __set__(self, instance, value):
        """
        Set the related instance through the reverse relation.

        With the example above, when setting ``place.restaurant = restaurant``:

        - ``self`` is the descriptor managing the ``restaurant`` attribute
        - ``instance`` is the ``place`` instance
        - ``value`` is the ``restaurant`` instance on the right of the equal sign

        Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
        """
        # The similarity of the code below to the code in
        # ForwardManyToOneDescriptor is annoying, but there's a bunch
        # of small differences that would make a common base class convoluted.

        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.related.field.null is False:
            raise ValueError(
                'Cannot assign None: "%s.%s" does not allow null values.' % (
                    instance._meta.object_name,
                    self.related.get_accessor_name(),
                )
            )
        elif value is not None and not isinstance(value, self.related.related_model):
            raise ValueError(
                'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
                    value,
                    instance._meta.object_name,
                    self.related.get_accessor_name(),
                    self.related.related_model._meta.object_name,
                )
            )
        elif value is not None:
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)

        related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
        # Set the value of the related field to the value of the related object's related field
        for index, field in enumerate(self.related.field.local_related_fields):
            setattr(value, field.attname, related_pk[index])

        # Set the related instance cache used by __get__ to avoid a SQL query
        # when accessing the attribute we just set.
        setattr(instance, self.cache_name, value)

        # Set the forward accessor cache on the related object to the current
        # instance to avoid an extra SQL query if it's accessed later on.
        setattr(value, self.related.field.get_cache_name(), instance)
Example #8
 def make_authors(self):
     # Since this might be a GET request and it does db updates,
     # ensure it uses the 'write' db for reads as well.
     author_mgr = m.Author.objects.db_manager(router.db_for_write(m.Author))
     user_mgr = User.objects.db_manager(router.db_for_write(User))
     users_used = author_mgr.filter(
         user__isnull=False).values_list('user', flat=True)
     candidates_for_author = user_mgr.exclude(id__in=users_used)
     for user in candidates_for_author:
         author_mgr.get_or_create(name='', user=user)
     return author_mgr.all()
Example #9
    def _value_from_db(self, value, field, field_kind, db_type):
        """
        Deconverts keys, dates and times (also in collections).
        """

        # It is *crucial* that this is written as a direct check --
        # when value is an instance of serializer.LazyModelInstance
        # calling its __eq__ method does a database query.
        if value is None:
            return None

        # All keys have been turned into ObjectIds.
        if db_type == "key" and router.db_for_write(field.model) in _mongodbs:
            value = unicode(value)

        # We've converted dates and times to datetimes.
        elif db_type == "date":
            value = datetime.date(value.year, value.month, value.day)
        elif db_type == "time":
            value = datetime.time(value.hour, value.minute, value.second, value.microsecond)

        # Revert the decimal-to-string encoding.
        if field_kind == "DecimalField":
            value = decimal.Decimal(value)

        return super(DatabaseOperations, self)._value_from_db(value, field, field_kind, db_type)
Example #10
    def set(self, *tags, **kwargs):
        """
        Set the object's tags to the given n tags. If the clear kwarg is True
        then all existing tags are removed (using `.clear()`) and the new tags
        added. Otherwise, only those tags that are not present in the args are
        removed and any new tags added.
        """
        db = router.db_for_write(self.through, instance=self.instance)
        clear = kwargs.pop('clear', False)

        if clear:
            self.clear()
            self.add(*tags)
        else:
            # make sure we're working with a collection of a uniform type
            objs = self._to_tag_model_instances(tags)

            # get the existing tag strings
            old_tag_strs = set(self.through._default_manager
                               .using(db)
                               .filter(**self._lookup_kwargs())
                               .values_list('tag__name', flat=True))

            new_objs = []
            for obj in objs:
                if obj.name in old_tag_strs:
                    old_tag_strs.remove(obj.name)
                else:
                    new_objs.append(obj)

            self.remove(*old_tag_strs)
            self.add(*new_objs)
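Given the signature above (positional tags plus a clear keyword), usage might look like the sketch below; the article model and its tags manager are illustrative, not part of the original code:

    # Replace whatever is currently set with exactly these tags.
    article.tags.set("django", "routing", clear=True)

    # Without clear=True only the difference is applied: missing tags are
    # added, tags not passed are removed, and shared tags stay untouched.
    article.tags.set("django", "multi-db")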
Example #11
 def delete(self, using=None):
     # FIXME: if the Translation is the one used as default/fallback,
     # then deleting it will mean the corresponding field on the related
     # model will stay empty even if there are translations in other
     # languages!
     cls = self.__class__
     using = using or router.db_for_write(cls, instance=self)
     # Look for all translations for the same string (id=self.id) except the
     # current one (autoid=self.autoid).
     qs = cls.objects.filter(id=self.id).exclude(autoid=self.autoid)
     if qs.using(using).exists():
         # If other Translations for the same id exist, we just need to
         # delete this one and *only* this one, without letting Django
         # collect dependencies (it'd remove the others, which we want to
         # keep).
         assert self._get_pk_val() is not None
         collector = Collector(using=using)
         collector.collect([self], collect_related=False)
         # In addition, because we have FK pointing to a non-unique column,
         # we need to force MySQL to ignore constraints because it's dumb
         # and would otherwise complain even if there are remaining rows
         # that matches the FK.
         with connections[using].constraint_checks_disabled():
             collector.delete()
     else:
         # If no other Translations with that id exist, then we should let
         # django behave normally. It should find the related model and set
         # the FKs to NULL.
         return super(Translation, self).delete(using=using)
Example #12
 def _remove_items_at(self, timestamp, source_field_name, target_field_name, *objs):
     if objs:
         if timestamp is None:
             timestamp = get_utc_now()
         old_ids = set()
         for obj in objs:
             if isinstance(obj, self.model):
                 # The Django 1.7-way is preferred
                 if hasattr(self, 'target_field'):
                     fk_val = self.target_field.get_foreign_related_value(obj)[0]
                 # But the Django 1.6.x -way is supported for backward compatibility
                 elif hasattr(self, '_get_fk_val'):
                     fk_val = self._get_fk_val(obj, target_field_name)
                 else:
                     raise TypeError("We couldn't find the value of the foreign key, this might be due to the "
                                     "use of an unsupported version of Django")
                 old_ids.add(fk_val)
             else:
                 old_ids.add(obj)
         db = router.db_for_write(self.through, instance=self.instance)
         qs = self.through._default_manager.using(db).filter(**{
             source_field_name: self.instance.id,
             '%s__in' % target_field_name: old_ids
         }).as_of(timestamp)
         for relation in qs:
             relation._delete_at(timestamp)
Example #13
        def delete(self, force_policy=None, **kwargs):
            current_policy = policy if (force_policy is None) else force_policy

            if current_policy == NO_DELETE:

                # Don't do anything.
                return

            elif current_policy == SOFT_DELETE:

                # Only soft-delete the object, marking it as deleted.
                self.deleted = True
                super(Model, self).save(**kwargs)
                # send post-delete signal
                using = kwargs.get('using') or router.db_for_write(self.__class__, instance=self)
                post_softdelete.send(sender=self.__class__, instance=self, using=using)

            elif current_policy == HARD_DELETE:

                # Normally hard-delete the object.
                super(Model, self).delete()

            elif current_policy == HARD_DELETE_NOCASCADE:

                # Hard-delete the object only if nothing would be deleted with it

                if sum(1 for _ in related_objects(self)) > 0:
                    self.delete(force_policy=SOFT_DELETE, **kwargs)
                else:
                    self.delete(force_policy=HARD_DELETE, **kwargs)
Example #14
 def hand_clean_DELETE(self):
     """
     We don't validate the 'DELETE' field itself because on
     templates it's not rendered using the field information, but
     just using a generic "deletion_field" of the InlineModelAdmin.
     """
     if self.cleaned_data.get(DELETION_FIELD_NAME, False):
         using = router.db_for_write(self._meta.model)
         collector = NestedObjects(using=using)
         if self.instance.pk is None:
             return
         collector.collect([self.instance])
         if collector.protected:
             objs = []
             for p in collector.protected:
                 objs.append(
                     # Translators: Model verbose name and instance representation,
                     # suitable to be an item in a list.
                     _('%(class_name)s %(instance)s') % {
                         'class_name': p._meta.verbose_name,
                         'instance': p}
                 )
             params = {'class_name': self._meta.model._meta.verbose_name,
                       'instance': self.instance,
                       'related_objects': get_text_list(objs, _('and'))}
             msg = _("Deleting %(class_name)s %(instance)s would require "
                     "deleting the following protected related objects: "
                     "%(related_objects)s")
             raise ValidationError(msg, code='deleting_protected', params=params)
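The same NestedObjects collector can be used outside the admin form to preview what a delete would touch; a minimal sketch, assuming a Django version where it lives in django.contrib.admin.utils and an arbitrary saved instance obj:

    from django.contrib.admin.utils import NestedObjects
    from django.db import router

    using = router.db_for_write(obj.__class__)
    collector = NestedObjects(using=using)
    collector.collect([obj])
    would_delete = collector.nested()   # nested list of objects that would be removed
    blocked_by = collector.protected    # objects guarded by on_delete=PROTECT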
Example #15
File: db.py Project: 0xmilk/appscale
    def _base_set(self, mode, key, value, timeout=None):
        if timeout is None:
            timeout = self.default_timeout
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        cursor.execute("SELECT COUNT(*) FROM %s" % table)
        num = cursor.fetchone()[0]
        now = datetime.now().replace(microsecond=0)
        exp = datetime.fromtimestamp(time.time() + timeout).replace(microsecond=0)
        if num > self._max_entries:
            self._cull(db, cursor, now)
        encoded = base64.encodestring(pickle.dumps(value, 2)).strip()
        cursor.execute("SELECT cache_key, expires FROM %s WHERE cache_key = %%s" % table, [key])
        try:
            result = cursor.fetchone()
            if result and (mode == 'set' or
                    (mode == 'add' and result[1] < now)):
                cursor.execute("UPDATE %s SET value = %%s, expires = %%s WHERE cache_key = %%s" % table,
                               [encoded, connections[db].ops.value_to_db_datetime(exp), key])
            else:
                cursor.execute("INSERT INTO %s (cache_key, value, expires) VALUES (%%s, %%s, %%s)" % table,
                               [key, encoded, connections[db].ops.value_to_db_datetime(exp)])
        except DatabaseError:
            # To be threadsafe, updates/inserts are allowed to fail silently
            transaction.rollback_unless_managed(using=db)
            return False
        else:
            transaction.commit_unless_managed(using=db)
            return True
Example #16
File: base.py Project: dynamicguy/django
    def delete(self, using=None):
        using = using or router.db_for_write(self.__class__, instance=self)
        assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)

        collector = Collector(using=using)
        collector.collect([self])
        collector.delete()
Example #17
    def add(self, *tags):
        db = router.db_for_write(self.through, instance=self.instance)

        tag_objs = self._to_tag_model_instances(tags)
        new_ids = set(t.pk for t in tag_objs)

        # NOTE: can we hardcode 'tag_id' here or should the column name be got
        # dynamically from somewhere?
        vals = (self.through._default_manager.using(db)
                .values_list('tag_id', flat=True)
                .filter(**self._lookup_kwargs()))

        new_ids = new_ids - set(vals)

        signals.m2m_changed.send(
            sender=self.through, action="pre_add",
            instance=self.instance, reverse=False,
            model=self.through.tag_model(), pk_set=new_ids, using=db,
        )

        for tag in tag_objs:
            self.through._default_manager.using(db).get_or_create(
                tag=tag, **self._lookup_kwargs())

        signals.m2m_changed.send(
            sender=self.through, action="post_add",
            instance=self.instance, reverse=False,
            model=self.through.tag_model(), pk_set=new_ids, using=db,
        )
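Because the manager wraps the inserts in standard m2m_changed signals, tagging can be observed like any other many-to-many change; a hedged receiver sketch, where MyModel.tags.through stands in for whatever self.through resolves to:

    from django.db.models.signals import m2m_changed

    def log_tagging(sender, instance, action, pk_set, using, **kwargs):
        # Fired for both "pre_add" and "post_add" sent by add() above.
        if action == "post_add":
            print("tagged %r with tag ids %s on %s" % (instance, sorted(pk_set), using))

    m2m_changed.connect(log_tagging, sender=MyModel.tags.through)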
Example #18
File: query.py Project: faulkner/sentry
def update(self, using=None, **kwargs):
    """
    Updates specified attributes on the current instance.
    """
    assert self.pk, "Cannot update an instance that has not yet been created."

    using = using or router.db_for_write(self.__class__, instance=self)

    for field in self._meta.fields:
        if getattr(field, 'auto_now', False) and field.name not in kwargs:
            kwargs[field.name] = field.pre_save(self, False)

    affected = self.__class__._base_manager.using(using).filter(pk=self.pk).update(**kwargs)
    for k, v in six.iteritems(kwargs):
        if isinstance(v, ExpressionNode):
            v = resolve_expression_node(self, v)
        setattr(self, k, v)
    if affected == 1:
        post_save.send(sender=self.__class__, instance=self, created=False)
        return affected
    elif affected == 0:
        return affected
    elif affected < 0:
        raise ValueError("Somehow we have updated a negative amount of rows, you seem to have a problem with your db backend.")
    else:
        raise ValueError("Somehow we have updated multiple rows, and you are now royally f****d.")
Example #19
    def load(self, model_name, csv_path=None):
        """
        Loads the source CSV for the provided model.
        """

        if self.verbosity > 2:
            self.log(" Loading %s" % model_name)

        model = apps.get_model(self.app_name, model_name)
        csv_path = csv_path or self.csv or model.objects.get_csv_path()

        if getattr(settings, 'CALACCESS_DAT_SOURCE', None) and six.PY2:
            self.load_dat(model, csv_path)
        self.database = self.database or router.db_for_write(model=model)

        try:
            engine = settings.DATABASES[self.database]['ENGINE']
        except KeyError:
            raise TypeError("{} not configured in DATABASES settings.".format(self.database))

        self.connection = connections[self.database]
        self.cursor = self.connection.cursor()

        if engine == 'django.db.backends.mysql':
            self.load_mysql(model, csv_path)
        else:
            self.failure("Sorry your database engine is unsupported")
            raise CommandError(
                "Only MySQL backend is supported."
            )
Example #20
File: merge.py Project: berny9015/sentry
def merge_objects(models, group, new_group, limit=1000,
                  logger=None):

    has_more = False
    for model in models:
        if logger is not None:
            logger.info('Merging %r objects where %r into %r', model, group,
                        new_group)
        has_group = 'group' in model._meta.get_all_field_names()
        if has_group:
            queryset = model.objects.filter(group=group)
        else:
            queryset = model.objects.filter(group_id=group.id)
        for obj in queryset[:limit]:
            try:
                with transaction.atomic(using=router.db_for_write(model)):
                    if has_group:
                        model.objects.filter(
                            id=obj.id
                        ).update(group=new_group)
                    else:
                        model.objects.filter(
                            id=obj.id
                        ).update(group_id=new_group.id)
            except IntegrityError:
                delete = True
            else:
                delete = False
            if delete:
                obj.delete()
            has_more = True

        if has_more:
            return True
    return has_more
Example #21
File: db.py Project: neogoku/nfv_api
    def get(self, key, default=None, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_read(self.cache_model_class)
        connection = connections[db]
        table = connection.ops.quote_name(self._table)

        with connection.cursor() as cursor:
            cursor.execute("SELECT cache_key, value, expires FROM %s "
                           "WHERE cache_key = %%s" % table, [key])
            row = cursor.fetchone()
        if row is None:
            return default

        expires = row[2]
        expression = models.Expression(output_field=models.DateTimeField())
        for converter in (connection.ops.get_db_converters(expression) +
                              expression.get_db_converters(connection)):
            expires = converter(expires, expression, connection, {})

        if expires < timezone.now():
            db = router.db_for_write(self.cache_model_class)
            connection = connections[db]
            with connection.cursor() as cursor:
                cursor.execute("DELETE FROM %s "
                               "WHERE cache_key = %%s" % table, [key])
            return default

        value = connection.ops.process_clob(row[1])
        return pickle.loads(base64.b64decode(force_bytes(value)))
Example #22
    def _get_event_user(self, project, data):
        user_data = data.get('sentry.interfaces.User')
        if not user_data:
            return

        euser = EventUser(
            project=project,
            ident=user_data.get('id'),
            email=user_data.get('email'),
            username=user_data.get('username'),
            ip_address=user_data.get('ip_address'),
        )

        if not euser.tag_value:
            return

        cache_key = 'euser:{}:{}'.format(
            project.id,
            md5_text(euser.tag_value).hexdigest(),
        )
        cached = default_cache.get(cache_key)
        if cached is None:
            try:
                with transaction.atomic(using=router.db_for_write(EventUser)):
                    euser.save()
            except IntegrityError:
                pass
            default_cache.set(cache_key, '', 3600)

        return euser
Example #23
 def save(self, *args, **kwargs):
     if not self.pk and not self.slug:
         self.slug = self.slugify(self.name)
         from django.db import router
         using = kwargs.get("using") or router.db_for_write(
             type(self), instance=self)
         # Make sure we write to the same db for all attempted writes,
         # with a multi-master setup, theoretically we could try to
         # write and rollback on different DBs
         kwargs["using"] = using
         # Be opportunistic and try to save the tag; this should work for
         # most cases ;)
         try:
             with atomic(using=using):
                 res = super(TagBase, self).save(*args, **kwargs)
             return res
         except IntegrityError:
             pass
         # Now try to find existing slugs with similar names
         slugs = set(
             self.__class__._default_manager
             .filter(slug__startswith=self.slug)
             .values_list('slug', flat=True)
         )
         i = 1
         while True:
             slug = self.slugify(self.name, i)
             if slug not in slugs:
                 self.slug = slug
                 # We purposely ignore concurrency issues here for now.
                 # (That is, until we find a nice solution...)
                 return super(TagBase, self).save(*args, **kwargs)
             i += 1
     else:
         return super(TagBase, self).save(*args, **kwargs)
Example #24
        def set(self, objs, *, clear=False):
            if not rel.through._meta.auto_created:
                opts = self.through._meta
                raise AttributeError(
                    "Cannot set values on a ManyToManyField which specifies an "
                    "intermediary model. Use %s.%s's Manager instead." %
                    (opts.app_label, opts.object_name)
                )

            # Force evaluation of `objs` in case it's a queryset whose value
            # could be affected by `manager.clear()`. Refs #19816.
            objs = tuple(objs)

            db = router.db_for_write(self.through, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                if clear:
                    self.clear()
                    self.add(*objs)
                else:
                    old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))

                    new_objs = []
                    for obj in objs:
                        fk_val = (
                            self.target_field.get_foreign_related_value(obj)[0]
                            if isinstance(obj, self.model) else obj
                        )
                        if fk_val in old_ids:
                            old_ids.remove(fk_val)
                        else:
                            new_objs.append(obj)

                    self.remove(*old_ids)
                    self.add(*new_objs)
Example #25
    def evolver(self, model):
        db_name = None

        if is_multi_db():
            db_name = router.db_for_write(model)

        return EvolutionOperationsMulti(db_name).get_evolver()
Example #26
    def save(self, must_create=False, schema=None):
        """
        Saves the current session data to the database. If 'must_create' is
        True, a database error will be raised if the saving operation doesn't
        create a *new* entry (as opposed to possibly updating an existing
        entry).
        """
        if schema is None:  # a schema is required
            import inspect
            fx = inspect.stack()
            error_detail = ""
            for x in fx:
                # Each frame record holds the filename at index 1 and the line number at index 2.
                error_detail += "\n\t {0}, line {1}".format(x[1], x[2])
            raise Exception(
                "cannot call '{1}' without a schema in '{0}'.\nDetail:\n{2}".format(
                    __file__, "db.save",
                    error_detail
                ))

        obj = Session(
            session_key=self._get_or_create_session_key(),
            session_data=self.encode(self._get_session(no_load=must_create)),
            expire_date=self.get_expiry_date()
        )
        using = router.db_for_write(Session, instance=obj)
        try:
            with transaction.atomic(using=using):
                obj.save(force_insert=must_create, using=using, schema=schema)
        except IntegrityError:
            if must_create:
                raise CreateError
            raise
Example #27
        def add(self, *objs, bulk=True):
            self._remove_prefetched_objects()
            objs = list(objs)
            db = router.db_for_write(self.model, instance=self.instance)

            def check_and_update_obj(obj):
                if not isinstance(obj, self.model):
                    raise TypeError("'%s' instance expected, got %r" % (
                        self.model._meta.object_name, obj,
                    ))
                setattr(obj, self.field.name, self.instance)

            if bulk:
                pks = []
                for obj in objs:
                    check_and_update_obj(obj)
                    if obj._state.adding or obj._state.db != db:
                        raise ValueError(
                            "%r instance isn't saved. Use bulk=False or save "
                            "the object first." % obj
                        )
                    pks.append(obj.pk)
                self.model._base_manager.using(db).filter(pk__in=pks).update(**{
                    self.field.name: self.instance,
                })
            else:
                with transaction.atomic(using=db, savepoint=False):
                    for obj in objs:
                        check_and_update_obj(obj)
                        obj.save()
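Through a reverse foreign-key manager this bulk path collapses into a single UPDATE; a short usage sketch, assuming a Book model with a nullable ForeignKey to Author (illustrative names):

    author.book_set.add(book1, book2)               # one UPDATE ... WHERE pk IN (...)
    author.book_set.add(draft_book, bulk=False)     # saves each object individually instead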
Example #28
File: base.py Project: DemonZX/django
    def save_base(self, raw=False, force_insert=False,
                  force_update=False, using=None, update_fields=None):
        """
        Handles the parts of saving which should be done only once per save,
        yet need to be done in raw saves, too. This includes some sanity
        checks and signal sending.

        The 'raw' argument is telling save_base not to save any parent
        models and not to do any changes to the values before save. This
        is used by fixture loading.
        """
        using = using or router.db_for_write(self.__class__, instance=self)
        assert not (force_insert and (force_update or update_fields))
        assert update_fields is None or len(update_fields) > 0
        cls = origin = self.__class__
        # Skip proxies, but keep the origin as the proxy model.
        if cls._meta.proxy:
            cls = cls._meta.concrete_model
        meta = cls._meta
        if not meta.auto_created:
            signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
                                  update_fields=update_fields)
        with transaction.commit_on_success_unless_managed(using=using, savepoint=False):
            if not raw:
                self._save_parents(cls, using, update_fields)
            updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
        # Store the database on which the object was saved
        self._state.db = using
        # Once saved, this is no longer a to-be-added instance.
        self._state.adding = False

        # Signal that the save is complete
        if not meta.auto_created:
            signals.post_save.send(sender=origin, instance=self, created=(not updated),
                                   update_fields=update_fields, raw=raw, using=using)
Example #29
    def save(self, *args, **kwargs):
        if not self.pk and not self.slug:
            self.slug = self.slugify(self.name)
            if django.VERSION >= (1, 2):
                from django.db import router

                using = kwargs.get("using") or router.db_for_write(type(self), instance=self)
                # Make sure we write to the same db for all attempted writes,
                # with a multi-master setup, theoretically we could try to
                # write and rollback on different DBs
                kwargs["using"] = using
                trans_kwargs = {"using": using}
            else:
                trans_kwargs = {}
            i = 0
            while True:
                i += 1
                try:
                    sid = transaction.savepoint(**trans_kwargs)
                    res = super(TagBase, self).save(*args, **kwargs)
                    transaction.savepoint_commit(sid, **trans_kwargs)
                    return res
                except IntegrityError:
                    transaction.savepoint_rollback(sid, **trans_kwargs)
                    self.slug = self.slugify(self.name, i)
        else:
            return super(TagBase, self).save(*args, **kwargs)
Example #30
    def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT):
        exp = self.get_backend_timeout(timeout)
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        self._maybe_cull()
        with connections[db].cursor() as cursor:

            value, value_type = self.encode(value)

            if mode == 'set':
                query = self._set_query
                params = (key, value, value_type, exp)
            elif mode == 'add':
                query = self._add_query
                params = (key, value, value_type, exp, self._now())

            cursor.execute(query.format(table=table), params)

            if mode == 'set':
                return True
            elif mode == 'add':
                # Use a special code in the add query for "did insert"
                insert_id = cursor.lastrowid
                return (insert_id != 444)
Example #31
 def clear(self):
     db = router.db_for_write(self.cache_model_class)
     connection = connections[db]
     table = connection.ops.quote_name(self._table)
     with connection.cursor() as cursor:
         cursor.execute('DELETE FROM %s' % table)
Example #32
 def delete(self, using=None, keep_parents=False):
     """Copy of Django delete with changed collector."""
     using = using or router.db_for_write(self.__class__, instance=self)
     collector = FastCollector(using=using)
     collector.collect([self], keep_parents=keep_parents)
     return collector.delete()
Example #33
    def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT):
        timeout = self.get_backend_timeout(timeout)
        db = router.db_for_write(self.cache_model_class)
        connection = connections[db]
        quote_name = connection.ops.quote_name
        table = quote_name(self._table)

        with connection.cursor() as cursor:
            cursor.execute("SELECT COUNT(*) FROM %s" % table)
            num = cursor.fetchone()[0]
            now = timezone.now()
            now = now.replace(microsecond=0)
            if timeout is None:
                exp = datetime.max
            elif settings.USE_TZ:
                exp = datetime.utcfromtimestamp(timeout)
            else:
                exp = datetime.fromtimestamp(timeout)
            exp = exp.replace(microsecond=0)
            if num > self._max_entries:
                self._cull(db, cursor, now)
            pickled = pickle.dumps(value, self.pickle_protocol)
            # The DB column is expecting a string, so make sure the value is a
            # string, not bytes. Refs #19274.
            b64encoded = base64.b64encode(pickled).decode('latin1')
            try:
                # Note: typecasting for datetimes is needed by some 3rd party
                # database backends. All core backends work without typecasting,
                # so be careful about changes here - test suite will NOT pick
                # regressions.
                with transaction.atomic(using=db):
                    cursor.execute(
                        'SELECT %s, %s FROM %s WHERE %s = %%s' % (
                            quote_name('cache_key'),
                            quote_name('expires'),
                            table,
                            quote_name('cache_key'),
                        ), [key])
                    result = cursor.fetchone()

                    if result:
                        current_expires = result[1]
                        expression = models.Expression(
                            output_field=models.DateTimeField())
                        for converter in (
                                connection.ops.get_db_converters(expression) +
                                expression.get_db_converters(connection)):
                            current_expires = converter(
                                current_expires, expression, connection)

                    exp = connection.ops.adapt_datetimefield_value(exp)
                    if result and mode == 'touch':
                        cursor.execute(
                            'UPDATE %s SET %s = %%s WHERE %s = %%s' %
                            (table, quote_name('expires'),
                             quote_name('cache_key')), [exp, key])
                    elif result and (mode == 'set' or
                                     (mode == 'add'
                                      and current_expires < now)):
                        cursor.execute(
                            'UPDATE %s SET %s = %%s, %s = %%s WHERE %s = %%s' %
                            (
                                table,
                                quote_name('value'),
                                quote_name('expires'),
                                quote_name('cache_key'),
                            ), [b64encoded, exp, key])
                    elif mode != 'touch':
                        cursor.execute(
                            'INSERT INTO %s (%s, %s, %s) VALUES (%%s, %%s, %%s)'
                            % (
                                table,
                                quote_name('cache_key'),
                                quote_name('value'),
                                quote_name('expires'),
                            ), [key, b64encoded, exp])
                    else:
                        return False  # touch failed.
            except DatabaseError:
                # To be threadsafe, updates/inserts are allowed to fail silently
                return False
            else:
                return True
Example #34
    def __init__(self, model, data, mapping, layer=0,
                 source_srs=None, encoding='utf-8',
                 transaction_mode='commit_on_success',
                 transform=True, unique=None, using=None):
        """
        A LayerMapping object is initialized using the given Model (not an instance),
        a DataSource (or string path to an OGR-supported data file), and a mapping
        dictionary.  See the module level docstring for more details and keyword
        argument usage.
        """
        # Getting the DataSource and the associated Layer.
        if isinstance(data, six.string_types):
            self.ds = DataSource(data, encoding=encoding)
        else:
            self.ds = data
        self.layer = self.ds[layer]

        self.using = using if using is not None else router.db_for_write(model)
        self.spatial_backend = connections[self.using].ops

        # Setting the mapping & model attributes.
        self.mapping = mapping
        self.model = model

        # Checking the layer -- initialization of the object will fail if
        # things don't check out before hand.
        self.check_layer()

        # Getting the geometry column associated with the model (an
        # exception will be raised if there is no geometry column).
        if connections[self.using].features.supports_transform:
            self.geo_field = self.geometry_field()
        else:
            transform = False

        # Checking the source spatial reference system, and getting
        # the coordinate transformation object (unless the `transform`
        # keyword is set to False)
        if transform:
            self.source_srs = self.check_srs(source_srs)
            self.transform = self.coord_transform()
        else:
            self.transform = transform

        # Setting the encoding for OFTString fields, if specified.
        if encoding:
            # Making sure the encoding exists, if not a LookupError
            # exception will be thrown.
            from codecs import lookup
            lookup(encoding)
            self.encoding = encoding
        else:
            self.encoding = None

        if unique:
            self.check_unique(unique)
            transaction_mode = 'autocommit'  # Has to be set to autocommit.
            self.unique = unique
        else:
            self.unique = None

        # Setting the transaction decorator with the function in the
        # transaction modes dictionary.
        self.transaction_mode = transaction_mode
        if transaction_mode == 'autocommit':
            self.transaction_decorator = None
        elif transaction_mode == 'commit_on_success':
            self.transaction_decorator = transaction.atomic
        else:
            raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
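Typical use is to build the mapper and then call its save() method; a brief sketch, assuming a WorldBorder model and a shapefile path (both illustrative):

    from django.contrib.gis.utils import LayerMapping

    mapping = {
        'name': 'NAME',           # model field -> attribute field in the shapefile
        'mpoly': 'MULTIPOLYGON',  # geometry field -> OGR geometry type
    }
    lm = LayerMapping(WorldBorder, '/path/to/world_borders.shp', mapping,
                      transform=False, using='default')
    lm.save(strict=True, verbose=True)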
Example #35
 def add_view(self, request, form_url='', extra_context=None):
     with transaction.atomic(using=router.db_for_write(self.model)):
         return self._add_view(request, form_url, extra_context)