Example #1
    def _value_for_db_model(self, value, field, field_kind, db_type, lookup):
        """
        Converts a field => value mapping received from an
        EmbeddedModelField to the format chosen for the field storage.

        The embedded instance fields' values are also converted /
        deconverted using value_for/from_db, so any back-end
        conversions will be applied.

        Returns (field.column, value) pairs, possibly augmented with
        model info (to be able to deconvert the embedded instance for
        untyped fields) encoded according to the db_type chosen.
        If "dict" db_type is given a Python dict is returned.
        If "list db_type is chosen a list with columns and values
        interleaved will be returned. Note that just a single level of
        the list is flattened, so it still may be nested -- when the
        embedded instance holds other embedded models or collections).
        Using "bytes" or "string" pickles the mapping using pickle
        protocol 0 or 2 respectively.
        If an unknown db_type is used a generator yielding (column,
        value) pairs with values converted will be returned.

        TODO: How should EmbeddedModelField lookups work?
        """
        if lookup:
            # raise NotImplementedError("Needs specification.")
            return value

        # Convert using proper instance field's info, change keys from
        # fields to columns.
        # TODO/XXX: Arguments order due to Python 2.5 compatibility.
        value = ((subfield.column,
                  self._value_for_db(subvalue,
                                     lookup=lookup,
                                     *self._convert_as(subfield, lookup)))
                 for subfield, subvalue in six.iteritems(value))

        # Cast to a dict, interleave columns with values on a list,
        # serialize, or return a generator.
        if db_type == 'dict':
            value = dict(value)
        elif db_type == 'list':
            value = list(item for pair in value for item in pair)
        elif db_type == 'bytes':
            value = pickle.dumps(dict(value), protocol=2)
        elif db_type == 'string':
            value = pickle.dumps(dict(value))

        return value
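
To make the storage formats described in the docstring concrete, here is a small self-contained sketch (the column names and values are invented for illustration) of what the "dict", "list", "bytes" and "string" db_types produce for a simple column/value mapping:

    import pickle

    # Hypothetical converted (column, value) pairs for an embedded instance.
    pairs = [('name', 'Bob'), ('age', 42)]

    as_dict = dict(pairs)                                # {'name': 'Bob', 'age': 42}
    as_list = [item for pair in pairs for item in pair]  # ['name', 'Bob', 'age', 42]
    as_bytes = pickle.dumps(dict(pairs), protocol=2)     # binary pickle
    as_string = pickle.dumps(dict(pairs))                # default protocol (0 on Python 2)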
Example #2
    def _value_for_db_model(self, value, field, field_kind, db_type, lookup):
        """
        Converts a field => value mapping received from an
        EmbeddedModelField to the format chosen for the field storage.

        The embedded instance fields' values are also converted /
        deconverted using value_for/from_db, so any back-end
        conversions will be applied.

        Returns (field.column, value) pairs, possibly augmented with
        model info (to be able to deconvert the embedded instance for
        untyped fields) encoded according to the db_type chosen.
        If "dict" db_type is given a Python dict is returned.
        If "list db_type is chosen a list with columns and values
        interleaved will be returned. Note that just a single level of
        the list is flattened, so it still may be nested -- when the
        embedded instance holds other embedded models or collections).
        Using "bytes" or "string" pickles the mapping using pickle
        protocol 0 or 2 respectively.
        If an unknown db_type is used a generator yielding (column,
        value) pairs with values converted will be returned.

        TODO: How should EmbeddedModelField lookups work?
        """
        if lookup:
            # raise NotImplementedError("Needs specification.")
            return value

        # Convert using proper instance field's info, change keys from
        # fields to columns.
        # TODO/XXX: Arguments order due to Python 2.5 compatibility.
        value = (
            (subfield.column, self._value_for_db(
                subvalue, lookup=lookup, *self._convert_as(subfield, lookup)))
            for subfield, subvalue in value.iteritems())

        # Cast to a dict, interleave columns with values on a list,
        # serialize, or return a generator.
        if db_type == 'dict':
            value = dict(value)
        elif db_type == 'list':
            value = list(item for pair in value for item in pair)
        elif db_type == 'bytes':
            value = pickle.dumps(dict(value), protocol=2)
        elif db_type == 'string':
            value = pickle.dumps(dict(value))

        return value
Example #3
File: db.py Project: calendar42/django
    def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT):
        timeout = self.get_backend_timeout(timeout)
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)

        with connections[db].cursor() as cursor:
            cursor.execute("SELECT COUNT(*) FROM %s" % table)
            num = cursor.fetchone()[0]
            now = timezone.now()
            now = now.replace(microsecond=0)
            if timeout is None:
                exp = datetime.max
            elif settings.USE_TZ:
                exp = datetime.utcfromtimestamp(timeout)
            else:
                exp = datetime.fromtimestamp(timeout)
            exp = exp.replace(microsecond=0)
            if num > self._max_entries:
                self._cull(db, cursor, now)
            pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
            b64encoded = base64.b64encode(pickled)
            # The DB column is expecting a string, so make sure the value is a
            # string, not bytes. Refs #19274.
            if six.PY3:
                b64encoded = b64encoded.decode('latin1')
            try:
                # Note: typecasting for datetimes is needed by some 3rd party
                # database backends. All core backends work without typecasting,
                # so be careful about changes here - test suite will NOT pick
                # regressions.
                with transaction.atomic(using=db):
                    cursor.execute(
                        "SELECT cache_key, expires FROM %s "
                        "WHERE cache_key = %%s" % table, [key])
                    result = cursor.fetchone()
                    if result:
                        current_expires = result[1]
                        if (connections[db].features.needs_datetime_string_cast
                                and not isinstance(current_expires, datetime)):
                            current_expires = typecast_timestamp(
                                str(current_expires))
                    exp = connections[db].ops.value_to_db_datetime(exp)
                    if result and (mode == 'set' or
                                   (mode == 'add' and current_expires < now)):
                        cursor.execute(
                            "UPDATE %s SET value = %%s, expires = %%s "
                            "WHERE cache_key = %%s" % table,
                            [b64encoded, exp, key])
                    elif not result:
                        cursor.execute(
                            "INSERT INTO %s (cache_key, value, expires) "
                            "VALUES (%%s, %%s, %%s)" % table,
                            [key, b64encoded, exp])
                    else:
                        return False
            except DatabaseError:
                # To be threadsafe, updates/inserts are allowed to fail silently
                return False
            else:
                return True
Example #4
 def test_pickle(self):
     "Testing pickle support."
     g1 = OGRGeometry("LINESTRING(1 1 1,2 2 2,3 3 3)", "WGS84")
     g2 = pickle.loads(pickle.dumps(g1))
     self.assertEqual(g1, g2)
     self.assertEqual(4326, g2.srs.srid)
     self.assertEqual(g1.srs.wkt, g2.srs.wkt)
Example #5
 def test17_pickle(self):
     "Testing pickle support."
     g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84')
     g2 = pickle.loads(pickle.dumps(g1))
     self.assertEqual(g1, g2)
     self.assertEqual(4326, g2.srs.srid)
     self.assertEqual(g1.srs.wkt, g2.srs.wkt)
Example #6
    def test_pickle(self):
        "Testing pickling and unpickling support."
        # Using both pickle and cPickle -- just 'cause.
        from django.utils.six.moves import cPickle
        import pickle

        # Creating a list of test geometries for pickling,
        # and setting the SRID on some of them.
        def get_geoms(lst, srid=None):
            return [GEOSGeometry(tg.wkt, srid) for tg in lst]

        tgeoms = get_geoms(self.geometries.points)
        tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
        tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
        tgeoms.extend(get_geoms(self.geometries.multipolygons, 3857))

        # The SRID won't be exported in GEOS 3.0 release candidates.
        no_srid = self.null_srid == -1
        for geom in tgeoms:
            s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
            g1, g2 = cPickle.loads(s1), pickle.loads(s2)
            for tmpg in (g1, g2):
                self.assertEqual(geom, tmpg)
                if not no_srid:
                    self.assertEqual(geom.srid, tmpg.srid)
Example #7
    def test_pickle(self):
        "Testing pickling and unpickling support."
        # Using both pickle and cPickle -- just 'cause.
        from django.utils.six.moves import cPickle
        import pickle

        # Creating a list of test geometries for pickling,
        # and setting the SRID on some of them.
        def get_geoms(lst, srid=None):
            return [GEOSGeometry(tg.wkt, srid) for tg in lst]

        tgeoms = get_geoms(self.geometries.points)
        tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
        tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
        tgeoms.extend(get_geoms(self.geometries.multipolygons, 900913))

        # The SRID won't be exported in GEOS 3.0 release candidates.
        no_srid = self.null_srid == -1
        for geom in tgeoms:
            s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
            g1, g2 = cPickle.loads(s1), pickle.loads(s2)
            for tmpg in (g1, g2):
                self.assertEqual(geom, tmpg)
                if not no_srid:
                    self.assertEqual(geom.srid, tmpg.srid)
Example #8
 def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
     self._createdir()  # Cache dir can be deleted at any time.
     fname = self._key_to_file(key, version)
     self._cull()  # make some room if necessary
     fd, tmp_path = tempfile.mkstemp(dir=self._dir)
     renamed = False
     try:
         with io.open(fd, 'wb') as f:
             expiry = self.get_backend_timeout(timeout)
             f.write(pickle.dumps(expiry, -1))
             f.write(zlib.compress(pickle.dumps(value), -1))
         file_move_safe(tmp_path, fname, allow_overwrite=True)
         renamed = True
     finally:
         if not renamed:
             os.remove(tmp_path)
Example #9
 def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
     self._createdir()  # Cache dir can be deleted at any time.
     fname = self._key_to_file(key, version)
     self._cull()  # make some room if necessary
     fd, tmp_path = tempfile.mkstemp(dir=self._dir)
     renamed = False
     try:
         with io.open(fd, 'wb') as f:
             expiry = self.get_backend_timeout(timeout)
             f.write(pickle.dumps(expiry, pickle.HIGHEST_PROTOCOL))
             f.write(zlib.compress(pickle.dumps(value, pickle.HIGHEST_PROTOCOL)))
         file_move_safe(tmp_path, fname, allow_overwrite=True)
         renamed = True
     finally:
         if not renamed:
             os.remove(tmp_path)
Example #10
File: db.py Project: nbsky/django
    def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT):
        timeout = self.get_backend_timeout(timeout)
        db = router.db_for_write(self.cache_model_class)
        connection = connections[db]
        table = connection.ops.quote_name(self._table)

        with connection.cursor() as cursor:
            cursor.execute("SELECT COUNT(*) FROM %s" % table)
            num = cursor.fetchone()[0]
            now = timezone.now()
            now = now.replace(microsecond=0)
            if timeout is None:
                exp = datetime.max
            elif settings.USE_TZ:
                exp = datetime.utcfromtimestamp(timeout)
            else:
                exp = datetime.fromtimestamp(timeout)
            exp = exp.replace(microsecond=0)
            if num > self._max_entries:
                self._cull(db, cursor, now)
            pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
            b64encoded = base64.b64encode(pickled)
            # The DB column is expecting a string, so make sure the value is a
            # string, not bytes. Refs #19274.
            if six.PY3:
                b64encoded = b64encoded.decode("latin1")
            try:
                # Note: typecasting for datetimes is needed by some 3rd party
                # database backends. All core backends work without typecasting,
                # so be careful about changes here - test suite will NOT pick
                # regressions.
                with transaction.atomic(using=db):
                    cursor.execute("SELECT cache_key, expires FROM %s " "WHERE cache_key = %%s" % table, [key])
                    result = cursor.fetchone()

                    if result:
                        current_expires = result[1]
                        expression = models.Expression(output_field=models.DateTimeField())
                        for converter in connection.ops.get_db_converters(expression) + expression.get_db_converters(
                            connection
                        ):
                            current_expires = converter(current_expires, expression, connection, {})

                    exp = connection.ops.adapt_datetimefield_value(exp)
                    if result and (mode == "set" or (mode == "add" and current_expires < now)):
                        cursor.execute(
                            "UPDATE %s SET value = %%s, expires = %%s " "WHERE cache_key = %%s" % table,
                            [b64encoded, exp, key],
                        )
                    else:
                        cursor.execute(
                            "INSERT INTO %s (cache_key, value, expires) " "VALUES (%%s, %%s, %%s)" % table,
                            [key, b64encoded, exp],
                        )
            except DatabaseError:
                # To be threadsafe, updates/inserts are allowed to fail silently
                return False
            else:
                return True
Example #11
def encode_param(args, kwargs):
    """
    序列化参数,保证相同的参数返回相同的值,不同的参数返回不同的值
    """
    #return repr((args, kwargs)) # repr 没法区分 Model 类型
    value = pickle.dumps((args, kwargs), pickle.HIGHEST_PROTOCOL)
    value = md5(value).hexdigest()  # 防止返回值太长
    return value
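
As a rough usage illustration (a standalone version with the needed imports; handling of the Django Model caveat from the comment above is out of scope here), identical arguments produce identical keys while different arguments do not:

import pickle
from hashlib import md5

def encode_param(args, kwargs):
    value = pickle.dumps((args, kwargs), pickle.HIGHEST_PROTOCOL)
    return md5(value).hexdigest()

assert encode_param((1, 2), {'a': 3}) == encode_param((1, 2), {'a': 3})
assert encode_param((1, 2), {'a': 3}) != encode_param((1, 2), {'a': 4})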
Example #12
 def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
     key = self.make_key(key, version=version)
     self.validate_key(key)
     with self._lock.writer():
         try:
             pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
             self._set(key, pickled, timeout)
         except pickle.PickleError:
             pass
Example #13
 def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
     key = self.make_key(key, version=version)
     self.validate_key(key)
     pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
     with self._lock.writer():
         if self._has_expired(key):
             self._set(key, pickled, timeout)
             return True
         return False
Example #14
File: locmem.py Project: 10sr/hue
 def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
     key = self.make_key(key, version=version)
     self.validate_key(key)
     with self._lock.writer():
         try:
             pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
             self._set(key, pickled, timeout)
         except pickle.PickleError:
             pass
Example #15
 def encode(self, session_dict):
     data = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
     flag = 0
     if conf['COMPRESS_LIB'] and len(data) >= conf['COMPRESS_MIN_LENGTH']:
         compressed = compress_lib.compress(data)
         if len(compressed) < len(data):
             flag |= FLAG_COMPRESSED
             data = compressed
     return chr(flag) + data
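
A matching decode would presumably inspect the flag byte first. A minimal sketch under that assumption (pickle, FLAG_COMPRESSED and compress_lib are taken to be defined as in the example above; Python 2 string indexing is assumed, as in the original):

def decode(self, data):
    # Hypothetical inverse of encode() above: the first byte carries the flags.
    flag = ord(data[0])
    payload = data[1:]
    if flag & FLAG_COMPRESSED:
        payload = compress_lib.decompress(payload)
    return pickle.loads(payload)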
Example #16
def test_storage_gone(pic1):
    product = Product.objects.create(image=pic1['filename'])
    assert os.path.exists(pic1['path'])
    product.image = 'new.jpg'
    product = pickle.loads(pickle.dumps(product))
    assert hasattr(product.image, 'storage')
    with transaction.atomic(get_using(product)):
        product.save()
    assert not os.path.exists(pic1['path'])
Example #17
File: locmem.py Project: 1oscar/django
 def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
     key = self.make_key(key, version=version)
     self.validate_key(key)
     pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
     with self._lock.writer():
         if self._has_expired(key):
             self._set(key, pickled, timeout)
             return True
         return False
Example #18
def test_storage_gone(pic1):
    product = Product.objects.create(image=pic1['filename'])
    assert os.path.exists(pic1['path'])
    product.image = random_pic()
    product = pickle.loads(pickle.dumps(product))
    assert hasattr(product.image, 'storage')
    with transaction.atomic(get_using(product)):
        product.save()
    assert not os.path.exists(pic1['path'])
Example #19
 def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
     key = self.make_key(key, version=version)
     self.validate_key(key)
     pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
     with self._lock.writer():
         exp = self._expire_info.get(key)
         if exp is None or exp <= time.time():
             self._set(key, pickled, timeout)
             return True
         return False
Example #20
File: locmem.py Project: 1oscar/django
 def incr(self, key, delta=1, version=None):
     with self._lock.writer():
         value = self.get(key, version=version, acquire_lock=False)
         if value is None:
             raise ValueError("Key '%s' not found" % key)
         new_value = value + delta
         key = self.make_key(key, version=version)
         pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL)
         self._cache[key] = pickled
     return new_value
Example #21
 def incr(self, key, delta=1, version=None):
     with self._lock.writer():
         value = self.get(key, version=version, acquire_lock=False)
         if value is None:
             raise ValueError("Key '%s' not found" % key)
         new_value = value + delta
         key = self.make_key(key, version=version)
         pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL)
         self._cache[key] = pickled
     return new_value
Example #22
 def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
     key = self.make_key(key, version=version)
     self.validate_key(key)
     pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
     with self._lock.writer():
         exp = self._expire_info.get(key)
         if exp is None or exp <= time.time():
             self._set(key, pickled, timeout)
             return True
         return False
Example #23
File: db.py Project: GregMeno/django
    def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT):
        if timeout == DEFAULT_TIMEOUT:
            timeout = self.default_timeout
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        cursor.execute("SELECT COUNT(*) FROM %s" % table)
        num = cursor.fetchone()[0]
        now = timezone.now()
        now = now.replace(microsecond=0)
        if timeout is None:
            exp = datetime.max
        elif settings.USE_TZ:
            exp = datetime.utcfromtimestamp(time.time() + timeout)
        else:
            exp = datetime.fromtimestamp(time.time() + timeout)
        exp = exp.replace(microsecond=0)
        if num > self._max_entries:
            self._cull(db, cursor, now)
        pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        b64encoded = base64.b64encode(pickled)
        # The DB column is expecting a string, so make sure the value is a
        # string, not bytes. Refs #19274.
        if six.PY3:
            b64encoded = b64encoded.decode('latin1')
        try:
            # Note: typecasting for datetimes is needed by some 3rd party
            # database backends. All core backends work without typecasting,
            # so be careful about changes here - test suite will NOT pick
            # regressions.
            with transaction.atomic(using=db):
                cursor.execute("SELECT cache_key, expires FROM %s "
                               "WHERE cache_key = %%s" % table, [key])
                result = cursor.fetchone()
                if result:
                    current_expires = result[1]
                    if (connections[db].features.needs_datetime_string_cast and not
                            isinstance(current_expires, datetime)):
                        current_expires = typecast_timestamp(str(current_expires))
                exp = connections[db].ops.value_to_db_datetime(exp)
                if result and (mode == 'set' or (mode == 'add' and current_expires < now)):
                    cursor.execute("UPDATE %s SET value = %%s, expires = %%s "
                                   "WHERE cache_key = %%s" % table,
                                   [b64encoded, exp, key])
                else:
                    cursor.execute("INSERT INTO %s (cache_key, value, expires) "
                                   "VALUES (%%s, %%s, %%s)" % table,
                                   [key, b64encoded, exp])
        except DatabaseError:
            # To be threadsafe, updates/inserts are allowed to fail silently
            return False
        else:
            return True
Example #24
    def _build_test_chunk_data(self, num_chunks):
        """Build enough test data to fill up the specified number of chunks.

        This takes into account the size of the pickle data, and will
        get us to exactly the specified number of chunks of data in the cache.
        """
        data = 'x' * (CACHE_CHUNK_SIZE * num_chunks - 3 * num_chunks)
        pickled_data = pickle.dumps(data)

        self.assertEqual(len(pickled_data), CACHE_CHUNK_SIZE * num_chunks)

        return data, pickled_data
Example #25
    def test_pickle(self):
        letters = 'abcde'
        cache.clear()

        for num, val in enumerate(letters):
            cache.set(val, num)

        data = pickle.dumps(cache)
        other = pickle.loads(data)

        for key in letters:
            self.assertEqual(other.get(key), cache.get(key))
Example #26
    def _build_test_chunk_data(self, num_chunks):
        """Build enough test data to fill up the specified number of chunks.

        This takes into account the size of the pickle data, and will
        get us to exactly the specified number of chunks of data in the cache.
        """
        data = 'x' * (CACHE_CHUNK_SIZE * num_chunks - 3 * num_chunks)
        pickled_data = pickle.dumps(data)

        self.assertEqual(len(pickled_data), CACHE_CHUNK_SIZE * num_chunks)

        return data, pickled_data
Example #27
File: locmem.py Project: 10sr/hue
 def incr(self, key, delta=1, version=None):
     value = self.get(key, version=version)
     if value is None:
         raise ValueError("Key '%s' not found" % key)
     new_value = value + delta
     key = self.make_key(key, version=version)
     with self._lock.writer():
         try:
             pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL)
             self._cache[key] = pickled
         except pickle.PickleError:
             pass
     return new_value
Example #28
 def add(self, key, value, timeout=None, version=None):
     key = self.make_key(key, version=version)
     self.validate_key(key)
     with self._lock.writer():
         exp = self._expire_info.get(key)
         if exp is None or exp <= time.time():
             try:
                 pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
                 self._set(key, pickled, timeout)
                 return True
             except pickle.PickleError:
                 pass
         return False
Example #29
 def add(self, key, value, timeout=None, version=None):
     key = self.make_key(key, version=version)
     self.validate_key(key)
     with self._lock.writer():
         exp = self._expire_info.get(key)
         if exp is None or exp <= time.time():
             try:
                 pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
                 self._set(key, pickled, timeout)
                 return True
             except pickle.PickleError:
                 pass
         return False
Example #30
 def incr(self, key, delta=1, version=None):
     value = self.get(key, version=version)
     if value is None:
         raise ValueError("Key '%s' not found" % key)
     new_value = value + delta
     key = self.make_key(key, version=version)
     with self._lock.writer():
         try:
             pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL)
             self._cache[key] = pickled
         except pickle.PickleError:
             pass
     return new_value
Example #31
    def _base_set(self, mode, key, value, timeout=None):
        if timeout is None:
            timeout = self.default_timeout
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        cursor.execute("SELECT COUNT(*) FROM %s" % table)
        num = cursor.fetchone()[0]
        now = timezone.now()
        now = now.replace(microsecond=0)
        if settings.USE_TZ:
            exp = datetime.utcfromtimestamp(time.time() + timeout)
        else:
            exp = datetime.fromtimestamp(time.time() + timeout)
        exp = exp.replace(microsecond=0)
        if num > self._max_entries:
            self._cull(db, cursor, now)
        pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        b64encoded = base64.b64encode(pickled)
        # The DB column is expecting a string, so make sure the value is a
        # string, not bytes. Refs #19274.
        if six.PY3:
            b64encoded = b64encoded.decode('latin1')
        cursor.execute(
            "SELECT cache_key, expires FROM %s "
            "WHERE cache_key = %%s" % table, [key])
        try:
            result = cursor.fetchone()
            if result and (mode == 'set' or
                           (mode == 'add' and result[1] < now)):
                cursor.execute(
                    "UPDATE %s SET value = %%s, expires = %%s "
                    "WHERE cache_key = %%s" % table, [
                        b64encoded,
                        connections[db].ops.value_to_db_datetime(exp), key
                    ])
            else:
                cursor.execute(
                    "INSERT INTO %s (cache_key, value, expires) "
                    "VALUES (%%s, %%s, %%s)" % table, [
                        key, b64encoded,
                        connections[db].ops.value_to_db_datetime(exp)
                    ])
        except DatabaseError:
            # To be threadsafe, updates/inserts are allowed to fail silently
            transaction.rollback_unless_managed(using=db)
            return False
        else:
            transaction.commit_unless_managed(using=db)
            return True
Example #32
    def dump(self, obj):
        """
        Return the pickled representation of the object as a bytes object.

        We use ``deepcopy()`` here to avoid a problem with cPickle, where
        ``dumps()`` can generate different character streams for same lookup
        value if they  are referenced differently.

        The reason this is important is because we do all of our lookups as
        simple string matches, thus the character streams must be the same for
        the lookups to work properly.

        """
        return pickle.dumps(deepcopy(obj), self.protocol)
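
The problem the docstring describes can be reproduced with plain pickle: two equal values can serialize to different byte streams when one of them contains shared references, which breaks lookups done by exact string comparison. A minimal demonstration:

import pickle

inner = (1, 2)
shared = [inner, inner]        # two references to the same tuple
separate = [(1, 2), (1, 2)]    # equal, but two distinct tuples

assert shared == separate
# The second element of `shared` is emitted as a memo reference, so the
# pickled streams differ even though the values compare equal.
assert pickle.dumps(shared, 0) != pickle.dumps(separate, 0)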
Example #33
    def encode(self, obj):
        """
        Take a Python object and return it as a tuple (value, value_type), a
        blob and a one-char code for what type it is
        """
        if self._is_valid_mysql_bigint(obj):
            return obj, 'i'

        value = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        value_type = 'p'
        if (self._compress_min_length
                and len(value) >= self._compress_min_length):
            value = zlib.compress(value, self._compress_level)
            value_type = 'z'
        return value, value_type
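
The corresponding decode step is not shown here; presumably it branches on the one-char type code. A hypothetical sketch:

import pickle
import zlib

def decode(self, value, value_type):
    # Hypothetical inverse of encode() above:
    # 'i' = bigint stored as-is, 'z' = zlib-compressed pickle, 'p' = plain pickle.
    if value_type == 'i':
        return int(value)
    if value_type == 'z':
        value = zlib.decompress(value)
    return pickle.loads(value)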
Example #34
def queue(users, label, extra_context=None, sender=None):
    """
    Queue the notification in NoticeQueueBatch. This allows for large amounts
    of user notifications to be deferred to a separate process running outside
    the webserver.
    """
    if extra_context is None:
        extra_context = {}
    if isinstance(users, QuerySet):
        users = [row["pk"] for row in users.values("pk")]
    else:
        users = [user.pk for user in users]
    notices = []
    for user in users:
        notices.append((user, label, extra_context, sender))
    NoticeQueueBatch(pickled_data=base64.b64encode(pickle.dumps(notices))).save()
Example #36
def _cache_store_items(cache, key, items, expiration, compress_large_data):
    """Store items in the cache.

    The items will be individually pickled and combined into a binary blob,
    which can then optionally be compressed. The resulting data is then
    cached over one or more keys, each representing a chunk about 1MB in size.

    A main cache key will be set that contains information on the other keys.
    """
    results = ((pickle.dumps(item), True, item) for item in items)

    if compress_large_data:
        results = _cache_compress_pickled_data(results)

    for item in _cache_store_chunks(results, key, expiration):
        yield item
Example #37
def pickle_dumps(obj):
    """Return a pickled representation of an object.

    This will always use Pickle protocol 0, which is the default on Python 2,
    for compatibility across Python 2 and 3.

    Args:
        obj (object):
            The object to dump.

    Returns:
        unicode:
        The Unicode pickled representation of the object, safe for storing
        in the database.
    """
    return pickle.dumps(obj, protocol=0).decode('latin1')
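
Reading such a value back requires undoing the latin1 text representation before unpickling. A possible counterpart (the name pickle_loads and the use of six here are assumptions, not taken from the source):

import pickle
import six

def pickle_loads(data):
    """Load an object stored by pickle_dumps() above (hypothetical helper)."""
    if isinstance(data, six.text_type):
        # Undo the latin1 decoding applied before database storage.
        data = data.encode('latin1')
    return pickle.loads(data)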
Example #38
    def test_cache_memoize_large_files_uncompressed_off_by_one(self):
        """Testing cache_memoize with large files without compression and
        one byte larger than an even chunk size."""
        cache_key = 'abc123'

        # This takes into account the size of the pickle data, and will
        # get us to just barely 3 chunks of data in cache.
        data = self._build_test_chunk_data(num_chunks=2)[0] + 'x'
        pickled_data = pickle.dumps(data, protocol=0)

        def cache_func():
            return data

        self.spy_on(cache_func, call_original=True)

        result = cache_memoize(cache_key,
                               cache_func,
                               large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)
        self.assertTrue(cache_func.spy.called)

        cache_key_0 = make_cache_key('%s-0' % cache_key)
        cache_key_1 = make_cache_key('%s-1' % cache_key)
        cache_key_2 = make_cache_key('%s-2' % cache_key)

        self.assertTrue(make_cache_key(cache_key) in cache)
        self.assertTrue(cache_key_0 in cache)
        self.assertTrue(cache_key_1 in cache)
        self.assertTrue(cache_key_2 in cache)
        self.assertFalse(make_cache_key('%s-3' % cache_key) in cache)

        # Verify the contents of the stored data.
        stored_data = b''.join(
            cache.get(cache_key_0) + cache.get(cache_key_1) +
            cache.get(cache_key_2))
        self.assertEqual(stored_data, pickled_data)

        # Try fetching the data we stored.
        cache_func.spy.reset_calls()

        result = cache_memoize(cache_key,
                               cache_func,
                               large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)
        self.assertFalse(cache_func.spy.called)
Example #39
    def encode(self, obj):
        """
        Take a Python object and return it as a tuple (value, value_type), a
        blob and a one-char code for what type it is
        """
        if self._is_valid_mysql_bigint(obj):
            return obj, 'i'

        value = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        value_type = 'p'
        if (
            self._compress_min_length and
            len(value) >= self._compress_min_length
        ):
            value = zlib.compress(value, self._compress_level)
            value_type = 'z'
        return value, value_type
Example #40
File: db.py Project: SlashRoot/django
    def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT, use_tz=False):
        if timeout == DEFAULT_TIMEOUT:
            timeout = self.default_timeout
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        cursor.execute("SELECT COUNT(*) FROM %s" % table)
        num = cursor.fetchone()[0]
        now = timezone.now()
        now = now.replace(microsecond=0)
        if timeout is None:
            exp = datetime.max
        elif use_tz:
            exp = datetime.utcfromtimestamp(time.time() + timeout)
        else:
            exp = datetime.fromtimestamp(time.time() + timeout)
        exp = exp.replace(microsecond=0)
        if num > self._max_entries:
            self._cull(db, cursor, now)
        pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        b64encoded = base64.b64encode(pickled)
        # The DB column is expecting a string, so make sure the value is a
        # string, not bytes. Refs #19274.
        if six.PY3:
            b64encoded = b64encoded.decode('latin1')
        try:
            with transaction.atomic(using=db):
                cursor.execute("SELECT cache_key, expires FROM %s "
                               "WHERE cache_key = %%s" % table, [key])
                result = cursor.fetchone()
                exp = connections[db].ops.value_to_db_datetime(exp)
                if result and (mode == 'set' or (mode == 'add' and result[1] < now)):
                    cursor.execute("UPDATE %s SET value = %%s, expires = %%s "
                                   "WHERE cache_key = %%s" % table,
                                   [b64encoded, exp, key])
                else:
                    cursor.execute("INSERT INTO %s (cache_key, value, expires) "
                                   "VALUES (%%s, %%s, %%s)" % table,
                                   [key, b64encoded, exp])
        except DatabaseError:
            # To be threadsafe, updates/inserts are allowed to fail silently
            return False
        else:
            return True
Example #41
def _cache_store_items(cache, key, items, expiration, compress_large_data):
    """Store items in the cache.

    The items will be individually pickled and combined into a binary blob,
    which can then optionally be compressed. The resulting data is then
    cached over one or more keys, each representing a chunk about 1MB in size.

    A main cache key will be set that contains information on the other keys.
    """
    # Note that we want to use pickle protocol 0 in order to be compatible
    # across both Python 2 and 3. On Python 2, 0 is the default.
    results = ((pickle.dumps(item, protocol=0), True, item) for item in items)

    if compress_large_data:
        results = _cache_compress_pickled_data(results)

    for item in _cache_store_chunks(results, key, expiration):
        yield item
Example #42
def _cache_store_items(cache, key, items, expiration, compress_large_data):
    """Store items in the cache.

    The items will be individually pickled and combined into a binary blob,
    which can then optionally be compressed. The resulting data is then
    cached over one or more keys, each representing a chunk about 1MB in size.

    A main cache key will be set that contains information on the other keys.
    """
    results = (
        (pickle.dumps(item), True, item)
        for item in items
    )

    if compress_large_data:
        results = _cache_compress_pickled_data(results)

    for item in _cache_store_chunks(results, key, expiration):
        yield item
Example #43
    def test_cache_memoize_large_files_uncompressed_off_by_one(self):
        """Testing cache_memoize with large files without compression and
        one byte larger than an even chunk size."""
        cache_key = 'abc123'

        # This takes into account the size of the pickle data, and will
        # get us to just barely 3 chunks of data in cache.
        data = self._build_test_chunk_data(num_chunks=2)[0] + 'x'
        pickled_data = pickle.dumps(data)

        def cache_func():
            return data

        self.spy_on(cache_func, call_original=True)

        result = cache_memoize(cache_key, cache_func, large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)
        self.assertTrue(cache_func.spy.called)

        cache_key_0 = make_cache_key('%s-0' % cache_key)
        cache_key_1 = make_cache_key('%s-1' % cache_key)
        cache_key_2 = make_cache_key('%s-2' % cache_key)

        self.assertTrue(make_cache_key(cache_key) in cache)
        self.assertTrue(cache_key_0 in cache)
        self.assertTrue(cache_key_1 in cache)
        self.assertTrue(cache_key_2 in cache)
        self.assertFalse(make_cache_key('%s-3' % cache_key) in cache)

        # Verify the contents of the stored data.
        stored_data = b''.join(cache.get(cache_key_0) +
                               cache.get(cache_key_1) +
                               cache.get(cache_key_2))
        self.assertEqual(stored_data, pickled_data)

        # Try fetching the data we stored.
        cache_func.spy.reset_calls()

        result = cache_memoize(cache_key, cache_func, large_data=True,
                               compress_large_data=False)
        self.assertEqual(result, data)
        self.assertFalse(cache_func.spy.called)
Example #44
    def submit(self, client, job):
        # Log tasks to DB, before submitting the batch, as mcpclient then updates them
        Task.bulk_log(self.tasks, job)

        data = {"tasks": {}}
        for task in self.tasks:
            task_uuid = six.text_type(task.uuid)
            data["tasks"][task_uuid] = self.serialize_task(task)

        pickled_data = cPickle.dumps(data)

        self.pending = client.submit_job(
            task=six.binary_type(job.name),
            data=pickled_data,
            unique=six.binary_type(self.uuid),
            wait_until_complete=False,
            background=False,
            max_retries=0,
        )
        logger.debug("Submitted gearman job %s (%s)", self.uuid, job.name)
Example #45
def _cache_store_items(cache, key, items, expiration, compress_large_data):
    """Store items in the cache.

    The items will be individually pickled and combined into a binary blob,
    which can then optionally be compressed. The resulting data is then
    cached over one or more keys, each representing a chunk about 1MB in size.

    A main cache key will be set that contains information on the other keys.
    """
    # Note that we want to use pickle protocol 0 in order to be compatible
    # across both Python 2 and 3. On Python 2, 0 is the default.
    results = (
        (pickle.dumps(item, protocol=0), True, item)
        for item in items
    )

    if compress_large_data:
        results = _cache_compress_pickled_data(results)

    for item in _cache_store_chunks(results, key, expiration):
        yield item
Example #46
File: db.py Project: MarcusT0/django
    def _base_set(self, mode, key, value, timeout=None):
        if timeout is None:
            timeout = self.default_timeout
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        cursor.execute("SELECT COUNT(*) FROM %s" % table)
        num = cursor.fetchone()[0]
        now = timezone.now()
        now = now.replace(microsecond=0)
        if settings.USE_TZ:
            exp = datetime.utcfromtimestamp(time.time() + timeout)
        else:
            exp = datetime.fromtimestamp(time.time() + timeout)
        exp = exp.replace(microsecond=0)
        if num > self._max_entries:
            self._cull(db, cursor, now)
        pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        encoded = base64.b64encode(pickled).strip()
        cursor.execute("SELECT cache_key, expires FROM %s "
                       "WHERE cache_key = %%s" % table, [key])
        try:
            result = cursor.fetchone()
            if result and (mode == 'set' or
                    (mode == 'add' and result[1] < now)):
                cursor.execute("UPDATE %s SET value = %%s, expires = %%s "
                               "WHERE cache_key = %%s" % table,
                               [encoded, connections[db].ops.value_to_db_datetime(exp), key])
            else:
                cursor.execute("INSERT INTO %s (cache_key, value, expires) "
                               "VALUES (%%s, %%s, %%s)" % table,
                               [key, encoded, connections[db].ops.value_to_db_datetime(exp)])
        except DatabaseError:
            # To be threadsafe, updates/inserts are allowed to fail silently
            transaction.rollback_unless_managed(using=db)
            return False
        else:
            transaction.commit_unless_managed(using=db)
            return True
Example #47
 def dumps(self, obj):
     return pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
Example #48
 def _dump(self, value):
     return pickle.dumps(value)
Example #49
 def _dump(self, value):
     return self._fernet.encrypt(pickle.dumps(value))
Example #50
File: base.py Project: zoori/django
 def encode(self, session_dict):
     "Returns the given session dictionary pickled and encoded as a string."
     pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
     hash = self._hash(pickled)
     return base64.b64encode(hash.encode() + b":" + pickled).decode('ascii')
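
Decoding reverses these steps: base64-decode, split off the hash, verify it, then unpickle. A sketch along those lines (base64, pickle and self._hash as in the example above; error handling and constant-time comparison are omitted for brevity):

def decode(self, session_data):
    encoded_data = base64.b64decode(session_data.encode('ascii'))
    hash, pickled = encoded_data.split(b':', 1)
    if hash.decode() != self._hash(pickled):
        return {}  # hash mismatch: treat the session as empty
    return pickle.loads(pickled)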
Example #51
"Thread-safe in-memory cache backend."
Example #52
    def _value_for_db_collection(self, value, field, field_kind, db_type,
                                 lookup):
        """
        Recursively converts values from AbstractIterableFields.

        Note that collection lookup values are plain values rather than
        lists, sets or dicts, but they still should be converted as a
        collection item (assuming all items or values are converted in
        the same way).

        We base the conversion on field class / kind and assume some
        knowledge about field internals (e.g. that the field has an
        "item_field" property that gives the right subfield for any of
        its values), to avoid adding a framework for determination of
        parameters for items' conversions; we do the conversion here
        rather than inside get_db_prep_save/lookup for symmetry with
        deconversion (which can't be in to_python because the method is
        also used for data not coming from the database).

        Returns a list, set, dict, string or bytes according to the
        db_type given.
        If the "list" db_type used for DictField, a list with keys and
        values interleaved will be returned (list of pairs is not good,
        because lists / tuples may need conversion themselves; the list
        may still be nested for dicts containing collections).
        The "string" and "bytes" db_types use serialization with pickle
        protocol 0 or 2 respectively.
        If an unknown db_type is specified, returns a generator
        yielding converted elements / pairs with converted values.
        """
        subfield, subkind, db_subtype = self._convert_as(
            field.item_field, lookup)

        # Do convert filter parameters.
        if lookup:
            # Special case where we are looking for an empty list
            if lookup == 'exact' and db_type == 'list' and value == u'[]':
                return []
            value = self._value_for_db(value, subfield, subkind, db_subtype,
                                       lookup)

        # Convert list/set items or dict values.
        else:
            if field_kind == 'DictField':

                # Generator yielding pairs with converted values.
                value = ((key,
                          self._value_for_db(subvalue, subfield, subkind,
                                             db_subtype, lookup))
                         for key, subvalue in six.iteritems(value))

                # Return just a dict, a once-flattened list;
                if db_type == 'dict':
                    return dict(value)
                elif db_type == 'list':
                    return list(item for pair in value for item in pair)

            else:

                # Generator producing converted items.
                value = (self._value_for_db(subvalue, subfield, subkind,
                                            db_subtype, lookup)
                         for subvalue in value)

                # "list" may be used for SetField.
                if db_type == 'list':
                    return list(value)
                elif db_type == 'set':
                    # assert field_kind != 'ListField'
                    return set(value)

            # Pickled formats may be used for all collection fields;
            # the field's "natural" type is serialized (something
            # concrete is needed, pickle can't handle generators :-)
            if db_type == 'bytes':
                return pickle.dumps(field._type(value), protocol=2)
            elif db_type == 'string':
                return pickle.dumps(field._type(value))

        # If nothing matched, pass the generator to the back-end.
        return value
Example #53
        num = cursor.fetchone()[0]
        now = timezone.now()
        now = now.replace(microsecond=0)

        if settings.USE_TZ: #use timezone
            exp = datetime.utcfromtimestamp(time.time() + timeout) 
        else:
            exp = datetime.fromtimestamp(time.time() + timeout)

        exp = exp.replace(microsecond=0)

        # maximum number of entries: cull the cache when it grows past the limit
        if num > self._max_entries:
            self._cull(db, cursor, now)

        pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        b64encoded = base64.b64encode(pickled)
        # The DB column is expecting a string, so make sure the value is a
        # string, not bytes. Refs #19274.
        if six.PY3:
            b64encoded = b64encoded.decode('latin1')
        cursor.execute("SELECT cache_key, expires FROM %s "
                       "WHERE cache_key = %%s" % table, [key])

        try:
            result = cursor.fetchone()
            if result and (mode == 'set' or
                    (mode == 'add' and result[1] < now)):  # update; result[1] < now means the entry has expired
                cursor.execute("UPDATE %s SET value = %%s, expires = %%s "
                               "WHERE cache_key = %%s" % table,
                               [b64encoded, connections[db].ops.value_to_db_datetime(exp), key])
Example #54
 def memo(*args, **kwargs):
     haxh = cPickle.dumps((args, sorted(kwargs.items())))
     if haxh not in memory:
         memory[haxh] = fctn(*args, **kwargs)
     return memory[haxh]
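
The memo function above is evidently the inner function of a memoization decorator (memory and fctn come from the enclosing scope). Reconstructed in full it would look roughly like this; pickling the arguments yields a hashable key even for unhashable values such as lists and dicts:

import pickle  # the example uses cPickle, the Python 2 C implementation

def memoize(fctn):
    memory = {}

    def memo(*args, **kwargs):
        # The pickled (args, sorted kwargs) tuple serves as the cache key.
        haxh = pickle.dumps((args, sorted(kwargs.items())))
        if haxh not in memory:
            memory[haxh] = fctn(*args, **kwargs)
        return memory[haxh]

    return memo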
Example #55
 def dumps(self, obj):
     return pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)