Example #1
def test_VersatileImageField_picklability(self):
    """
    Ensures VersatileImageField instances can be pickled/unpickled.
    """
    with open("pickletest.p", "wb") as f:
        cPickle.dump(self.jpg, f)
    with open("pickletest.p", "rb") as f:
        jpg_unpickled = cPickle.load(f)
    self.assertEqual(
        jpg_unpickled.image.thumbnail['100x100'].url,
        '/media/__sized__/python-logo-thumbnail-100x100.jpg'
    )
    pickled_state = self.jpg.image.__getstate__()
    self.assertEqual(
        pickled_state,
        {
            '_create_on_demand': False,
            '_committed': True,
            '_file': None,
            'name': 'python-logo.jpg',
            'closed': False
        }
    )
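Note that the state returned by __getstate__ carries no open file handle (_file is None), which is what makes the field value safe to pickle in the first place.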
Example #2
    def get(self, key, default=None, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)

        fname = self._key_to_file(key)
        try:
            with open(fname, 'rb') as f:
                exp = pickle.load(f)
                now = time.time()
                if exp < now:
                    self._delete(fname)
                else:
                    return pickle.load(f)
        except (IOError, OSError, EOFError, pickle.PickleError):
            pass
        return default
Example #3
    def get(self, key, default=None, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)

        fname = self._key_to_file(key)
        try:
            with open(fname, 'rb') as f:
                exp = pickle.load(f)
                now = time.time()
                if exp is not None and exp < now:
                    self._delete(fname)
                else:
                    return pickle.load(f)
        except (IOError, OSError, EOFError, pickle.PickleError):
            pass
        return default
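The only difference from Example #2 is the exp is not None guard: it lets a stored expiry of None mean "never expires", where the bare comparison would delete the entry on Python 2 and raise a TypeError on Python 3.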
Example #4
def test_VersatileImageField_picklability(self):
    """
    Ensure VersatileImageField instances can be pickled/unpickled.
    """
    with open("pickletest.p", "wb") as f:
        cPickle.dump(self.jpg, f)
    with open("pickletest.p", "rb") as f:
        jpg_unpickled = cPickle.load(f)
    self.assertEqual(
        jpg_unpickled.image.thumbnail['100x100'].url,
        '/media/__sized__/python-logo-thumbnail-100x100-70.jpg'
    )
    pickled_state = self.jpg.image.__getstate__()
    self.assertEqual(
        pickled_state,
        {
            '_create_on_demand': False,
            '_committed': True,
            '_file': None,
            'name': 'python-logo.jpg',
            'closed': False
        }
    )
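This is near-identical to Example #1; only the expected URL differs, carrying a quality suffix (-70) in the sized-image filename, presumably from a different version of the same library.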
Example #5
 def _is_expired(self, f):
     """
     Takes an open cache file and determines if it has expired,
     deletes the file if it is has passed its expiry time.
     """
     exp = pickle.load(f)
     if exp is not None and exp < time.time():
         f.close()  # On Windows a file has to be closed before deleting
         self._delete(f.name)
         return True
     return False
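No caller of this helper appears on this page; below is a minimal sketch of how a file-based get() could use it, assuming the same backend internals (make_key, validate_key, _key_to_file) as Examples #2 and #3:

def get(self, key, default=None, version=None):
    key = self.make_key(key, version=version)
    self.validate_key(key)
    try:
        with open(self._key_to_file(key), 'rb') as f:
            # _is_expired() consumes the leading expiry pickle; if the
            # entry is still fresh, the next load yields the cached value.
            if not self._is_expired(f):
                return pickle.load(f)
    except (IOError, OSError, EOFError, pickle.PickleError):
        pass
    return default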
Example #6
def _cache_fetch_large_data(cache, key, compress_large_data):
    """Fetch one or more large data items from the cache.

    The main cache key indicating the number of chunks will be read, followed
    by each of the chunks. If any chunks are missing, a MissingChunkError
    will be raised immediately.

    The data is then combined and optionally uncompressed. The unpickled
    results are then yielded to the caller on-demand.
    """
    chunk_count = int(cache.get(make_cache_key(key)))

    chunk_keys = [
        make_cache_key('%s-%d' % (key, i))
        for i in range(chunk_count)
    ]
    chunks = cache.get_many(chunk_keys)

    # Check that we have all the keys we expect, before we begin generating
    # values. We don't want to waste effort loading anything, and we want to
    # pass an error about missing keys to the caller up-front before we
    # generate anything.
    if len(chunks) != chunk_count:
        missing_keys = sorted(set(chunk_keys) - set(chunks.keys()))
        logging.debug('Cache miss for key(s): %s.' % ', '.join(missing_keys))

        raise MissingChunkError

    # Process all the chunks and decompress them at once, instead of
    # streaming the results. It's faster for any reasonably-sized data in
    # cache. We'll stream the unpickled results instead.
    data = b''.join(
        chunks[chunk_key][0]
        for chunk_key in chunk_keys
    )

    if compress_large_data:
        data = zlib.decompress(data)

    fp = StringIO(data)

    try:
        # Unpickle all the items we're expecting from the cached data.
        #
        # There will only be one item in the case of old-style cache data.
        while True:
            try:
                yield pickle.load(fp)
            except EOFError:
                return
    except Exception as e:
        logging.warning('Unpickle error for cache key "%s": %s.' % (key, e))
        raise
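The storing side of this chunked protocol is not shown on this page. A minimal sketch of what it could look like follows; the name _cache_store_large_data and the chunk size are assumptions, while make_cache_key and the cache object are the same helpers the fetch code relies on:

import pickle
import zlib

CHUNK_SIZE = 2 ** 20  # hypothetical chunk size: 1 MB per cache entry

def _cache_store_large_data(cache, key, items, compress_large_data):
    """Store pickled items in the cache as numbered chunks.

    Writes the chunk count under the main key and each chunk under
    '<key>-<i>', which is exactly the layout _cache_fetch_large_data
    reads back.
    """
    data = b''.join(pickle.dumps(item) for item in items)

    if compress_large_data:
        data = zlib.compress(data)

    chunks = [data[i:i + CHUNK_SIZE]
              for i in range(0, len(data), CHUNK_SIZE)]

    for i, chunk in enumerate(chunks):
        # The fetch side indexes each stored value with [0], so every
        # chunk is wrapped in a single-element list.
        cache.set(make_cache_key('%s-%d' % (key, i)), [chunk])

    cache.set(make_cache_key(key), len(chunks))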
Example #7
 def has_key(self, key, version=None):
     key = self.make_key(key, version=version)
     self.validate_key(key)
     fname = self._key_to_file(key)
     try:
         with open(fname, 'rb') as f:
             exp = pickle.load(f)
         now = time.time()
         if exp < now:
             self._delete(fname)
             return False
         else:
             return True
     except (IOError, OSError, EOFError, pickle.PickleError):
         return False
Example #8
    def test_createmap(self):
        # save old value
        old_map = None
        map_set = hasattr(settings, 'COMPUTEDFIELDS_MAP')
        if map_set:
            old_map = settings.COMPUTEDFIELDS_MAP

        # should not fail
        settings.COMPUTEDFIELDS_MAP = os.path.join(settings.BASE_DIR,
                                                   'map.test')
        call_command('createmap', verbosity=0)
        with open(os.path.join(settings.BASE_DIR, 'map.test'), 'rb') as f:
            map = pickle.load(f)
            self.assertDictEqual(map, ComputedFieldsModelType._map)
        os.remove(os.path.join(settings.BASE_DIR, 'map.test'))

        # restore old value
        if map_set:
            settings.COMPUTEDFIELDS_MAP = old_map
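The createmap command exercised here is what produces the pickled map file that _resolve_dependencies loads via settings.COMPUTEDFIELDS_MAP in Example #11.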
Example #9
def _cache_iter_large_data(data, key):
    """Iterate through large data that was fetched from the cache.

    This will unpickle the large data previously fetched through
    _cache_fetch_large_data, and yield each object to the caller.
    """
    fp = StringIO(data)

    try:
        # Unpickle all the items we're expecting from the cached data.
        #
        # There will only be one item in the case of old-style cache data.
        while True:
            try:
                yield pickle.load(fp)
            except EOFError:
                return
    except Exception as e:
        logging.warning('Unpickle error for cache key "%s": %s.' % (key, e))
        raise
Example #10
def _cache_iter_large_data(data, key):
    """Iterate through large data that was fetched from the cache.

    This will unpickle the large data previously fetched through
    _cache_fetch_large_data, and yield each object to the caller.
    """
    fp = io.BytesIO(data)

    try:
        # Unpickle all the items we're expecting from the cached data.
        #
        # There will only be one item in the case of old-style cache data.
        while True:
            try:
                yield pickle.load(fp)
            except EOFError:
                return
    except Exception as e:
        logger.warning('Unpickle error for cache key "%s": %s.' % (key, e))
        raise
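This is the Python 3 counterpart of Example #9: pickle works on bytes, so the in-memory buffer is io.BytesIO rather than StringIO, and the warning goes through a module-level logger instead of the root logging module.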
Example #11
    def _resolve_dependencies(mcs, force=False, _force=False):
        """
        This method triggers all the ugly stuff.
        Without providing a map file the calculations are done
        once per process by ``app.ready``. The steps are:
            - create a graph of the dependencies
            - cycling check
            - remove redundant paths
            - create final resolver lookup map

        Since these steps are very expensive, you should consider
        using a map file for production mode. This method will then
        transparently load the map file, skipping graph and map
        creation on every process start.

        NOTE: The test cases rely on runtime overrides of the
        computed model field dependencies and therefore override the
        "once per process" rule with ``_force``. Don't use this
        for your regular model development. If you really need to
        force the recreation of the graph and map, use ``force`` instead.
        Never do this at runtime in a multithreaded environment or hell
        will break loose. You have been warned ;)
        """
        with mcs._lock:
            if mcs._map_loaded and not _force:
                return
            if (getattr(settings, 'COMPUTEDFIELDS_MAP', False)
                    and not force and not _force):
                from django.utils.six.moves import cPickle as pickle
                with open(settings.COMPUTEDFIELDS_MAP, 'rb') as f:
                    mcs._map = pickle.load(f)
                    mcs._map_loaded = True
                return
            mcs._graph = ComputedModelsGraph(mcs._computed_models)
            if not getattr(settings, 'COMPUTEDFIELDS_ALLOW_RECURSION', False):
                mcs._graph.remove_redundant()
            mcs._map = ComputedFieldsModelType._graph.generate_lookup_map()
            mcs._map_loaded = True
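The map file loaded above is written by the createmap command from Example #8. A minimal sketch of that writing side, assuming the lookup map has already been resolved (the function name write_pickled_map is hypothetical):

import pickle

def write_pickled_map(path, lookup_map):
    # Persist the resolved lookup map so _resolve_dependencies can load
    # it from settings.COMPUTEDFIELDS_MAP instead of rebuilding the
    # dependency graph in every process.
    with open(path, 'wb') as f:
        pickle.dump(lookup_map, f, pickle.HIGHEST_PROTOCOL)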
Example #12
    def get(self, key, default=None, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)

        fname = self._key_to_file(key)
        try:
            with open(fname, 'rb') as f:  # file object
                exp = pickle.load(f)  # read the expiry time
                now = time.time()

                if exp < now:  # already expired
                    self._delete(fname)

                else:
                    return pickle.load(f)

        except (IOError, OSError, EOFError, pickle.PickleError):
            pass
        return default

    def set(self, key, value, timeout=None, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)

        fname = self._key_to_file(key)
        dirname = os.path.dirname(fname)

        if timeout is None:
            timeout = self.default_timeout

        self._cull()

        try:
            if not os.path.exists(dirname):
                os.makedirs(dirname)

            with open(fname, 'wb') as f:
                now = time.time()
                # Write the expiry timestamp first and the value second,
                # mirroring the two pickle.load() calls in get() above.
                pickle.dump(now + timeout, f, pickle.HIGHEST_PROTOCOL)
                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
        except (IOError, OSError):
            pass