Example #1
    def post_process(self, paths, dry_run=False, **options):
        """
        Post process the given list of files (called from collectstatic).
        """
        processed_files = []
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return processed_files

        # delete cache of all handled paths
        self.cache.delete_many([self.cache_key(path) for path in paths])

        # only try processing the files we have patterns for
        matches = lambda path: matches_patterns(path, self._patterns.keys())
        processing_paths = [path for path in paths if matches(path)]

        # then sort the files by the directory level
        path_level = lambda name: len(name.split(os.sep))
        for name in sorted(paths, key=path_level, reverse=True):

            # first get a hashed name for the given file
            hashed_name = self.hashed_name(name)

            original_file = self.open(name)
            try:
                # then get the original's file content
                content = original_file.read()

                # to apply each replacement pattern on the content
                if name in processing_paths:
                    converter = self.url_converter(name)
                    for patterns in self._patterns.values():
                        for pattern in patterns:
                            content = pattern.sub(converter, content)

                # then save the processed result
                if self.exists(hashed_name):
                    self.delete(hashed_name)

                saved_name = self._save(hashed_name, ContentFile(content))
                hashed_name = force_unicode(saved_name.replace('\\', '/'))
                processed_files.append(hashed_name)

                # and then set the cache accordingly
                self.cache.set(self.cache_key(name), hashed_name)
            finally:
                original_file.close()

        return processed_files
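
This early variant simply returns the list of hashed names once everything has been written. A minimal caller sketch, with a hypothetical storage class standing in for whichever class defines the method above:

    # Hypothetical: MyHashedStorage is assumed to define the post_process
    # method shown above; the relative paths are made-up examples.
    storage = MyHashedStorage()
    for hashed_name in storage.post_process(['css/base.css', 'js/app.js']):
        print('Post-processed: %s' % hashed_name)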
Example #2
    def post_process(self, paths, dry_run=False, fail_silently=False):
        """
        Post process the given list of files (called from collectstatic).

        Processing is actually two separate operations:

        1. renaming files to include a hash of their content for cache-busting,
           and copying those files to the target storage.
        2. adjusting files which contain references to other files so they
           refer to the cache-busting filenames.

        If either of these are performed on a file, then that file is considered
        post-processed.
        """
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return

        # where to store the new paths
        hashed_paths = {}

        # build a list of adjustable files
        matches = lambda path: matches_patterns(path, self._patterns.keys())
        adjustable_paths = [path for path in paths if matches(path)]

        # then sort the files by the directory level
        path_level = lambda name: len(name.split(os.sep))
        for name in sorted(paths.keys(), key=path_level, reverse=True):

            # use the original, local file, not the copied-but-unprocessed
            # file, which might be somewhere far away, like S3
            storage, path = paths[name]
            with storage.open(path) as original_file:

                # generate the hash with the original content, even for
                # adjustable files.
                hashed_name = self.hashed_name(name, original_file)

                # then get the original's file content..
                if hasattr(original_file, 'seek'):
                    original_file.seek(0)

                hashed_file_exists = self.exists(hashed_name)
                processed = False

                # ..to apply each replacement pattern to the content
                if name in adjustable_paths:
                    content = original_file.read()
                    converter = self.url_converter(name, fail_silently)
                    for patterns in self._patterns.values():
                        for pattern in patterns:
                            content = pattern.sub(converter, content)
                    if hashed_file_exists:
                        self.delete(hashed_name)
                    # then save the processed result
                    content_file = ContentFile(smart_str(content))
                    saved_name = self._save(hashed_name, content_file)
                    hashed_name = force_unicode(saved_name.replace('\\', '/'))
                    processed = True
                else:
                    # or handle the case in which neither processing nor
                    # a change to the original file happened
                    if not hashed_file_exists:
                        processed = True
                        saved_name = self._save(hashed_name, original_file)
                        hashed_name = force_unicode(saved_name.replace('\\', '/'))

                # and then set the cache accordingly
                hashed_paths[self.cache_key(name)] = hashed_name
                yield name, hashed_name, processed

        self.cache.set_many(hashed_paths)
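
Unlike the first example, this variant is a generator: paths maps each name to a (storage, path) pair, every handled file is yielded as a (name, hashed_name, processed) tuple, and the cache is written in one batch at the end via set_many. A minimal consumption sketch under the same assumptions (both storage objects are hypothetical):

    # Hypothetical: hashed_storage defines the generator above and
    # source_storage holds the original, unprocessed files.
    paths = {'css/base.css': (source_storage, 'css/base.css')}
    for name, hashed_name, processed in hashed_storage.post_process(paths):
        if processed:
            print("Post-processed '%s' as '%s'" % (name, hashed_name))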
Example #3

    def post_process(self, paths, dry_run=False, **options):
        """
        Post process the given list of files (called from collectstatic).

        Processing is actually two separate operations:

        1. renaming files to include a hash of their content for cache-busting,
           and copying those files to the target storage.
        2. adjusting files which contain references to other files so they
           refer to the cache-busting filenames.

        If either of these are performed on a file, then that file is considered
        post-processed.
        """
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return

        # delete cache of all handled paths
        self.cache.delete_many([self.cache_key(path) for path in paths])

        # build a list of adjustable files
        matches = lambda path: matches_patterns(path, self._patterns.keys())
        adjustable_paths = [path for path in paths if matches(path)]

        # then sort the files by the directory level
        path_level = lambda name: len(name.split(os.sep))
        for name in sorted(paths.keys(), key=path_level, reverse=True):

            # use the original, local file, not the copied-but-unprocessed
            # file, which might be somewhere far away, like S3
            storage, path = paths[name]
            with storage.open(path) as original_file:

                # generate the hash with the original content, even for
                # adjustable files.
                hashed_name = self.hashed_name(name, original_file)

                # then get the original's file content..
                if hasattr(original_file, 'seek'):
                    original_file.seek(0)

                hashed_file_exists = self.exists(hashed_name)
                processed = False

                # ..to apply each replacement pattern to the content
                if name in adjustable_paths:
                    content = original_file.read()
                    converter = self.url_converter(name)
                    for patterns in self._patterns.values():
                        for pattern in patterns:
                            content = pattern.sub(converter, content)
                    if hashed_file_exists:
                        self.delete(hashed_name)
                    # then save the processed result
                    content_file = ContentFile(smart_str(content))
                    saved_name = self._save(hashed_name, content_file)
                    hashed_name = force_unicode(saved_name.replace('\\', '/'))
                    processed = True
                else:
                    # or handle the case in which neither processing nor
                    # a change to the original file happened
                    if not hashed_file_exists:
                        processed = True
                        saved_name = self._save(hashed_name, original_file)
                        hashed_name = force_unicode(
                            saved_name.replace('\\', '/'))

                # and then set the cache accordingly
                self.cache.set(self.cache_key(name), hashed_name)
                yield name, hashed_name, processed
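
The last variant differs from the previous one mainly in its cache handling: it invalidates every key up front with delete_many and writes each entry as it goes, instead of batching the writes with set_many at the end. A minimal sketch of the two strategies using Django's low-level cache API (the keys and values here are made up):

    from django.core.cache import cache

    # Last example: invalidate up front, then write per file.
    cache.delete_many(['staticfiles:css/base.css'])
    cache.set('staticfiles:css/base.css', 'css/base.1a2b3c4d.css')

    # Previous example: collect everything, then write in one batch.
    cache.set_many({'staticfiles:css/base.css': 'css/base.1a2b3c4d.css'})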