def query_url_mapping(self, filepath):
    """Searches the environment-wide url mapping (based on the urls
    assigned to each directory in the load path). Returns the correct
    url for ``filepath``.

    Subclasses should be sure that they really want to call this
    method, instead of simply falling back to ``super()``.

    :param filepath: Local filesystem path of the asset.
    :returns: The url under which ``filepath`` is served.
    :raises ValueError: If no mapped directory contains ``filepath``.
    """
    # Build a list of dir -> url mappings
    mapping = list(self.env.url_mapping.items())
    try:
        mapping.append((self.env.directory, self.env.url))
    except EnvironmentError:
        # Rarely, directory/url may not be set. That's ok.
        pass

    # Make sure paths are absolute, normalized, and sorted by length,
    # longest first, so the most specific directory wins.
    mapping = list(map(
        lambda p_u: (path.normpath(path.abspath(p_u[0])), p_u[1]),
        mapping))
    mapping.sort(key=lambda i: len(i[0]), reverse=True)

    needle = path.normpath(filepath)
    for candidate, url in mapping:
        if needle.startswith(candidate):
            # Found it! Slice the *normalized* path, not the raw
            # ``filepath``: the raw string may contain redundant
            # separators or ``.`` components, which would corrupt
            # the relative portion of the url.
            rel_path = needle[len(candidate) + 1:]
            return url_prefix_join(url, rel_path)
    raise ValueError('Cannot determine url for %s' % filepath)
def query_url_mapping(self, filepath):
    """Searches the environment-wide url mapping (based on the urls
    assigned to each directory in the load path). Returns the correct
    url for ``filepath``.

    Subclasses should be sure that they really want to call this
    method, instead of simply falling back to ``super()``.

    :param filepath: Local filesystem path of the asset.
    :returns: The url under which ``filepath`` is served.
    :raises ValueError: If no mapped directory contains ``filepath``.
    """
    # Build a list of dir -> url mappings
    mapping = list(self.env.url_mapping.items())
    try:
        mapping.append((self.env.directory, self.env.url))
    except EnvironmentError:
        # Rarely, directory/url may not be set. That's ok.
        pass

    # Make sure paths are absolute, normalized, and sorted by length,
    # longest first, so the most specific directory wins.
    mapping = list(
        map(lambda p_u: (path.normpath(path.abspath(p_u[0])), p_u[1]),
            mapping))
    mapping.sort(key=lambda i: len(i[0]), reverse=True)

    needle = path.normpath(filepath)
    for candidate, url in mapping:
        if needle.startswith(candidate):
            # Found it!
            rel_path = needle[len(candidate) + 1:]
            # If there are subdirectories in rel_path, make sure they
            # use HTML-style separators, in case the local OS
            # (Windows!) uses a different scheme.
            rel_path = rel_path.replace(path.sep, '/')
            return url_prefix_join(url, rel_path)
    raise ValueError('Cannot determine url for %s' % filepath)
def smartsplit(string, sep):
    """Split ``string`` on ``sep`` while allowing escaping.

    So far, this seems to do what I expect - split at the separator,
    allow escaping via a backslash, and allow the backslash itself to
    be escaped. One problem is that it can raise a ValueError when
    given a backslash without a character to escape.

    I'd really like a smart splitter without manually scanning the
    string. But maybe that is exactly what should be done.
    """
    assert string is not None   # or shlex will read from stdin
    if not six.PY3:
        # On 2.6, shlex fails miserably with unicode input
        is_unicode = isinstance(string, unicode)
        if is_unicode:
            string = string.encode('utf8')
    l = shlex.shlex(string, posix=True)
    # Treat the requested separator as whitespace, in addition to the
    # default whitespace characters. Previously a comma was
    # hard-coded here, silently ignoring the ``sep`` argument.
    l.whitespace += sep
    l.whitespace_split = True
    l.quotes = ''
    if not six.PY3 and is_unicode:
        return map(lambda s: s.decode('utf8'), list(l))
    else:
        return list(l)
def resolve_contents(self, ctx=None, force=False):
    """Return an actual list of source files.

    The contents the user specifies for a bundle cannot be used
    directly: there may be glob patterns, the load path may need
    searching, and third-party extensions commonly reference assets
    spread across multiple directories. Everything therefore goes
    through :class:`Environment.resolver`, which makes the process
    customizable, and source paths are validated here so missing
    files are reported early.

    The return value is a list of 2-tuples ``(original_item,
    abspath)``; for urls and nested bundles both tuple members are
    the same object.

    Set ``force`` to bypass the cache and re-resolve glob patterns.
    """
    if not ctx:
        ctx = wrap(self.env, self)

    # TODO: Caching here is theoretically problematic: changes to the
    # env object can alter the globbing result, and a different env
    # may be passed in on a later call. A proper fix is still needed.
    if force or getattr(self, '_resolved_contents', None) is None:
        collected = []
        for spec in self.contents:
            try:
                hits = ctx.resolver.resolve_source(ctx, spec)
            except IOError as exc:
                raise BundleError(exc)
            if not isinstance(hits, list):
                hits = [hits]

            # Keep the bundle's own output file out of its inputs.
            # TODO: Does not work for nested bundle contents, and
            # fails with multiple versions; should we do it at all?
            # TODO: Should glob duplicates be excluded as well?
            if self.output:
                try:
                    hits.remove(self.resolve_output(ctx))
                except (ValueError, BundleError):
                    pass

            collected.extend((spec, hit) for hit in hits)

        # A hook for sorting the bundle contents in a certain order
        if self.sort:
            collected = self.sort(ctx, collected)
        self._resolved_contents = collected
    return self._resolved_contents
def query_url_mapping(self, ctx, filepath):
    """Look up the url for ``filepath`` in the environment-wide url
    mapping (built from the urls assigned to each load-path
    directory) and return it.

    Subclasses should be sure that they really want to call this
    method, instead of simply falling back to ``super()``.
    """
    # Collect every dir -> url pair we know about.
    pairs = list(ctx.url_mapping.items())
    try:
        pairs.append((ctx.directory, ctx.url))
    except EnvironmentError:
        # Rarely, directory/url may not be set. That's ok.
        pass

    # Normalize to absolute paths and try the longest (most
    # specific) directories first.
    pairs = [(path.normpath(path.abspath(directory)), url)
             for directory, url in pairs]
    pairs.sort(key=lambda pair: len(pair[0]), reverse=True)

    target = path.normpath(filepath)
    for prefix, url in pairs:
        if not target.startswith(prefix):
            continue
        # Any subdirectories in the remainder must use HTML-style
        # separators, since the local OS (Windows!) may differ.
        remainder = target[len(prefix) + 1:].replace(os.sep, "/")
        return url_prefix_join(url, remainder)
    raise ValueError("Cannot determine url for %s" % filepath)
def resolve_contents(self, ctx=None, force=False):
    """Resolve the bundle contents into an actual list of sources.

    User-specified contents cannot be processed directly: glob
    patterns must be expanded, the load path searched, and
    third-party extensions may reference assets across multiple
    directories. Everything is passed through
    :class:`Environment.resolver`, which allows customization, and
    source paths are validated so missing files fail early.

    Returns a list of 2-tuples ``(original_item, abspath)``; for
    urls and nested bundles both tuple members are identical.

    Set ``force`` to ignore any cached value and re-resolve globs.
    """
    if not ctx:
        ctx = wrap(self.env, self)

    # TODO: Caching is theoretically problematic: env changes can
    # alter globbing results, and a different env object may be
    # passed in later. A real fix is still outstanding.
    cached = getattr(self, '_resolved_contents', None)
    if cached is None or force:
        pairs = []
        for entry in self.contents:
            try:
                sources = ctx.resolver.resolve_source(ctx, entry)
            except IOError as err:
                raise BundleError(err)
            if not isinstance(sources, list):
                sources = [sources]

            # Keep the bundle's own output file out of its inputs.
            # TODO: Does not work for nested bundle contents and
            # fails with multiple versions; reconsider doing it here.
            # TODO: Should glob duplicates be excluded as well?
            if self.output and self.merge:
                try:
                    sources.remove(self.resolve_output(ctx))
                except (ValueError, BundleError):
                    pass

            pairs += [(entry, src) for src in sources]
        self._resolved_contents = pairs
    return self._resolved_contents
def _set_filters(self, value): """Filters may be specified in a variety of different ways, including by giving their name; we need to make sure we resolve everything to an actual filter instance. """ if value is None: self._filters = () return if isinstance(value, six.string_types): # 333: Simplify w/o condition? if six.PY3: filters = map(str.strip, value.split(',')) else: filters = map(unicode.strip, unicode(value).split(',')) elif isinstance(value, (list, tuple)): filters = value else: filters = [value] self._filters = [get_filter(f) for f in filters]
def check_timestamps(self, bundle, ctx, o_modified=None):
    """Decide whether ``bundle`` needs rebuilding, judged by file
    modification timestamps.

    Returns ``False`` when everything is up to date, ``True`` when a
    regular source file is newer than the output, or the per-pass
    marker (``SKIP_CACHE``) when a watched dependency changed.

    ``o_modified`` (the output's timestamp) is passed down when
    recursing into nested bundles so it is resolved only once.
    """
    from .bundle import Bundle
    from webassets.version import TimestampVersion

    if not o_modified:
        try:
            resolved_output = bundle.resolve_output(ctx)
        except BundleError:
            # This exception will occur when the bundle output has
            # placeholder, but a version cannot be found. If the
            # user has defined a manifest, this will just be the first
            # build. Return True to let it happen.
            # However, if no manifest is defined, raise an error,
            # because otherwise, this updater would always return True,
            # and thus not do its job at all.
            if ctx.manifest is None:
                raise BuildError(
                    ('%s uses a version placeholder, and you are '
                     'using "%s" versions. To use automatic '
                     'building in this configuration, you need to '
                     'define a manifest.' % (bundle, ctx.versions)))
            return True
        try:
            o_modified = TimestampVersion.get_timestamp(resolved_output)
        except OSError:
            # If the output file does not exist, we'll have to rebuild
            return True

    # Recurse through the bundle hierarchy. Check the timestamp of all
    # the bundle source files, as well as any additional
    # dependencies that we are supposed to watch.
    from webassets.bundle import wrap
    # Two passes: bundle contents map to a plain ``True`` result;
    # extra dependencies map to ``SKIP_CACHE`` (presumably a truthy
    # sentinel also telling the caller to bypass the cache - confirm
    # against the updater/cache code).
    for iterator, result in (
            (lambda e: map(lambda s: s[1], bundle.resolve_contents(e)), True),
            (bundle.resolve_depends, SKIP_CACHE)):
        for item in iterator(ctx):
            if isinstance(item, Bundle):
                # Nested bundle: recurse, reusing the already-resolved
                # output timestamp.
                nested_result = self.check_timestamps(
                    item, wrap(ctx, item), o_modified)
                if nested_result:
                    return nested_result
            elif not is_url(item):
                # Local file: compare its mtime against the output's.
                try:
                    s_modified = TimestampVersion.get_timestamp(item)
                except OSError:
                    # If a file goes missing, always require
                    # a rebuild.
                    return result
                else:
                    if s_modified > o_modified:
                        return result
    return False
def check_timestamps(self, bundle, ctx, o_modified=None):
    """Decide whether ``bundle`` needs rebuilding, based on file
    modification timestamps.

    Returns ``False`` when up to date, ``True`` when a source file
    is newer than the output, or the per-pass marker (``SKIP_CACHE``)
    when a watched dependency changed. ``o_modified`` carries the
    output's timestamp into recursive calls for nested bundles.
    """
    from .bundle import Bundle
    from webassets.version import TimestampVersion

    if not o_modified:
        try:
            resolved_output = bundle.resolve_output(ctx)
        except BundleError:
            # This exception will occur when the bundle output has
            # placeholder, but a version cannot be found. If the
            # user has defined a manifest, this will just be the first
            # build. Return True to let it happen.
            # However, if no manifest is defined, raise an error,
            # because otherwise, this updater would always return True,
            # and thus not do its job at all.
            if ctx.manifest is None:
                raise BuildError((
                    '%s uses a version placeholder, and you are '
                    'using "%s" versions. To use automatic '
                    'building in this configuration, you need to '
                    'define a manifest.' % (bundle, ctx.versions)))
            return True
        try:
            o_modified = TimestampVersion.get_timestamp(resolved_output)
        except OSError:
            # If the output file does not exist, we'll have to rebuild
            return True

    # Recurse through the bundle hierarchy. Check the timestamp of all
    # the bundle source files, as well as any additional
    # dependencies that we are supposed to watch.
    from webassets.bundle import wrap
    # First pass walks the resolved contents (result: True); second
    # pass walks the extra dependencies (result: SKIP_CACHE,
    # presumably a truthy cache-bypass sentinel - confirm upstream).
    for iterator, result in (
        (lambda e: map(lambda s: s[1], bundle.resolve_contents(e)), True),
        (bundle.resolve_depends, SKIP_CACHE)
    ):
        for item in iterator(ctx):
            if isinstance(item, Bundle):
                # Nested bundle: recurse with the same output
                # timestamp so it is not resolved again.
                nested_result = self.check_timestamps(item,
                                                      wrap(ctx, item),
                                                      o_modified)
                if nested_result:
                    return nested_result
            elif not is_url(item):
                # Local file: compare its mtime against the output's.
                try:
                    s_modified = TimestampVersion.get_timestamp(item)
                except OSError:
                    # If a file goes missing, always require
                    # a rebuild.
                    return result
                else:
                    if s_modified > o_modified:
                        return result
    return False
def create_files(self, files):
    """Helper that allows to quickly create a bunch of files in the
    media directory of the current test run.

    ``files`` may be a dict mapping filename -> content, or any
    other iterable of filenames, in which case empty files are
    created. Intermediate directories are created as needed.
    """
    # Allow passing a list of filenames to create empty files
    if not hasattr(files, 'items'):
        files = dict((name, '') for name in files)
    for name, data in files.items():
        dirs = path.dirname(self.path(name))
        if not path.exists(dirs):
            os.makedirs(dirs)
        # Use a context manager so the handle is closed even if the
        # write fails (the previous open/close pair leaked it then).
        with open(self.path(name), 'w') as f:
            f.write(data)
def sequentize(string):
    """Notate consecutive characters as a sequence
    (e.g. ``1-4`` instead of ``1234``).
    """
    # Collect maximal runs of consecutive character codes.
    runs = []
    start = end = None
    for code in (ord(ch) for ch in string):
        if end is None:
            start = end = code
        elif code == end + 1:
            end = code
        else:
            runs.append((start, end))
            start = end = code
    if end is not None:
        runs.append((start, end))

    # Render each run: a dash only for runs of three or more, and
    # the end character only when the run spans more than one.
    pieces = []
    for start, end in runs:
        dash = '-' if end > start + 1 else ''
        tail = chr(end) if end != start else ''
        pieces.append('%s%s%s' % (chr(start), dash, tail))
    return ''.join(pieces)
def sequentize(string):
    """Notate consecutive characters as a sequence
    (e.g. ``1-4`` instead of ``1234``).
    """
    codes = [ord(ch) for ch in string]
    groups = []
    idx = 0
    # Walk the code points, swallowing each maximal consecutive run.
    while idx < len(codes):
        lo = hi = codes[idx]
        while idx + 1 < len(codes) and codes[idx + 1] == hi + 1:
            idx += 1
            hi = codes[idx]
        groups.append((lo, hi))
        idx += 1

    out = []
    for lo, hi in groups:
        out.append(chr(lo))
        # Dash only for runs of three or more characters.
        if hi > lo + 1:
            out.append('-')
        # End character only when the run has more than one member.
        if hi != lo:
            out.append(chr(hi))
    return ''.join(out)
def load_builtin_filters():
    """Import every module in this package and register all
    :class:`Filter` subclasses found in them; filter classes without
    a ``name`` are treated as abstract bases and skipped.
    """
    from os import path
    import warnings
    # load modules to work based with and without pyinstaller
    # from: https://github.com/webcomics/dosage/blob/master/dosagelib/loader.py
    # see: https://github.com/pyinstaller/pyinstaller/issues/1905
    # load modules using iter_modules()
    # (should find all filters in normal build, but not pyinstaller)
    prefix = __name__ + '.'
    module_names = [m[1] for m in pkgutil.iter_modules(__path__, prefix)]
    # special handling for PyInstaller: its importers expose a ``toc``
    # attribute listing all bundled module names, which iter_modules()
    # above does not see.
    importers = map(pkgutil.get_importer, __path__)
    toc = set()
    for i in importers:
        if hasattr(i, 'toc'):
            toc |= i.toc
    for elm in toc:
        # Only modules belonging to this package.
        if elm.startswith(prefix):
            module_names.append(elm)

    for module_name in module_names:
        #module_name = 'webassets.filter.%s' % name
        try:
            module = import_module(module_name)
        except Exception as e:
            # A broken filter module must not break the whole load;
            # warn and continue with the remaining modules.
            warnings.warn('Error while loading builtin filter '
                          'module \'%s\': %s' % (module_name, e))
        else:
            for attr_name in dir(module):
                attr = getattr(module, attr_name)
                if inspect.isclass(attr) and issubclass(attr, Filter):
                    if not attr.name:
                        # Skip if filter has no name; those are
                        # considered abstract base classes.
                        continue
                    register_filter(attr)
def _evaluate(self, args, kwargs, out, data=None): # For now, still support Python 2.5, but the format strings in argv # are not supported (making the feature mostly useless). For this # reason none of the builtin filters is using argv currently. if hasattr(str, 'format'): # Add 'self' to the keywords available in format strings kwargs = kwargs.copy() kwargs.update({'self': self}) # Resolve all the format strings in argv def replace(arg): try: return arg.format(*args, **kwargs) except KeyError as e: # Treat "output" and "input" variables special, they # are dealt with in :meth:`subprocess` instead. if e.args[0] not in ('input', 'output'): raise return arg argv = list(map(replace, self.argv)) else: argv = self.argv self.subprocess(argv, out, data=data)
def subprocess(cls, argv, out, data=None, cwd=None):
    """Execute the commandline given by the list in ``argv``.

    If a bytestring is given via ``data``, it is piped into the
    process on stdin. If ``cwd`` is not None, the process will be
    executed in that directory.

    ``argv`` may contain two placeholders:

    ``{input}``
        If given, ``data`` will be written to a temporary file
        instead of being passed on stdin. The placeholder is then
        replaced with that file's name.

    ``{output}``
        Will be replaced by a temporary filename. The content of
        this file, rather than stdout, is then written to ``out``.
    """
    class tempfile_on_demand(object):
        # str.format() renders the placeholder via __repr__(), which
        # lazily creates the temp file the first time it is needed.
        def __repr__(self):
            if not hasattr(self, 'filename'):
                fd, self.filename = tempfile.mkstemp()
                # Close the low-level handle right away; the file is
                # later reopened by name.
                os.close(fd)
            return self.filename

        @property
        def created(self):
            # True once __repr__ ran, i.e. the placeholder occurred.
            return hasattr(self, 'filename')

    # Replace input and output placeholders
    input_file = tempfile_on_demand()
    output_file = tempfile_on_demand()
    if hasattr(str, 'format'):  # Support Python 2.5 without the feature
        argv = list(map(lambda item: item.format(input=input_file,
                                                 output=output_file),
                        argv))

    try:
        # Accept file-like objects as well as plain (byte)strings.
        data = (data.read() if hasattr(data, 'read') else data)
        if data is not None:
            data = data.encode('utf-8')

        if input_file.created:
            if data is None:
                raise ValueError(
                    '{input} placeholder given, but no data passed')
            with open(input_file.filename, 'wb') as f:
                f.write(data)
            # No longer pass to stdin
            data = None

        try:
            proc = subprocess.Popen(
                argv,
                # we cannot use the in/out streams directly, as they might be
                # StringIO objects (which are not supported by subprocess)
                stdout=subprocess.PIPE,
                stdin=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=cwd,
                # NOTE(review): shell=True on Windows - presumably so
                # extension-less commands / .bat wrappers resolve;
                # assumes argv content is trusted.
                shell=os.name == 'nt')
        except OSError:
            raise FilterError('Program file not found: %s.' % argv[0])

        stdout, stderr = proc.communicate(data)
        if proc.returncode:
            raise FilterError(
                '%s: subprocess returned a non-success result code: '
                '%s, stdout=%s, stderr=%s' % (
                    cls.name or cls.__name__, proc.returncode,
                    stdout, stderr))
        else:
            if output_file.created:
                with open(output_file.filename, 'rb') as f:
                    out.write(f.read().decode('utf-8'))
            else:
                out.write(stdout.decode('utf-8'))
    finally:
        # Always clean up whatever temp files were actually created.
        if output_file.created:
            os.unlink(output_file.filename)
        if input_file.created:
            os.unlink(input_file.filename)
def subprocess(cls, argv, out, data=None):
    """Execute the commandline given by the list in ``argv``.

    If a bytestring is given via ``data``, it is piped into the
    process on stdin.

    ``argv`` may contain two placeholders:

    ``{input}``
        If given, ``data`` will be written to a temporary file
        instead of being passed on stdin. The placeholder is then
        replaced with that file's name.

    ``{output}``
        Will be replaced by a temporary filename. The content of
        this file, rather than stdout, is then written to ``out``.
    """
    class tempfile_on_demand(object):
        # str.format() renders the placeholder via __repr__(), which
        # lazily creates the temp file (fd kept open for fdopen below).
        def __repr__(self):
            if not hasattr(self, "filename"):
                self.fd, self.filename = tempfile.mkstemp()
            return self.filename

        @property
        def created(self):
            # True once __repr__ ran, i.e. the placeholder occurred.
            return hasattr(self, "filename")

    # Replace input and output placeholders
    input_file = tempfile_on_demand()
    output_file = tempfile_on_demand()
    if hasattr(str, "format"):  # Support Python 2.5 without the feature
        argv = list(map(lambda item: item.format(input=input_file,
                                                 output=output_file),
                        argv))

    try:
        if input_file.created:
            # NOTE(review): ``not data`` also rejects an empty string,
            # not just None - confirm whether empty input should be
            # allowed here.
            if not data:
                raise ValueError(
                    "{input} placeholder given, but no data passed")
            with os.fdopen(input_file.fd, "w") as f:
                # Accept file-like objects as well as plain strings.
                f.write(data.read() if hasattr(data, "read") else data)
            # No longer pass to stdin
            data = None

        proc = subprocess.Popen(
            argv,
            # we cannot use the in/out streams directly, as they might be
            # StringIO objects (which are not supported by subprocess)
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        # Normalize data for communicate(): read file-likes, encode
        # text to bytes.
        data = data.read() if hasattr(data, "read") else data
        if data is not None:
            data = data.encode("utf-8")
        stdout, stderr = proc.communicate(data)
        if proc.returncode:
            raise FilterError(
                "%s: subprocess returned a non-success result code: "
                "%s, stdout=%s, stderr=%s"
                % (cls.name or cls.__name__, proc.returncode,
                   stdout, stderr)
            )
        else:
            if output_file.created:
                with os.fdopen(output_file.fd, "r") as f:
                    out.write(f.read())
            else:
                out.write(stdout.decode("utf-8"))
    finally:
        # Always clean up whatever temp files were actually created.
        if output_file.created:
            os.unlink(output_file.filename)
        if input_file.created:
            os.unlink(input_file.filename)
def subprocess(cls, argv, out, data=None):
    """Execute the commandline given by the list in ``argv``.

    If a bytestring is given via ``data``, it is piped into the
    process on stdin.

    ``argv`` may contain two placeholders:

    ``{input}``
        If given, ``data`` will be written to a temporary file
        instead of being passed on stdin. The placeholder is then
        replaced with that file's name.

    ``{output}``
        Will be replaced by a temporary filename. The content of
        this file, rather than stdout, is then written to ``out``.
    """
    class tempfile_on_demand(object):
        # str.format() renders the placeholder via __repr__(), which
        # lazily creates the temp file (fd kept open for fdopen below).
        def __repr__(self):
            if not hasattr(self, 'filename'):
                self.fd, self.filename = tempfile.mkstemp()
            return self.filename

        @property
        def created(self):
            # True once __repr__ ran, i.e. the placeholder occurred.
            return hasattr(self, 'filename')

    # Replace input and output placeholders
    input_file = tempfile_on_demand()
    output_file = tempfile_on_demand()
    if hasattr(str, 'format'):  # Support Python 2.5 without the feature
        argv = list(
            map(
                lambda item: item.format(input=input_file,
                                         output=output_file),
                argv))

    try:
        # Accept file-like objects as well as plain (byte)strings.
        data = (data.read() if hasattr(data, 'read') else data)
        if data is not None:
            data = data.encode('utf-8')

        if input_file.created:
            # NOTE(review): ``not data`` also rejects empty bytes,
            # not just None - confirm whether empty input should be
            # allowed here.
            if not data:
                raise ValueError(
                    '{input} placeholder given, but no data passed')
            with os.fdopen(input_file.fd, 'wb') as f:
                f.write(data)
            # No longer pass to stdin
            data = None

        proc = subprocess.Popen(
            argv,
            # we cannot use the in/out streams directly, as they might be
            # StringIO objects (which are not supported by subprocess)
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE,
            # NOTE(review): shell=True on Windows - presumably so
            # extension-less commands / .bat wrappers resolve; assumes
            # argv content is trusted.
            shell=os.name == 'nt')
        stdout, stderr = proc.communicate(data)
        if proc.returncode:
            raise FilterError(
                '%s: subprocess returned a non-success result code: '
                '%s, stdout=%s, stderr=%s' % (
                    cls.name or cls.__name__, proc.returncode,
                    stdout, stderr))
        else:
            if output_file.created:
                with open(output_file.filename, 'rb') as f:
                    out.write(f.read().decode('utf-8'))
            else:
                out.write(stdout.decode('utf-8'))
    finally:
        # Always clean up whatever temp files were actually created.
        if output_file.created:
            os.unlink(output_file.filename)
        if input_file.created:
            os.unlink(input_file.filename)
# The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'webassets' copyright = u'2009, 2010, Michael Elsdörfer' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = ".".join(map(str, webassets.__version__)) # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = []