def _rebase(self, url):
    # Rewrite a url found inside a generated file so it points at the
    # final serving location, preserving any #fragment and dropping any
    # existing ?query (a fresh version query is appended in dev mode).
    if "#" in url:
        url, hashid = url.rsplit("#", 1)
        hashid = "#" + hashid
    else:
        hashid = ""
    if "?" in url:
        url, _ = url.rsplit("?", 1)
    rebased = None
    if url.startswith("."):
        # Relative url: resolve against this file's base directory.
        rebased = posixpath.join(self.base, url)
        rebased = posixpath.normpath(rebased)
    else:
        rebased = url.strip("/")
    path = None
    if "/" in self.name:
        # First try resolving the url relative to self.name's directory.
        path = find_file(os.path.join(self.name[: self.name.rindex("/")], rebased))
        if path:
            rebased = os.path.join(self.name[: self.name.rindex("/")], rebased)
    if not path:
        # Fall back to the configured GLOBAL_MEDIA_DIRS lookup.
        path = find_file(rebased)
    if not path:
        raise Exception(
            "Unable to find url `%s` from file %s. File does not exists: %s"
            % (url, self.name, rebased)
        )
    # Inline small files as data: URIs; generating data for images
    # doesn't work for scss (see the disabled branch below).
    if getattr(settings, "GENERATE_DATA_URIS", False) and self.name.endswith(".css"):
        if os.path.getsize(path) <= MAX_DATA_URI_FILE_SIZE and not IGNORE_PATTERN.match(rebased):
            data = b64encode(open(path, "rb").read())
            mime = guess_type(path)[0] or "application/octet-stream"
            return "data:%s;base64,%s" % (mime, data)
    # NOTE(review): the trailing "and False" makes this branch unreachable;
    # it looks deliberately disabled rather than a live code path.
    elif getattr(settings, "GENERATE_DATA_URIS", False) and self.name.endswith(".scss") and False:
        if os.path.getsize(path) <= MAX_DATA_URI_FILE_SIZE and not IGNORE_PATTERN.match(rebased):
            # data = b64encode(open(path, 'rb').read())
            # mime = guess_type(path)[0] or 'application/octet-stream'
            return 'inline-image("%s")' % (url)
    if appsettings.MEDIA_DEV_MODE:
        # Dev mode: the file mtime serves as a cheap cache-busting version.
        prefix = appsettings.DEV_MEDIA_URL
        version = os.path.getmtime(path)
        rebased += "?v=%s" % version
    else:
        prefix = appsettings.PRODUCTION_MEDIA_URL
        with open(path) as sf:
            version = sha1(sf.read()).hexdigest()
        # NOTE(review): the sha1 `version` is computed but never used, and
        # the split/rejoin below reproduces `rebased` unchanged — possibly
        # it was meant to embed the hash ("%s-%s.%s"); confirm intent.
        rebased_prefix, rebased_extention = rebased.rsplit(".", 1)
        rebased = "%s.%s" % (rebased_prefix, rebased_extention)
    rebased = posixpath.join(prefix, rebased)
    return "/" + rebased.strip("/") + hashid
def _read_import(self, name):
    """Locate *name* via find_file and return its text content.

    Raises:
        IOError: if the file cannot be located.
    """
    located = find_file(name)
    if not located:
        raise IOError("File not found: '%s'" % name)
    with open(located, 'r') as handle:
        return handle.read()
def get_dev_output(self, name):
    """Return (content, mimetype) for the media file *name*.

    The file is resolved through find_file; content is returned as raw
    bytes alongside the guessed MIME type (None when it can't be guessed).
    """
    path = find_file(name)
    # Context manager replaces the manual open/read/close triple and
    # closes the handle even if read() raises.
    with open(path, 'rb') as fp:
        content = fp.read()
    mimetype = guess_type(path)[0]
    return content, mimetype
def fixurls(self, match):
    """Rewrite a CSS url(...) reference relative to this file's base path.

    Fragment (#...) and query (?...) parts are preserved; relative URLs
    are rebased and optionally inlined as data: URIs when small enough.
    """
    url = match.group(1)
    hashid = ''
    if '#' in url:
        url, hashid = url.split('#', 1)
        hashid = '#' + hashid
    url_query = None
    if '?' in url:
        url, url_query = url.split('?', 1)
    # Only touch relative URLs: skip absolute paths and scheme-qualified URLs.
    if ':' not in url and not url.startswith('/'):
        rebased_url = posixpath.join(self.base_path, url)
        rebased_url = posixpath.normpath(rebased_url)
        try:
            if GENERATE_DATA_URIS:
                path = find_file(rebased_url)
                if os.path.getsize(path) <= MAX_DATA_URI_FILE_SIZE and \
                        not IGNORE_PATTERN.match(rebased_url):
                    # Close the handle deterministically while reading.
                    with open(path, 'rb') as fp:
                        data = b64encode(fp.read())
                    mime = guess_type(path)[0] or 'application/octet-stream'
                    return 'url(data:%s;base64,%s)' % (mime, data)
            url = media_url(rebased_url)
        # Bug fix: the bare "except:" also swallowed KeyboardInterrupt and
        # SystemExit; Exception keeps the best-effort fallback without that.
        except Exception:
            logging.error('URL not found: %s' % url)
    if url_query is None:
        url_query = ''
    elif '?' in url:
        url_query = '&' + url_query
    else:
        url_query = '?' + url_query
    return 'url(%s%s%s)' % (url, url_query, hashid)
def _read_jst_content(self, name):
    """Resolve the (whitespace-stripped) template name and return its text.

    Raises:
        IOError: when the template cannot be found.
    """
    cleaned = name.strip(' \n\t')
    template_path = find_file(cleaned)
    if not template_path:
        raise IOError("File not found: '%s'" % cleaned)
    with open(template_path, 'r') as handle:
        return smart_unicode(handle.read())
def _regenerate(self, debug=False):
    """Recompile the module unless its mtime is unchanged since last run."""
    module_path = find_file(self.module)
    current_mtime = os.path.getmtime(module_path)
    if current_mtime == self._mtime:
        # Source unchanged on disk; keep the cached compilation.
        return
    source = read_text_file(module_path)
    self._compiled = self._compile(source, debug=debug)
    self._compiled_hash = sha1(smart_str(self._compiled)).hexdigest()
    self._mtime = current_mtime
def get_dev_output(self, name):
    """Return (raw bytes, guessed mimetype) for the given media name."""
    located = find_file(name)
    with open(located, 'rb') as handle:
        payload = handle.read()
    return payload, guess_type(located)[0]
def _regenerate(self, debug=False):
    """Recompile from the module path when its mtime has changed."""
    source_path = find_file(self.module)
    source_mtime = os.path.getmtime(source_path)
    if source_mtime == self._mtime:
        return  # nothing changed on disk
    self._compiled = self._compile(source_path, debug=debug)
    self._compiled_hash = sha1(self._compiled).hexdigest()
    self._mtime = source_mtime
def get_dev_output(self, name, variation):
    """Read and return the file contents for *name*.

    *name* must match self.name (this generator handles exactly one
    file); *variation* is accepted for interface compatibility.
    """
    assert name == self.name, (
        '''File name "%s" doesn't match the one in GENERATE_MEDIA ("%s")'''
        % (name, self.name))
    path = find_file(name)
    assert path, """File name "%s" doesn't exist.""" % name
    # "with" guarantees the handle is closed on error paths too
    # (the original left it open if read() raised).
    with open(path, 'r') as fp:
        return fp.read()
def _regenerate(self, debug=False):
    """Recompile the module's source when its mtime has changed on disk."""
    path = find_file(self.module)
    mtime = os.path.getmtime(path)
    if mtime == self._mtime:
        return
    # Context manager replaces open/read/close and closes the handle
    # even if read() raises.
    with open(path, 'r') as fp:
        source = fp.read()
    self._compiled = self._compile(source, debug=debug)
    self._compiled_hash = sha1(self._compiled).hexdigest()
    self._mtime = mtime
def __init__(self, **kwargs):
    """Configure the CoffeeScript filter and verify its source module exists."""
    self.config(kwargs, module=None)
    super(CoffeeScript, self).__init__(**kwargs)
    assert self.filetype == 'js', (
        'CoffeeScript only supports compilation to js. '
        'The parent filter expects "%s".' % self.filetype)
    # Fail early if the configured module can't be located.
    if not find_file(self.module):
        raise RuntimeError('File not found: %s' % self.module)
    # Compilation cache, populated lazily on first use.
    self._compiled = None
    self._compiled_hash = None
    self._mtime = None
def _find_file(self, name):
    """Resolve a Sass/SCSS import name to an actual file path.

    Tries the name itself (adding .sass/.scss when no extension is
    given) and its "_partial" variant, returning the first match from
    find_file or None when nothing resolves.
    """
    parts = name.rsplit('/', 1)
    parts[-1] = '_' + parts[-1]
    partial = '/'.join(parts)
    if not name.endswith(('.sass', '.scss')):
        # Bug fix: the partial candidate previously had no extension, so
        # "_name" could never match "_name.sass"/"_name.scss" on disk
        # (the other _find_file variants in this file do it this way).
        names = (name + '.sass', name + '.scss',
                 partial + '.sass', partial + '.scss')
    else:
        names = (name, partial)
    for name in names:
        path = find_file(name, media_dirs=self.path)
        if path:
            return path
def _find_file(self, name):
    """Return the on-disk path for a Sass/SCSS import, or None.

    Candidates are tried in order: the plain name, then its "_partial"
    form; when the import omits an extension, both .sass and .scss are
    attempted for each.
    """
    head, _, tail = name.rpartition("/")
    partial = ("%s/_%s" % (head, tail)) if head else ("_" + tail)
    if name.endswith((".sass", ".scss")):
        candidates = (name, partial)
    else:
        candidates = (name + ".sass", name + ".scss",
                      partial + ".sass", partial + ".scss")
    for candidate in candidates:
        found = find_file(candidate, media_dirs=self.path)
        if found:
            return found
def _find_deps(self, name, lang):
    """Recursively collect dependency names declared in *name*.

    Unresolvable dependencies are skipped; each dependency's own
    transitive deps are listed before the dependency itself (post-order).
    """
    collected = []
    for dep in CommentResolver(lang).resolve(name):
        resolved = find_file(dep)
        if resolved:
            collected.extend(self._find_deps(resolved, lang))
            collected.append(dep)
    return collected
def get_last_modified(self):
    """Return the newest mtime among all files referenced by this CSS.

    Falls back to the current time when any referenced file can't be
    resolved, which forces a regeneration.
    """
    content = super(CssImport, self).get_dev_output(self.name, {})
    referenced = []
    self.rewrite_re.sub(lambda m: referenced.append(m.group(1)), content)
    newest = 0
    for ref in referenced:
        resolved = find_file(ref)
        if not resolved:
            return time.time()
        newest = max(newest, os.path.getmtime(resolved))
    return newest
def _find_file(self, name):
    """Locate the file backing a Sass/SCSS import.

    Both the literal name and its underscore-prefixed partial are
    considered; extensionless imports get .sass/.scss appended. Returns
    the first hit from find_file, else None.
    """
    directory, _, base = name.rpartition('/')
    prefixed = '_' + base
    partial = directory + '/' + prefixed if directory else prefixed
    if name.endswith(('.sass', '.scss')):
        attempts = (name, partial)
    else:
        attempts = tuple(
            stem + ext
            for stem in (name, partial)
            for ext in ('.sass', '.scss')
        )
    for attempt in attempts:
        hit = find_file(attempt, media_dirs=self.path)
        if hit:
            return hit
def make_imports(self, match): fname = find_file(match.group(1)) if not fname: lineno = match.string.count('\n', 0, match.start()) print "[%s:%d] Can't find file `%s`" % (self.name, lineno, match.group(1)) return "" try: with open(fname, 'r') as sf: content = sf.read() except IOError, e: lineno = match.string.count('\n', 0, match.start()) info = self.name, lineno, fname, e print "[%s:%d] Can't import file `%s`: %s" % info return ""
def _regenerate(self, debug=False):
    # Recompile the LESS module; mtime-based caching is disabled (below).
    path = find_file(self.module)
    mtime = os.path.getmtime(path)
    # the following fails, if files "imported" into the current less file have been
    # modified; disable this temporarily
    # if mtime == self._mtime:
    #     print "*** no need to regenerate", path, mtime, self._mtime
    #     return
    if not debug or not ENABLE_LESS_CSS_DEBUG:
        self._compiled = self._compile(path, debug=debug)
        self._compiled_hash = sha1(self._compiled).hexdigest()
        self._compiled = self._compiled.decode(
            'utf-8')  #decoding it here as sha1 expects str
        self._mtime = mtime
    else:
        # Debug path: serve the raw source plus each import's raw content
        # (uses the Python 2 file() builtin).
        self._compiled = file(path).read()
        self._extract_imports(self._compiled)
        self._imports_code = {
            name: file(find_file(name)).read().decode('utf-8')
            for name in self._imports
        }
        # NOTE(review): the hash is a fixed placeholder in debug mode, so
        # cache-busting will not reflect real changes — confirm intended.
        self._compiled_hash = 'kane'
        self._compiled = self._compiled.decode('utf-8')
        self._mtime = mtime
def _find_css(self, name):
    """Expand *name* into an ordered list of CSS entries plus their deps.

    For each configured extension the first matching location wins; an
    entry's comment-declared dependencies precede the entry itself.
    """
    entries = []
    for ext in MEDIA_CSS_EXT:
        if isinstance(MEDIA_CSS_LOCATION, basestring):
            search_dirs = [MEDIA_CSS_LOCATION]
        else:
            search_dirs = MEDIA_CSS_LOCATION
        for directory in search_dirs:
            candidate = os.path.join(directory, name + "." + ext)
            resolved = find_file(candidate)
            if resolved:
                entries += self._find_deps(resolved, "css")
                entries.append(candidate)
                break
    return entries
def _find_deps(self, name, lang):
    """Collect transitive comment-declared deps of *name*.

    In production mode results are memoized per name; in dev mode
    resolution runs fresh every time so edits are picked up.
    """
    if not MEDIA_DEV_MODE and name in self._cache:
        return self._cache[name]
    found = []
    for dep in CommentResolver(lang).resolve(name):
        dep_path = find_file(dep)
        if not dep_path:
            continue
        found += self._find_deps(dep_path, lang)
        found.append(dep)
    if not MEDIA_DEV_MODE:
        self._cache[name] = found
    return found
def fixurls(self, match):
    # Rewrite one css url(...) occurrence: strip fragment/query, rebase
    # relative urls against self.base_path, optionally inline as a data:
    # URI, and record/clear per-url entries in the global_errors map.
    url = original_url = match.group(1)
    hashid = ''
    if '#' in url:
        url, hashid = url.split('#', 1)
        hashid = '#' + hashid
    url_query = None
    if '?' in url:
        url, url_query = url.split('?', 1)
    # Absolute paths and scheme-qualified urls (http:, data:, ...) pass through.
    if ':' not in url and not url.startswith('/'):
        rebased_url = posixpath.join(self.base_path, url)
        rebased_url = posixpath.normpath(rebased_url)
        try:
            if GENERATE_DATA_URIS:
                path = find_file(rebased_url)
                if os.path.getsize(path) <= MAX_DATA_URI_FILE_SIZE and \
                        not IGNORE_PATTERN.match(rebased_url):
                    data = b64encode(open(path, 'rb').read())
                    mime = guess_type(
                        path)[0] or 'application/octet-stream'
                    return 'url(data:{};base64,{})'.format(mime, data)
            url = media_url(rebased_url)
        # except:
        except KeyError:
            # NOTE(review): only KeyError (presumably raised by media_url
            # for unknown names) is trapped; os.path.getsize(None) after a
            # failed find_file would raise TypeError and escape — confirm.
            # logger.error('URL not found: %s' % url)
            global_errors['filters.cssurl'][
                original_url] = 'URL not found: ' + original_url
        else:
            # Success: drop any stale error recorded for this url.
            global_errors['filters.cssurl'].pop(original_url, None)
    if url_query is None:
        url_query = ''
    elif '?' in url:
        url_query = '&' + url_query
    else:
        url_query = '?' + url_query
    return 'url({}{}{})'.format(url, url_query, hashid)
def fixurls(self, match):
    """Rebase one css url(...) reference (no query-string handling here).

    Relative URLs are resolved against self.base_path and run through
    media_url(); small files may be inlined as data: URIs.
    """
    url = match.group(1)
    hashid = ""
    if "#" in url:
        url, hashid = url.split("#", 1)
        hashid = "#" + hashid
    if ":" not in url and not url.startswith("/"):
        rebased_url = posixpath.join(self.base_path, url)
        rebased_url = posixpath.normpath(rebased_url)
        try:
            if GENERATE_DATA_URIS:
                path = find_file(rebased_url)
                if os.path.getsize(path) <= MAX_DATA_URI_FILE_SIZE and not IGNORE_PATTERN.match(rebased_url):
                    # with-block closes the handle promptly.
                    with open(path, "rb") as fp:
                        data = b64encode(fp.read())
                    mime = guess_type(path)[0] or "application/octet-stream"
                    return "url(data:%s;base64,%s)" % (mime, data)
            url = media_url(rebased_url)
        # Bug fix: the bare "except:" also trapped KeyboardInterrupt and
        # SystemExit; Exception preserves only the best-effort fallback.
        except Exception:
            logging.error("URL not found: %s" % url)
    return "url(%s%s)" % (url, hashid)
def _collect_scss_files(self):
    """Walk @import statements starting at self.name, collecting mtimes.

    Returns {path: mtime} for every reachable file, or None as soon as
    any file cannot be read or resolved (callers treat that as "dirty").
    """
    files = {}
    pool = [self.name]
    # (removed an unused `last_modified` local from the original)
    while pool:
        item = pool.pop(0)
        try:
            content = self._read_import(item)
        except IOError:
            return None
        # Queue every import target found in this file for processing.
        for quote, include in self.IMPORT.findall(content):
            pool.append(include.strip(' \n\t'))
        path = find_file(item)
        if not path:
            return None
        files[path] = os.path.getmtime(path)
    return files
def generate(self, sprite_name, image_names):
    # Build a vertical CSS sprite: stack each image (separated by
    # self.padding rows of space), emit positioning CSS per image, cache
    # the .sprite.css and .sprite.png outputs, and return names + hash.
    sprite_css_name, sprite_png_name = sprite_name + '.sprite.css', sprite_name + '.sprite.png'
    # matrix rows: (row_height, [(img_width, img_name, PIL_image)]) for
    # an image row, or (padding, ()) for a spacer row.
    matrix = []
    for img in image_names:
        im = Image.open(utils.find_file(img))
        matrix.append((im.size[1], [(im.size[0], img, im)]))
        matrix.append((self.padding, ()))
    # Sheet is as wide as the widest row, as tall as all rows stacked.
    w = max((sum((j[0] for j in i[1])) for i in matrix))
    h = sum((i[0] for i in matrix))
    css = ""
    sprite = Image.new("RGBA", (w, h))
    y = 0
    for i in matrix:
        # NOTE: w and h are reused below as per-image width/height.
        x, h = 0, i[0]
        for j in i[1]:
            w = j[0]
            sprite.paste(j[2], (x, y))
            # Selector derived from the image path; path separators -> "_".
            selector = os.path.splitext(j[1])[0].replace(os.path.sep, '_')
            css += "\n.%s.%s { background-position: %dpx %dpx; }" % (
                sprite_name, selector, -x, -y)
            css += "\n.%s.%s.sized { width: %dpx; height: %dpx; }" % (
                sprite_name, selector, w, h)
            css += "\n.%s.%s.replace { overflow: hidden; text-indent: -9999px; width: %dpx; height: %dpx;}" % (
                sprite_name, selector, w, h)
            x += w
        y += h
    # Serialize the sheet to PNG bytes (Python 2 StringIO buffer).
    buf = StringIO()
    sprite.save(buf, format="PNG")
    png = buf.getvalue()
    buf.close()
    hash = sha1(png).hexdigest()
    self.cache[sprite_css_name] = css, "text/css"
    self.cache[sprite_png_name] = png, "image/png"
    return sprite_css_name, sprite_png_name, hash
def _rebase(self, url):
    # Rewrite a url to its final serving location, preserving any
    # #fragment and appending a version (mtime in dev, content sha1
    # embedded in the file name in production).
    if "#" in url:
        url, hashid = url.rsplit("#", 1)
        hashid = "#" + hashid
    else:
        hashid = ""
    if url.startswith("."):
        # Relative url: resolve against the current file's directory.
        rebased = os.path.join(self.base, url)
        rebased = os.path.normpath(rebased)
    else:
        rebased = url.strip("/")
    local_file_path = utils.find_file(rebased)
    # NOTE(review): if find_file returns None, os.path.exists(None)
    # raises TypeError before the intended Exception below — confirm.
    if not os.path.exists(local_file_path):
        print "Check path", os.path.realpath(local_file_path), rebased
        raise Exception("Unable to find url `%s` from file %s. File does not exists: %s" % (
            url, self.name, local_file_path
        ))
    if appsettings.MEDIA_DEV_MODE:
        # Dev mode: mtime query string as a cheap cache buster.
        prefix = appsettings.DEV_MEDIA_URL
        version = os.path.getmtime(local_file_path)
        rebased += "?v=%s" % version
    else:
        prefix = appsettings.PRODUCTION_MEDIA_URL
        with open(local_file_path, "rb") as sf:
            version = sha1(sf.read()).hexdigest()
        # NOTE(review): leftover debug output — consider removing.
        print(version)
        rebased_prefix, rebased_extention = rebased.rsplit(".", 1)
        rebased = "%s-%s.%s" % (rebased_prefix, version, rebased_extention)
    #need to correct the windows path (back slash) to web url (forward slash)
    rebased = "\"" + os.path.join(prefix, rebased.replace("\\", "/")) + "\""
    return rebased.strip("/") + hashid
def get_last_modified(self):
    """Return the newest mtime across self.name and its includes.

    Performs a breadth-first walk over include directives; returns None
    when any file in the chain is missing or unreadable.
    """
    queue = [self.name]
    newest = 0
    while queue:
        current = queue.pop(0)
        try:
            content = self._read_jst_content(current)
        except IOError:
            return None
        for ref in self.include.findall(content):
            queue.append(ref.strip(' \n\t'))
        resolved = find_file(current)
        if not resolved:
            return None
        newest = max(newest, os.path.getmtime(resolved))
    return newest
def fixurls(self, match):
    """Rebase a css url(...) reference, keeping query and fragment parts.

    Lookup errors are logged only in production (settings.PRODUCTION)
    to keep the local dev console quiet.
    """
    url = match.group(1)
    hashid = ''
    if '#' in url:
        url, hashid = url.split('#', 1)
        hashid = '#' + hashid
    url_query = None
    if '?' in url:
        url, url_query = url.split('?', 1)
    if ':' not in url and not url.startswith('/'):
        rebased_url = posixpath.join(self.base_path, url)
        rebased_url = posixpath.normpath(rebased_url)
        try:
            if GENERATE_DATA_URIS:
                path = find_file(rebased_url)
                if os.path.getsize(path) <= MAX_DATA_URI_FILE_SIZE and \
                        not IGNORE_PATTERN.match(rebased_url):
                    # Close the handle deterministically.
                    with open(path, 'rb') as fp:
                        data = b64encode(fp.read())
                    mime = guess_type(path)[0] or 'application/octet-stream'
                    return 'url(data:%s;base64,%s)' % (mime, data)
            url = media_url(rebased_url)
        # Bug fix: bare "except:" also caught KeyboardInterrupt/SystemExit.
        except Exception:
            if settings.PRODUCTION:
                # Removing this from local dev env so we can have a less
                # cluttered console. Will warn us of issues while testing
                # on GAE environment.
                logging.error('URL not found: %s' % url)
    if url_query is None:
        url_query = ''
    elif '?' in url:
        url_query = '&' + url_query
    else:
        url_query = '?' + url_query
    return 'url(%s%s%s)' % (url, url_query, hashid)
def resolve(self, fname):
    """Return the dependency list for *fname*, using an mtime-keyed cache.

    A cache entry is reused only while the file's mtime is unchanged.
    When MEDIA_RELATIVE_RESOLVE is set, each raw dependency is expanded
    relative to the file's own directory via _find_files.
    """
    fname = find_file(fname)
    if fname in self._cache:
        cached_result, cached_time = self._cache[fname]
        if cached_time == os.path.getmtime(fname):
            return cached_result
    with open(fname) as sf:
        content = sf.read()
    # for testing purposes
    if MEDIA_RELATIVE_RESOLVE:
        base_dir = os.path.split(fname)[0]
        result = []
        for raw_dep in self._resolve(content):
            result += _find_files(base_dir, raw_dep)
    else:
        result = self._resolve(content)
    self._cache[fname] = result, os.path.getmtime(fname)
    return result
def get_last_modified(self):
    """Return the file's mtime, or the falsy find_file result when absent."""
    located = find_file(self.name)
    if not located:
        return located
    return os.path.getmtime(located)
def _get_path(self):
    """Resolve self.name to an on-disk path; assert the file exists."""
    resolved = find_file(self.name)
    assert resolved, """File name "%s" doesn't exist.""" % self.name
    return resolved
def _find_file(self, name):
    """Delegate lookup of *name* to find_file within this filter's media dirs."""
    resolved = find_file(name, media_dirs=self.path)
    return resolved
def _rebase(self, url):
    # Map a url inside a generated stylesheet onto its served location;
    # the #fragment survives, any ?query is stripped here and re-added
    # as a version marker in dev mode.
    if "#" in url:
        url, hashid = url.rsplit("#", 1)
        hashid = "#" + hashid
    else:
        hashid = ""
    if "?" in url:
        url, _ = url.rsplit("?", 1)
    rebased = None
    if url.startswith("."):
        # Relative url: resolve against this file's base directory.
        rebased = posixpath.join(self.base, url)
        rebased = posixpath.normpath(rebased)
    else:
        rebased = url.strip("/")
    path = None
    if '/' in self.name:
        # try find file using relative url in self.name
        path = find_file(
            os.path.join(self.name[:self.name.rindex('/')], rebased))
        if path:
            rebased = os.path.join(self.name[:self.name.rindex('/')], rebased)
    if not path:
        # try finding file based on GLOBAL_MEDIA_DIRS
        path = find_file(rebased)
    if not path:
        raise Exception(
            "Unable to find url `%s` from file %s. File does not exists: %s"
            % (url, self.name, rebased))
    # generating data for images doesn't work for scss
    if getattr(settings, 'GENERATE_DATA_URIS', False) and self.name.endswith('.css'):
        if os.path.getsize(path) <= MAX_DATA_URI_FILE_SIZE and \
                not IGNORE_PATTERN.match(rebased):
            data = b64encode(open(path, 'rb').read())
            mime = guess_type(path)[0] or 'application/octet-stream'
            return 'data:%s;base64,%s' % (mime, data)
    # NOTE(review): the trailing "and False" disables this scss branch
    # entirely; it appears intentionally switched off.
    elif getattr(settings, 'GENERATE_DATA_URIS', False) and self.name.endswith('.scss') and False:
        if os.path.getsize(
                path
        ) <= MAX_DATA_URI_FILE_SIZE and not IGNORE_PATTERN.match(rebased):
            #data = b64encode(open(path, 'rb').read())
            #mime = guess_type(path)[0] or 'application/octet-stream'
            return 'inline-image("%s")' % (url)
    if appsettings.MEDIA_DEV_MODE:
        prefix = appsettings.DEV_MEDIA_URL
        version = os.path.getmtime(path)  # mtime as cache-busting version
        rebased += "?v=%s" % version
    else:
        prefix = appsettings.PRODUCTION_MEDIA_URL
        with open(path) as sf:
            version = sha1(sf.read()).hexdigest()
        # NOTE(review): `version` is unused and this split/rejoin is a
        # no-op; likely intended to embed the hash in the name — confirm.
        rebased_prefix, rebased_extention = rebased.rsplit(".", 1)
        rebased = "%s.%s" % (rebased_prefix, rebased_extention)
    rebased = posixpath.join(prefix, rebased)
    return "/" + rebased.strip("/") + hashid
def _get_path(self):
    """Locate self.name on disk, asserting the lookup succeeded."""
    located = find_file(self.name)
    assert located, """File name "{}" doesn't exist.""".format(self.name)
    return located
def _get_path(self):
    """Return the resolved path of self.name; assert it exists."""
    file_path = find_file(self.name)
    assert file_path, f"""File name "{self.name}" doesn't exist."""
    return file_path
def _find_file(self, name):
    """Locate a LESS source file, appending the .less extension if absent."""
    candidate = name if name.endswith('.less') else name + '.less'
    return find_file(candidate, media_dirs=self.path)
def _get_contents(self, language):
    """Read and return the text of the file selected for *language*.

    The file name comes from _get_filename; an AssertionError is raised
    when the file cannot be located.
    """
    target = self._get_filename(language)
    located = find_file(target)
    assert located, "File name '%s' doesn't exist." % target
    return read_text_file(located)