def read(self):
    """Parse self.filepath: a ---/=== fenced metadata header followed by
    markdown content.

    Returns a dict of metadata keys plus 'content' (rendered HTML), or
    None when no metadata block is found.
    """
    # `with` guarantees the handle is closed even if read() raises
    with open(self.filepath) as f:
        logger.info('read ' + self.filepath)
        content = f.read()
    meta_regex = re.compile(
        r"^\s*(?:-|=){3,}\s*\n((?:.|\n)+?)\n\s*(?:-|=){3,}\s*\n*",
        re.MULTILINE)
    match = re.match(meta_regex, content)
    if not match:
        logger.error("No metadata in: %s" % self.filepath)
        return None
    meta = match.group(1)
    meta = re.sub(r'\r\n|\r|\n', '\n', meta)  # normalize newline styles
    dct = {}
    k = v = None
    # renamed loop variable: the original reused `meta`, shadowing the header text
    for line in meta.split('\n'):
        line = line.replace('\t', ' ')
        if line.startswith(' ') and k:
            # indented line: continuation of the previous key's value
            dct[k] = dct[k] + '\n' + line.lstrip()
        if ':' in line and not line.startswith(' '):
            index = line.find(':')
            k, v = line[:index], line[index + 1:]
            k, v = k.rstrip(), v.lstrip()
            dct[k] = to_unicode(v)
    text = to_unicode(content[match.end():])
    dct['content'] = markdown(text)
    return dct
def content_url(a, *args):
    """Join path pieces into a lowercase, root-relative (or absolute
    http) URL, collapsing a trailing '/index.html'.

    NOTE(review): everything after the first `return path.lower()` in the
    original was unreachable dead code (a duplicate slug-handling block);
    it has been removed.
    """
    slug = ns.site.slug  # kept for its attribute access; unused in the live path
    args = [to_unicode(arg) for arg in args]
    path = os.path.join(to_unicode(a), *args)
    if sys.platform.startswith('win'):
        path = path.replace('\\', '/')
    if path.endswith('/index.html'):
        # exact-suffix slice; rstrip('index.html') treats its argument as a char set
        path = path[:-len('index.html')]
    if not path.startswith('http://'):
        path = '/%s' % path.lstrip('/')
    return path.lower()
def read(self):
    """Parse self.filepath: a ---/=== fenced metadata header followed by
    markdown content.

    Returns a dict of metadata keys plus 'content' (rendered HTML), or
    None when no metadata block is found.
    """
    # `with` guarantees the handle is closed even if read() raises
    with open(self.filepath) as f:
        logger.info('read ' + self.filepath)
        content = f.read()
    meta_regex = re.compile(
        r"^\s*(?:-|=){3,}\s*\n((?:.|\n)+?)\n\s*(?:-|=){3,}\s*\n*",
        re.MULTILINE
    )
    match = re.match(meta_regex, content)
    if not match:
        logger.error("No metadata in: %s" % self.filepath)
        return None
    meta = match.group(1)
    meta = re.sub(r'\r\n|\r|\n', '\n', meta)  # normalize newline styles
    dct = {}
    k = v = None
    # renamed loop variable: the original reused `meta`, shadowing the header text
    for line in meta.split('\n'):
        line = line.replace('\t', ' ')
        if line.startswith(' ') and k:
            # indented line: continuation of the previous key's value
            dct[k] = dct[k] + '\n' + line.lstrip()
        if ':' in line and not line.startswith(' '):
            index = line.find(':')
            k, v = line[:index], line[index + 1:]
            k, v = k.rstrip(), v.lstrip()
            dct[k] = to_unicode(v)
    text = to_unicode(content[match.end():])
    dct['content'] = markdown(text)
    return dct
def content_url(ctx, base, *args):
    """Build a site URL honoring the configured permalink style.

    Joins *base* and *args*, lowercases, collapses '/index.html', then
    appends or strips '.html' per settings.config['permalink'].

    Fix: the original used url.rstrip('.html') / rstrip('.xml'), which
    strips a *character set* (so 'tech.html' became 'tec'); replaced
    with exact suffix removal.
    """
    writer = ctx.get('writer')

    def fix_index(url):
        # collapse a trailing '/index.html' to the directory URL
        if url.endswith('/index.html'):
            return url[:-10]
        return url

    def strip_suffix(url, suffix):
        # slice off an exact suffix; str.rstrip would strip a char set
        if url.endswith(suffix):
            return url[:-len(suffix)]
        return url

    args = list(args)
    base = to_unicode(base)
    use_relative_url = settings.config.get('relative_url', False)
    if base.startswith('http://') or base.startswith('https://'):
        prefix = '%s/' % base.rstrip('/')
    elif use_relative_url and writer:
        prefix = '%s/' % get_relative_base(writer['filepath'])
        args.insert(0, base)
    else:
        prefix = '/'
        args.insert(0, base)
    args = map(lambda o: to_unicode(o).strip('/'), args)
    url = '/'.join(args).replace('//', '/').replace(' ', '-')
    url = prefix + url.lstrip('/')
    url = to_unicode(fix_index(url.lower()))
    if url.endswith('/'):
        return url
    permalink = settings.config['permalink']
    if permalink.endswith('.html'):
        if url.endswith('.html'):
            return url
        if url.endswith('.xml'):
            return url
        return '%s.html' % url
    if permalink.endswith('/'):
        if url.endswith('.html'):
            url = strip_suffix(fix_index(url), '.html')
        if url.endswith('.xml'):
            url = strip_suffix(url, '.xml')
        return '%s/' % url
    if url.endswith('.html'):
        return strip_suffix(fix_index(url), '.html')
    if url.endswith('.xml'):
        return strip_suffix(url, '.xml')
    return url
def content_url(ctx, base, *args):
    """Build a site URL honoring settings.permalink.

    Joins *base* and *args*, lowercases, collapses '/index.html', then
    appends or strips '.html' per the permalink style.

    Fix: the original used url.rstrip(".html") / rstrip(".xml"), which
    strips a *character set* (so 'tech.html' became 'tec'); replaced
    with exact suffix removal.
    """
    writer = ctx.get("writer")

    def fix_index(url):
        # collapse a trailing '/index.html' to the directory URL
        if url.endswith("/index.html"):
            return url[:-10]
        return url

    def strip_suffix(url, suffix):
        # slice off an exact suffix; str.rstrip would strip a char set
        if url.endswith(suffix):
            return url[:-len(suffix)]
        return url

    args = list(args)
    base = to_unicode(base)
    if base.startswith("http://") or base.startswith("https://"):
        prefix = "%s/" % base.rstrip("/")
    elif settings.use_relative_url and writer:
        prefix = "%s/" % get_relative_base(writer["filepath"])
        args.insert(0, base)
    else:
        prefix = "/"
        args.insert(0, base)
    args = map(lambda o: to_unicode(o).strip("/"), args)
    url = "/".join(args).replace("//", "/").replace(" ", "-")
    url = prefix + url.lstrip("/")
    url = to_unicode(fix_index(url.lower()))
    if url.endswith("/"):
        return url
    if settings.permalink.endswith(".html"):
        if url.endswith(".html"):
            return url
        if url.endswith(".xml"):
            return url
        return "%s.html" % url
    if settings.permalink.endswith("/"):
        if url.endswith(".html"):
            url = strip_suffix(fix_index(url), ".html")
        if url.endswith(".xml"):
            url = strip_suffix(url, ".xml")
        return "%s/" % url
    if url.endswith(".html"):
        return strip_suffix(fix_index(url), ".html")
    if url.endswith(".xml"):
        return strip_suffix(url, ".xml")
    return url
def __load_themes():
    """Return {name: repo} for liquidluck themes, caching the fetched
    JSON in a temp file.

    NOTE(review): this variant uses a 100s cache TTL while the sibling
    loaders use 600s — confirm which is intended.
    """
    import time
    import tempfile
    path = os.path.join(tempfile.gettempdir(), 'liquidluck.json')
    if not os.path.exists(path) or \
            os.stat(path).st_mtime + 100 < time.time():
        content = __fetch_themes()
        # `with` closes the handle even if write() raises
        with open(path, 'w') as f:
            f.write(content)
    with open(path) as f:
        content = to_unicode(f.read())
    try:
        import json
        json_decode = json.loads
    except ImportError:
        # Python < 2.6 fallback
        import simplejson
        json_decode = simplejson.loads
    repos = json_decode(content)
    themes = {}
    for theme in repos['repositories']:
        # 'liquidluck-theme-foo' -> 'foo'
        name = theme['name'].replace('liquidluck-theme', '')
        name = name.strip().strip('-')
        theme['name'] = name
        themes[name] = theme
    return themes
def content_url(a, *args):
    """Build a root-relative URL from path pieces, applying the site's
    'slug' style ('html'/'clean'/'slash') to .html targets.

    Fix: the original applied the slug branches unconditionally, which
    clobbered the trailing-slash normalization for extension-less paths
    and stripped non-.html extensions; restructured to match the sibling
    implementation that keys slug handling off ext == '.html'.
    """
    slug = namespace.site.get('slug', 'html')
    args = [to_unicode(arg) for arg in args]
    path = os.path.join(to_unicode(a), *args)
    basename, ext = os.path.splitext(path)
    if not ext:
        # extension-less targets are directories
        path = basename + '/'
    elif ext == '.html':
        if slug == 'clean':
            path = basename
        elif slug == 'slash':
            path = basename + '/'
    if not path.startswith('http://'):
        path = '/%s' % path.lstrip('/')
    return path
def render(self):
    """Read self.filepath, split the '---'-terminated header from the
    body, and build a post object with meta (including the raw source
    text and a rendered table of contents)."""
    header = ''
    body = ''
    # `with` closes the handle even if parsing raises
    with open(self.filepath) as f:
        logging.debug('read ' + self.relative_filepath)
        recording = True
        for line in f:
            if recording and line.startswith('---'):
                # first '---' line ends the header section
                recording = False
            elif recording:
                header += line
            else:
                body += line
    body = to_unicode(body)
    meta = self._parse_meta(header)
    #: keep body in meta data as source text
    meta['source_text'] = body
    _toc = m.Markdown(m.HtmlTocRenderer(), 0)
    meta['toc'] = _toc.render(body)
    content = markdown(body)
    return self.post_class(self.filepath, content, meta=meta)
def _parse_meta(self, header, body):
    """Build the meta dict for a post: title from the first <h1> (None
    plus an error log when absent), <li>'key: value' pairs, an optional
    description from <p> blocks, plus the raw source text and a TOC
    rendered from *body*."""
    rendered = m.html(to_unicode(header))
    found = re.findall(r'<h1>(.*)</h1>', rendered)
    if found:
        title = found[0]
    else:
        logging.error('There is no title')
        title = None
    meta = {'title': title}
    for item in re.findall(r'<li>(.*?)</li>', rendered, re.S):
        sep = item.find(':')
        meta[item[:sep].rstrip()] = item[sep + 1:].lstrip()
    desc = re.findall(r'<p>(.*?)</p>', rendered, re.S)
    if desc:
        meta['description'] = '\n\n'.join(desc)
    #: keep body in meta data as source text
    meta['source_text'] = body
    toc_renderer = m.Markdown(m.HtmlTocRenderer(), 0)
    meta['toc'] = toc_renderer.render(body)
    return meta
def _plain_text(self, node):
    """Return the text content of *node*'s first child, or None when the
    node has no child or the child is not a text node."""
    first = node.firstChild
    if not first:
        return None
    if first.nodeType != node.TEXT_NODE:
        return None
    return to_unicode(first.data)
def markdown(text):
    """Render *text* to XHTML with fenced-code and autolink extensions
    enabled."""
    source = to_unicode(text)
    renderer = JuneRender(flags=m.HTML_USE_XHTML)
    extensions = m.EXT_FENCED_CODE | m.EXT_AUTOLINK
    return m.Markdown(renderer, extensions=extensions).render(source)
def markdown(text):
    """Render *text* to XHTML with fenced code, autolinks, tables,
    strikethrough, and no-intra-emphasis enabled."""
    source = to_unicode(text)
    renderer = JuneRender(flags=m.HTML_USE_XHTML)
    extensions = (m.EXT_FENCED_CODE | m.EXT_AUTOLINK | m.EXT_TABLES
                  | m.EXT_NO_INTRA_EMPHASIS | m.EXT_STRIKETHROUGH)
    return m.Markdown(renderer, extensions=extensions).render(source)
def get(self):
    """Serve the livereload script with the configured port substituted
    for the '{{port}}' placeholder."""
    # `with` closes the handle even if read() raises
    with open(LIVERELOAD) as f:
        content = to_unicode(f.read())
    content = content.replace('{{port}}', str(PORT))
    self.set_header('Content-Type', 'application/javascript')
    self.write(content)
def content_url(base, *args):
    """Build a site URL honoring settings.permalink.

    Joins *base* and *args*, lowercases, collapses '/index.html', then
    appends or strips '.html' per the permalink style.

    Fix: the original used url.rstrip('.html') / rstrip('.xml'), which
    strips a *character set* (so 'tech.html' became 'tec'); replaced
    with exact suffix removal.
    """
    def fix_index(url):
        # collapse a trailing '/index.html' to the directory URL
        if url.endswith('/index.html'):
            return url[:-10]
        return url

    def strip_suffix(url, suffix):
        # slice off an exact suffix; str.rstrip would strip a char set
        if url.endswith(suffix):
            return url[:-len(suffix)]
        return url

    args = list(args)
    base = to_unicode(base)
    if base.startswith('http://') or base.startswith('https://'):
        prefix = '%s/' % base.rstrip('/')
    else:
        prefix = '/'
        args.insert(0, base)
    args = map(lambda o: to_unicode(o).strip('/'), args)
    url = '/'.join(args).replace('//', '/').replace(' ', '-')
    url = prefix + url.lstrip('/')
    url = to_unicode(fix_index(url.lower()))
    if url.endswith('/'):
        return url
    if settings.permalink.endswith('.html'):
        if url.endswith('.html'):
            return url
        if url.endswith('.xml'):
            return url
        return '%s.html' % url
    if settings.permalink.endswith('/'):
        if url.endswith('.html'):
            url = strip_suffix(fix_index(url), '.html')
        if url.endswith('.xml'):
            url = strip_suffix(url, '.xml')
        return '%s/' % url
    if url.endswith('.html'):
        return strip_suffix(fix_index(url), '.html')
    if url.endswith('.xml'):
        return strip_suffix(url, '.xml')
    return url
def _parse_meta(self, header):
    """Parse the rendered markdown *header* into a meta dict.

    The first <h1> becomes 'title'; each <li>'key: value' becomes an
    entry.

    Fix: the original indexed findall(...)[0] directly and raised
    IndexError on a missing title; now 'title' is None in that case,
    matching the guarded sibling implementation.
    """
    header = m.html(to_unicode(header))
    titles = re.findall(r'<h1>(.*)</h1>', header)
    title = titles[0] if titles else None
    meta = {'title': title}
    items = re.findall(r'<li>(.*?)</li>', header, re.S)
    for item in items:
        index = item.find(':')
        key = item[:index].rstrip()
        value = item[index + 1:].lstrip()
        meta[key] = value
    return meta
def __fetch_themes():
    """Fetch the liquidluck theme search results from the GitHub legacy
    API and return them as unicode.

    Works on both Python 2 (urllib.urlopen) and Python 3
    (urllib.request.urlopen). Fix: the response object is now closed
    after reading instead of being leaked.
    """
    import urllib
    if hasattr(urllib, 'urlopen'):
        urlopen = urllib.urlopen  # Python 2
    else:
        import urllib.request
        urlopen = urllib.request.urlopen  # Python 3
    response = urlopen(
        "https://api.github.com/legacy/repos/search/%22liquidluck-theme-%22"
    )
    try:
        content = response.read()
    finally:
        response.close()
    return to_unicode(content)
def replace(m):
    """Resolve the dotted attribute path in m.group(1) against the
    enclosing `post`; return '' on any missing attribute or falsy
    result."""
    value = post
    for attr in m.group(1).split('.'):
        if not hasattr(value, attr):
            return ''
        value = getattr(value, attr)
    return to_unicode(value) if value else ''
def __load_themes():
    """Load the theme registry, caching the fetched JSON in a temp file
    for 600 seconds."""
    import time
    import tempfile
    path = os.path.join(tempfile.gettempdir(), 'liquidluck.json')
    if not os.path.exists(path) or \
            os.stat(path).st_mtime + 600 < time.time():
        content = __fetch_themes()
        # `with` closes the handle even if write() raises
        with open(path, 'w') as f:
            f.write(content)
    with open(path) as f:
        content = to_unicode(f.read())
    return __filter_themes(content)
def markdown(text):
    """Render arale-flavoured markdown: 4-backtick fence languages get a
    trailing '+' marker and 5-backtick fences a '-' marker before the
    text is handed to the renderer (with TOC support enabled)."""
    source = to_unicode(text)
    source = re.sub(r'^````(\w+)', r'````\1+', source, flags=re.M)
    source = re.sub(r'^`````(\w+)', r'`````\1-', source, flags=re.M)
    renderer = AraleRender(flags=m.HTML_USE_XHTML | m.HTML_TOC)
    extensions = (m.EXT_FENCED_CODE | m.EXT_AUTOLINK | m.EXT_TABLES
                  | m.EXT_NO_INTRA_EMPHASIS | m.EXT_STRIKETHROUGH)
    return m.Markdown(renderer, extensions=extensions).render(source)
def markdown(text):
    """Render arale-flavoured markdown: 4-backtick fence languages get a
    trailing '+' marker and 5-backtick fences a '-' marker before the
    text is handed to the renderer (with TOC support enabled)."""
    source = to_unicode(text)
    source = re.sub(r'^````(\w+)', r'````\1+', source, flags=re.M)
    source = re.sub(r'^`````(\w+)', r'`````\1-', source, flags=re.M)
    renderer = AraleRender(flags=m.HTML_USE_XHTML | m.HTML_TOC)
    extensions = (m.EXT_FENCED_CODE | m.EXT_AUTOLINK | m.EXT_TABLES
                  | m.EXT_NO_INTRA_EMPHASIS | m.EXT_STRIKETHROUGH)
    return m.Markdown(renderer, extensions=extensions).render(source)
def __load_themes(force=False):
    """Load the theme registry, caching the fetched JSON in a temp file
    for 600 seconds (or refetching unconditionally when *force*).

    When the fetch does not contain a 'repositories' payload the raw
    response is returned as-is, without poisoning the cache.
    """
    import time
    import tempfile
    path = os.path.join(tempfile.gettempdir(), 'liquidluck.json')
    if not os.path.exists(path) or \
            os.stat(path).st_mtime + 600 < time.time() or \
            force:
        content = __fetch_themes()
        if "repositories" not in content:
            # fetch failed or API error: surface the raw response
            return content
        # `with` closes the handle even if write() raises
        with open(path, 'w') as f:
            f.write(utf8(content))
    with open(path) as f:
        content = to_unicode(f.read())
    return __filter_themes(content)
def replace(m):
    """Resolve the dotted attribute path in m.group(1) against the
    enclosing `post`; return '' on any missing attribute or falsy
    result. Single-digit ints are zero-padded (month/day values)."""
    value = post
    for attr in m.group(1).split('.'):
        if not hasattr(value, attr):
            return ''
        value = getattr(value, attr)
    if not value:
        return ''
    if isinstance(value, int) and value < 10:
        #: fix on month and date value
        value = '0%d' % value
    return to_unicode(value)
def _parse_meta(self, header):
    """Parse the rendered markdown *header* into a meta dict: the first
    <h1> becomes 'title' (None plus an error log when absent) and each
    <li>'key: value' becomes an entry."""
    rendered = m.html(to_unicode(header))
    found = re.findall(r'<h1>(.*)</h1>', rendered)
    if found:
        title = found[0]
    else:
        logging.error('There is no title')
        title = None
    meta = {'title': title}
    for item in re.findall(r'<li>(.*?)</li>', rendered, re.S):
        sep = item.find(':')
        meta[item[:sep].rstrip()] = item[sep + 1:].lstrip()
    return meta
def render(self):
    """Read self.filepath and split header/body at the first '---' line.

    Falls back to a headerless layout — line 1 '# Title', line 2
    '- tags: ...' — when the '---' scan yields an empty body.
    """
    header = ''
    body = ''
    # the whole scan (including the fallback re-scan via seek) needs the
    # handle open, so everything stays inside the `with` block
    with open(self.filepath) as f:
        logging.debug('read ' + self.relative_filepath)
        recording = True
        for line in f:
            if recording and line.startswith('---'):
                recording = False
            elif recording:
                header += line
            else:
                body += line
        # Support my own style of writing articles like this:
        # (note the missing --- to end the header section)
        #
        # # Title Here
        # - tags: tag 1, tags 2
        #
        # Body text here
        if not len(body):
            header = ''
            body = ''
            f.seek(0)
            linenumber = 0
            for line in f:
                linenumber += 1
                if linenumber == 1 and line.startswith('# '):
                    header += line
                elif linenumber == 2 and line.startswith('- tags:'):
                    header += line
                else:
                    body += line
    body = to_unicode(body)
    meta = self._parse_meta(header, body)
    content = self._parse_content(body)
    return self.post_class(self.filepath, content, meta=meta)
def render(self):
    """Read self.filepath, split the '---'-terminated header from the
    body, and build a post object from the parsed meta and content."""
    header = ''
    body = ''
    # `with` closes the handle even if parsing raises
    with open(self.filepath) as f:
        logging.debug('read ' + self.relative_filepath)
        recording = True
        for line in f:
            if recording and line.startswith('---'):
                # first '---' line ends the header section
                recording = False
            elif recording:
                header += line
            else:
                body += line
    body = to_unicode(body)
    meta = self._parse_meta(header, body)
    content = self._parse_content(body)
    return self.post_class(self.filepath, content, meta=meta)