def generate(self, request):
    """Render the feed template (e.g. atom/rss ``<type>.xml``) for the most
    recent non-draft entries and yield (html, path).

    Skips rendering when the output exists and neither an entry, the
    template, nor the set of permalinks has changed since the last run."""
    entrylist = filter(lambda e: not e.draft, request['entrylist'])
    entrylist = list(entrylist)[0:self.num_entries]
    tt = self.env.engine.fromfile('%s.xml' % self.type)
    path = joinurl(self.conf['output_dir'], self.path)
    if not path.endswith(('.xml', '.html')):
        path = joinurl(path, 'index.html')

    # detect removed entries: hash the permalink set and compare with the
    # memoized value from the previous run
    hv = md5(*entrylist, attr=lambda e: e.permalink)
    if memoize(path) != hv:
        memoize(path, hv)
        has_changed = True
    else:
        has_changed = False

    if (exists(path) and not filter(lambda e: e.has_changed, entrylist)
            and not has_changed and not tt.has_changed):
        event.skip(path)
        raise StopIteration

    # fall back to "now" (UTC) when there are no entries at all
    updated = entrylist[0].date if entrylist else datetime.utcnow()
    html = tt.render(conf=self.conf,
                     env=union(self.env, route=self.path, updated=updated,
                               entrylist=entrylist))
    yield html, path
def generate(self, conf, env, request):
    """Write the client-side search assets: one plain-text source file per
    entry, the ``search.js`` driver with path/entrylist substituted in, and
    one JSON index shard per initial letter plus a catch-all ``_.js``."""
    if not env.options.search:
        raise StopIteration()

    tree, meta = index(request['entrylist'])

    for i, entry in enumerate(request['entrylist']):
        yield io.StringIO(entry.content), \
            joinurl(conf['output_dir'], self.path, 'src', '%i.txt' % i)

    # CST algorithm with `meta` data
    with io.open(join(dirname(__file__), 'search.js'), encoding='utf-8') as fp:
        javascript = fp.read()

    # substitute the placeholders in the bundled driver script
    fp = io.StringIO((javascript.replace('%% PATH %%', json.dumps(self.path)).replace(
        '%% ENTRYLIST %%', json.dumps(meta))))
    yield fp, joinurl(conf['output_dir'], self.path, 'search.js')

    for char in string.ascii_lowercase:
        if char in tree:
            # NOTE(review): json.dump into a BytesIO only works on Python 2,
            # where json emits byte strings -- confirm target interpreter
            fp = io.BytesIO()
            json.dump(tree.pop(char), fp)
            yield fp, joinurl(conf['output_dir'], self.path, char + '.js')

    # whatever remains (keys outside a-z) goes into the catch-all shard
    fp = io.BytesIO()
    json.dump(tree, fp)
    yield fp, joinurl(conf['output_dir'], self.path, '_.js')
def generate(self, request): """In this step, we filter drafted entries (they should not be included into the Sitemap) and test each url pattern for success and write the corresponding changefreq and priority to the Sitemap.""" drafted = set([ joinurl(self.conf['output_dir'], e.permalink, 'index.html') for e in request['entrylist'] + request['pages'] if e.draft ]) path = joinurl(self.conf['output_dir'], self.path) sm = Map() if exists(path) and not self.has_changed: event.skip(path) raise StopIteration for fname in self.files: if fname in drafted: continue url = join(self.conf['www_root'], fname.replace(self.conf['output_dir'], '')) for view in self.views: if self.patterns[view].match(url): priority, changefreq = self.scores.get( view, (0.5, 'weekly')) sm.add(rchop(url, 'index.html'), getmtime(fname), changefreq, priority) break yield sm.read(), path
def generate(self, request): """In this step, we filter drafted entries (they should not be included into the Sitemap) and test each url pattern for success and write the corresponding changefreq and priority to the Sitemap.""" drafted = set([joinurl(self.conf['output_dir'], e.permalink, 'index.html') for e in request['entrylist'] + request['pages'] if e.draft]) path = joinurl(self.conf['output_dir'], self.path) sm = Map() if exists(path) and not self.has_changed: event.skip(path) raise StopIteration for fname in self.files: if fname in drafted: continue url = join(self.conf['www_root'], fname.replace(self.conf['output_dir'], '')) for view in self.views: if self.patterns[view].match(url): priority, changefreq = self.scores.get(view, (0.5, 'weekly')) sm.add(rchop(url, 'index.html'), getmtime(fname), changefreq, priority) break yield sm.read(), path
def generate(self, request):
    """Render every entry with the 'main.html' template, aborting on
    permalink (title) collisions and skipping unmodified output files."""
    tt = self.env.jinja2.get_template('main.html')

    entrylist = request['entrylist']
    pathes = dict()

    for entry in entrylist:
        if entry.permalink != expand(self.path, entry):
            # a custom permalink set on the entry overrides the view route
            p = joinurl(self.conf['output_dir'], entry.permalink)
        else:
            p = joinurl(self.conf['output_dir'], expand(self.path, entry))
        if p.endswith('/'):
            p = joinurl(p, 'index.html')
        if p in pathes:
            # two entries would write to the same file
            raise AcrylamidException("title collision %r in %r"
                                     % (entry.permalink, entry.filename))
        pathes[p] = entry

    for path, entry in pathes.iteritems():
        if exists(path) and not entry.has_changed and not tt.has_changed:
            event.skip(path)
            continue
        html = tt.render(env=union(self.env, entrylist=[entry], type='entry'),
                         conf=self.conf, entry=entry)
        yield html, path
def generate(self, conf, env, request):
    """Render one page per configured language; the default language is
    served from the route with its language prefix stripped."""
    for lang in env.langs:
        for entry in self._get_page_list(request, lang):
            path = ''
            route = strip_default_lang(expand(self.path, entry), self.conf)
            if entry.hasproperty('permalink'):
                path = joinurl(self.conf['output_dir'], entry.permalink)
            elif lang == self.conf.lang_code:
                # default language lives directly under the stripped route
                path = joinurl(self.conf['output_dir'], route, '/')
            else:
                path = joinurl(self.conf['output_dir'], expand(self.path, entry))
            if path.endswith('/'):
                path = joinurl(path, 'index.html')
            # expose the active language/route to the templates
            request['env']['path'] = '/'
            request['env']['lang'] = lang
            request['env']['active_route'] = route
            tt = env.engine.fromfile(self.template)
            html = tt.render(conf=conf, entry=entry,
                             env=union(env, type=self.__class__.__name__.lower(),
                                       route=route))
            yield html, path
def generate(self, conf, env, request):
    """Emit static search files: per-entry plain-text sources, the
    placeholder-substituted ``search.js`` driver and per-letter JSON index
    shards (remainder in ``_.js``)."""
    if not env.options.search:
        raise StopIteration()

    tree, meta = index(request['entrylist'])

    for i, entry in enumerate(request['entrylist']):
        yield io.StringIO(entry.content), \
            joinurl(conf['output_dir'], self.path, 'src', '%i.txt' % i)

    # CST algorithm with `meta` data
    with io.open(join(dirname(__file__), 'search.js'), encoding='utf-8') as fp:
        javascript = fp.read()

    fp = io.StringIO((javascript
                      .replace('%% PATH %%', json.dumps(self.path))
                      .replace('%% ENTRYLIST %%', json.dumps(meta))))
    yield fp, joinurl(conf['output_dir'], self.path, 'search.js')

    for char in string.ascii_lowercase:
        if char in tree:
            # NOTE(review): json.dump into BytesIO assumes Python 2 byte
            # strings -- confirm target interpreter
            fp = io.BytesIO()
            json.dump(tree.pop(char), fp)
            yield fp, joinurl(conf['output_dir'], self.path, char + '.js')

    # remaining keys (non a-z initials) collected in the catch-all shard
    fp = io.BytesIO()
    json.dump(tree, fp)
    yield fp, joinurl(conf['output_dir'], self.path, '_.js')
def generate(self, conf, env, data): """In this step, we filter drafted entries (they should not be included into the Sitemap) and test each url pattern for success and write the corresponding changefreq and priority to the Sitemap.""" drafted = set([joinurl(conf['output_dir'], e.permalink, 'index.html') for e in data.get('drafts', [])]) path = joinurl(conf['output_dir'], self.path) sm = Map() if exists(path) and not self.modified: event.skip(path) raise StopIteration for fname in self.files: if fname in drafted: continue url = join(conf['www_root'], fname.replace(conf['output_dir'], '')) for view in self.views: if any(ifilter(lambda pat: pat.match(url), self.patterns[view])): priority, changefreq = self.scores.get(view.name, (0.5, 'weekly')) sm.add(rchop(url, 'index.html'), getmtime(fname), changefreq, priority) break sm.finish() yield sm, path
def absolutify(part):
    """Make *part* an absolute URL below ``www_root`` unless it already
    carries a scheme (e.g. ``http://``)."""
    # site-root-relative reference: anchor at the web root
    if part.startswith('/'):
        return joinurl(self.www_root, part)
    scheme_pos, slash_pos = part.find('://'), part.find('/')
    # '://' sitting right before the first '/' marks a fully-qualified URL
    if scheme_pos == slash_pos - 1:
        return part
    # otherwise it is relative to the entry's permalink
    return joinurl(self.www_root, entry.permalink, part)
def run(conf, env, options): """Subcommand: ping -- notify external ressources via Pingback etc.""" initialize(conf, env) # we access the cache, so we must initialize first entrylist = sorted([Entry(e, conf) for e in filelist(conf['content_dir'], conf.get('entries_ignore', []))], key=lambda k: k.date, reverse=True) entrylist = [entry for entry in entrylist if not entry.draft] print joinurl(conf['www_root'], entrylist[0].permalink) links = re.findall('https?://[^ ]+', entrylist[0].source) print links
def generate(self, conf, env, data):
    """Render each entry of this view's type to its output path, abort on
    permalink collisions, skip unmodified output and copy per-entry
    resources when the entry requests it via the 'copy' property."""
    pathes, entrylist = set(), data[self.type]
    unmodified = not env.modified and not conf.modified

    for i, entry in enumerate(entrylist):
        if entry.hasproperty('permalink'):
            path = joinurl(conf['output_dir'], entry.permalink)
        else:
            path = joinurl(conf['output_dir'], expand(self.path, entry))

        # two entries expanded to the same output file -> clean up and abort
        if isfile(path) and path in pathes:
            try:
                os.remove(path)
            finally:
                other = [e.filename for e in entrylist
                         if e is not entry and e.permalink == entry.permalink][0]
                log.error("title collision %s caused by %s and %s",
                          entry.permalink, entry.filename, other)
                raise SystemExit

        pathes.add(path)
        next, prev = self.next(entrylist, i), self.prev(entrylist, i)

        # per-entry template
        tt = env.engine.fromfile(env, entry.props.get('layout', self.template))
        if all([isfile(path), unmodified, not tt.modified, not entry.modified,
                not modified(*references(entry))]):
            event.skip(self.name, path)
        else:
            html = tt.render(conf=conf, entry=entry,
                             env=union(env, entrylist=[entry],
                                       type=self.__class__.__name__.lower(),
                                       prev=prev, next=next,
                                       route=expand(self.path, entry)))
            yield html, path

        # check if any resources need to be moved
        if entry.hasproperty('copy'):
            for res_src in entry.resources:
                res_dest = join(dirname(path), basename(res_src))
                # Note, presence of res_src check in FileReader.getresources
                if isfile(res_dest) and getmtime(res_dest) > getmtime(res_src):
                    event.skip(self.name, res_dest)
                    continue
                try:
                    fp = io.open(res_src, 'rb')
                    # use mkfile rather than yield so different ns can be
                    # specified (and filtered by sitemap)
                    mkfile(fp, res_dest, ns='resource', force=env.options.force,
                           dryrun=env.options.dryrun)
                except IOError as e:
                    log.warn("Failed to copy resource '%s' whilst processing '%s' (%s)"
                             % (res_src, entry.filename, e.strerror))
def generate(self, conf, env, data):
    """Render entries once per language, substituting a translation when
    available and falling back to the source entry otherwise."""
    for lang in env.langs:
        entrylist = []
        for entry in data[self.type]:
            try:
                e = entry_for_lang(data, lang, entry)
                entrylist.append(e)
            except TranslationNotFound:
                # no translation for this language: reuse the original
                entrylist.append(entry)

        unmodified = not env.modified and not conf.modified
        for i, entry in enumerate(entrylist):
            route = strip_default_lang(expand(self.path, entry), self.conf)
            if entry.hasproperty('permalink'):
                path = joinurl(conf['output_dir'], entry.permalink)
            elif lang == self.conf.lang_code:
                # default language is served from the unprefixed route
                path = joinurl(self.conf['output_dir'], route, '/')
                entry.permalink = route
            else:
                path = joinurl(self.conf['output_dir'], expand(self.path, entry))
                entry.permalink = route
            if path.endswith('/'):
                path = joinurl(path, 'index.html')

            next, prev = self.next(entrylist, i), self.prev(entrylist, i)

            # expose the active language/route to the templates
            env['lang'] = lang
            env['active_route'] = route

            # per-entry template
            tt = env.engine.fromfile(entry.props.get('layout', self.template))
            if all([isfile(path), unmodified, not tt.modified,
                    not entry.modified, not modified(*references(entry))]):
                event.skip(self.name, path)
                continue
            html = tt.render(conf=conf, entry=entry,
                             env=union(env, entrylist=[entry],
                                       type=self.__class__.__name__.lower(),
                                       prev=prev, next=next,
                                       route=expand(self.path, entry)))
            yield html, path
def context(self, env, request): """"Here we prepare the detection pattern and active views. For each view we convert ``view.path`` to a regular expression pattern using simple replacements.""" patterns = dict() replacements = [(":year", "\d+"), (":month", "\d+"), (":day", "\d+"), (":[^/]+", "[^/]+")] for name, view in self.env.views.iteritems(): permalink = view.path for pat, repl in replacements: permalink = re.sub(pat, repl, permalink) if permalink.endswith("/"): permalink += "index.html" patterns[name] = re.compile("^" + joinurl(re.escape(self.conf["www_root"]), permalink) + "$") self.patterns = patterns self.views = [] # sort active views by frequency views = env.views.keys() for v in "entry", "tag", "index": try: self.views.append(views.pop(views.index(v))) except ValueError: pass self.views.extend(views) return env
def generate(self, conf, env, data): """In this step, we filter drafted entries (they should not be included into the Sitemap) and write the pre-defined priorities to the map.""" path = joinurl(conf['output_dir'], self.path) sm = Map() if exists(path) and not self.modified and not conf.modified: event.skip('sitemap', path) raise StopIteration for ns, fname in self.files: if ns == 'draft': continue permalink = '/' + fname.replace(conf['output_dir'], '') url = conf['www_root'] + permalink priority, changefreq = self.scores.get(ns, (0.5, 'weekly')) if self.imgext: images = [x for x in self.mapping.get(permalink, []) if splitext(x)[1].lower() in self.imgext] sm.add(rchop(url, 'index.html'), getmtime(fname), changefreq, priority, images) else: sm.add(rchop(url, 'index.html'), getmtime(fname), changefreq, priority) sm.finish() yield sm, path
def generate(self, request):
    """Render the articles overview: all published entries grouped by
    (year, month), newest first; skip when nothing changed since the
    memoized hash of the last run."""
    entrylist = sorted((e for e in request['entrylist'] if not e.draft),
                       key=lambda k: k.date, reverse=True)

    tt = self.env.engine.fromfile(self.template)
    path = joinurl(self.conf['output_dir'], self.path, 'index.html')

    hv = md5(*entrylist, attr=lambda o: o.md5)
    rv = memoize('articles-hash')
    if rv == hv:
        has_changed = False
    else:
        # save new value for next run
        memoize('articles-hash', hv)
        has_changed = True

    if exists(path) and not has_changed and not tt.has_changed:
        event.skip(path)
        raise StopIteration

    articles = {}
    for entry in entrylist:
        articles.setdefault((entry.year, entry.imonth), []).append(entry)

    route = self.path
    html = tt.render(conf=self.conf, articles=articles,
                     env=union(self.env, num_entries=len(entrylist),
                               route=route))
    yield html, path
def generate(self, request): """Creates nicely paged listing of your posts. First page is the index.hml used to have this nice url: http://yourblog.com/ with a recent list of your (e.g. summarized) Posts. Other pages are enumerated to /page/n+1 """ ipp = self.items_per_page tt = self.env.jinja2.get_template('main.html') entrylist = [entry for entry in request['entrylist'] if not entry.draft] paginator = paginate(entrylist, ipp, orphans=self.conf['default_orphans']) for (next, curr, prev), entries, has_changed in paginator: # curr = current page, next = newer pages, prev = older pages if next is not None: next = self.path.rstrip('/') if next == 1 \ else expand(self.pagination, {'num': next}) curr = self.path if curr == 1 else expand(self.pagination, {'num': curr}) prev = None if prev is None else expand(self.pagination, {'num': prev}) path = joinurl(self.conf['output_dir'], curr, 'index.html') if exists(path) and not has_changed and not tt.has_changed: event.skip(path) continue html = tt.render(conf=self.conf, env=union(self.env, entrylist=entries, type='index', prev=prev, curr=curr, next=next, items_per_page=ipp, num_entries=len(entrylist))) yield html, path
def context(self, env, request): """"Here we prepare the detection pattern and active views. For each view we convert ``view.path`` to a regular expression pattern using simple replacements.""" patterns = dict() replacements = [(':year', '\d+'), (':month', '\d+'), (':day', '\d+'), (':[^/]+', '[^/]+')] for name, view in self.env.views.iteritems(): permalink = view.path for pat, repl in replacements: permalink = re.sub(pat, repl, permalink) if permalink.endswith('/'): permalink += 'index.html' patterns[name] = re.compile('^' + joinurl(re.escape(self.conf['www_root']), permalink) + '$') self.patterns = patterns self.views = [] # sort active views by frequency views = env.views.keys() for v in 'entry', 'tag', 'index': try: self.views.append(views.pop(views.index(v))) except ValueError: pass self.views.extend(views)
def context(self, env, request): """"Here we prepare the detection pattern and active views. For each view we convert ``view.path`` to a regular expression pattern using simple replacements.""" patterns = dict() replacements = [(':year', '\d+'), (':month', '\d+'), (':day', '\d+'), (':[^/]+', '[^/]+')] for name, view in self.env.views.iteritems(): permalink = view.path for pat, repl in replacements: permalink = re.sub(pat, repl, permalink) if permalink.endswith('/'): permalink += 'index.html' patterns[name] = re.compile( '^' + joinurl(re.escape(self.conf['www_root']), permalink) + '$') self.patterns = patterns self.views = [] # sort active views by frequency views = env.views.keys() for v in 'entry', 'tag', 'index': try: self.views.append(views.pop(views.index(v))) except ValueError: pass self.views.extend(views) return env
def tweet(entry, conf, dryrun=False):
    """Send a tweet with the title, link and tags from an entry. The first
    time you need to authorize Acrylamid but than it works without any
    interaction."""
    # application (consumer) credentials -- shipped with the client by design
    key = "6k00FRe6w4SZfqEzzzyZVA"
    secret = "fzRfQcqQX4gcZziyLeoI5wSbnFb7GGj2oEh10hnjPUo"

    creds = os.path.expanduser('~/.twitter_oauth')
    if not os.path.exists(creds):
        # one-time OAuth dance; user token gets cached in ~/.twitter_oauth
        twitter.oauth_dance("Acrylamid", key, secret, creds)

    oauth_token, oauth_token_secret = twitter.read_token_file(creds)
    t = twitter.Twitter(auth=twitter.OAuth(oauth_token, oauth_token_secret, key, secret))

    tweet = u"New Blog Entry: {0} {1} {2}".format(entry.title,
            helpers.joinurl(conf['www_root'], entry.permalink),
            ' '.join([u'#' + helpers.safeslug(tag) for tag in entry.tags]))

    print(' ', bold(blue("tweet ")), end='')
    print('\n'.join(wrap(tweet.encode('utf8'), subsequent_indent=' '*13)))

    if not dryrun:
        try:
            t.statuses.update(status=tweet.encode('utf8'))
        except twitter.api.TwitterError as e:
            try:
                log.warn("%s" % json.loads(e.response_data)['error'])
            except (ValueError, TypeError):
                log.warn("Twitter: something went wrong...")
def generate(self, request):
    """Render the paged index listing with typed prev/curr/next links and
    skip pages whose output is up to date."""
    ipp = self.items_per_page
    tt = self.env.engine.fromfile(self.template)

    entrylist = [entry for entry in request['entrylist'] if not entry.draft]
    paginator = paginate(entrylist, ipp, orphans=self.conf['default_orphans'])
    route = self.path

    for (next, curr, prev), entries, has_changed in paginator:
        # curr = current page, next = newer pages, prev = older pages
        next = None if next is None \
            else link(u'« Next', self.path.rstrip('/')) if next == 1 \
            else link(u'« Next', expand(self.pagination, {'num': next}))
        # NOTE(review): the else-branch passes a single argument to link --
        # presumably link(title) defaults href to the title; confirm
        curr = link(curr, self.path) if curr == 1 \
            else link(expand(self.pagination, {'num': curr}))
        prev = None if prev is None \
            else link(u'Previous »', expand(self.pagination, {'num': prev}))

        path = joinurl(self.conf['output_dir'], curr.href, 'index.html')

        if exists(path) and not has_changed and not tt.has_changed:
            event.skip(path)
            continue

        html = tt.render(conf=self.conf,
                         env=union(self.env, entrylist=entries, type='index',
                                   prev=prev, curr=curr, next=next,
                                   items_per_page=ipp,
                                   num_entries=len(entrylist), route=route))
        yield html, path
def generate(self, conf, env, data, **kwargs):
    """Render paginated listings; extra *kwargs* are substituted into the
    route/pagination templates. Without a pagination route everything is
    written to a single page."""
    if self.pagination is None:
        # no pagination route configured: cram everything onto one page
        self.items_per_page = 2**32
        self.pagination = self.path

    ipp = self.items_per_page
    tt = env.engine.fromfile(env, self.template)

    route = expand(self.path, kwargs)
    entrylist = data['entrylist']
    paginator = paginate(entrylist, ipp, route, conf.default_orphans)

    for (next, curr, prev), entrylist, modified in paginator:
        # page 1 maps to the bare route, later pages to the pagination route
        next = None if next is None \
            else link(u'Next', expand(self.path, kwargs)) if next == 1 \
            else link(u'Next', expand(self.pagination, union({'num': next}, kwargs)))
        curr = link(curr, expand(self.path, kwargs)) if curr == 1 \
            else link(curr, expand(self.pagination, union({'num': curr}, kwargs)))
        prev = None if prev is None \
            else link(u'Previous', expand(self.pagination, union({'num': prev}, kwargs)))

        path = joinurl(conf['output_dir'], curr.href)

        if isfile(path) and not (modified or tt.modified or env.modified
                                 or conf.modified):
            event.skip(self.__class__.__name__.lower(), path)
            continue

        # hand the full local namespace (plus kwargs) to the template
        html = self.render(conf, env, union(locals(), kwargs))
        yield html, path
def generate(self, request):
    """Render the articles overview grouped by (year, month); an entry-hash
    is memoized to detect content changes between runs."""
    entrylist = sorted((e for e in request['entrylist'] if not e.draft),
                       key=lambda k: k.date, reverse=True)

    tt = self.env.tt.fromfile(self.template)
    path = joinurl(self.conf['output_dir'], self.path, 'index.html')

    hv = md5(*entrylist, attr=lambda o: o.md5)
    rv = memoize('articles-hash')
    if rv == hv:
        has_changed = False
    else:
        # save new value for next run
        memoize('articles-hash', hv)
        has_changed = True

    if exists(path) and not has_changed and not tt.has_changed:
        event.skip(path)
        raise StopIteration

    articles = {}
    for entry in entrylist:
        # NOTE(review): groups by entry.month here while other revisions use
        # entry.imonth -- confirm which key type the template expects
        articles.setdefault((entry.year, entry.month), []).append(entry)

    html = tt.render(conf=self.conf, articles=articles,
                     env=union(self.env, num_entries=len(entrylist)))
    yield html, path
def generate(self, conf, env, data):
    """Render the paged index listing; a page is skipped only when neither
    its entries, the template, env nor conf were modified."""
    ipp = self.items_per_page
    tt = env.engine.fromfile(self.template)

    entrylist = data['entrylist']
    paginator = paginate(entrylist, ipp, self.path, conf.default_orphans)
    route = self.path

    for (next, curr, prev), entries, modified in paginator:
        # curr = current page, next = newer pages, prev = older pages
        next = None if next is None \
            else link(u'« Next', self.path.rstrip('/')) if next == 1 \
            else link(u'« Next', expand(self.pagination, {'num': next}))
        # NOTE(review): single-argument link() call -- presumably href
        # defaults to the title; confirm against the link helper
        curr = link(curr, self.path) if curr == 1 \
            else link(expand(self.pagination, {'num': curr}))
        prev = None if prev is None \
            else link(u'Previous »', expand(self.pagination, {'num': prev}))

        path = joinurl(conf['output_dir'], curr.href, 'index.html')

        if isfile(path) and not (modified or tt.modified or env.modified
                                 or conf.modified):
            event.skip(path)
            continue

        html = tt.render(conf=conf,
                         env=union(env, entrylist=entries, type='index',
                                   prev=prev, curr=curr, next=next,
                                   items_per_page=ipp,
                                   num_entries=len(entrylist), route=route))
        yield html, path
def tweet(entry, conf, dryrun=False):
    """Send a tweet with the title, link and tags from an entry. The first
    time you need to authorize Acrylamid but than it works without any
    interaction."""
    # application (consumer) credentials -- shipped with the client by design
    key = "6k00FRe6w4SZfqEzzzyZVA"
    secret = "fzRfQcqQX4gcZziyLeoI5wSbnFb7GGj2oEh10hnjPUo"

    creds = os.path.expanduser('~/.twitter_oauth')
    if not os.path.exists(creds):
        # one-time OAuth dance; user token gets cached in ~/.twitter_oauth
        twitter.oauth_dance("Acrylamid", key, secret, creds)

    oauth_token, oauth_token_secret = twitter.read_token_file(creds)
    t = twitter.Twitter(
        auth=twitter.OAuth(oauth_token, oauth_token_secret, key, secret))

    tweet = u"New Blog Entry: {0} {1} {2}".format(
        entry.title, helpers.joinurl(conf['www_root'], entry.permalink),
        ' '.join([u'#' + helpers.safeslug(tag) for tag in entry.tags]))

    print(' ', bold(blue("tweet ")), end='')
    print('\n'.join(wrap(tweet.encode('utf8'), subsequent_indent=' ' * 13)))

    if not dryrun:
        try:
            t.statuses.update(status=tweet.encode('utf8'))
        except twitter.api.TwitterError as e:
            try:
                log.warn("%s" % json.loads(e.response_data)['error'])
            except (ValueError, TypeError):
                log.warn("Twitter: something went wrong...")
def generate(self, request):
    """Render published entries and drafts separately; drafts do not get
    prev/next navigation. Detects output-path (title) collisions."""
    tt = self.env.engine.fromfile(self.template)

    pathes = set()
    nondrafts, drafts = [], []
    for entry in request[self.type]:
        if not entry.draft:
            nondrafts.append(entry)
        else:
            drafts.append(entry)

    # isdraft: 0 for published, 1 for drafts (index via enumerate)
    for isdraft, entrylist in enumerate([nondrafts, drafts]):
        has_changed = self.has_changed(entrylist, 'draft' if isdraft else 'entry')
        for i, entry in enumerate(entrylist):
            if entry.hasproperty('permalink'):
                path = joinurl(self.conf['output_dir'], entry.permalink)
            else:
                path = joinurl(self.conf['output_dir'], expand(self.path, entry))
            if path.endswith('/'):
                path = joinurl(path, 'index.html')

            # collision: remove the clashing file, then abort with details
            if isfile(path) and path in pathes:
                try:
                    os.remove(path)
                finally:
                    f = lambda e: e is not entry and e.permalink == entry.permalink
                    raise AcrylamidException("title collision %r in %r with %r."
                                             % (entry.permalink, entry.filename,
                                                filter(f, entrylist)[0].filename))

            pathes.add(path)
            next = self.next(entrylist, i) if not isdraft else None
            prev = self.prev(entrylist, i) if not isdraft else None

            if isfile(path) and not any([has_changed, entry.has_changed,
                                         tt.has_changed]):
                event.skip(path)
                continue

            route = expand(self.path, entry)
            html = tt.render(conf=self.conf, entry=entry,
                             env=union(self.env, entrylist=[entry],
                                       type=self.__class__.__name__.lower(),
                                       prev=prev, next=next, route=route))
            yield html, path
def generate(self, request):
    """Render single entries with date-ordered prev/next links; a change in
    the memoized permalink set invalidates all entries."""
    tt = self.env.tt.fromfile(self.template)

    entrylist = request['entrylist']
    pathes = dict()

    for entry in entrylist:
        if entry.permalink != expand(self.path, entry):
            # entry carries a custom permalink overriding the view route
            p = joinurl(self.conf['output_dir'], entry.permalink)
        else:
            p = joinurl(self.conf['output_dir'], expand(self.path, entry))
        if p.endswith('/'):
            p = joinurl(p, 'index.html')
        if p in pathes:
            raise AcrylamidException("title collision %r in %r"
                                     % (entry.permalink, entry.filename))
        pathes[p] = entry

    has_changed = False
    # detect added/removed/renamed permalinks between runs
    hv = md5(*entrylist, attr=lambda e: e.permalink)
    if memoize('entry-permalinks') != hv:
        memoize('entry-permalinks', hv)
        has_changed = True

    pathes = sorted(pathes.iteritems(), key=lambda k: k[1].date, reverse=True)
    for i, (path, entry) in enumerate(pathes):
        # neighbours in date order; the newest entry has no 'next'
        next = None if i == 0 else link(entrylist[i-1].title,
                                        entrylist[i-1].permalink.rstrip('/'),
                                        entrylist[i-1])
        prev = None if i == len(pathes) - 1 else link(entrylist[i+1].title,
                                                      entrylist[i+1].permalink.rstrip('/'),
                                                      entrylist[i+1])

        if exists(path) and not any([has_changed, entry.has_changed,
                                     tt.has_changed]):
            event.skip(path)
            continue

        html = tt.render(conf=self.conf, entry=entry,
                         env=union(self.env, entrylist=[entry], type='entry',
                                   prev=prev, next=next))
        yield html, path
def convert(self, link, replacements):
    """Translate a route *link* into a compiled, anchored regular
    expression rooted at the configured ``www_root``."""
    # apply every (pattern, replacement) pair in the given order
    for pattern, substitute in replacements:
        link = re.sub(pattern, substitute, link)
    # a trailing slash maps to the directory index document
    if link.endswith('/'):
        link = link + 'index.html'
    anchored = '^' + joinurl(re.escape(self.conf['www_root']), link) + '$'
    return re.compile(anchored)
def absolutify(part):
    """Return *part* as an absolute URL unless it already has a scheme."""
    # site-absolute reference: just prefix the web root
    if part.startswith('/'):
        return self.conf.www_root + part
    scheme_at, first_slash = part.find('://'), part.find('/')
    # '://' immediately preceding the first '/' marks a fully-qualified URL
    if scheme_at == first_slash - 1:
        return part
    # relative reference: resolve against the entry's permalink
    return self.conf.www_root + joinurl(entry.permalink, part)
def generate(self, request): """Creates paged listing by tag.""" ipp = self.items_per_page tt = self.env.engine.fromfile(self.template) for tag in self.tags: entrylist = [entry for entry in self.tags[tag]] paginator = paginate(entrylist, ipp, salt=tag, orphans=self.conf["default_orphans"]) route = expand(self.path, {"name": tag}).rstrip("/") for (next, curr, prev), entries, has_changed in paginator: # e.g.: curr = /page/3, next = /page/2, prev = /page/4 next = ( None if next is None else link(u"« Next", expand(self.path, {"name": tag}).rstrip("/")) if next == 1 else link(u"« Next", expand(self.pagination, {"name": tag, "num": next})) ) curr = ( link(curr, expand(self.path, {"name": tag})) if curr == 1 else link(expand(self.pagination, {"num": curr, "name": tag})) ) prev = ( None if prev is None else link(u"Previous »", expand(self.pagination, {"name": tag, "num": prev})) ) path = joinurl(self.conf["output_dir"], curr, "index.html") if exists(path) and not has_changed and not tt.has_changed: event.skip(path) continue html = tt.render( conf=self.conf, env=union( self.env, entrylist=entries, type="tag", prev=prev, curr=curr, next=next, tag=tag, items_per_page=ipp, num_entries=len(entrylist), route=route, ), ) yield html, path
def test_joinurl(self):
    """joinurl collapses duplicate slashes while keeping a leading and a
    trailing slash intact."""
    cases = [
        (('hello', 'world'), 'hello/world'),
        (('/hello', 'world'), '/hello/world'),
        (('hello', '/world'), 'hello/world'),
        (('/hello', '/world'), '/hello/world'),
        (('/hello/', '/world/'), '/hello/world/'),
    ]
    for parts, expected in cases:
        assert helpers.joinurl(*parts) == expected
def generate(self, request):
    """Render the feed template (atom/rss) for the newest non-draft entries
    and yield (html, path); skip when neither entries nor template changed."""
    entrylist = filter(lambda e: not e.draft, request['entrylist'])
    entrylist = list(entrylist)[0:self.num_entries]
    tt = self.env.engine.fromfile('%s.xml' % self.type)

    path = joinurl(self.conf['output_dir'], self.path)
    if not path.endswith(('.xml', '.html')):
        path = joinurl(path, 'index.html')

    if exists(path) and not filter(lambda e: e.has_changed, entrylist):
        if not tt.has_changed:
            event.skip(path)
            raise StopIteration

    # fall back to the current UTC time when there are no entries at all
    updated = entrylist[0].date if entrylist else datetime.utcnow()
    html = tt.render(conf=self.conf,
                     env=union(self.env, route=self.path, updated=updated,
                               entrylist=entrylist))
    yield html, path
def generate(self, request):
    """Render the feed template (atom/rss) for the newest non-draft entries
    and yield (html, path); skip when neither entries nor template changed."""
    entrylist = filter(lambda e: not e.draft, request['entrylist'])
    entrylist = list(entrylist)[0:self.num_entries]
    tt = self.env.engine.fromfile('%s.xml' % self.type)

    path = joinurl(self.conf['output_dir'], self.path)
    if not path.endswith(('.xml', '.html')):
        path = joinurl(path, 'index.html')

    if exists(path) and not filter(lambda e: e.has_changed, entrylist):
        if not tt.has_changed:
            event.skip(path)
            raise StopIteration

    # bug fix: the fallback timestamp for an empty entrylist must be UTC
    # (matches the sibling feed implementations using datetime.utcnow());
    # datetime.now() yields naive local time, wrong in a feed's <updated>
    updated = entrylist[0].date if entrylist else datetime.utcnow()
    html = tt.render(conf=self.conf,
                     env=union(self.env, path=self.path, updated=updated,
                               entrylist=entrylist))
    yield html, path
def generate(self, request):
    """Render each entry of this view's type with prev/next navigation,
    aborting on output-path collisions and skipping unmodified output."""
    tt = self.env.engine.fromfile(self.template)

    entrylist = request[self.type]
    pathes = set()
    has_changed = self.has_changed(entrylist)

    for i, entry in enumerate(entrylist):
        if entry.hasproperty('permalink'):
            path = joinurl(self.conf['output_dir'], entry.permalink)
        else:
            path = joinurl(self.conf['output_dir'], expand(self.path, entry))
        if path.endswith('/'):
            path = joinurl(path, 'index.html')

        # collision: remove the clashing file, then abort with details
        if isfile(path) and path in pathes:
            try:
                os.remove(path)
            finally:
                f = lambda e: e is not entry and e.permalink == entry.permalink
                raise AcrylamidException("title collision %r in %r with %r."
                                         % (entry.permalink, entry.filename,
                                            filter(f, entrylist)[0].filename))

        next, prev = self.next(entrylist, i), self.prev(entrylist, i)

        # detect collisions
        pathes.add(path)

        if isfile(path) and not any([has_changed, entry.has_changed,
                                     tt.has_changed]):
            event.skip(path)
            continue

        route = expand(self.path, entry)
        html = tt.render(conf=self.conf, entry=entry,
                         env=union(self.env, entrylist=[entry],
                                   type=self.__class__.__name__.lower(),
                                   prev=prev, next=next, route=route))
        yield html, path
def generate(self, conf, env, data):
    """Render year/month/day archive pages; the grouping key depends on
    which placeholders occur in the route. Each group gets prev/next
    neighbour links and is memoized to skip unchanged output."""
    tt = env.engine.fromfile(env, self.template)

    # most specific placeholder in the route wins as grouping key
    keyfunc = lambda k: ()
    if '/:year' in self.path:
        keyfunc = lambda k: (k.year, )
    if '/:month' in self.path:
        keyfunc = lambda k: (k.year, k.imonth)
    if '/:day' in self.path:
        keyfunc = lambda k: (k.year, k.imonth, k.iday)

    for next, curr, prev in neighborhood(groupby(data['entrylist'], keyfunc)):
        salt, group = '-'.join(str(i) for i in curr[0]), list(curr[1])
        # rebuild when the memoized group hash changed or a member changed
        modified = memoize('archive-' + salt, hash(*group)) or any(
            e.modified for e in group)

        if prev:
            prev = link(u'/'.join('%02i' % i for i in prev[0]),
                        expand(self.path, prev[1][0]))
        if next:
            next = link(u'/'.join('%02i' % i for i in next[0]),
                        expand(self.path, next[1][0]))

        route = expand(self.path, group[0])
        path = joinurl(conf['output_dir'], route)

        # an object storing year, zero-padded month and day as attributes
        # (may be None)
        key = type('Archive', (object, ),
                   dict(zip(('year', 'month', 'day'),
                            map(lambda x: '%02i' % x if x else None,
                                keyfunc(group[0])))))()

        if isfile(path) and not (modified or tt.modified or env.modified
                                 or conf.modified):
            event.skip('archive', path)
            continue

        html = tt.render(conf=conf,
                         env=union(env, entrylist=group, type='archive',
                                   prev=prev, curr=link(route), next=next,
                                   num_entries=len(group), route=route,
                                   archive=key))
        yield html, path
def generate(self, request):
    """Render the feed template for the newest non-draft entries and yield
    (html, path); skip when neither entries nor the template changed."""
    entrylist = filter(lambda e: not e.draft, request['entrylist'])[:self.num_entries]
    tt = self.env.jinja2.get_template('%s.xml' % self.__class__.__name__.lower())

    path = joinurl(self.conf['output_dir'], self.path)
    # str.endswith accepts a tuple of suffixes -- replaces the previous
    # filter(lambda e: path.endswith(e), [...]) construction and matches the
    # sibling feed implementations
    if not path.endswith(('.xml', '.html')):
        path = joinurl(path, 'index.html')

    if exists(path) and not filter(lambda e: e.has_changed, entrylist):
        if not tt.has_changed:
            event.skip(path)
            raise StopIteration

    # NOTE(review): datetime.now() is naive local time -- the other feed
    # variants use utcnow(); confirm which this template expects
    updated = entrylist[0].date if entrylist else datetime.now()
    html = tt.render(conf=self.conf,
                     env=union(self.env, updated=updated, entrylist=entrylist))
    yield html, path
def joinurl(self):
    """Exercise helpers.joinurl, including the trailing-slash behaviour
    that resolves to 'index.html'."""
    cases = (
        (('hello', 'world'), 'hello/world'),
        (('/hello', 'world'), '/hello/world'),
        (('hello', '/world'), 'hello/world'),
        (('/hello', '/world'), '/hello/world'),
        (('/hello/', '/world/'), '/hello/world/index.html'),
        (('/bar/', '/'), '/bar/index.html'),
    )
    for parts, expected in cases:
        assert helpers.joinurl(*parts) == expected
def generate(self, request):
    """Render each entry of this view's type with prev/next navigation,
    aborting on output-path collisions and skipping unmodified output."""
    tt = self.env.engine.fromfile(self.template)

    entrylist = request[self.type]
    pathes = set()
    has_changed = self.has_changed(entrylist)

    for i, entry in enumerate(entrylist):
        if entry.hasproperty('permalink'):
            path = joinurl(self.conf['output_dir'], entry.permalink)
        else:
            path = joinurl(self.conf['output_dir'], expand(self.path, entry))
        if path.endswith('/'):
            path = joinurl(path, 'index.html')

        # collision: remove the clashing file, then abort with details
        if isfile(path) and path in pathes:
            try:
                os.remove(path)
            finally:
                f = lambda e: e is not entry and e.permalink == entry.permalink
                raise AcrylamidException("title collision %r in %r with %r."
                                         % (entry.permalink, entry.filename,
                                            filter(f, entrylist)[0].filename))

        next, prev = self.next(entrylist, i), self.prev(entrylist, i)

        # detect collisions
        pathes.add(path)

        if isfile(path) and not any([has_changed, entry.has_changed,
                                     tt.has_changed]):
            event.skip(path)
            continue

        html = tt.render(conf=self.conf, entry=entry,
                         env=union(self.env, entrylist=[entry],
                                   type=self.__class__.__name__.lower(),
                                   prev=prev, next=next))
        yield html, path
def test_joinurl(self):
    """joinurl keeps leading/trailing slashes and normalizes the joints."""
    assert helpers.joinurl("hello", "world") == "hello/world"
    assert helpers.joinurl("/hello", "world") == "/hello/world"
    assert helpers.joinurl("hello", "/world") == "hello/world"
    assert helpers.joinurl("/hello", "/world") == "/hello/world"
    assert helpers.joinurl("/hello/", "/world/") == "/hello/world/"
def generate(self, request):
    """Creates paged listing by tag.

    For every tag a paginated set of pages is rendered from
    ``self.tags[tag]``; pages whose output exists and whose entries and
    template are unchanged are skipped.

    :param request: the request mapping (unused here -- the draft-filtered
        ``request['entrylist']`` the old code built was dead, immediately
        overwritten per tag; ``self.tags`` already holds the listings)
    :yields: ``(html, path)`` pairs
    """
    ipp = self.items_per_page
    tt = self.env.engine.fromfile(self.template)

    for tag in self.tags:
        entrylist = list(self.tags[tag])
        paginator = paginate(entrylist, ipp, salt=tag,
                             orphans=self.conf['default_orphans'])
        route = expand(self.path, {'name': tag}).rstrip('/')

        for (next, curr, prev), entries, has_changed in paginator:
            # e.g.: curr = /page/3, next = /page/2, prev = /page/4
            next = None if next is None \
                else link(u'« Next', expand(self.path, {'name': tag}).rstrip('/')) if next == 1 \
                else link(u'« Next', expand(self.pagination, {'name': tag, 'num': next}))

            curr = link(curr, expand(self.path, {'name': tag})) if curr == 1 \
                else link(expand(self.pagination, {'num': curr, 'name': tag}))

            prev = None if prev is None \
                else link(u'Previous »', expand(self.pagination, {'name': tag, 'num': prev}))

            # NOTE(review): the index view joins `curr.href` here, not the
            # link object itself -- confirm joinurl handles a link argument
            path = joinurl(self.conf['output_dir'], curr, 'index.html')

            if exists(path) and not has_changed and not tt.has_changed:
                event.skip(path)
                continue

            html = tt.render(conf=self.conf,
                             env=union(self.env, entrylist=entries, type='tag',
                                       prev=prev, curr=curr, next=next, tag=tag,
                                       items_per_page=ipp,
                                       num_entries=len(entrylist), route=route))
            yield html, path
def run(conf, env, options):
    """Subcommand: ping -- notify external resources via Pingback etc.

    Loads all non-draft entries, optionally narrows to a single file,
    then either tweets them (``--service twitter``) or greps their
    sources for outgoing links and pingbacks each in a thread pool.

    :raises AcrylamidException: when ``--file`` matches nothing or the
        twitter egg is missing
    """
    commands.initialize(conf, env)
    entrylist = [entry for entry in readers.load(conf)[0] if not entry.draft]

    if options.file:
        # comprehension instead of filter(...)[0] -- a filter object is
        # not subscriptable on Python 3; keep only the first match
        matched = [e for e in entrylist if e.filename == options.file]
        if not matched:
            raise AcrylamidException("no such post!")
        entrylist = matched[:1]

    if options.service == 'twitter':
        if twitter is None:
            raise AcrylamidException("'twitter' egg not found")
        for entry in entrylist if options.all else entrylist[:options.max or 1]:
            tweet(entry, conf, options.dryrun)
        return

    # XXX we should search for actual hrefs not random grepping, but this
    # requires access to the cache at non-runtime which is unfortunately
    # not possible yet.
    patterns = [
        r'(?<=\n)\[.*?\]:\s?(https?://.+)$',       # referenced markdown
        r'\[[^\]]+\]\((https?://[^\)]+)\)',        # inline markdown
        r'(?<=\n)\.\.\s+[^:]+:\s+(https?://.+)$',  # referenced docutils
        r'`[^<]+ <(https?://[^>]+)>`_',            # inline docutils
    ]

    pool = Threadpool(options.jobs)
    ping = lambda src, dest: pingback(
        helpers.joinurl(conf['www_root'], src), dest, options.dryrun)

    for entry in entrylist if options.all else entrylist[:options.max or 1]:
        for href in sum([re.findall(pat, entry.source, re.M) for pat in patterns], []):
            pool.add_task(ping, *[entry.permalink, href])

    try:
        pool.wait_completion()
    except KeyboardInterrupt:
        sys.exit(1)
def generate(self, request):
    """Render the paginated front-page listing of all non-draft entries.

    Skips pages whose output file exists and whose entries and template
    are unchanged; yields ``(html, path)`` for the rest.
    """
    ipp = self.items_per_page
    tt = self.env.engine.fromfile(self.template)

    entrylist = [e for e in request['entrylist'] if not e.draft]
    paginator = paginate(entrylist, ipp, orphans=self.conf['default_orphans'])
    route = self.path

    for (newer, page, older), entries, has_changed in paginator:
        # page = current page number, newer = more recent pages,
        # older = earlier pages
        if newer is None:
            nextlink = None
        elif newer == 1:
            nextlink = link(u'« Next', self.path.rstrip('/'))
        else:
            nextlink = link(u'« Next', expand(self.pagination, {'num': newer}))

        if page == 1:
            currlink = link(page, self.path)
        else:
            currlink = link(expand(self.pagination, {'num': page}))

        if older is None:
            prevlink = None
        else:
            prevlink = link(u'Previous »', expand(self.pagination, {'num': older}))

        path = joinurl(self.conf['output_dir'], currlink.href, 'index.html')

        if exists(path) and not has_changed and not tt.has_changed:
            event.skip(path)
            continue

        html = tt.render(conf=self.conf,
                         env=union(self.env, entrylist=entries, type='index',
                                   prev=prevlink, curr=currlink, next=nextlink,
                                   items_per_page=ipp,
                                   num_entries=len(entrylist), route=route))
        yield html, path
def generate(self, conf, env, data):
    """Render an archive page grouping all entries by (year, month).

    :param conf: site configuration
    :param env: environment (provides the template engine)
    :param data: mapping holding the full ``entrylist``
    :yields: a single ``(html, path)`` pair, or nothing when neither
        configuration, environment nor template changed
    """
    entrylist = data['entrylist']
    tt = env.engine.fromfile(env, self.template)

    path = joinurl(conf['output_dir'], self.path, 'index.html')

    if exists(path) and not (conf.modified or env.modified or tt.modified):
        event.skip('article', path)
        # plain `return` instead of `raise StopIteration` -- since PEP 479
        # (Python 3.7) raising StopIteration inside a generator is turned
        # into a RuntimeError
        return

    # group entries into {(year, imonth): [entry, ...]}
    articles = {}
    for entry in entrylist:
        articles.setdefault((entry.year, entry.imonth), []).append(entry)

    html = tt.render(conf=conf, articles=articles,
                     env=union(env, num_entries=len(entrylist), route=self.path))
    yield html, path
def w3c(paths, conf, warn=False, sleep=0.2):
    """Validate HTML by using the validator.w3.org API.

    :param paths: a list of HTML files we map to our actual domain
    :param conf: configuration
    :param warn: don't handle warnings as success when set
    :param sleep: sleep between requests (be nice to their API)"""
    for path in paths:
        url = path[len(conf['output_dir']) - 1:]

        resp = head("http://validator.w3.org/check?uri=" +
                    helpers.joinurl(conf['www_root'], quote(url)))
        print(helpers.rchop(url, 'index.html'), end=' ')

        if resp.code != 200:
            print(red('not 200 Ok!'))
            continue  # no sleep on transport failure, just move on

        headers = resp.info()
        status = headers['x-w3c-validator-status']
        warnings = headers['x-w3c-validator-warnings']

        if status == "Abort":
            print(red("Abort"))
        elif status == 'Valid':
            # warnings only count against us when --warn is set
            if int(warnings) == 0 or not warn:
                print(green('Ok'))
            else:
                print(yellow(warnings + ' warns'))
        else:
            print(red(headers['x-w3c-validator-errors'] + ' errors, ' +
                      warnings + ' warns'))

        time.sleep(sleep)
def generate(self, conf, env, data):
    """Render one page per entry and copy attached resources.

    Each entry is written to its permalink (or the expanded route); a
    per-entry ``layout`` property may override the view template.
    Up-to-date files are only reported via ``event.skip``. Entries with
    a ``copy`` property get their resources mirrored next to the output
    file through ``mkfile`` (not yielded, so they carry their own
    namespace for e.g. the sitemap).

    :param conf: site configuration
    :param env: environment (engine, CLI options)
    :param data: mapping holding the entry list under ``self.type``
    :yields: ``(html, path)`` pairs for entries that need re-rendering
    :raises SystemExit: when two entries render to the same path
    """
    pathes, entrylist = set(), data[self.type]
    # global short-circuit: True when neither env nor conf changed
    unmodified = not env.modified and not conf.modified

    for i, entry in enumerate(entrylist):
        if entry.hasproperty('permalink'):
            path = joinurl(conf['output_dir'], entry.permalink)
        else:
            path = joinurl(conf['output_dir'], expand(self.path, entry))

        # collision: a second entry rendering to an already-seen path is
        # fatal -- remove the stale file first so no wrong output survives,
        # then (in finally, even if remove fails) report both sources
        if isfile(path) and path in pathes:
            try:
                os.remove(path)
            finally:
                other = [
                    e.filename for e in entrylist
                    if e is not entry and e.permalink == entry.permalink
                ][0]
                log.error("title collision %s caused by %s and %s",
                          entry.permalink, entry.filename, other)
                raise SystemExit

        pathes.add(path)
        # neighbouring entries for prev/next navigation links
        next, prev = self.next(entrylist, i), self.prev(entrylist, i)

        # per-entry template
        tt = env.engine.fromfile(env,
                                 entry.props.get('layout', self.template))

        # skip only when output exists AND nothing relevant changed:
        # conf/env, template, the entry itself or anything it references
        if all([
                isfile(path), unmodified, not tt.modified,
                not entry.modified,
                not modified(*references(entry))
        ]):
            event.skip(self.name, path)
        else:
            html = tt.render(conf=conf,
                             entry=entry,
                             env=union(
                                 env,
                                 entrylist=[entry],
                                 type=self.__class__.__name__.lower(),
                                 prev=prev,
                                 next=next,
                                 route=expand(self.path, entry)))
            yield html, path

        # check if any resources need to be moved
        if entry.hasproperty('copy'):
            for res_src in entry.resources:
                res_dest = join(dirname(path), basename(res_src))
                # Note, presence of res_src check in FileReader.getresources
                # mtime comparison: only copy when the source is newer
                if isfile(res_dest
                          ) and getmtime(res_dest) > getmtime(res_src):
                    event.skip(self.name, res_dest)
                    continue

                try:
                    fp = io.open(res_src, 'rb')
                    # use mkfile rather than yield so different ns can be specified (and filtered by sitemap)
                    mkfile(fp,
                           res_dest,
                           ns='resource',
                           force=env.options.force,
                           dryrun=env.options.dryrun)
                except IOError as e:
                    # best-effort: log and keep processing other resources
                    log.warn(
                        "Failed to copy resource '%s' whilst processing '%s' (%s)"
                        % (res_src, entry.filename, e.strerror))
def translate_path(self, path):
    """Resolve a request path, then re-root it below ``www_root``.

    The stock handler maps the URL into the current working directory;
    we splice ``www_root`` in between cwd and the relative remainder.
    """
    cwd = u(os.getcwd())
    resolved = SimpleHTTPRequestHandler.translate_path(self, path)
    return joinurl(cwd, self.www_root, resolved[len(cwd):])
def relatively(part):
    """Resolve *part* against the entry's permalink unless it is absolute."""
    # absolute path -- leave untouched
    if part.startswith('/'):
        return part
    # scheme-qualified URL (e.g. 'http://...': '://' sits one char
    # before the first '/') -- leave untouched
    if part.find('://') == part.find('/') - 1:
        return part
    return joinurl(entry.permalink, part)