def once(args=[], **kwargs):
    """log only once even when a loop calls this function multiple times.

    :param args: args as in log.info(msg, *args).
    :param **kwargs: should be a valid logger with the message as argument.

    Example: log.once(info='Hello World, %s!', args=['Peter', ])."""

    if len(kwargs) != 1:
        raise AcrylamidException('incorrect usage of log.once()')

    log, msg = kwargs.items()[0]

    if not log in ('critical', 'fatal', 'warn', 'warning', 'info', 'skip', 'debug'):
        raise AcrylamidException('no such logger: %s' % log)

    try:
        key = findCaller()
    except ValueError:
        key = None

    if key is None:
        # unable to determine call frame
        globals()[log](msg, *args)
    elif key not in STORE:
        globals()[log](msg, *args)
        STORE.append(key)
def fetch(url, auth=None):
    """Fetch URL, optional with HTTP Basic Authentication."""

    if not (url.startswith('http://') or url.startswith('https://')):
        try:
            with io.open(url, 'r', encoding='utf-8', errors='replace') as fp:
                return u''.join(fp.readlines())
        except OSError as e:
            raise AcrylamidException(e.args[0])

    req = Request(url)
    if auth:
        req.add_header('Authorization', 'Basic ' + b64encode(auth))

    try:
        r = urlopen(req)
    except HTTPError as e:
        raise AcrylamidException(e.msg)

    if r.getcode() == 401:
        # NOTE: the credentials prompt was masked in this copy; the two lines
        # below are a reconstruction (assumes ``from getpass import getpass``)
        # that retries the request with HTTP Basic Authentication.
        user = input('Username: ')
        passwd = getpass('Password: ')
        return fetch(url, user + ':' + passwd)
    elif r.getcode() == 200:
        try:
            enc = re.search('charset=(.+);?', r.headers.get('Content-Type', '')).group(1)
        except AttributeError:
            enc = 'utf-8'
        return u'' + r.read().decode(enc)

    raise AcrylamidException('invalid status code %i, aborting.' % r.getcode())
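# Illustrative usage (a sketch, not part of the original module): fetch()
# returns the decoded body for local paths as well as http(s) URLs; the
# ``auth`` value is a ``user:password`` string for HTTP Basic Authentication.
# URLs and credentials below are made up.
#
#     text = fetch('http://example.org/feed.rss')
#     text = fetch('backup/feed.xml')                                 # local file, utf-8
#     text = fetch('https://example.org/private', auth='user:secret')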
def reststyle(fileobj):
    """Parse metadata from reStructuredText document when the first two lines
    are valid reStructuredText headlines followed by metadata fields.

    -- http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#field-lists"""

    import docutils
    from docutils.core import publish_doctree

    title = fileobj.readline().strip('\n')
    dash = fileobj.readline().strip('\n')

    if not title or not dash:
        raise AcrylamidException('No title given in %r' % fileobj.name)

    if len(dash) < len(title) or dash.count(dash[0]) < len(dash):
        raise AcrylamidException('title line does not match second line %r' % fileobj.name)

    i = 2
    meta = []

    while True:
        line = fileobj.readline()
        i += 1

        if not line.strip() and i == 3:
            continue
        elif not line.strip():
            break  # blank line - done
        else:
            meta.append(line)

    document = publish_doctree(''.join(meta))
    meta = dict(title=title)

    for docinfo in document.traverse(docutils.nodes.docinfo):
        for element in docinfo.children:
            if element.tagname == 'field':  # custom fields
                name_elem, body_elem = element.children
                name = name_elem.astext()
                value = body_elem.astext()
            else:  # standard fields (e.g. filters)
                name = element.tagname
                value = element.astext()
            name = name.lower()

            if '\n\n' in value:
                value = value.split('\n\n')  # Y U NO DETECT UR LISTS?
            elif '\n' in value:
                value = value.replace('\n', ' ')  # linebreaks in wrapped sentences

            meta[name] = distinguish(value.split('\n\n') if '\n\n' in value else value)

    return i, meta
def run(conf, env, options):
    """Subcommand: ping -- notify external resources via Pingback etc."""

    commands.initialize(conf, env)
    entrylist = [entry for entry in readers.load(conf)[0] if not entry.draft]

    if options.file:
        try:
            entrylist = [filter(lambda e: e.filename == options.file, entrylist)[0]]
        except IndexError:
            raise AcrylamidException("no such post!")

    if options.service == 'twitter':

        if twitter is None:
            raise AcrylamidException("'twitter' egg not found")

        for entry in entrylist if options.all else entrylist[:options.max or 1]:
            tweet(entry, conf, options.dryrun)

        return

    # XXX we should search for actual hrefs not random grepping, but this
    # requires access to the cache at non-runtime which is unfortunately
    # not possible yet.

    patterns = [
        r'(?<=\n)\[.*?\]:\s?(https?://.+)$',  # referenced markdown
        r'\[[^\]]+\]\((https?://[^\)]+)\)',  # inline markdown
        r'(?<=\n)\.\.\s+[^:]+:\s+(https?://.+)$',  # referenced docutils
        r'`[^<]+ <(https?://[^>]+)>`_',  # inline docutils
    ]

    pool = Threadpool(options.jobs)
    ping = lambda src, dest: pingback(helpers.joinurl(conf['www_root'], src),
                                      dest, options.dryrun)

    for entry in entrylist if options.all else entrylist[:options.max or 1]:

        for href in sum([re.findall(pat, entry.source, re.M) for pat in patterns], []):
            pool.add_task(ping, *[entry.permalink, href])

        try:
            pool.wait_completion()
        except KeyboardInterrupt:
            sys.exit(1)
def yamlstyle(fileobj):
    """Open and read content and return metadata and the position where the
    actual content begins.

    If ``pyyaml`` is available we use this parser but we provide a dumb
    fallback parser that can handle simple assignments in YAML.

    :param fileobj: fileobj, utf-8 encoded
    """

    head = []
    i = 0

    while True:
        line = fileobj.readline()
        i += 1

        if i == 1 and not line.startswith('---'):
            raise AcrylamidException("no meta information in %r found" % fileobj.name)
        elif i > 1 and not line.startswith('---'):
            head.append(line)
        elif i > 1 and line.startswith('---') or not line:
            break

    if yaml:
        try:
            return i, yaml.load(''.join(head))
        except yaml.YAMLError as e:
            raise AcrylamidException('YAMLError: %s' % str(e))
    else:
        props = {}
        for j, line in enumerate(head):
            if line[0] == '#' or not line.strip():
                continue
            try:
                key, value = [x.strip() for x in line.split(':', 1)]
            except ValueError:
                raise AcrylamidException('%s:%i ValueError: %s\n%s' % (
                    fileobj.name, j, line.strip('\n'),
                    ("Either your YAML is malformed or our naïve parser is too dumb \n"
                     "to read it. Revalidate your YAML or install the PyYAML parser with \n"
                     "> easy_install -U pyyaml")))
            props[key] = distinguish(value)

        if 'title' not in props:
            raise AcrylamidException('No title given in %r' % fileobj.name)

        return i, props
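# A minimal front matter sketch (illustrative, not from the original source):
# yamlstyle() expects the metadata block to be fenced by '---' lines and
# returns the line offset where the body starts plus the parsed mapping.
#
#     ---
#     title: Hello World
#     tags: [python, blog]
#     ---
#
#     offset, meta = yamlstyle(fileobj)   # -> 4, {'title': 'Hello World', ...}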
def system(cmd, stdin=None, **kwargs):
    """A simple front-end to python's horrible Popen-interface which lets you
    run a single shell command (only one; semicolons and && are not supported
    by os.execvp()). Does not catch OSError!

    :param cmd: command to run (a single string or a list of strings).
    :param stdin: optional string to pass to stdin.
    :param kwargs: is passed to :class:`subprocess.Popen`."""

    try:
        if stdin:
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE, **kwargs)
            result, err = p.communicate(stdin.encode('utf-8'))
        else:
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
            result, err = p.communicate()
    except OSError as e:
        raise OSError(e.strerror)

    retcode = p.poll()
    if err or retcode != 0:
        if not err.strip():
            err = 'process exited with %i.' % retcode
        raise AcrylamidException(err.strip() if not PY2K else err.strip().decode('utf-8'))

    return result.strip().decode('utf-8')
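# Usage sketch (illustrative): system() returns the command's decoded stdout
# and raises AcrylamidException on a non-zero exit status or stderr output.
#
#     out = system(['ls', '-l'])
#     html = system(['pandoc', '-f', 'markdown', '-t', 'html'], stdin=text)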
def pingback(src, dest, dryrun=False):
    """Makes a pingback request to dest on behalf of src, i.e. effectively
    saying to dest that "the page at src is linking to you"."""

    def search_link(content):
        match = re.search(b'<link rel="pingback" href="([^"]+)" ?/?>', content)
        return match and match.group(1)

    try:
        r = head(dest)
    except (URLError, HTTPError) as e:
        return

    try:
        server_url = r.info().get('X-Pingback', '') or search_link(r.read(512 * 1024))
        if server_url:
            print("Pingback", blue(urlparse(server_url).netloc), end='')
            print("from", green(''.join(urlparse(src)[1:3])) + ".")

            if not dryrun:
                server = xmlrpc.client.ServerProxy(server_url)
                server.pingback.ping(src, dest)

    except xmlrpc.client.ProtocolError as e:
        raise AcrylamidException(e.args[0])
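# Usage sketch (illustrative, URLs are made up): announce that the given post
# links to an external article; with dryrun=True only the discovered pingback
# endpoint is printed, no XML-RPC call is made.
#
#     pingback('http://example.org/2012/hello-world/',
#              'http://other-blog.example/some-article/', dryrun=True)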
def generate(item):

    entry = {}
    for k, v in iteritems({'title': 'title', 'date': 'pubDate',
                           'link': 'link', 'content': 'description'}):
        try:
            entry[k] = item.find(v).text if k != 'content' \
                else unescape(item.find(v).text)
        except (AttributeError, TypeError):
            pass

    if any(filter(lambda k: k not in entry, ['title', 'date', 'link', 'content'])):
        raise AcrylamidException('invalid RSS 2.0 feed: provide at least title, '
                                 + 'link, content and pubDate!')

    return {'title': entry['title'],
            'content': entry['content'],
            'date': parse_date_time(entry['date']),
            'link': entry['link'],
            'tags': [cat.text for cat in item.findall('category')]}
def load(conf):
    """Load and parse textfiles from content directory and optionally filter
    by an ignore pattern. Filenames ending with a known binary extension such
    as audio, video or images are ignored. If not blacklisted, open the file
    and check whether it :func:`utils.istext`.

    This function is *not* exception-tolerant. If Acrylamid could not handle
    a file it will raise an exception.

    It returns a tuple containing the list of entries sorted by date reverse
    (newest comes first) and other pages (unsorted).

    :param conf: configuration with CONTENT_DIR and CONTENT_IGNORE set"""

    # list of Entry-objects reverse sorted by date.
    entrylist, pages = [], []

    # collect and skip over malformed entries
    for path in filelist(conf['content_dir'], conf.get('content_ignore', [])):
        if path.endswith(('.txt', '.rst', '.md')) or istext(path):
            try:
                entry = Entry(path, conf)
                if entry.type == 'entry':
                    entrylist.append(entry)
                else:
                    pages.append(entry)
            except (ValueError, AcrylamidException) as e:
                raise AcrylamidException('%s: %s' % (path, e.args[0]))

    # sort by date, reverse
    return (sorted(entrylist, key=lambda k: k.date, reverse=True), pages)
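# Usage sketch (illustrative, values are made up): split the content directory
# into dated entries and other pages; the keys shown are the configuration
# values the docstring requires.
#
#     conf = {'content_dir': 'content/', 'content_ignore': ['drafts/*']}
#     entrylist, pages = load(conf)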
def init(self, cache_dir=None):
    """Initialize cache object by creating the cache_dir if non-existent,
    read all available cache objects and restore memoized key/values.

    :param cache_dir: the directory where cache files are stored.
    """
    if cache_dir:
        self.cache_dir = cache_dir

    if not exists(self.cache_dir):
        try:
            os.mkdir(self.cache_dir, 0o700)
        except OSError:
            raise AcrylamidException("could not create directory '%s'" % self.cache_dir)

    # load memorized items
    try:
        with io.open(join(self.cache_dir, 'info'), 'rb') as fp:
            self.memoize.update(pickle.load(fp))
    except (IOError, pickle.PickleError):
        self.emptyrun = True
    else:
        self.emptyrun = False
def date(self):
    """Parse date value and return :class:`datetime.datetime` object.
    You can set a ``DATE_FORMAT`` in your :doc:`conf.py` otherwise
    Acrylamid tries several format strings and throws an exception if
    no pattern works."""

    # alternate formats from pelican.utils, thank you!
    # https://github.com/ametaireau/pelican/blob/master/pelican/utils.py
    formats = ['%Y-%m-%d %H:%M', '%Y/%m/%d %H:%M',
               '%Y-%m-%d', '%Y/%m/%d',
               '%d-%m-%Y', '%Y-%d-%m',  # Weird ones
               '%d/%m/%Y', '%d.%m.%Y',
               '%d.%m.%Y %H:%M', '%Y-%m-%d %H:%M:%S']

    if 'date' not in self.props:
        if self.type == 'entry':
            log.warn("using mtime from %r" % self.filename)
        return super(MetadataMixin, self).date  # Date.fromtimestamp(self.mtime)

    string = re.sub(' +', ' ', self.props['date'])
    formats.insert(0, self.props['date_format'])

    for date_format in formats:
        try:
            return Date.strptime(string, date_format).replace(tzinfo=self.tzinfo)
        except ValueError:
            pass
    else:
        raise AcrylamidException("%r is not a valid date" % string)
def parse(content):

    for method in (atom, rss, wordpress):
        try:
            return method(content)
        except InputError:
            pass
    else:
        raise AcrylamidException('unable to parse source')
def transform(self, text, entry, *args):

    try:
        system(['which', 'pandoc'])
    except AcrylamidException:
        if self.ignore:
            return text
        raise AcrylamidException('Pandoc: pandoc not available')

    if len(args) == 0:
        raise AcrylamidException("pandoc filter takes one or more arguments")

    fmt, extras = args[0], args[1:]
    cmd = ['pandoc', '-f', fmt, '-t', 'HTML']
    cmd.extend(['--' + x for x in extras])

    try:
        return system(cmd, stdin=text)
    except OSError as e:
        raise AcrylamidException(e.msg)
def parse(content):

    failed = []
    for method in (wp, rss20, atom):
        try:
            res = method(content)
            return next(res), res
        except ImportError:
            log.info('notice BeautifulSoup is required for WordPress import')
        except InvalidSource as e:
            failed.append(e.args[0])
    else:
        raise AcrylamidException('unable to parse source')
def create(defaults, item):

    global USED_WORDPRESS

    fd, tmp = tempfile.mkstemp(suffix='.txt')

    with io.open(fd, 'w', encoding='utf-8') as f:
        f.write(u'---\n')
        f.write(u'title: %s\n' % safe(item['title']))
        if item.get('author') != defaults.get('author'):
            f.write(u'author: %s\n' % (item.get('author') or defaults.get('author')))
        f.write(u'date: %s\n' % item['date'].strftime(conf['date_format']))
        # f.write(u'filter: %s\n' % item['filter'])

        if 'draft' in item:
            f.write(u'draft: %s\n' % item['draft'])
        if 'tags' in item:
            f.write(u'tags: [%s]\n' % ', '.join(item['tags']))
        if item.get('description'):
            f.write(u'description: %s\n' % item['description'])
        if 'permalink' in item:
            f.write(u'permalink: %s\n' % item['permalink'])
        if item.get('type', 'entry') != 'entry':
            f.write(u'type: %s\n' % item['type'])

        for arg in options.args:
            f.write(arg.strip() + u'\n')

        f.write(u'---\n\n')

        # these are fixes for WordPress, which does not save HTML but a
        # stupid mixed-in form of HTML making it very difficult to get
        # either HTML or reStructuredText/Markdown
        if USED_WORDPRESS and item['filter'] == 'markdown':
            item['content'] = item['content'].replace("\n ", " \n")
        elif USED_WORDPRESS and item['filter'] == 'rst':
            item['content'] = item['content'].replace('\n ', '\n\n')

        f.write(item['content'] + u'\n')

    entry = Entry(tmp, conf)
    p = join(conf['content_dir'], dirname(entry.permalink)[1:])

    try:
        os.makedirs(p.rsplit('/', 1)[0])
    except OSError:
        pass

    filepath = p + '.txt'
    if isfile(filepath) and not options.force:
        raise AcrylamidException('Entry already exists %r' % filepath)

    shutil.move(tmp, filepath)
    event.create('import', filepath)
def initialize(self, func):

    if not self.initialized:
        try:
            self.init(self.conf, self.env)
            self.initialized = True
        except ImportError as e:
            if self.env.options.ignore:
                log.warn(e.args[0])
                setattr(cls, 'transform', lambda cls, x, y, *z: x)
                self.initialized = True
                return lambda cls, x, y, *z: x
            traceback.print_exc(file=sys.stdout)
            raise AcrylamidException('ImportError: %s' % e.args[0])

    return func
def atom(xml):

    def parse_date_time(stamp):
        ts = parsedate_tz(stamp)
        ts = mktime_tz(ts)
        return datetime.fromtimestamp(ts)

    try:
        tree = ElementTree.fromstring(xml.encode('utf-8'))
    except ElementTree.ParseError:
        raise InvalidSource('no well-formed XML')
    if not tree.tag.endswith('/2005/Atom}feed'):
        raise InvalidSource('no Atom feed')

    # --- site settings --- #
    ns = '{http://www.w3.org/2005/Atom}'  # etree Y U have stupid namespace handling?
    defaults = {}
    defaults['title'] = tree.find(ns + 'title').text
    defaults['www_root'] = tree.find(ns + 'id').text
    defaults['author'] = tree.find(ns + 'author').find(ns + 'name').text

    yield defaults

    # --- individual posts --- #
    for item in tree.findall(ns + 'entry'):

        entry = {}
        try:
            entry['title'] = item.find(ns + 'title').text
            entry['date'] = item.find(ns + 'updated').text
            entry['link'] = item.find(ns + 'link').text
            entry['content'] = item.find(ns + 'content').text
        except (AttributeError, TypeError):
            pass

        if item.find(ns + 'content').get('type', 'text') == 'html':
            entry['content'] = unescape(entry['content'])

        if filter(lambda k: not k in entry, ['title', 'date', 'link', 'content']):
            raise AcrylamidException('invalid Atom feed: provide at least title, '
                                     + 'link, content and updated!')

        yield {'title': entry['title'],
               'content': entry['content'],
               'date': datetime.strptime(entry['date'], "%Y-%m-%dT%H:%M:%SZ"),
               'link': entry['link']}
def rss20(xml):

    def parse_date_time(stamp):
        ts = parsedate_tz(stamp)
        ts = mktime_tz(ts)
        return datetime.fromtimestamp(ts)

    try:
        tree = ElementTree.fromstring(xml.encode('utf-8'))
    except ElementTree.ParseError:
        raise InvalidSource('no well-formed XML')
    if tree.tag != 'rss' or tree.attrib.get('version') != '2.0':
        raise InvalidSource('no RSS 2.0 feed')

    # --- site settings --- #
    defaults = {'author': None}
    channel = tree.getchildren()[0]

    for k, v in {'title': 'sitename', 'link': 'www_root',
                 'language': 'lang', 'author': 'author'}.iteritems():
        try:
            defaults[v] = channel.find(k).text
        except AttributeError:
            pass

    yield defaults

    # --- individual posts --- #
    for item in channel.findall('item'):

        entry = {}
        for k, v in {'title': 'title', 'date': 'pubDate',
                     'link': 'link', 'content': 'description'}.iteritems():
            try:
                entry[k] = item.find(v).text if k != 'content' \
                    else unescape(item.find(v).text)
            except (AttributeError, TypeError):
                pass

        if filter(lambda k: not k in entry, ['title', 'date', 'link', 'content']):
            raise AcrylamidException('invalid RSS 2.0 feed: provide at least title, '
                                     + 'link, content and pubDate!')

        yield {'title': entry['title'],
               'content': entry['content'],
               'date': parse_date_time(entry['date']),
               'link': entry['link']}
def transform(self, text, entry, *filters):

    val = []
    for f in filters:
        if f in self:
            val.append(f)
        else:
            x = f.split('(', 1)[:1][0]
            if x in self:
                val.append(x)
                self.extensions[x] = f
            elif not self.ignore:
                raise AcrylamidException('Markdown: %s' % '\n'.join(self.failed))

    return markdown.Markdown(extensions=[self.extensions[m] for m in val],
                             output_format='xhtml5').convert(text)
def render(self, conf, env, kwargs):

    dikt = env.__class__()
    dikt.update(env)
    dikt['type'] = self.__class__.__name__.lower()
    dikt['num_entries'] = len(env.globals.entrylist)

    for key in set(self.export + ['route']):
        try:
            dikt[key] = kwargs[key]
        except KeyError:
            try:
                dikt[key] = getattr(self, key)
            except AttributeError:
                raise AcrylamidException("missing key %r" % key)

    return kwargs['tt'].render(conf=conf, env=dikt)
def markdownstyle(fileobj):
    """Parse Markdown metadata without converting the source code. Mostly
    copy & paste from the 'meta' extension but slightly modified to fit
    Acrylamid: we try to parse a value into a python value (via
    :func:`distinguish`)."""

    # -- from markdown.extensions.meta
    meta_re = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9._-]+):\s*(?P<value>.*)')
    meta_more_re = re.compile(r'^[ ]{4,}(?P<value>.*)')

    i = 0
    meta, key = {}, None

    while True:
        line = fileobj.readline()
        i += 1

        if line.strip() == '':
            break  # blank line - done

        m1 = meta_re.match(line)
        if m1:
            key = m1.group('key').lower().strip()
            value = distinguish(m1.group('value').strip())
            try:
                meta[key].append(value)
            except KeyError:
                meta[key] = [value]
        else:
            m2 = meta_more_re.match(line)
            if m2 and key:
                # Add another line to existing key
                meta[key].append(m2.group('value').strip())
            else:
                break  # no meta data - done

    if not meta:
        raise AcrylamidException("no meta information in %r found" % fileobj.name)

    for key, values in meta.iteritems():
        if key not in ('tag', 'tags') and len(values) == 1:
            meta[key] = values[0]

    return i, meta
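# A minimal Markdown meta block sketch (illustrative): key/value pairs at the
# top of the file, continuation lines indented by four or more spaces, and a
# blank line ends the header; 'tags' keeps its list form.
#
#     title: Hello World
#     tags: python
#         blog
#
#     offset, meta = markdownstyle(fileobj)
#     # -> meta == {'title': 'Hello World', 'tags': ['python', 'blog']}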
def create(defaults, title, date, author, content, fmt, permalink=None, tags=None):

    global USED_WORDPRESS

    fd, tmp = tempfile.mkstemp(suffix='.txt')
    title = safe(title)

    with io.open(fd, 'w') as f:
        f.write(u'---\n')
        f.write(u'title: %s\n' % title)
        if author != defaults.get('author', None):
            f.write(u'author: %s\n' % author)
        f.write(u'date: %s\n' % date.strftime(conf['date_format']))
        f.write(u'filter: [%s, ]\n' % fmt)
        if tags:
            f.write(u'tags: [%s]\n' % ', '.join(tags))
        if permalink:
            f.write(u'permalink: %s\n' % permalink)
        for arg in options.args:
            f.write(arg.strip() + u'\n')
        f.write(u'---\n\n')

        # these are fixes for WordPress, which does not save HTML but a
        # stupid mixed-in form of HTML making it very difficult to get
        # either HTML or reStructuredText/Markdown
        if USED_WORDPRESS and fmt == 'markdown':
            content = content.replace("\n ", " \n")
        elif USED_WORDPRESS and fmt == 'rst':
            content = content.replace('\n ', '\n\n')

        f.write(content + u'\n')

    entry = Entry(tmp, conf)
    p = join(conf['content_dir'], dirname(entry.permalink)[1:])

    try:
        os.makedirs(p.rsplit('/', 1)[0])
    except OSError:
        pass

    filepath = p + '.txt'
    if isfile(filepath) and not options.force:
        raise AcrylamidException('Entry already exists %r' % filepath)

    shutil.move(tmp, filepath)
    event.create(filepath)
def run(conf, env, options): """Subcommand: deploy -- run the shell command specified in DEPLOYMENT[task] using Popen. Each string value from :doc:`conf.py` is added to the execution environment. Every argument after ``acrylamid deploy task ARG1 ARG2`` is appended to cmd.""" if options.task is None: for task in conf.get('deployment', {}).keys(): print >> sys.stdout, task sys.exit(0) task, args = options.task, options.args cmd = conf.get('deployment', {}).get(task, None) if not cmd: raise AcrylamidException('no tasks named %r in conf.py' % task) # apply ARG1 ARG2 ... and -v --long-args to the command, e.g.: # $> acrylamid deploy task arg1 -b --foo cmd += ' ' + ' '.join(args) if '%s' in cmd: log.warn("'%s' syntax is deprecated, use $OUTPUT_DIR variable.") cmd = cmd.replace('%s', '$OUTPUT_DIR') env = os.environ env.update( dict([(k.upper(), v) for k, v in conf.items() if isinstance(v, basestring)])) log.info('execute %s', cmd) p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) while True: output = p.stdout.read(1) if output == '' and p.poll() != None: break if output != '': sys.stdout.write(output) sys.stdout.flush()
def generate(self, request):

    tt = self.env.engine.fromfile(self.template)

    entrylist = request[self.type]
    pathes = set()

    has_changed = self.has_changed(entrylist)

    for i, entry in enumerate(entrylist):

        if entry.hasproperty('permalink'):
            path = joinurl(self.conf['output_dir'], entry.permalink)
        else:
            path = joinurl(self.conf['output_dir'], expand(self.path, entry))

        if path.endswith('/'):
            path = joinurl(path, 'index.html')

        if isfile(path) and path in pathes:
            try:
                os.remove(path)
            finally:
                f = lambda e: e is not entry and e.permalink == entry.permalink
                raise AcrylamidException("title collision %r in %r with %r." % (
                    entry.permalink, entry.filename, filter(f, entrylist)[0].filename))

        next, prev = self.next(entrylist, i), self.prev(entrylist, i)

        # detect collisions
        pathes.add(path)

        if isfile(path) and not any([has_changed, entry.has_changed, tt.has_changed]):
            event.skip(path)
            continue

        route = expand(self.path, entry)
        html = tt.render(conf=self.conf, entry=entry,
                         env=union(self.env, entrylist=[entry],
                                   type=self.__class__.__name__.lower(),
                                   prev=prev, next=next, route=route))

        yield html, path
def generate(item):

    entry = {}
    try:
        entry['title'] = item.find(ns + 'title').text
        entry['date'] = item.find(ns + 'updated').text
        entry['link'] = item.find(ns + 'link').text
        entry['content'] = item.find(ns + 'content').text
    except (AttributeError, TypeError):
        raise AcrylamidException('invalid Atom feed: provide at least title, '
                                 + 'link, content and updated!')

    if item.find(ns + 'content').get('type', 'text') == 'html':
        entry['content'] = unescape(entry['content'])

    return {'title': entry['title'],
            'content': entry['content'],
            'date': datetime.strptime(entry['date'], "%Y-%m-%dT%H:%M:%SZ"),
            'link': entry['link'],
            'tags': [x.get('term') for x in item.findall(ns + 'category')]}
def init(self, cache_dir=None, mode=0600):
    """Initialize cache object by creating the cache_dir if non-existent,
    read all available cache objects and restore memoized key/values.

    :param cache_dir: the directory where cache files are stored.
    :param mode: the file mode wanted for the cache files, default 0600
    """
    if cache_dir:
        self.cache_dir = cache_dir
    if mode:
        self.mode = mode

    if not exists(self.cache_dir):
        try:
            os.mkdir(self.cache_dir, 0700)
        except OSError:
            raise AcrylamidException("could not create directory '%s'" % self.cache_dir)

    # get all cache objects
    for path in self._list_dir():
        try:
            with io.open(path, 'rb') as fp:
                self.objects[path] = set(pickle.load(fp).keys())
        except pickle.PickleError:
            os.remove(path)
        except (AttributeError, EOFError):
            # this may happen after a refactor
            log.info('notice invalid cache objects')
            for obj in self._list_dir():
                cache.remove(obj)
            break
        except IOError:
            continue

    # load memorized items
    try:
        with io.open(join(cache.cache_dir, 'info'), 'rb') as fp:
            cache.memoize.update(pickle.load(fp))
    except (IOError, pickle.PickleError):
        pass
def run(conf, env, options): """Subcommand: deploy -- run the shell command specified in DEPLOYMENT[task] using Popen. Each string value from :doc:`conf.py` is added to the execution environment. Every argument after ``acrylamid deploy task ARG1 ARG2`` is appended to cmd.""" if options.list: for task in iterkeys(conf.get('deployment', {})): print(task) sys.exit(0) task, args = options.task or 'default', options.args cmd = conf.get('deployment', {}).get(task, None) if not cmd: raise AcrylamidException('no tasks named %r in conf.py' % task) # apply ARG1 ARG2 ... and -v --long-args to the command, e.g.: # $> acrylamid deploy task arg1 -b --foo cmd += ' ' + ' '.join(args) enc = sys.getfilesystemencoding() env = os.environ env.update( dict([(k.upper(), v.encode(enc, 'replace') if PY2K else v) for k, v in iteritems(conf) if isinstance(v, string_types)])) log.info('execute %s', cmd) p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) while True: output = p.stdout.read(1) if output == b'' and p.poll() != None: break if output != b'': sys.stdout.write(output.decode(enc)) sys.stdout.flush()
def pandocstyle(fileobj):
    """A function to parse the so called 'Title block' out of Pandoc-formatted
    documents. Provides very simple parsing so that Acrylamid won't choke on
    plain Pandoc documents.

    See http://johnmacfarlane.net/pandoc/README.html#title-block

    Currently not implemented:
     - Formatting within title blocks
     - Man-page writer title block extensions
    """

    meta_pan_re = re.compile(r'^[ ]{0,3}%+\s*(?P<value>.*)')
    meta_pan_more_re = re.compile(r'^\s*(?P<value>.*)')
    meta_pan_authsplit = re.compile(r';+\s*')

    i, j = 0, 0
    meta, key = {}, None
    poss_keys = ['title', 'author', 'date']

    while True:
        line = fileobj.readline()
        i += 1

        if line.strip() == '':
            break  # blank line - done

        if j + 1 > len(poss_keys):
            raise AcrylamidException(
                "%r has too many items in the Pandoc title block." % fileobj.name)

        m1 = meta_pan_re.match(line)
        if m1:
            key = poss_keys[j]
            j += 1
            valstrip = m1.group('value').strip()
            if not valstrip:
                continue
            value = distinguish(m1.group('value').strip())
            if key == 'author':
                value = value.strip(';')
                value = meta_pan_authsplit.split(value)
            meta.setdefault(key, []).append(value)
        else:
            m2 = meta_pan_more_re.match(line)
            if m2 and key:
                # Add another line to existing key
                value = m2.group('value').strip()
                if key == 'author':
                    value = value.strip(';')
                    value = meta_pan_authsplit.split(value)
                meta[key].append(value)
            else:
                break  # no meta data - done

    if 'title' not in meta:
        raise AcrylamidException('No title given in %r' % fileobj.name)

    if len(meta['title']) > 1:
        meta['title'] = ' '.join(meta['title'])

    if 'author' in meta:
        meta['author'] = sum(meta['author'], [])
    else:
        log.warn('%s does not have an Author in the Pandoc title block.' % fileobj.name)

    for key, values in iteritems(meta):
        if len(values) == 1:
            meta[key] = values[0]

    return i, meta
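# A Pandoc title block sketch (illustrative, values are made up): up to three
# '%'-prefixed lines interpreted as title, author(s) and date, with authors
# split on ';'.
#
#     % My first Post
#     % Jane Doe; John Doe
#     % 2012-12-12
#
#     offset, meta = pandocstyle(fileobj)
#     # -> meta == {'title': 'My first Post',
#     #             'author': ['Jane Doe', 'John Doe'], 'date': '2012-12-12'}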
def run(conf, env, options): """Subcommand: new -- create a new blog entry the easy way. Either run ``acrylamid new My fresh new Entry`` or interactively via ``acrylamid new`` and the file will be created using the preferred permalink format.""" # we need the actual default values commands.initialize(conf, env) # config content_extension originally defined as string, not a list extlist = conf.get('content_extension', ['.txt']) if isinstance(extlist, string_types): ext = extlist else: ext = extlist[0] fd, tmp = tempfile.mkstemp(suffix=ext, dir='.cache/') editor = os.getenv('VISUAL') if os.getenv('VISUAL') else os.getenv( 'EDITOR') tt = formats.get(ext, yaml) if options.title: title = u(' '.join(options.title)) else: title = u(input("Entry's title: ")) with io.open(fd, 'w', encoding='utf-8') as f: f.write(tt(title, datetime.now().strftime(conf['date_format']))) entry = readers.Entry(tmp, conf) p = join(conf['content_dir'], splitext(entry.permalink.strip('/'))[0]) try: os.makedirs(p.rsplit('/', 1)[0]) except OSError: pass filepath = p + ext if isfile(filepath): raise AcrylamidException('Entry already exists %r' % filepath) shutil.move(tmp, filepath) event.create('new', filepath) if datetime.now().hour == 23 and datetime.now().minute > 45: log.info( "notice don't forget to update entry.date-day after mignight!") if log.level() >= log.WARN: return try: if editor: retcode = subprocess.call(shlex.split(editor) + [filepath]) elif sys.platform == 'darwin': retcode = subprocess.call(['open', filepath]) else: retcode = subprocess.call(['xdg-open', filepath]) except OSError: raise AcrylamidException('Could not launch an editor') # XXX process detaches... m( if retcode < 0: raise AcrylamidException('Child was terminated by signal %i' % -retcode) if os.stat(filepath)[6] == 0: raise AcrylamidException('File is empty!')
def new(conf, env, title, prompt=True):
    """Subcommand: new -- create a new blog entry the easy way. Either run
    ``acrylamid new My fresh new Entry`` or interactively via ``acrylamid new``
    and the file will be created using the preferred permalink format."""

    # we need the actual default values
    initialize(conf, env)

    fd, tmp = tempfile.mkstemp(suffix='.txt', dir='.cache/')
    editor = os.getenv('VISUAL') if os.getenv('VISUAL') else os.getenv('EDITOR')

    if not title:
        title = raw_input("Entry's title: ")
    title = safe(title)

    with io.open(fd, 'w') as f:
        f.write(u'---\n')
        f.write(u'title: %s\n' % title)
        f.write(u'date: %s\n' % datetime.now().strftime(conf['date_format']))
        f.write(u'---\n\n')

    entry = readers.Entry(tmp, conf)
    p = join(conf['content_dir'], dirname(entry.permalink)[1:])

    try:
        os.makedirs(p.rsplit('/', 1)[0])
    except OSError:
        pass

    filepath = p + '.txt'
    if isfile(filepath):
        raise AcrylamidException('Entry already exists %r' % filepath)
    shutil.move(tmp, filepath)
    event.create(filepath)

    if datetime.now().hour == 23 and datetime.now().minute > 45:
        log.info("notice consider editing entry.date-day after you pass midnight!")

    if not prompt:
        return

    try:
        if editor:
            retcode = subprocess.call([editor, filepath])
        elif sys.platform == 'darwin':
            retcode = subprocess.call(['open', filepath])
        else:
            retcode = subprocess.call(['xdg-open', filepath])
    except OSError:
        raise AcrylamidException('Could not launch an editor')

    # XXX process detaches... m(
    if retcode < 0:
        raise AcrylamidException('Child was terminated by signal %i' % -retcode)

    if os.stat(filepath)[6] == 0:
        raise AcrylamidException('File is empty!')