def url(self, absolute=False):
    """Return the URL to this channel, relative to the site root.

    Args:
        absolute: When True, return a fully-qualified ``http://`` URL
            built from the ``otto.site`` Fabric setting; otherwise
            return a root-relative path.  Either way the result ends
            with a trailing slash.

    Returns:
        str: e.g. ``'blog/'`` or ``'http://example.com/blog/'``; for a
        channel located at the htdocs root, ``'./'`` or
        ``'http://example.com/'``.
    """
    # Directory holding this channel's metadata file.
    channelpath = os.path.dirname(self.channel()['_metafile'])
    # Path of the channel directory relative to the htdocs root.
    # FIX: the os.path.relpath arguments were reversed, which yielded
    # the path of htdocs relative to the channel (e.g. '../..') instead
    # of the channel relative to htdocs — making the absolute branch
    # produce URLs like 'http://site/../../'.
    relpath = os.path.relpath(channelpath, paths.build_dir('htdocs'))
    if relpath == '.' and absolute:
        # Channel lives at the site root.
        return 'http://%s/' % env['otto.site']
    if absolute:
        return 'http://%s/%s/' % (env['otto.site'], relpath)
    return relpath + '/'
def build_blog(source_dir, dest_dir):
    """Build the blog: copy *source_dir* into the build tree, convert
    Markdown entries to JSON, then render every entry and channel to
    HTML and Atom.

    Requires the Fabric settings ``otto.build_dir`` and ``otto.site``.

    Args:
        source_dir: local directory containing the blog sources;
            a trailing slash is appended if missing so ``cp -a`` copies
            the directory's *contents* rather than the directory itself.
        dest_dir: destination path (relative to the build dir) passed to
            ``paths.build_dir``.
    """
    require('otto.build_dir', 'otto.site')
    if not source_dir.endswith('/'):
        source_dir += '/'
    build_dir = paths.build_dir(dest_dir)
    logging.info("Processing " + source_dir)
    # Stage the raw sources into the build directory via shell cp.
    with lcd(paths.local_workspace()):
        local('mkdir -p %s' % build_dir)
        local('cp -a %s %s' % (source_dir, build_dir))
    # Walk the dir looking for markdown files and convert to JSON
    for thisdir, subdirs, files in os.walk(build_dir):
        for mdfile in fnmatch.filter(files, '*.md'):
            # TODO Only write outfile if md file is newer
            mdfilename = os.path.join(thisdir, mdfile)
            entry = Entry.load_markdown(mdfilename)
            entry.save_json()
    # Now walk it again, converting each JSON file to HTML and Atom.
    # topdown=False makes this a depth-first (bottom-up) walk, so all
    # entry files under a channel directory are seen before the
    # channel directory itself.
    entries = []
    for thisdir, subdirs, files in os.walk(build_dir, topdown=False):
        for entryfile in fnmatch.filter(files, '*.json'):
            if entryfile == 'channel.json':
                # channel.json is channel metadata, not an entry.
                continue
            entrypath = os.path.join(thisdir, entryfile)
            entry = Entry.load_json(entrypath)
            entries.append(entry)
            entry.render_to('html')
            entry.render_to('atom')
        # Since this is a depth-first crawl, if we have reached the channel dir,
        # we have already processed all the entries in this channel.
        # NOTE(review): `entries` is never reset after a channel is
        # rendered, so with multiple (or nested) channels each channel
        # also receives the entries accumulated for previously visited
        # channels — confirm whether that aggregation is intentional.
        if os.path.exists(os.path.join(thisdir, 'channel.json')):
            channel = Channel.load_json(thisdir)
            # sort entries reverse chrono
            entries.sort(key=Entry.sort_key, reverse=True)
            channel['entries'] = entries
            channel.save_json()
            channel.render_to('html')
            channel.render_to('atom')