def gen_linear_archive(self):
    """Build the linear archive links (one per year) and the item list.

    Walks the datadir, groups entries by year, and stores:

    - ``self._archives``: newline-joined ``<a>`` year links, newest
      year first
    - ``self._items``: one ``[year-month, year-month-day, mtime,
      filename]`` list per entry
    """
    config = self._request.get_configuration()
    root = config["datadir"]
    baseurl = config.get("base_url", "")

    archives = {}
    items = []
    for mem in tools.walk(self._request, root):
        timetuple = tools.filestat(self._request, mem)
        y = time.strftime("%Y", timetuple)
        m = time.strftime("%m", timetuple)
        d = time.strftime("%d", timetuple)
        # only one archive link per year
        # (was archives.has_key(y) -- has_key is gone in Python 3)
        if y not in archives:
            archives[y] = '<a href="%s/%s/">%s</a><br>' % (baseurl, y, y)
        items.append(["%s-%s" % (y, m),
                      "%s-%s-%s" % (y, m, d),
                      time.mktime(timetuple),
                      mem])

    # newest year first
    self._archives = '\n'.join(
        archives[key] for key in sorted(archives, reverse=True))
    self._items = items
def gen_linear_archive(self):
    """Build month-granularity archive links from a configurable template.

    The ``archive_template`` config value is %-expanded with the full
    config + data dicts plus the strftime fields B, b, m, Y, y for each
    entry.  One link is kept per (year, month); the joined result is
    stored in ``self._archives``, newest month first.
    """
    config = self._request.get_configuration()
    data = self._request.get_data()
    root = config["datadir"]

    fulldict = {}
    fulldict.update(config)
    fulldict.update(data)
    template = config.get(
        'archive_template',
        '<a href="%(base_url)s/%(Y)s/%(b)s">%(Y)s-%(b)s</a><br />')

    archives = {}
    for mem in tools.walk(self._request, root):
        timetuple = tools.filestat(self._request, mem)
        timedict = {}
        for x in ["B", "b", "m", "Y", "y"]:
            timedict[x] = time.strftime("%" + x, timetuple)
        fulldict.update(timedict)
        # one archive entry per year-month; keyed "YYYYMM" so plain
        # string sorting is chronological
        key = timedict['Y'] + timedict['m']
        if key not in archives:
            archives[key] = template % fulldict

    # newest month first (sorted() instead of the py2-only
    # keys()/sort()/reverse() sequence)
    self._archives = '\n'.join(
        archives[key] for key in sorted(archives, reverse=True))
def gen_linear_archive(self):
    """Build the linear archive links (one per year) and the item list.

    Walks the datadir, groups entries by year, and stores:

    - ``self._archives``: newline-joined ``<a>`` year links, newest
      year first
    - ``self._items``: one ``[year-month, year-month-day, mtime,
      filename]`` list per entry
    """
    config = self._request.get_configuration()
    root = config["datadir"]
    baseurl = config.get("base_url", "")

    archives = {}
    items = []
    for mem in tools.walk(self._request, root):
        timetuple = tools.filestat(self._request, mem)
        y = time.strftime("%Y", timetuple)
        m = time.strftime("%m", timetuple)
        d = time.strftime("%d", timetuple)
        # only one archive link per year
        # (was archives.has_key(y) -- has_key is gone in Python 3)
        if y not in archives:
            archives[y] = '<a href="%s/%s/">%s</a><br>' % (baseurl, y, y)
        items.append(["%s-%s" % (y, m),
                      "%s-%s-%s" % (y, m, d),
                      time.mktime(timetuple),
                      mem])

    # newest year first
    self._archives = '\n'.join(
        archives[key] for key in sorted(archives, reverse=True))
    self._items = items
def __init__(self, request, filename, root, datadir=""):
    """Initialize the entry from a file on disk.

    :param request: the Request object
    :param filename: the complete filename for the file in question
                     including path
    :param root: i have no clue what this is
    :param datadir: the datadir
    """
    base.EntryBase.__init__(self, request)
    self._config = request.get_configuration()

    # normalize all stored paths to forward slashes
    self._filename = filename.replace(os.sep, '/')
    self._root = root.replace(os.sep, '/')

    chosen = datadir or self._config["datadir"]
    if chosen.endswith(os.sep):
        chosen = chosen[:-1]
    self._datadir = chosen

    # cache the file's mtime in the three forms the code uses
    self._timetuple = tools.filestat(self._request, self._filename)
    self._mtime = time.mktime(self._timetuple)
    self._fulltime = time.strftime("%Y%m%d%H%M%S", self._timetuple)

    self._populated_data = 0
def gen_linear_archive(self):
    """Build month-granularity archive links from a configurable template.

    The ``archive_template`` config value is %-expanded with the full
    config + data dicts plus the strftime fields B, b, m, Y, y for each
    entry.  One link is kept per (year, month); the joined result is
    stored in ``self._archives``, newest month first.
    """
    config = self._request.get_configuration()
    data = self._request.get_data()
    root = config["datadir"]

    fulldict = {}
    fulldict.update(config)
    fulldict.update(data)
    template = config.get(
        'archive_template',
        '<a href="%(base_url)s/%(Y)s/%(b)s">%(Y)s-%(b)s</a><br />')

    archives = {}
    for mem in tools.walk(self._request, root):
        timetuple = tools.filestat(self._request, mem)
        timedict = {}
        for x in ["B", "b", "m", "Y", "y"]:
            timedict[x] = time.strftime("%" + x, timetuple)
        fulldict.update(timedict)
        # one archive entry per year-month; keyed "YYYYMM" so plain
        # string sorting is chronological
        key = timedict['Y'] + timedict['m']
        if key not in archives:
            archives[key] = template % fulldict

    # newest month first (sorted() instead of the py2-only
    # keys()/sort()/reverse() sequence)
    self._archives = '\n'.join(
        archives[key] for key in sorted(archives, reverse=True))
def __init__(self, request, filename, root, datadir=""):
    """
    @param request: the Request object
    @type  request: Request

    @param filename: the complete filename for the file in question
                     including path
    @type  filename: string

    @param root: i have no clue what this is
    @type  root: string

    @param datadir: the datadir
    @type  datadir: string
    """
    base.EntryBase.__init__(self, request)
    self._config = request.getConfiguration()

    # store every path with forward slashes regardless of platform
    self._filename = filename.replace(os.sep, '/')
    self._root = root.replace(os.sep, '/')

    chosen = datadir or self._config["datadir"]
    if chosen.endswith(os.sep):
        chosen = chosen[:-1]
    self._datadir = chosen

    # precompute the file's mtime in the forms used elsewhere
    self._timetuple = tools.filestat(self._request, self._filename)
    self._mtime = time.mktime(self._timetuple)
    self._fulltime = time.strftime("%Y%m%d%H%M%S", self._timetuple)

    self._populated_data = 0
def cb_prepare(args):
    """Attach previous/MAIN/next navigation HTML to the request data.

    Runs only for single-entry ("file") views: locates the current
    entry in the reverse-chronological list of all entries and stores
    an HTML snippet under ``data['entry_navi']``.
    """
    request = args['request']
    data = request.getData()
    # only single-entry pages get navigation
    if data['bl_type'] != 'file':
        return
    entry = data['entry_list']
    flavour = data['flavour']
    filename = os.path.normpath(
        entry[0]['filename'])  # normpath is for windows.
    config = request.getConfiguration()
    datadir = config['datadir']
    base_url = config['base_url']
    extension = config.get('entry_extension', 'txt')
    r = re.compile('(.*\.' + extension + '$)')
    allentries = tools.Walk(request, datadir, pattern=r)
    # cur_time = time.localtime()
    entrylist = []
    for e in allentries:
        timetuple = tools.filestat(request, e)
        # if cur_time < timetuple:
        #     continue
        entrylist.append((timetuple, e))
    # sort by mtime, newest entry first
    entrylist.sort()
    entrylist.reverse()
    entrylist = [x[1] for x in entrylist]
    try:
        num = entrylist.index(filename)
    except ValueError:
        # current entry not in the list: fall back to a MAIN link only
        data['entry_navi'] = '| <a href="%s">MAIN</a> |' % base_url
        return

    def _entry_url(file_entry):
        # URL of an entry rendered in the current flavour
        return "%s/%s.%s" % (base_url, file_entry["file_path"], flavour)

    navi_str = ''
    # entrylist is newest-first, so index + 1 is the older ("prev") entry
    if num != (len(entrylist) - 1):
        fname = entrylist[num + 1]
        e = entries.fileentry.FileEntry(request, fname, datadir)
        prev_link = _entry_url(e)
        navi_str += '< <a href="%s">%s</a> ' % (prev_link, e['title'])
    navi_str += '| <a href="%s">MAIN</a> |' % base_url
    # index - 1 is the newer ("next") entry; num == 0 means we're newest
    if num:
        fname = entrylist[num - 1]
        e = entries.fileentry.FileEntry(request, fname, datadir)
        next_link = _entry_url(e)
        navi_str += ' <a href="%s">%s</a> >' % (next_link, e['title'])
    data['entry_navi'] = navi_str
def cb_prepare(args):
    """Compute prev/MAIN/next navigation HTML for single-entry pages."""
    request = args['request']
    data = request.getData()
    if data['bl_type'] != 'file':
        return

    entry = data['entry_list']
    flavour = data['flavour']
    # normpath is for windows.
    filename = os.path.normpath(entry[0]['filename'])

    config = request.getConfiguration()
    datadir = config['datadir']
    base_url = config['base_url']
    extension = config.get('entry_extension', 'txt')

    pattern = re.compile('(.*\.' + extension + '$)')
    # pair every entry with its mtime so sorting is chronological,
    # then keep just the filenames, newest first
    stamped = [(tools.filestat(request, name), name)
               for name in tools.Walk(request, datadir, pattern=pattern)]
    stamped.sort()
    stamped.reverse()
    entrylist = [pair[1] for pair in stamped]

    try:
        num = entrylist.index(filename)
    except ValueError:
        # current entry missing from the walk: MAIN link only
        data['entry_navi'] = '| <a href="%s">MAIN</a> |' % base_url
        return

    def _entry_url(file_entry):
        return "%s/%s.%s" % (base_url, file_entry["file_path"], flavour)

    pieces = []
    # list is newest-first: num + 1 is the older entry
    if num != (len(entrylist) - 1):
        older = entries.fileentry.FileEntry(request,
                                            entrylist[num + 1], datadir)
        pieces.append('< <a href="%s">%s</a> ' % (_entry_url(older),
                                                  older['title']))
    pieces.append('| <a href="%s">MAIN</a> |' % base_url)
    # num - 1 is the newer entry (skip when already at the newest)
    if num:
        newer = entries.fileentry.FileEntry(request,
                                            entrylist[num - 1], datadir)
        pieces.append(' <a href="%s">%s</a> >' % (_entry_url(newer),
                                                  newer['title']))
    data['entry_navi'] = ''.join(pieces)
def gen_linear_archive(self):
    """Build year-granularity archive links plus the per-entry item list.

    Expands ``archive_template`` with config + data, the flavour (as
    "f") and strftime fields m/Y/y/d.  Stores one link per year in
    ``self._archives`` (newest first) and one ``[year-month,
    year-month-day, mtime, filename]`` list per entry in ``self._items``.
    """
    config = self._request.get_configuration()
    data = self._request.get_data()
    root = config["datadir"]

    fulldict = {}
    fulldict.update(config)
    fulldict.update(data)
    # flavour is loop-invariant, so set it once up front
    flavour = data.get("flavour", config.get("default_flavour", "html"))
    fulldict["f"] = flavour
    template = config.get(
        'archive_template',
        '<a href="%(base_url)s/%(Y)s/index.%(f)s">%(Y)s</a><br />')

    archives = {}
    items = []
    for mem in tools.walk(self._request, root):
        timetuple = tools.filestat(self._request, mem)
        for x in ["m", "Y", "y", "d"]:
            fulldict[x] = time.strftime("%" + x, timetuple)
        year = fulldict["Y"]
        # only one archive link per year
        if year not in archives:
            archives[year] = template % fulldict
        items.append(["%(Y)s-%(m)s" % fulldict,
                      "%(Y)s-%(m)s-%(d)s" % fulldict,
                      time.mktime(timetuple),
                      mem])

    # newest year first (sorted() instead of the py2-only
    # keys()/sort()/reverse() sequence)
    self._archives = '\n'.join(
        archives[y] for y in sorted(archives, reverse=True))
    self._items = items
def gen_linear_archive(self):
    """Build year-granularity archive links plus the per-entry item list.

    Expands ``archive_template`` with config + data, the flavour (as
    "f") and strftime fields m/Y/y/d.  Stores one link per year in
    ``self._archives`` (newest first) and one ``[year-month,
    year-month-day, mtime, filename]`` list per entry in ``self._items``.
    """
    config = self._request.get_configuration()
    data = self._request.get_data()
    root = config["datadir"]

    fulldict = {}
    fulldict.update(config)
    fulldict.update(data)
    # flavour is loop-invariant, so set it once up front
    flavour = data.get("flavour", config.get("default_flavour", "html"))
    fulldict["f"] = flavour
    template = config.get(
        'archive_template',
        '<a href="%(base_url)s/%(Y)s/index.%(f)s">%(Y)s</a><br />')

    archives = {}
    items = []
    for mem in tools.walk(self._request, root):
        timetuple = tools.filestat(self._request, mem)
        for x in ["m", "Y", "y", "d"]:
            fulldict[x] = time.strftime("%" + x, timetuple)
        year = fulldict["Y"]
        # only one archive link per year
        # (was archives.has_key(year) -- has_key is gone in Python 3)
        if year not in archives:
            archives[year] = template % fulldict
        items.append(["%(Y)s-%(m)s" % fulldict,
                      "%(Y)s-%(m)s-%(d)s" % fulldict,
                      time.mktime(timetuple),
                      mem])

    # newest year first
    self._archives = '\n'.join(
        archives[y] for y in sorted(archives, reverse=True))
    self._items = items
def __populateBasicMetadata(self):
    """
    Fills the metadata dict with metadata about the given file.

    This metadata consists of things we pick up from an os.stat call as
    well as knowledge of the filename and the root directory.  The rest
    of the metadata comes from parsing the file itself which is done
    with __populateData.
    """
    file_basename = os.path.basename(self._filename)

    # entry path relative to the root, minus the basename and the
    # trailing separator
    path = self._filename.replace(self._root, '')
    path = path.replace(os.path.basename(self._filename), '')
    path = path[:-1]

    # entry path relative to the datadir, with both the leading and
    # the trailing separator stripped
    absolute_path = self._filename.replace(self._config['datadir'], '')
    absolute_path = absolute_path.replace(file_basename, '')
    absolute_path = absolute_path[1:][:-1]

    fn, ext = os.path.splitext(file_basename)

    if absolute_path == '':
        file_path = fn
    else:
        file_path = os.path.join(absolute_path, fn)

    # trackback id: path/basename with every non-alphanumeric
    # character collapsed to an underscore
    tb_id = '%s/%s' % (absolute_path, fn)
    tb_id = re.sub(r'[^A-Za-z0-9]', '_', tb_id)

    self['path'] = path
    self['tb_id'] = tb_id
    self['absolute_path'] = absolute_path
    self['file_path'] = file_path
    self['fn'] = fn
    self['filename'] = self._filename

    # handle the time portions
    timeTuple = tools.filestat(self._request, self._filename)
    self.setTime(timeTuple)

    # when someone does a getMetadata and they're looking for
    # a key not in this list, then we'll have to parse the
    # file and complete the list of keys.
    self._original_metadata_keys = self.keys()
    self._original_metadata_keys.remove(base.CONTENT_KEY)
def __populateBasicMetadata(self):
    """
    Fills the metadata dict with metadata about the given file.

    This metadata consists of things we pick up from an os.stat call as
    well as knowledge of the filename and the root directory.  The rest
    of the metadata comes from parsing the file itself which is done
    with __populateData.
    """
    file_basename = os.path.basename(self._filename)

    # entry path relative to the root, minus the basename and the
    # trailing separator
    path = self._filename.replace(self._root, '')
    path = path.replace(os.path.basename(self._filename), '')
    path = path[:-1]

    # entry path relative to the datadir, with both the leading and
    # the trailing separator stripped
    absolute_path = self._filename.replace(self._config['datadir'], '')
    absolute_path = absolute_path.replace(file_basename, '')
    absolute_path = absolute_path[1:][:-1]

    fn, ext = os.path.splitext(file_basename)

    if absolute_path == '':
        file_path = fn
    else:
        file_path = os.path.join(absolute_path, fn)

    # trackback id: path/basename with every non-alphanumeric
    # character collapsed to an underscore
    tb_id = '%s/%s' % (absolute_path, fn)
    tb_id = re.sub(r'[^A-Za-z0-9]', '_', tb_id)

    self['path'] = path
    self['tb_id'] = tb_id
    self['absolute_path'] = absolute_path
    self['file_path'] = file_path
    self['fn'] = fn
    self['filename'] = self._filename

    # handle the time portions
    # BUGFIX: tools.filestat takes (request, filename) -- the request
    # argument was missing here (cf. every other filestat call site)
    timeTuple = tools.filestat(self._request, self._filename)
    self.setTime(timeTuple)

    # when someone does a getMetadata and they're looking for
    # a key not in this list, then we'll have to parse the
    # file and complete the list of keys.
    self._original_metadata_keys = self.keys()
    self._original_metadata_keys.remove(base.CONTENT_KEY)
def cb_filelist(args):
    """Render a per-category "recent entries" page for /zqcrecent URLs.

    Walks only the category subtree named by the query string, finds
    each entry's most recent mtime (entry file or any of its comments),
    and returns a single generated entry containing an HTML table of
    all of them, newest first.
    """
    request = args["request"]
    pyhttp = request.getHttp()
    data = request.getData()
    config = request.getConfiguration()
    # only handle the /zqcrecent url
    if not pyhttp["PATH_INFO"].startswith("/zqcrecent"):
        return
    datadir = config["datadir"]
    # the query string names the category subdirectory to walk
    walkdir = datadir + "/" + str(pyhttp["QUERY_STRING"])
    data["debug"] = "DEBUG::%s<br/>%s<br/>%s" % (
        datadir, str(pyhttp["QUERY_STRING"]), walkdir)
    baseurl = config.get("base_url", "")
    cmntdir = config.get("comment_dir", datadir + os.sep + "comments")
    cmntext = config.get("comment_ext", ".cmt")
    data["blog_title"] = config.get(
        "blog_title", "") + "<DIV id='recent'> - category recent</DIV>"
    data[INIT_KEY] = 1
    config['num_entries'] = 9999
    # two weeks ago (no longer used as a cutoff -- see below)
    marker = time.time() - (60 * 60 * 24 * 14)
    # get entries and export
    # get all the entries
    #allentries = tools.Walk(request, datadir)
    ## Zoomq::060128 walking base Categories Point
    allentries = tools.Walk(request, walkdir)
    debug = []
    stuff = []
    for mem in allentries:
        timetuple = tools.filestat(request, mem)
        entrytstamp = time.mktime(timetuple)
        tstamp = entrytstamp
        # category path and bare filename of the entry, used to find
        # its comment files
        absolute_path = mem[len(datadir):mem.rfind(os.sep)]
        fn = mem[mem.rfind(os.sep) + 1:mem.rfind(".")]
        cmtexpr = os.path.join(cmntdir + absolute_path,
                               fn + '-*.' + cmntext)
        cmtlist = glob.glob(cmtexpr)
        cmtlist = [(os.stat(m)[8], m) for m in cmtlist]
        cmtlist.sort()
        cmtlist.reverse()
        # we want the most recent mtime from either the entry or
        # any of its comments
        if len(cmtlist) > 0:
            if tstamp < cmtlist[0][0]:
                tstamp = cmtlist[0][0]
        # if the mtime is more recent than our marker, we toss the
        # stuff into our list of things to look at.
        # Zoomq::060214 fixed cancel this limited
        """
        if tstamp > marker:
            stuff.append( [tstamp, entrytstamp, mem, cmtlist] )
        """
        stuff.append([tstamp, entrytstamp, mem, cmtlist])
    # newest activity first
    stuff.sort()
    stuff.reverse()
    # time stamp and blog entry
    #e = "<tr>\n<td valign=\"top\" align=\"left\">%s:</td>\n" \
    #    "<td><a href=\"%s/%s\">%s</a> (%s)<br />%s</td></tr>\n"
    e = """<tr>
<td valign="top" align="left">%s:</td>
<td><a href="%s/%s.html">%s</a> (%s)
<br/>%s
</td></tr>
"""
    entrylist = []
    output = []
    for mem in stuff:
        entry = entries.fileentry.FileEntry(request, mem[2],
                                            data['root_datadir'])
        tstamp = time.strftime("%m/%d/%Y", time.localtime(mem[1]))
        temp = e % (tstamp, \
                    baseurl, \
                    entry["file_path"], \
                    entry["title"], \
                    "", \
                    "".join(
                        [get_comment_text(c) + "<br />" for c in mem[3]])
                    #entry["path"]
                    )
        output.append(temp)
    entrylist.append(
        new_entry(request, "Category Recent Entries::",
                  "<tr><td colspan=2> </td></tr>\n".join(output)))
    return entrylist
def gen_linear_archive(self):
    """Build a nested year/month archive list with per-month counts.

    For every entry, a "YYYYMM" bucket accumulates [link-html, count].
    The buckets are then regrouped per year; the output is an HTML
    ``<li>`` list: one year line (with total count) followed by its
    month lines, newest first, stored in ``self._archives``.
    """
    config = self._request.get_configuration()
    data = self._request.get_data()
    root = config["datadir"]
    archives = {}
    archive_list = tools.walk(self._request, root)
    fulldict = {}
    fulldict.update(config)
    fulldict.update(data)
    template = config.get(
        'archive_template',
        '<a href="%(base_url)s/%(Y)s/%(m)s">%(y)s.%(m)s</a>')
    #<a href="%(base_url)s/%(Y)s/%(b)s">%(Y)s-%(b)s</a>
    #print fulldict["base_url"]
    for mem in archive_list:
        timetuple = tools.filestat(self._request, mem)
        timedict = {}
        for x in ["B", "b", "m", "Y", "y"]:
            timedict[x] = time.strftime("%" + x, timetuple)
        fulldict.update(timedict)
        # bucket per "YYYYMM": [rendered link, entry count]
        if not archives.has_key(timedict['Y'] + timedict['m']):
            archives[timedict['Y'] + timedict['m']] = [template % fulldict, 1]
        else:
            archives[timedict['Y'] + timedict['m']][1] += 1
            # NOTE: the link is re-rendered with the latest fulldict
            # each time; the last entry seen wins
            archives[timedict['Y'] + timedict['m']][0] = template % fulldict
    #print archives
    #return
    arc_keys = archives.keys()
    arc_keys.sort()
    arc_keys.reverse()
    yearmonth = {}
    result = []
    #base archives walk and count every year's mounth
    # regroup the "YYYYMM" buckets by year:
    # yearmonth[year] = [total entry count, [month buckets...]]
    for key in arc_keys:
        yearname = key[:-2]
        if yearname in yearmonth.keys():
            yearmonth[yearname][0] += archives[key][1]
            yearmonth[yearname][1].append(archives[key])
        else:
            yearmonth[yearname] = [archives[key][1], []]
            yearmonth[yearname][1].append(archives[key])
    #print yearmonth["2007"]
    mon_keys = yearmonth.keys()
    mon_keys.sort()
    mon_keys.reverse()
    #print mon_keys
    # emit one year line plus its month lines, newest year first
    for year in mon_keys:
        #print "%s<sup>%s<sup>"%(year,yearmonth[year][0])
        monode = yearmonth[year][1]
        result.append(
            "<li class='yearchives'><a href='%s/%s'>%s</a><sup>(%d)</sup></li>"
            % (fulldict["base_url"], year, year, yearmonth[year][0]))
        if 1 == len(monode):
            #print "%s<sup>%s<sup>"%(monode[0][0],monode[0][1])
            result.append("<li>%s<sup>(%d)</sup><li>"
                          % (monode[0][0], monode[0][1]))
        else:
            for m in monode:
                #print m
                #print "%s<sup>%s<sup>"%(m[0],m[1])
                result.append("<li>%s<sup>(%d)</sup><li>" % (m[0], m[1]))
            #result.append("%s<sup>%s<sup>"%(month[0],month[1]))
    #print result
    self._archives = '\n'.join(result)
def run_static_renderer(self, incremental=False):
    """This will go through all possible things in the blog and
    statically render everything to the ``static_dir`` specified
    in the config file.

    This figures out all the possible ``path_info`` settings and
    calls ``self.run()`` a bazillion times saving each file.

    :param incremental: Whether (True) or not (False) to incrementally
        render the pages.  If we're incrementally rendering pages,
        then we render only the ones that have changed.
    """
    self.initialize()
    config = self._request.get_configuration()
    data = self._request.get_data()
    print "Performing static rendering."
    if incremental:
        print "Incremental is set."
    static_dir = config.get("static_dir", "")
    data_dir = config["datadir"]
    if not static_dir:
        print "Error: You must set static_dir in your config file."
        return 0
    flavours = config.get("static_flavours", ["html"])
    index_flavours = config.get("static_index_flavours", ["html"])
    render_me = []
    month_names = config.get("static_monthnames", True)
    month_numbers = config.get("static_monthnumbers", False)
    year_indexes = config.get("static_yearindexes", True)
    # dates/categories are used as sets: keys matter, values don't
    dates = {}
    categories = {}
    # first we handle entries and categories
    listing = tools.walk(self._request, data_dir)
    for mem in listing:
        # skip the ones that have bad extensions
        ext = mem[mem.rfind(".") + 1:]
        if not ext in data["extensions"].keys():
            continue
        # grab the mtime of the entry file
        mtime = time.mktime(tools.filestat(self._request, mem))
        # remove the datadir from the front and the bit at the end
        mem = mem[len(data_dir):mem.rfind(".")]
        # this is the static filename
        fn = os.path.normpath(static_dir + mem)
        # grab the mtime of one of the statically rendered file;
        # a missing file means "never rendered" (smtime = 0)
        try:
            smtime = os.stat(fn + "." + flavours[0])[8]
        except:
            smtime = 0
        # if the entry is more recent than the static, we want to
        # re-render
        if smtime < mtime or not incremental:
            # grab the categories: every prefix of the entry's
            # directory path is a category that needs an index
            temp = os.path.dirname(mem).split(os.sep)
            for i in range(len(temp) + 1):
                p = os.sep.join(temp[0:i])
                categories[p] = 0
            # grab the date
            mtime = time.localtime(mtime)
            year = time.strftime("%Y", mtime)
            month = time.strftime("%m", mtime)
            day = time.strftime("%d", mtime)
            if year_indexes:
                dates[year] = 1
            if month_numbers:
                dates[year + "/" + month] = 1
                dates[year + "/" + month + "/" + day] = 1
            if month_names:
                monthname = tools.num2month[month]
                dates[year + "/" + monthname] = 1
                dates[year + "/" + monthname + "/" + day] = 1
            # toss in the render queue
            for f in flavours:
                render_me.append((mem + "." + f, ""))
    print "rendering %d entries." % len(render_me)
    # handle categories
    categories = categories.keys()
    categories.sort()
    # if they have stuff in their root category, it'll add a "/"
    # to the category list and we want to remove that because it's
    # a duplicate of "".
    if "/" in categories:
        categories.remove("/")
    print "rendering %d category indexes." % len(categories)
    for mem in categories:
        mem = os.path.normpath(mem + "/index.")
        for f in index_flavours:
            render_me.append((mem + f, ""))
    # now we handle dates
    dates = dates.keys()
    dates.sort()
    dates = ["/" + d for d in dates]
    print "rendering %d date indexes." % len(dates)
    for mem in dates:
        mem = os.path.normpath(mem + "/index.")
        for f in index_flavours:
            render_me.append((mem + f, ""))
    # now we handle arbitrary urls
    additional_stuff = config.get("static_urls", [])
    print "rendering %d arbitrary urls." % len(additional_stuff)
    for mem in additional_stuff:
        # split "url?query" into its two halves
        if mem.find("?") != -1:
            url = mem[:mem.find("?")]
            query = mem[mem.find("?") + 1:]
        else:
            url = mem
            query = ""
        render_me.append((url, query))
    # now we pass the complete render list to all the plugins via
    # cb_staticrender_filelist and they can add to the filelist
    # any (url, query) tuples they want rendered.
    print "(before) building %s files." % len(render_me)
    tools.run_callback(
        "staticrender_filelist",
        {'request': self._request,
         'filelist': render_me,
         'flavours': flavours,
         'incremental': incremental})
    # de-duplicate and put into a stable order
    render_me = sorted(set(render_me))
    print "building %s files." % len(render_me)
    for url, q in render_me:
        url = url.replace(os.sep, "/")
        print "rendering '%s' ..." % url
        tools.render_url_statically(dict(config), url, q)
    # we're done, clean up
    self.cleanup()
def cb_filelist(args):
    """Render a "recent activity" page for /recent URLs.

    Lists every entry whose own mtime -- or the mtime of any of its
    comments -- falls within the last 14 days, newest first, as a
    single generated entry containing an HTML table.
    """
    request = args["request"]
    pyhttp = request.getHttp()
    data = request.getData()
    config = request.getConfiguration()
    # only handle the /recent url
    if not pyhttp["PATH_INFO"].startswith("/recent"):
        return
    datadir = config["datadir"]
    baseurl = config.get("base_url", "")
    cmntdir = config.get("comment_dir", datadir + os.sep + "comments")
    cmntext = config.get("comment_ext", ".cmt")
    data["blog_title"] = config.get("blog_title", "") + " - recent activity"
    data[INIT_KEY] = 1
    config['num_entries'] = 9999
    # anything touched within the last two weeks counts as "recent"
    marker = time.time() - (60 * 60 * 24 * 14)
    # get all the entries
    allentries = tools.Walk(request, datadir)
    debug = []
    stuff = []
    for mem in allentries:
        timetuple = tools.filestat(request, mem)
        entrytstamp = time.mktime(timetuple)
        tstamp = entrytstamp
        # category path and bare filename, used to locate comment files
        absolute_path = mem[len(datadir):mem.rfind(os.sep)]
        fn = mem[mem.rfind(os.sep) + 1:mem.rfind(".")]
        cmtexpr = os.path.join(cmntdir + absolute_path,
                               fn + '-*.' + cmntext)
        cmtlist = glob.glob(cmtexpr)
        cmtlist = [(os.stat(m)[8], m) for m in cmtlist]
        cmtlist.sort()
        cmtlist.reverse()
        # we want the most recent mtime from either the entry or
        # any of its comments
        if len(cmtlist) > 0:
            if tstamp < cmtlist[0][0]:
                tstamp = cmtlist[0][0]
        # if the mtime is more recent than our marker, we toss the
        # stuff into our list of things to look at.
        if tstamp > marker:
            stuff.append([tstamp, entrytstamp, mem, cmtlist])
    # newest activity first
    stuff.sort()
    stuff.reverse()
    # time stamp and blog entry
    e = "<tr>\n<td valign=\"top\" align=\"left\">%s:</td>\n" \
        "<td><a href=\"%s/%s\">%s</a> (%s)<br />%s</td></tr>\n"
    entrylist = []
    output = []
    for mem in stuff:
        entry = entries.fileentry.FileEntry(request, mem[2],
                                            config["datadir"])
        tstamp = time.strftime("%m/%d/%Y", time.localtime(mem[1]))
        temp = e % (tstamp, \
                    baseurl, \
                    entry["file_path"], \
                    entry["title"], \
                    entry["path"], \
                    "".join(
                        [get_comment_text(c) + "<br />" for c in mem[3]]))
        output.append(temp)
    entrylist.append(new_entry(request, "Recent activity:",
                               "<tr><td colspan=2> </td></tr>\n".join(output)))
    return entrylist
def generate_calendar(self):
    """
    Generates the calendar.  We'd like to walk the archives for
    things that happen in this month and mark the dates accordingly.
    After doing that we pass it to a formatting method which turns
    the thing into HTML.
    """
    config = self._request.get_configuration()
    data = self._request.get_data()
    entry_list = data["entry_list"]
    root = config["datadir"]
    baseurl = config.get("base_url", "")
    self._today = time.localtime()
    if len(entry_list) == 0:
        # if there are no entries, we shouldn't even try to
        # do something fancy.
        self._cal = ""
        return
    # start from the first entry's timetuple, then override year/month
    # from the path info (pi_yr / pi_mo) where given
    view = list(entry_list[0]["timetuple"])
    # this comes in as '', 2001, 2002, 2003, ...  so we can convert it
    # without an issue
    temp = data.get("pi_yr")
    if not temp:
        view[0] = int(self._today[0])
    else:
        view[0] = int(temp)
    # the month is a bit harder since it can come in as "08", "", or
    # "Aug" (in the example of August).
    temp = data.get("pi_mo")
    if temp and temp.isdigit():
        view[1] = int(temp)
    elif temp and tools.month2num.has_key(temp):
        view[1] = int(tools.month2num[temp])
    else:
        view[1] = int(self._today[1])
    self._view = view = tuple(view)
    # if we're looking at a specific day, we figure out what it is
    if data.get("pi_yr") and data.get("pi_mo") and data.get("pi_da"):
        if data["pi_mo"].isdigit():
            mon = data["pi_mo"]
        else:
            mon = tools.month2num[data["pi_mo"]]
        self._specificday = (int(data.get("pi_yr", self._today[0])),
                             int(mon),
                             int(data.get("pi_da", self._today[2])))
    archive_list = tools.walk(self._request, root)
    yearmonth = {}
    for mem in archive_list:
        timetuple = tools.filestat(self._request, mem)
        # if we already have an entry for this date, we skip to the
        # next one because we've already done this processing
        day = str(timetuple[2]).rjust(2)
        if self._entries.has_key(day):
            continue
        # add an entry for yyyymm so we can figure out next/previous
        year = str(timetuple[0])
        dayzfill = string.zfill(timetuple[1], 2)
        yearmonth[year + dayzfill] = time.strftime("%b", timetuple)
        # if the entry isn't in the year/month we're looking at with
        # the calendar, then we skip to the next one
        if timetuple[0:2] != view[0:2]:
            continue
        # mark the entry because it's one we want to show
        datepiece = time.strftime("%Y/%b/%d", timetuple)
        self._entries[day] = (baseurl + "/" + datepiece, day)
    # Set the first day of the week (Sunday by default)
    first = config.get('calendar_firstweekday', 6)
    calendar.setfirstweekday(first)
    # create the calendar
    cal = calendar.monthcalendar(view[0], view[1])
    # insert the days of the week
    cal.insert(0, calendar.weekheader(2).split())
    # figure out next and previous links by taking the dict of
    # yyyymm strings we created, turning it into a list, sorting
    # them, and then finding "today"'s entry.  then the one before
    # it (index-1) is prev, and the one after (index+1) is next.
    keys = yearmonth.keys()
    keys.sort()
    thismonth = time.strftime("%Y%m", view)
    # do some quick adjustment to make sure we didn't pick a
    # yearmonth that's outside the yearmonths of the entries we
    # know about.
    if thismonth in keys:
        index = keys.index(thismonth)
    elif len(keys) == 0 or keys[0] > thismonth:
        index = 0
    else:
        index = len(keys) - 1
    # build the prev link
    if index == 0 or len(keys) == 0:
        prev = None
    else:
        prev = ("%s/%s/%s" % (baseurl, keys[index - 1][:4],
                              yearmonth[keys[index - 1]]), "<")
    # build the next link
    if index == len(yearmonth) - 1 or len(keys) == 0:
        next = None
    else:
        next = ("%s/%s/%s" % (baseurl, keys[index + 1][:4],
                              yearmonth[keys[index + 1]]), ">")
    # insert the month name and next/previous links
    cal.insert(0, [prev, time.strftime("%B %Y", view), next])
    self._cal = self.format_with_css(cal)
def gen_linear_archive(self):
    """Build a nested year/month archive list with per-month counts.

    For every entry, a "YYYYMM" bucket accumulates [link-html, count].
    The buckets are then regrouped per year; the output is an HTML
    ``<li>`` list: one year line (with total count) followed by its
    month lines, newest first, stored in ``self._archives``.
    """
    config = self._request.get_configuration()
    data = self._request.get_data()
    root = config["datadir"]
    archives = {}
    archive_list = tools.walk(self._request, root)
    fulldict = {}
    fulldict.update(config)
    fulldict.update(data)
    template = config.get("archive_template",
                          '<a href="%(base_url)s/%(Y)s/%(m)s">%(y)s.%(m)s</a>')
    # <a href="%(base_url)s/%(Y)s/%(b)s">%(Y)s-%(b)s</a>
    # print fulldict["base_url"]
    for mem in archive_list:
        timetuple = tools.filestat(self._request, mem)
        timedict = {}
        for x in ["B", "b", "m", "Y", "y"]:
            timedict[x] = time.strftime("%" + x, timetuple)
        fulldict.update(timedict)
        # bucket per "YYYYMM": [rendered link, entry count]
        if not archives.has_key(timedict["Y"] + timedict["m"]):
            archives[timedict["Y"] + timedict["m"]] = [template % fulldict, 1]
        else:
            archives[timedict["Y"] + timedict["m"]][1] += 1
            # NOTE: the link is re-rendered with the latest fulldict
            # each time; the last entry seen wins
            archives[timedict["Y"] + timedict["m"]][0] = template % fulldict
    # print archives
    # return
    arc_keys = archives.keys()
    arc_keys.sort()
    arc_keys.reverse()
    yearmonth = {}
    result = []
    # base archives walk and count every year's mounth
    # regroup the "YYYYMM" buckets by year:
    # yearmonth[year] = [total entry count, [month buckets...]]
    for key in arc_keys:
        yearname = key[:-2]
        if yearname in yearmonth.keys():
            yearmonth[yearname][0] += archives[key][1]
            yearmonth[yearname][1].append(archives[key])
        else:
            yearmonth[yearname] = [archives[key][1], []]
            yearmonth[yearname][1].append(archives[key])
    # print yearmonth["2007"]
    mon_keys = yearmonth.keys()
    mon_keys.sort()
    mon_keys.reverse()
    # print mon_keys
    # emit one year line plus its month lines, newest year first
    for year in mon_keys:
        # print "%s<sup>%s<sup>"%(year,yearmonth[year][0])
        monode = yearmonth[year][1]
        result.append(
            "<li class='yearchives'><a href='%s'>%s</a><sup>(%d)</sup></li>"
            % (fulldict["base_url"], year, yearmonth[year][0])
        )
        if 1 == len(monode):
            # print "%s<sup>%s<sup>"%(monode[0][0],monode[0][1])
            result.append("<li>%s<sup>(%d)</sup><li>"
                          % (monode[0][0], monode[0][1]))
        else:
            for m in monode:
                # print m
                # print "%s<sup>%s<sup>"%(m[0],m[1])
                result.append("<li>%s<sup>(%d)</sup><li>" % (m[0], m[1]))
            # result.append("%s<sup>%s<sup>"%(month[0],month[1]))
    # print result
    self._archives = "\n".join(result)
def cb_filelist(args):
    """Build the "recent activity" pseudo-entry for /recent URLs.

    Collects every entry whose own mtime -- or the mtime of any of its
    comments -- is newer than two weeks ago and renders them, newest
    first, as rows of an HTML table wrapped in a single entry.
    """
    request = args["request"]
    pyhttp = request.getHttp()
    data = request.getData()
    config = request.getConfiguration()

    if not pyhttp["PATH_INFO"].startswith("/recent"):
        return

    datadir = config["datadir"]
    baseurl = config.get("base_url", "")
    cmntdir = config.get("comment_dir", datadir + os.sep + "comments")
    cmntext = config.get("comment_ext", ".cmt")

    data["blog_title"] = config.get("blog_title", "") + " - recent activity"
    data[INIT_KEY] = 1
    config['num_entries'] = 9999

    # anything touched in the last two weeks counts as "recent"
    marker = time.time() - (60 * 60 * 24 * 14)

    debug = []
    stuff = []
    for mem in tools.Walk(request, datadir):
        timetuple = tools.filestat(request, mem)
        entrytstamp = time.mktime(timetuple)

        # comment files live under cmntdir, mirroring the entry's path
        absolute_path = mem[len(datadir):mem.rfind(os.sep)]
        fn = mem[mem.rfind(os.sep) + 1:mem.rfind(".")]
        cmtexpr = os.path.join(cmntdir + absolute_path,
                               fn + '-*.' + cmntext)
        cmtlist = sorted([(os.stat(m)[8], m) for m in glob.glob(cmtexpr)],
                         reverse=True)

        # most recent mtime of the entry or any of its comments
        tstamp = entrytstamp
        if cmtlist and tstamp < cmtlist[0][0]:
            tstamp = cmtlist[0][0]

        if tstamp > marker:
            stuff.append([tstamp, entrytstamp, mem, cmtlist])

    stuff.sort()
    stuff.reverse()

    # one table row per recent entry
    e = "<tr>\n<td valign=\"top\" align=\"left\">%s:</td>\n" \
        "<td><a href=\"%s/%s\">%s</a> (%s)<br />%s</td></tr>\n"

    output = []
    for mem in stuff:
        entry = entries.fileentry.FileEntry(request, mem[2],
                                            config["datadir"])
        when = time.strftime("%m/%d/%Y", time.localtime(mem[1]))
        comments = "".join([get_comment_text(c) + "<br />"
                            for c in mem[3]])
        output.append(e % (when, baseurl, entry["file_path"],
                           entry["title"], entry["path"], comments))

    entrylist = [new_entry(request, "Recent activity:",
                           "<tr><td colspan=2> </td></tr>\n".join(output))]
    return entrylist
def cmp_mtime(a, b):
    # Comparator ordering two entry filenames by file modification time.
    # NOTE(review): relies on `req` from the enclosing scope.
    stat_a = tools.filestat(req, a)
    stat_b = tools.filestat(req, b)
    return cmp(stat_a, stat_b)
def generate_calendar(self):
    """ Generates the calendar.  We'd like to walk the archives
    for things that happen in this month and mark the dates
    accordingly.  After doing that we pass it to a formatting
    method which turns the thing into HTML.

    Side effects: sets ``self._today``, ``self._view``,
    ``self._entries`` (marked days), possibly ``self._specificday``,
    and finally ``self._cal`` (the rendered HTML).
    """
    config = self._request.get_configuration()
    data = self._request.get_data()

    entry_list = data["entry_list"]
    root = config["datadir"]
    baseurl = config.get("base_url", "")

    self._today = time.localtime()

    if len(entry_list) == 0:
        # if there are no entries, we shouldn't even try to
        # do something fancy.
        self._cal = ""
        return

    # seed the viewed (year, month) from the first entry's timetuple
    view = list(entry_list[0]["timetuple"])

    # this comes in as '', 2001, 2002, 2003, ...  so we can convert it
    # without an issue
    temp = data.get("pi_yr")
    if not temp:
        view[0] = int(self._today[0])
    else:
        view[0] = int(temp)

    # the month is a bit harder since it can come in as "08", "", or
    # "Aug" (in the example of August).
    temp = data.get("pi_mo")
    if temp and temp.isdigit():
        view[1] = int(temp)
    elif temp and tools.month2num.has_key(temp):
        view[1] = int(tools.month2num[temp])
    else:
        view[1] = int(self._today[1])

    self._view = view = tuple(view)

    # if we're looking at a specific day, we figure out what it is
    if data.get("pi_yr") and data.get("pi_mo") and data.get("pi_da"):
        if data["pi_mo"].isdigit():
            mon = data["pi_mo"]
        else:
            mon = tools.month2num[data["pi_mo"]]

        self._specificday = (int(data.get("pi_yr", self._today[0])),
                             int(mon),
                             int(data.get("pi_da", self._today[2])))

    archive_list = tools.walk(self._request, root)

    yearmonth = {}

    for mem in archive_list:
        timetuple = tools.filestat(self._request, mem)

        # if we already have an entry for this date, we skip to the
        # next one because we've already done this processing
        # NOTE(review): the key is the day-of-month only ("rjust"
        # pads to 2 chars), so a hit here also skips the yearmonth
        # bookkeeping below for that file -- presumably another file
        # in the same month fills it in; verify.
        day = str(timetuple[2]).rjust(2)
        if self._entries.has_key(day):
            continue

        # add an entry for yyyymm so we can figure out next/previous
        year = str(timetuple[0])
        # zero-padded MONTH number, despite the variable name
        dayzfill = string.zfill(timetuple[1], 2)
        yearmonth[year + dayzfill] = time.strftime("%b", timetuple)

        # if the entry isn't in the year/month we're looking at with
        # the calendar, then we skip to the next one
        if timetuple[0:2] != view[0:2]:
            continue

        # mark the entry because it's one we want to show
        datepiece = time.strftime("%Y/%b/%d", timetuple)
        self._entries[day] = (baseurl + "/" + datepiece, day)

    # Set the first day of the week (Sunday by default)
    first = config.get('calendar_firstweekday', 6)
    calendar.setfirstweekday(first)

    # create the calendar
    cal = calendar.monthcalendar(view[0], view[1])

    # insert the days of the week
    cal.insert(0, calendar.weekheader(2).split())

    # figure out next and previous links by taking the dict of
    # yyyymm strings we created, turning it into a list, sorting
    # them, and then finding "today"'s entry.  then the one before
    # it (index-1) is prev, and the one after (index+1) is next.
    keys = yearmonth.keys()
    keys.sort()
    thismonth = time.strftime("%Y%m", view)

    # do some quick adjustment to make sure we didn't pick a
    # yearmonth that's outside the yearmonths of the entries we
    # know about.
    if thismonth in keys:
        index = keys.index(thismonth)
    elif len(keys) == 0 or keys[0] > thismonth:
        index = 0
    else:
        index = len(keys) - 1

    # build the prev link
    if index == 0 or len(keys) == 0:
        prev = None
    else:
        prev = ("%s/%s/%s" % (baseurl, keys[index - 1][:4],
                              yearmonth[keys[index - 1]]), "<")

    # build the next link
    if index == len(yearmonth) - 1 or len(keys) == 0:
        next = None
    else:
        next = ("%s/%s/%s" % (baseurl, keys[index + 1][:4],
                              yearmonth[keys[index + 1]]), ">")

    # insert the month name and next/previous links
    cal.insert(0, [prev, time.strftime("%B %Y", view), next])

    self._cal = self.format_with_css(cal)
def run_static_renderer(self, incremental=False): """This will go through all possible things in the blog and statically render everything to the ``static_dir`` specified in the config file. This figures out all the possible ``path_info`` settings and calls ``self.run()`` a bazillion times saving each file. :param incremental: Whether (True) or not (False) to incrementally render the pages. If we're incrementally rendering pages, then we render only the ones that have changed. """ self.initialize() config = self._request.get_configuration() data = self._request.get_data() print "Performing static rendering." if incremental: print "Incremental is set." staticdir = config.get("static_dir", "") datadir = config["datadir"] if not staticdir: print "Error: You must set static_dir in your config file." return 0 flavours = config.get("static_flavours", ["html"]) renderme = [] monthnames = config.get("static_monthnames", True) monthnumbers = config.get("static_monthnumbers", False) yearindexes = config.get("static_yearindexes", True) dates = {} categories = {} # first we handle entries and categories listing = tools.walk(self._request, datadir) for mem in listing: # skip the ones that have bad extensions ext = mem[mem.rfind(".")+1:] if not ext in data["extensions"].keys(): continue # grab the mtime of the entry file mtime = time.mktime(tools.filestat(self._request, mem)) # remove the datadir from the front and the bit at the end mem = mem[len(datadir):mem.rfind(".")] # this is the static filename fn = os.path.normpath(staticdir + mem) # grab the mtime of one of the statically rendered file try: smtime = os.stat(fn + "." 
+ flavours[0])[8] except: smtime = 0 # if the entry is more recent than the static, we want to # re-render if smtime < mtime or not incremental: # grab the categories temp = os.path.dirname(mem).split(os.sep) for i in range(len(temp)+1): p = os.sep.join(temp[0:i]) categories[p] = 0 # grab the date mtime = time.localtime(mtime) year = time.strftime("%Y", mtime) month = time.strftime("%m", mtime) day = time.strftime("%d", mtime) if yearindexes: dates[year] = 1 if monthnumbers: dates[year + "/" + month] = 1 dates[year + "/" + month + "/" + day] = 1 if monthnames: monthname = tools.num2month[month] dates[year + "/" + monthname] = 1 dates[year + "/" + monthname + "/" + day] = 1 # toss in the render queue for f in flavours: renderme.append( (mem + "." + f, "") ) print "rendering %d entries." % len(renderme) # handle categories categories = categories.keys() categories.sort() # if they have stuff in their root category, it'll add a "/" # to the category list and we want to remove that because it's # a duplicate of "". if "/" in categories: categories.remove("/") print "rendering %d category indexes." % len(categories) for mem in categories: mem = os.path.normpath(mem + "/index.") for f in flavours: renderme.append((mem + f, "")) # now we handle dates dates = dates.keys() dates.sort() dates = ["/" + d for d in dates] print "rendering %d date indexes." % len(dates) for mem in dates: mem = os.path.normpath(mem + "/index.") for f in flavours: renderme.append((mem + f, "")) # now we handle arbitrary urls additional_stuff = config.get("static_urls", []) print "rendering %d arbitrary urls." % len(additional_stuff) for mem in additional_stuff: if mem.find("?") != -1: url = mem[:mem.find("?")] query = mem[mem.find("?")+1:] else: url = mem query = "" renderme.append((url, query)) # now we pass the complete render list to all the plugins via # cb_staticrender_filelist and they can add to the filelist # any (url, query) tuples they want rendered. print "(before) building %s files." 
% len(renderme) tools.run_callback("staticrender_filelist", {'request': self._request, 'filelist': renderme, 'flavours': flavours, 'incremental': incremental}) renderme = sorted(set(renderme)) print "building %s files." % len(renderme) for url, q in renderme: url = url.replace(os.sep, "/") print "rendering '%s' ..." % url tools.render_url_statically(config, url, q) # we're done, clean up self.cleanup()
def cb_filelist(args):
    """Generate a "Category Recent Entries" listing for every entry
    under the category subdirectory named by the query string.

    PyBlosxom ``cb_filelist`` callback: only fires when PATH_INFO
    starts with "/zqcrecent"; otherwise returns None so the normal
    filelist handling takes over.  QUERY_STRING is appended to
    datadir and that subtree is walked.

    :param args: callback dict containing the ``request`` object.
    :returns: a one-element list holding the generated entry, or None.
    """
    request = args["request"]
    pyhttp = request.getHttp()
    data = request.getData()
    config = request.getConfiguration()

    if not pyhttp["PATH_INFO"].startswith("/zqcrecent"):
        return

    datadir = config["datadir"]
    # Zoomq::060128 walk from the requested category, not the root.
    # NOTE(review): QUERY_STRING goes into a filesystem path
    # unvalidated -- a "../" query can walk outside datadir; confirm
    # upstream sanitization or add a check.
    walkdir = datadir + "/" + str(pyhttp["QUERY_STRING"])
    data["debug"] = "DEBUG::%s<br/>%s<br/>%s" % (datadir,
                                                 str(pyhttp["QUERY_STRING"]),
                                                 walkdir)

    baseurl = config.get("base_url", "")
    cmntdir = config.get("comment_dir", datadir + os.sep + "comments")
    cmntext = config.get("comment_ext", ".cmt")

    data["blog_title"] = config.get("blog_title", "") \
        + "<DIV id='recent'> - category recent</DIV>"
    data[INIT_KEY] = 1
    config['num_entries'] = 9999

    # get entries under the requested category subtree
    allentries = tools.Walk(request, walkdir)

    stuff = []
    for mem in allentries:
        timetuple = tools.filestat(request, mem)
        entrytstamp = time.mktime(timetuple)
        tstamp = entrytstamp

        absolute_path = mem[len(datadir):mem.rfind(os.sep)]
        fn = mem[mem.rfind(os.sep)+1:mem.rfind(".")]
        # NOTE(review): cmntext defaults to ".cmt" (leading dot), so
        # '-*.' + cmntext builds a "-*..cmt" pattern -- confirm the
        # configured comment_ext carries no leading dot.
        cmtexpr = os.path.join(cmntdir + absolute_path,
                               fn + '-*.' + cmntext)
        cmtlist = glob.glob(cmtexpr)
        # (mtime, filename) pairs, newest comment first
        cmtlist = [(os.stat(m)[8], m) for m in cmtlist]
        cmtlist.sort()
        cmtlist.reverse()

        # we want the most recent mtime from either the entry or
        # any of its comments
        if len(cmtlist) > 0:
            if tstamp < cmtlist[0][0]:
                tstamp = cmtlist[0][0]

        # Zoomq::060214 the two-week recency cutoff was deliberately
        # disabled: every entry in the category is listed, regardless
        # of age.
        stuff.append([tstamp, entrytstamp, mem, cmtlist])

    # newest activity first
    stuff.sort()
    stuff.reverse()

    # time stamp and blog entry
    e = """<tr>
<td valign="top" align="left">%s:</td>
<td><a href="%s/%s.html">%s</a> (%s)
<br/>%s
</td></tr>
"""

    entrylist = []
    output = []

    for mem in stuff:
        entry = entries.fileentry.FileEntry(request, mem[2],
                                            data['root_datadir'])
        tstamp = time.strftime("%m/%d/%Y", time.localtime(mem[1]))
        # fifth slot (parenthesized path) intentionally left blank
        temp = e % (tstamp,
                    baseurl,
                    entry["file_path"],
                    entry["title"],
                    "",
                    "".join(
                        [get_comment_text(c) + "<br />" for c in mem[3]]))
        output.append(temp)

    entrylist.append(
        new_entry(request,
                  "Category Recent Entries::",
                  "<tr><td colspan=2> </td></tr>\n".join(output)))
    return entrylist