def genCategories(self):
    config = self._request.getConfiguration()
    root = config["datadir"]

    start_t = config.get("category_start", DEFAULT_START)
    begin_t = config.get("category_begin", DEFAULT_BEGIN)
    item_t = config.get("category_item", DEFAULT_ITEM)
    end_t = config.get("category_end", DEFAULT_END)
    finish_t = config.get("category_finish", DEFAULT_FINISH)

    # zoomq: the category order is configurable via category_root_list
    root_path_list = config.get("category_root_list", DEFAULT_ROOT)
    cfgBaseUrl = config.get("base_url", "")

    form = self._request.getForm()
    flavour = (form.has_key('flav') and form['flav'].value
               or config.get('default_flavour', 'html'))

    # build the list of all entries in the datadir
    output = ""

    if len(root_path_list) == 0:
        # default: walk the whole datadir and export categories in word order
        elist = tools.Walk(self._request, root)
        output += self._subCategories(elist, root, "")
    else:
        for rootCategory in root_path_list:
            subroot = "%s/%s" % (root, rootCategory)
            self._baseurl = "%s/%s" % (cfgBaseUrl, rootCategory)
            elist = tools.Walk(self._request, subroot)
            output += self._subCategories(elist, subroot, rootCategory)

    self._categories = output
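# genCategories() above is driven entirely by config settings; the DEFAULT_*
# constants referenced there supply the fallback templates elsewhere in this
# plugin. A minimal sketch of the ordering-related entries as they might
# appear in a Pyblosxom config.py (added to the usual "py" dict; the values
# here are illustrative assumptions, not required defaults):
py["category_root_list"] = ["tech", "life"]  # subtrees of datadir, walked in this order
py["default_flavour"] = "html"               # used when no ?flav= is given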
def genLinearArchive(self):
    config = self._request.getConfiguration()
    data = self._request.getData()
    root = config["datadir"]

    archives = {}
    archiveList = tools.Walk(self._request, root)

    fulldict = {}
    fulldict.update(config)
    fulldict.update(data)

    template = config.get(
        'archive_template',
        '<a href="%(base_url)s/%(Y)s/%(b)s">%(Y)s-%(b)s</a><br />')

    for mem in archiveList:
        timetuple = tools.filestat(self._request, mem)
        timedict = {}
        for x in ["B", "b", "m", "Y", "y"]:
            timedict[x] = time.strftime("%" + x, timetuple)
        fulldict.update(timedict)

        if not archives.has_key(timedict['Y'] + timedict['m']):
            archives[timedict['Y'] + timedict['m']] = (template % fulldict)

    arcKeys = archives.keys()
    arcKeys.sort()
    arcKeys.reverse()

    result = []
    for key in arcKeys:
        result.append(archives[key])
    self._archives = '\n'.join(result)
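# genLinearArchive() above lets the archive link markup be overridden via the
# "archive_template" config setting; the template is %-formatted against the
# merged config/data dicts plus the strftime keys B, b, m, Y and y. A sketch
# of an override in config.py (the markup itself is only an illustration):
py["archive_template"] = (
    '<a href="%(base_url)s/%(Y)s/%(b)s">%(B)s %(Y)s</a><br />')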
def genLinearArchive(self):
    config = self._request.getConfiguration()
    data = self._request.getData()
    root = config["datadir"]
    baseurl = config.get("base_url", "")

    archives = {}
    archiveList = tools.Walk(self._request, root)
    items = []

    for mem in archiveList:
        timetuple = tools.filestat(self._request, mem)
        y = time.strftime("%Y", timetuple)
        m = time.strftime("%m", timetuple)
        d = time.strftime("%d", timetuple)

        l = "<a href=\"%s/%s/\">%s</a><br>" % (baseurl, y, y)
        if not archives.has_key(y):
            archives[y] = l

        items.append(
            ["%s-%s" % (y, m),
             "%s-%s-%s" % (y, m, d),
             time.mktime(timetuple),
             mem])

    arcKeys = archives.keys()
    arcKeys.sort()
    arcKeys.reverse()

    result = []
    for key in arcKeys:
        result.append(archives[key])
    self._archives = '\n'.join(result)
    self._items = items
def cb_prepare(args):
    request = args['request']
    data = request.getData()
    if data['bl_type'] != 'file':
        return

    entry = data['entry_list']
    flavour = data['flavour']
    filename = os.path.normpath(entry[0]['filename'])  # normpath is for windows.

    config = request.getConfiguration()
    datadir = config['datadir']
    base_url = config['base_url']

    extension = config.get('entry_extension', 'txt')
    r = re.compile('(.*\.' + extension + '$)')
    allentries = tools.Walk(request, datadir, pattern=r)

    # cur_time = time.localtime()
    entrylist = []
    for e in allentries:
        timetuple = tools.filestat(request, e)
        # if cur_time < timetuple:
        #     continue
        entrylist.append((timetuple, e))

    entrylist.sort()
    entrylist.reverse()
    entrylist = [x[1] for x in entrylist]

    try:
        num = entrylist.index(filename)
    except ValueError:
        data['entry_navi'] = '| <a href="%s">MAIN</a> |' % base_url
        return

    def _entry_url(file_entry):
        return "%s/%s.%s" % (base_url, file_entry["file_path"], flavour)

    navi_str = ''
    if num != (len(entrylist) - 1):
        fname = entrylist[num + 1]
        e = entries.fileentry.FileEntry(request, fname, datadir)
        prev_link = _entry_url(e)
        navi_str += '< <a href="%s">%s</a> ' % (prev_link, e['title'])

    navi_str += '| <a href="%s">MAIN</a> |' % base_url

    if num:
        fname = entrylist[num - 1]
        e = entries.fileentry.FileEntry(request, fname, datadir)
        next_link = _entry_url(e)
        navi_str += ' <a href="%s">%s</a> >' % (next_link, e['title'])

    data['entry_navi'] = navi_str
def _getFlavour(self, taste='html'):
    """
    Flavours (views or templates, as some call them).  Defaults are
    provided, but they can be overridden with files in the datadir.
    If you don't like the default html templates, add your own:
    head.html, story.html, etc.
    """
    data = self._request.getData()
    config = self._request.getConfiguration()
    pattern = re.compile(r'.+?\.' + taste + '$')

    datadir = config["datadir"]
    dirname = data["root_datadir"]
    if os.path.isfile(dirname):
        dirname = os.path.dirname(dirname)

    template_files = None
    while len(dirname) >= len(datadir):
        template_files = tools.Walk(self._request, dirname, 1, pattern)
        if template_files:
            break
        dirname = os.path.split(dirname)[0]

    if not template_files:
        template_files = tools.Walk(self._request, config['datadir'], 1, pattern)

    # we grab a copy of the templates for the taste we want
    flavour = {}
    flavour.update(DEFAULT_FLAVOURS.get(taste, {}))

    # we update the flavours dict with what we found
    for filename in template_files:
        flavouring = os.path.basename(filename).split('.')
        flav_template = unicode(
            open(filename).read(),
            config.get('blog_encoding', 'iso-8859-1'))
        flavour[flavouring[0]] = flav_template

    if not flavour:
        return DEFAULT_FLAVOURS["error"]

    return flavour
def blogger_getRecentPosts(request, appkey, blogid, username, password,
                           numberOfPosts=5):
    """
    Get recent posts from a blog tree
    """
    authenticate(request, username, password)
    config = request.getConfiguration()
    data = request.getData()

    from Pyblosxom.entries.fileentry import FileEntry
    from Pyblosxom import pyblosxom

    exts = tools.run_callback("entryparser",
                              {'txt': pyblosxom.blosxom_entry_parser},
                              mappingfunc=lambda x, y: y,
                              defaultfunc=lambda x: x)
    data['extensions'] = exts

    result = []
    dataList = []
    filelist = tools.Walk(request,
                          os.path.join(config['datadir'], blogid[1:]),
                          pattern=re.compile(r'.*\.(' + '|'.join(exts.keys()) + ')-?$'),
                          recurse=1)

    for ourfile in filelist:
        entry = FileEntry(request, ourfile, config['datadir'])
        dataList.append((entry._mtime, entry))

    # this sorts entries by mtime in reverse order. entries that have
    # no mtime get sorted to the top.
    dataList.sort()
    dataList.reverse()
    dataList = [x[1] for x in dataList]

    count = 1
    for entry in dataList:
        result.append({
            'dateCreated': xmlrpclib.DateTime(entry['mtime']),
            'userid': '01',
            'postid': entry['filename'].replace(config['datadir'], ''),
            'content': open(entry['filename']).read()
        })
        if count >= int(numberOfPosts):
            break
        count += 1

    return result
def keys(self):
    import re
    keys = []
    cached = []
    if os.path.isdir(self._config):
        cached = tools.Walk(self._request,
                            self._config,
                            1,
                            re.compile(r'.*\.entrypickle$'))
    for cache in cached:
        cache_data = pickle.load(open(cache))
        key = cache_data.get('realfilename', '')
        if not key and os.path.isfile(cache):
            os.remove(cache)
        self.load(key)
        if not self.isCached():
            self.rmEntry()
        else:
            keys.append(key)
    return keys
def cb_handle(args):
    request = args["request"]
    data = request.getData()
    pyhttp = request.getHttp()
    config = request.getConfiguration()

    url = config.get("sitemap_url", "/sitemap.xml")
    if pyhttp["PATH_INFO"] != url:
        return 0

    response = request.getResponse()
    response.addHeader("Content-type", "text/xml")

    urls = []
    for fname in tools.Walk(request, config["datadir"]):
        entry = entries.fileentry.FileEntry(request, fname, config["datadir"])
        url = "%s.html" % os.path.join(config["base_url"],
                                       entry.get("absolute_path"),
                                       entry.get("fn"))
        urls.append(TEMPLATE_URL % (url, entry.get("w3cdate")))

    urls.reverse()
    sitemap = TEMPLATE_BASE % ("\n".join(urls))
    print >> response, sitemap
    return 1
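# cb_handle() above fills two module-level %-format templates that are defined
# elsewhere in the sitemap plugin. A sketch of what they could look like,
# assuming the standard sitemaps.org urlset/url/loc/lastmod layout (the actual
# constants in the plugin may differ):
TEMPLATE_URL = """<url>
  <loc>%s</loc>
  <lastmod>%s</lastmod>
</url>"""

TEMPLATE_BASE = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
%s
</urlset>"""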
def blogger_getUsersBlogs(request, appkey, username, password):
    """
    Returns trees below datadir
    """
    authenticate(request, username, password)
    config = request.getConfiguration()
    url = config.get('base_url', '')
    result = [{'url': url + '/', 'blogid': '/', 'blogName': '/'}]

    for directory in tools.Walk(request, config['datadir'], 0,
                                re.compile(r'.*'), 1):
        blogpath = directory.replace(config['datadir'], '') + '/'
        blogpath = blogpath.replace(os.sep, '/')
        result.append({
            'url': url + blogpath,
            'blogid': blogpath,
            'blogName': blogpath
        })

    if config.get('xmlrpc_blogger_metaweblog', '') == 'True':
        return result[:1]
    else:
        return result
def genCategories(self):
    config = self._request.getConfiguration()
    root = config["datadir"]

    data = self._request.getData()

    flav = config.get("category_flavour", "")
    if flav:
        self._flavour = "?flav=" + flav
    else:
        self._flavour = ""

    self._baseurl = config.get("base_url", "")

    # build the list of entries
    elist = tools.Walk(self._request, root)
    elist = [mem[len(root) + 1:] for mem in elist]
    total = len(elist)

    elistmap = {}
    for mem in elist:
        mem = os.path.dirname(mem)
        elistmap[mem] = 1 + elistmap.get(mem, 0)
    self._elistmap = elistmap

    clistmap = {}
    for mem in elistmap.keys():
        mem = mem.split(os.sep)
        for i in range(len(mem) + 1):
            p = os.sep.join(mem[0:i])
            clistmap[p] = 0

    clist = clistmap.keys()
    clist.sort()

    clist = map(self.genitem, clist)
    self._categories = "<br />".join(clist)
def _getEntryCount(request):
    """
    Return a count of the number of published and unpublished
    (suffixed with '-') entries

    @param request: the HTTP Request
    @type request: Request

    @return: the number of published and unpublished entries
    @rtype: int
    """
    config = request.getConfiguration()
    root = config['datadir']

    elist = tools.Walk(request, root, pattern=_allEntriesPattern(request))
    elist = [mem[len(root) + 1:] for mem in elist]

    elistmap = {}
    for mem in elist:
        mem = os.path.dirname(mem)
        elistmap[mem] = 1 + elistmap.get(mem, 0)
        mem = mem.split(os.sep)

    return len(elist)
def handle_registry_queue(args):
    """
    Handles showing all the entries that are in the submission queue
    and all the data involved in each.
    """
    request = args["request"]
    pyhttp = request.getHttp()
    config = request.getConfiguration()

    registrydir = config["registry_dir"]

    entries = tools.Walk(request, registrydir, 0, re.compile(".*\\.[^\\.]+-$"), 0)
    if len(entries) == 0:
        return [generate_entry(request, "<p>No entries in the queue.</p>")]

    # Get our URL and configure the base_url param
    if pyhttp.has_key('SCRIPT_NAME'):
        if not config.has_key('base_url'):
            config['base_url'] = 'http://%s%s' % (pyhttp['HTTP_HOST'],
                                                  pyhttp['SCRIPT_NAME'])
    else:
        config['base_url'] = config.get('base_url', '')

    config['base_url'] = config['base_url'] + TRIGGER

    output = []

    entries = get_entries(request, registrydir, entries)
    for mem in entries:
        output.append((mem, "queue-summary"))

    entry = generate_entry(request, "", "queue", "queue")
    entry["registry_render"] = output
    entry["template_name"] = "registry-index"
    entry["nocomments"] = 1

    return [entry]
def cb_filelist(args):
    request = args['request']
    http = request.getHttp()
    data = request.getData()
    config = request.getConfiguration()

    trigger = config.get('index_trigger', 'site-index')
    if http['PATH_INFO'] != trigger:
        return

    # get the entries
    datadir = config['datadir']
    files = tools.Walk(request, datadir)
    files.sort()

    # sort into sections, one for each letter. the dictionary is
    # letter => (entry name, path) where path is relative to datadir.
    sections = {}
    entry_extensions = data['extensions'].keys()

    for file in files:
        assert file.startswith(datadir)
        path, ext = os.path.splitext(file[len(datadir):])
        if ext[1:] in entry_extensions:  # strip the leading period from ext
            entry_name = os.path.basename(path)
            sections.setdefault(entry_name[0].upper(), []).append((entry_name, path))

    # extract the first letters. sort as usual, except that numbers and other
    # non-letters go *after* letters.
    def letters_before_symbols(a, b):
        if a.isalpha() and not b.isalpha():
            return -1
        elif not a.isalpha() and b.isalpha():
            return 1
        else:
            return cmp(a, b)

    letters = sections.keys()
    if config.get('index_letters_first', 1):
        letters.sort(letters_before_symbols)
    else:
        letters.sort()

    # add the header with links to each section
    body = '<p class="index-header">'
    letter_links = ['<a href="#%s">%s</a>' % (l, l) for l in letters]
    body += ' |\n'.join(letter_links)
    body += '</p>\n<hr class="index"/>\n\n'

    # add the sections themselves, with one link per entry, in a table. the
    # number of columns is taken from the index_num_columns config variable.
    # entries are ordered down each column, in order.
    num_cols = config.get('index_num_columns', 2)

    for l in letters:
        body += '<h3 class="index">%s</h3> <a name="%s"></a>\n' % (l, l)
        body += '<table class="index">\n'

        entries = sections[l]
        entries.sort()
        num_rows = int(math.ceil(float(len(entries)) / num_cols))

        for row in range(0, num_rows):
            # alternate the <tr> tags' class between index-row-stripe-0 and
            # index-row-stripe-1, so you can use CSS to alternate their color
            # for readability, if you want.
            body += '<tr class="index-row-stripe-%d">\n' % (row % 2)
            for col in range(0, num_cols):
                entry_index = col * num_rows + row
                if entry_index < len(entries):
                    entry_name, path = entries[entry_index]
                else:
                    entry_name = path = ''
                body += '<td><a href="%s">%s</a></td>\n' % (path, entry_name)
            body += '</tr>\n'

        body += '</table>\n<hr class="index"/>\n\n'

    data = {'title': config.get('index_title', 'index')}

    # use the epoch for mtime. otherwise, pyblosxom uses the current time,
    # which makes other plugins (like weblogsping) think this is a new entry.
    epoch = time.localtime(0)

    fe = Pyblosxom.entries.base.generate_entry(request, data, body, epoch)
    return [fe]
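# The site-index plugin above reads a handful of optional config settings. A
# sketch of the corresponding entries in a Pyblosxom config.py (keys are the
# ones the code reads; the values shown mirror the in-code defaults and are
# only illustrative):
py["index_trigger"] = "site-index"   # PATH_INFO value that triggers the index page
py["index_title"] = "index"          # title of the generated entry
py["index_num_columns"] = 2          # columns in each per-letter table
py["index_letters_first"] = 1        # sort letters before digits/symbols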
def metaWeblog_getRecentPosts(request, blogid, username, password,
                              numberOfPosts):
    """
    Get the most recent posts

    Part of the metaWeblog API

    @param request: the pyblosxom Request instance
    @type request: Request

    @param blogid: the id of the blog
    @type blogid: string

    @param username: the username
    @type username: string

    @param password: the password
    @type password: string

    @param numberOfPosts: the number of posts to retrieve
    @type numberOfPosts: int

    @returns: list of dicts, one per post
    @rtype: list
    """
    # tools.log("getRecentPosts blogid:%s count:%s" % (blogid, numberOfPosts))
    authenticate(request, username, password)
    config = request.getConfiguration()

    filelist = tools.Walk(request, config['datadir'],
                          int(config['depth']),
                          pattern=_allEntriesPattern(request))

    entryList = []
    for f in filelist:
        entry = FileEntry(request, f, config['datadir'])
        entryList.append((entry._mtime, entry))

    entryList.sort()
    entryList.reverse()

    try:
        numberOfPosts = int(numberOfPosts)
    except:
        # tools.log("Couldn't convert numberOfPosts")
        numberOfPosts = 5

    entryList = [x[1] for x in entryList][:numberOfPosts]

    posts = [{
        'permaLink': "%s/%s/%s/%s#%s" % (config['base_url'], x['yr'],
                                         x['mo_num'], x['da'], x['fn']),
        'title': x['title'],
        'description': x['body'],
        'postid': re.sub(r'^/', '', "%s/%s" % (x['absolute_path'], x['fn'])),
        'categories': [x['absolute_path']],
        'dateCreated': xmlrpclib.DateTime(x['w3cdate'])
    } for x in entryList]

    return posts
def cb_filelist(args): request = args["request"] pyhttp = request.getHttp() data = request.getData() config = request.getConfiguration() if not pyhttp["PATH_INFO"].startswith("/zqcrecent"): return datadir = config["datadir"] walkdir = datadir + "/" + str(pyhttp["QUERY_STRING"]) data["debug"] = "DEBUG::%s<br/>%s<br/>%s" % ( datadir, str(pyhttp["QUERY_STRING"]), walkdir) baseurl = config.get("base_url", "") cmntdir = config.get("comment_dir", datadir + os.sep + "comments") cmntext = config.get("comment_ext", ".cmt") data["blog_title"] = config.get( "blog_title", "") + "<DIV id='recent'> - category recent</DIV>" data[INIT_KEY] = 1 config['num_entries'] = 9999 marker = time.time() - (60 * 60 * 24 * 14) # get entries and export # get all the entries #allentries = tools.Walk(request, datadir) ## Zoomq::060128 walking base Categories Point allentries = tools.Walk(request, walkdir) debug = [] stuff = [] for mem in allentries: timetuple = tools.filestat(request, mem) entrytstamp = time.mktime(timetuple) tstamp = entrytstamp absolute_path = mem[len(datadir):mem.rfind(os.sep)] fn = mem[mem.rfind(os.sep) + 1:mem.rfind(".")] cmtexpr = os.path.join(cmntdir + absolute_path, fn + '-*.' + cmntext) cmtlist = glob.glob(cmtexpr) cmtlist = [(os.stat(m)[8], m) for m in cmtlist] cmtlist.sort() cmtlist.reverse() # we want the most recent mtime from either the entry or # any of its comments if len(cmtlist) > 0: if tstamp < cmtlist[0][0]: tstamp = cmtlist[0][0] # if the mtime is more recent than our marker, we toss the # stuff into our list of things to look at. # Zoomq::060214 fixed cancel this limited """ if tstamp > marker: stuff.append( [tstamp, entrytstamp, mem, cmtlist] ) """ stuff.append([tstamp, entrytstamp, mem, cmtlist]) stuff.sort() stuff.reverse() # time stamp and blog entry #e = "<tr>\n<td valign=\"top\" align=\"left\">%s:</td>\n" \ # "<td><a href=\"%s/%s\">%s</a> (%s)<br />%s</td></tr>\n" e = """<tr> <td valign="top" align="left">%s:</td> <td><a href="%s/%s.html">%s</a> (%s) <br/>%s </td></tr> """ entrylist = [] output = [] for mem in stuff: entry = entries.fileentry.FileEntry(request, mem[2], data['root_datadir']) tstamp = time.strftime("%m/%d/%Y", time.localtime(mem[1])) temp = e % (tstamp, \ baseurl, \ entry["file_path"], \ entry["title"], \ "", \ "".join( [get_comment_text(c) + "<br />" for c in mem[3]]) #entry["path"] ) output.append(temp) entrylist.append( new_entry(request, "Category Recent Entries::", "<tr><td colspan=2> </td></tr>\n".join(output))) return entrylist
def cb_filelist(args):
    global registrydir, TRIGGER

    request = args["request"]
    pyhttp = request.get_http()
    data = request.get_data()
    config = request.get_configuration()

    if not pyhttp["PATH_INFO"].startswith(TRIGGER):
        return

    data[INIT_KEY] = 1
    data['root_datadir'] = config['datadir']

    # if they haven't added a registry_dir to their config file, we
    # pleasantly error out
    if not config.has_key("registry_dir"):
        output = ("<p>\"registry_dir\" config setting is not set. "
                  "Refer to documentation.</p>")
        return [generate_entry(request, output, "setup error")]

    registrydir = config["registry_dir"]

    # make sure the registrydir has a / at the end
    if registrydir[-1] != os.sep:
        registrydir = registrydir + os.sep

    pathinfo = pyhttp["PATH_INFO"]
    dir2 = pathinfo[len(TRIGGER):]
    if dir2.startswith("/"):
        dir2 = dir2[1:]

    filename, ext = os.path.splitext(dir2)
    fullpath = os.path.join(registrydir, filename)

    if os.path.isdir(fullpath):
        entries = tools.Walk(request, fullpath)
    elif fullpath.endswith("index") and os.path.isdir(fullpath[:-len("index")]):
        entries = tools.Walk(request, fullpath[:-1 * len("index")])
    else:
        fext = tools.what_ext(data["extensions"].keys(), fullpath)
        entries = [fullpath + "." + fext]

    if ext[1:]:
        data["flavour"] = ext[1:]

    # that entry doesn't exist....
    if len(entries) == 0:
        output = "<p>No entries of that kind registered here.</p>"
        return [generate_entry(request, output)]

    # if we're looking at a specific entry....
    if len(entries) == 1:
        try:
            entry = build_entry(request, entries[0], registrydir, False)
            if entries[0].find("/flavours/") != -1:
                entry["template_name"] = "flavour-story"
            else:
                entry["template_name"] = "registry-story"
            return [entry]
        except Exception:
            output = "<p>That plugin does not exist.</p>"
            return [generate_entry(request, output)]

    return get_entries_by_item(request, registrydir, entries, "path")
def generateCalendar(self):
    """
    Generates the calendar.  We'd like to walk the archives
    for things that happen in this month and mark the dates
    accordingly.  After doing that we pass it to a formatting
    method which turns the thing into HTML.
    """
    config = self._request.getConfiguration()
    data = self._request.getData()
    entry_list = data["entry_list"]

    root = config["datadir"]
    baseurl = config.get("base_url", "")

    self._today = time.localtime()

    if len(entry_list) == 0:
        # if there are no entries, we shouldn't even try to
        # do something fancy.
        self._cal = ""
        return

    view = list(entry_list[0]["timetuple"])

    # this comes in as 2001, 2002, 2003, ... so we can convert it
    # without an issue
    temp = data.get("pi_yr", time.strftime("%Y", self._today))
    if temp:
        view[0] = int(temp)

    # the month is a bit harder since it can come in as "08", "", or
    # "Aug" (in the example of August).
    temp = data.get("pi_mo", time.strftime("%m", self._today))
    if temp.isdigit():
        temp = int(temp)
    else:
        if tools.month2num.has_key(temp):
            temp = int(tools.month2num[temp])
        else:
            temp = view[1]
    view[1] = temp

    view = tuple(view)
    self._view = view

    # if we're looking at a specific day, we figure out what it is
    try:
        if data["pi_yr"] and data["pi_mo"] and data["pi_da"]:
            if data["pi_mo"].isdigit():
                mon = data["pi_mo"]
            else:
                mon = tools.month2num[data["pi_mo"]]

            self._specificday = [data["pi_yr"], mon, data["pi_da"]]
            self._specificday = tuple(
                [int(mem) for mem in self._specificday])
    except:
        pass

    archiveList = tools.Walk(self._request, root)

    yearmonth = {}

    for mem in archiveList:
        timetuple = tools.filestat(self._request, mem)

        # if we already have an entry for this date, we skip to the
        # next one because we've already done this processing
        day = str(timetuple[2]).rjust(2)
        if self._entries.has_key(day):
            continue

        # add an entry for yyyymm so we can figure out next/previous
        year = str(timetuple[0])
        dayzfill = string.zfill(timetuple[1], 2)
        yearmonth[year + dayzfill] = time.strftime("%b", timetuple)

        # if the entry isn't in the year/month we're looking at with
        # the calendar, then we skip to the next one
        if timetuple[0:2] != view[0:2]:
            continue

        # mark the entry because it's one we want to show
        datepiece = time.strftime("%Y/%b/%d", timetuple)
        self._entries[day] = (baseurl + "/" + datepiece, day)

    # create the calendar
    calendar.setfirstweekday(calendar.SUNDAY)
    cal = calendar.monthcalendar(view[0], view[1])

    # insert the days of the week
    cal.insert(0, ["Su", "Mo", "Tu", "We", "Th", "Fr", "Sa"])

    # figure out next and previous links by taking the dict of yyyymm
    # strings we created, turning it into a list, sorting them,
    # and then finding "today"'s entry.  then the one before it
    # (index-1) is prev, and the one after (index+1) is next.
    keys = yearmonth.keys()
    keys.sort()
    thismonth = time.strftime("%Y%m", view)

    # do some quick adjustment to make sure we didn't pick
    # a yearmonth that's outside the yearmonths of the entries we
    # know about.
    if thismonth in keys:
        index = keys.index(thismonth)
    elif len(keys) == 0 or keys[0] > thismonth:
        index = 0
    else:
        index = len(keys) - 1

    # build the prev link
    if index == 0:
        prev = None
    else:
        prev = ("%s/%s/%s" % (baseurl, keys[index - 1][:4],
                              yearmonth[keys[index - 1]]),
                "<")

    # build the next link
    if index == len(yearmonth) - 1:
        next = None
    else:
        next = ("%s/%s/%s" % (baseurl, keys[index + 1][:4],
                              yearmonth[keys[index + 1]]),
                ">")

    # insert the month name and next/previous links
    cal.insert(0, [prev, time.strftime("%B %Y", view), next])

    self._cal = self.formatWithCSS(cal)
def cb_filelist(args): request = args["request"] pyhttp = request.getHttp() data = request.getData() config = request.getConfiguration() if not pyhttp["PATH_INFO"].startswith("/recent"): return datadir = config["datadir"] baseurl = config.get("base_url", "") cmntdir = config.get("comment_dir", datadir + os.sep + "comments") cmntext = config.get("comment_ext", ".cmt") data["blog_title"] = config.get("blog_title", "") + " - recent activity" data[INIT_KEY] = 1 config['num_entries'] = 9999 marker = time.time() - (60 * 60 * 24 * 14) # get all the entries allentries = tools.Walk(request, datadir) debug = [] stuff = [] for mem in allentries: timetuple = tools.filestat(request, mem) entrytstamp = time.mktime(timetuple) tstamp = entrytstamp absolute_path = mem[len(datadir):mem.rfind(os.sep)] fn = mem[mem.rfind(os.sep) + 1:mem.rfind(".")] cmtexpr = os.path.join(cmntdir + absolute_path, fn + '-*.' + cmntext) cmtlist = glob.glob(cmtexpr) cmtlist = [(os.stat(m)[8], m) for m in cmtlist] cmtlist.sort() cmtlist.reverse() # we want the most recent mtime from either the entry or # any of its comments if len(cmtlist) > 0: if tstamp < cmtlist[0][0]: tstamp = cmtlist[0][0] # if the mtime is more recent than our marker, we toss the # stuff into our list of things to look at. if tstamp > marker: stuff.append([tstamp, entrytstamp, mem, cmtlist]) stuff.sort() stuff.reverse() # time stamp and blog entry e = "<tr>\n<td valign=\"top\" align=\"left\">%s:</td>\n" \ "<td><a href=\"%s/%s\">%s</a> (%s)<br />%s</td></tr>\n" entrylist = [] output = [] for mem in stuff: entry = entries.fileentry.FileEntry(request, mem[2], config["datadir"]) tstamp = time.strftime("%m/%d/%Y", time.localtime(mem[1])) temp = e % (tstamp, \ baseurl, \ entry["file_path"], \ entry["title"], \ entry["path"], \ "".join( [get_comment_text(c) + "<br />" for c in mem[3]])) output.append(temp) entrylist.append( new_entry(request, "Recent activity:", "<tr><td colspan=2> </td></tr>\n".join(output))) return entrylist
def genCategories(self):
    config = self._request.getConfiguration()
    root = config["datadir"]

    start_t = config.get("category_start", DEFAULT_START)
    begin_t = config.get("category_begin", DEFAULT_BEGIN)
    item_t = config.get("category_item", DEFAULT_ITEM)
    end_t = config.get("category_end", DEFAULT_END)
    finish_t = config.get("category_finish", DEFAULT_FINISH)

    self._baseurl = config.get("base_url", "")

    form = self._request.getForm()
    flavour = (form.has_key('flav') and form['flav'].value
               or config.get('default_flavour', 'html'))

    # build the list of all entries in the datadir
    elist = tools.Walk(self._request, root)

    # peel off the root dir from the list of entries
    elist = [mem[len(root) + 1:] for mem in elist]

    # go through the list of entries and build a map that
    # maintains a count of how many entries are in each
    # category
    elistmap = {}
    for mem in elist:
        mem = os.path.dirname(mem)
        elistmap[mem] = 1 + elistmap.get(mem, 0)
    self._elistmap = elistmap

    # go through the elistmap keys (which is the list of
    # categories) and for each piece in the key (i.e. the key
    # could be "dev/pyblosxom/releases" and the pieces would
    # be "dev", "pyblosxom", and "releases") we build keys
    # for the category list map (i.e. "dev", "dev/pyblosxom",
    # "dev/pyblosxom/releases")
    clistmap = {}
    for mem in elistmap.keys():
        mem = mem.split(os.sep)
        for index in range(len(mem) + 1):
            p = os.sep.join(mem[0:index])
            clistmap[p] = 0

    # then we take the category list from the clistmap and
    # sort it alphabetically
    clist = clistmap.keys()
    clist.sort()

    output = []
    indent = 0

    output.append(start_t)

    # then we generate each item in the list
    for item in clist:
        itemlist = item.split(os.sep)

        num = 0
        for key in self._elistmap.keys():
            if key.endswith(item) or key.endswith(item + os.sep):
                num = num + self._elistmap[key]

        if not item:
            tab = ""
        else:
            tab = len(itemlist) * " "

        if indent > len(itemlist):
            for i in range(indent - len(itemlist)):
                output.append(end_t)
        elif indent < len(itemlist):
            for i in range(len(itemlist) - indent):
                output.append(begin_t)

        # now we build the dict with the values for substitution
        d = {"base_url": self._baseurl,
             "fullcategory": item + "/",
             "category": itemlist[-1] + "/",
             # "flavour": flavour,
             "count": num,
             "indent": tab}

        # and we toss it in the thing
        output.append(item_t % d)

        indent = len(itemlist)

    output.append(end_t * indent)
    output.append(finish_t)

    # then we join the list and that's the final string
    self._categories = "\n".join(output)
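# The item template used by genCategories() above is %-formatted against a
# dict carrying base_url, fullcategory, category, count and indent. A sketch
# of a config.py override (the markup is illustrative; DEFAULT_ITEM elsewhere
# in the plugin provides the real default):
py["category_item"] = (
    '%(indent)s<a href="%(base_url)s/%(fullcategory)sindex">'
    '%(category)s</a> (%(count)d)')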
def cb_filelist(args):
    global registrydir, TRIGGER, SUBMITTRIGGER

    request = args["request"]
    pyhttp = request.getHttp()
    data = request.getData()
    config = request.getConfiguration()
    form = pyhttp["form"]

    if not pyhttp["PATH_INFO"].startswith(TRIGGER):
        return

    data[INIT_KEY] = 1
    data['root_datadir'] = config['datadir']

    # Get our URL and configure the base_url param
    if pyhttp.has_key('SCRIPT_NAME'):
        if not config.has_key('base_url'):
            config['base_url'] = 'http://%s%s' % (pyhttp['HTTP_HOST'],
                                                  pyhttp['SCRIPT_NAME'])
    else:
        config['base_url'] = config.get('base_url', '')

    config['base_url'] = config['base_url'] + TRIGGER

    # if they haven't added a registry_dir to their config file,
    # we pleasantly error out
    if not config.has_key("registry_dir"):
        output = "<p>\"registry_dir\" config setting is not set. Refer to documentation.</p>"
        return [generate_entry(request, output, "setup error")]

    registrydir = config["registry_dir"]

    # make sure the registrydir has a / at the end
    if registrydir[-1] != os.sep:
        registrydir = registrydir + os.sep

    # if they are doing the queue thing, then we spin them off to queue
    # stuff.
    if pyhttp["PATH_INFO"].startswith(QUEUETRIGGER):
        data["extensions"]["txt-"] = data["extensions"]["txt"]
        return handle_registry_queue(args)

    # if they are doing the submit thing, then we spin them off to
    # the submit stuff.
    if pyhttp["PATH_INFO"].startswith(SUBMITTRIGGER):
        return handle_registry_submit(args)

    # check if we're looking for a listing of all entries
    if pyhttp["PATH_INFO"] == TRIGGER:
        entries = tools.Walk(request, registrydir)
    else:
        dir2 = pyhttp["PATH_INFO"][len(TRIGGER):]
        filename, ext = os.path.splitext(dir2)

        if os.path.isdir(registrydir + filename):
            entries = tools.Walk(request, registrydir + filename)
        else:
            fn = registrydir + filename[1:]
            fext = tools.what_ext(data["extensions"].keys(), fn)
            entries = [fn + "." + fext]

        if ext[1:]:
            data["flavour"] = ext[1:]

    # that entry doesn't exist....
    if len(entries) == 0:
        output = "<p>No entries of that kind registered here.</p>"
        return [generate_entry(request, output)]

    # if we're looking at a specific entry....
    if len(entries) == 1:
        try:
            entry = fileentry.FileEntry(request, entries[0],
                                        registrydir, registrydir)
            if entries[0].find("flavours") != -1:
                entry["template_name"] = "flavour-story"
            else:
                entry["template_name"] = "registry-story"
            if entry.has_key("contrib"):
                entry["body"] = entry["body"] + CONTRIB_DESC
            return [entry]
        except Exception, e:
            output = "That plugin does not exist."
            return [generate_entry(request, output)]