def _getItems(self):
    """Return the folder contents, filtered and sorted.

    The content filter is decoded from the request's ``folderfilter``
    value; the sort key and direction come from ``_getSorting``.
    """
    sort_key, sort_reverse = self._getSorting()
    raw_filter = self.request.get('folderfilter', '')
    content_filter = self.context.decodeFolderFilter(raw_filter)
    contents = self.context.listFolderContents(contentFilter=content_filter)
    if sort_reverse:
        direction = 'desc'
    else:
        direction = 'asc'
    return sequence.sort(contents, ((sort_key, 'cmp', direction),))
def getChangeLogItems(self, since, meta_types=None):
    """Return all change-log items, youngest first.

    since -- optional lower date bound; a string is converted to a
             DateTime.  Falsy means "no date restriction".
    meta_types -- optional list of meta types to keep; [] and ['']
                  are treated the same as None (no restriction).
    """
    items = self._getChangeLogItems()
    items = sequence.sort(items, (('date',),))
    items.reverse()  # youngest first
    # Normalize the "no selection" spellings coming from forms.
    if meta_types in ([], ['']):
        meta_types = None
    if not since:
        if meta_types:
            # NOTE(review): this branch calls _filterOnMetaType while
            # the dated branch below calls _filterOnMetaTypes (plural);
            # one of the two names is probably a typo -- confirm which
            # helper actually exists.
            return self._filterOnMetaType(items, meta_types)
        return items
    if same_type(since, 's'):
        since = DateTime(since)
    today = DateTime()
    recent = [item for item in items
              if since <= item.getDate() <= today]
    if meta_types:
        return self._filterOnMetaTypes(recent, meta_types)
    return recent
def getBlogItems(self, sort=1, pub_date_filter=1, bookmarks=None, oc=None):
    """Return all blog items, optionally sorted and filtered.

    sort -- when true, order by pub_date, newest first
    pub_date_filter -- when true, drop items that are still "too new"
    bookmarks -- if given, keep only items whose isBookmark() matches
    oc -- restrict to these categories; when None, falls back to the
          request's 'onlycategories'/'oc' values
    """
    items = self.objectValues(BLOGITEM_METATYPE)
    if bookmarks is not None:
        items = [item for item in items
                 if item.isBookmark() == bookmarks]
    if sort:
        items = sequence.sort(items, (('pub_date',),))
        items.reverse()  # newest first
    if pub_date_filter:
        items = self._filterTooNew(items)
    # misc web function: allow the category restriction via the request
    request = self.REQUEST
    if oc is None and (request.has_key('onlycategories')
                       or request.has_key('oc')):
        oc = request.get('onlycategories', request.get('oc'))
    if oc:
        wanted = oc
        if not self.same_type(wanted, []):
            wanted = [wanted]
        kept = []
        for item in items:
            item_categories = item.getItemCategories()
            for category in wanted:
                if category in item_categories and item not in kept:
                    kept.append(item)
        items = kept
    return items
def orderObjects(self, key, reverse=None):
    """Order sub-objects by *key*; reverse the order when *reverse*
    is true.  Returns the result of moveObjectsByDelta."""
    ordered = sort(self.objectItems(), ((key, 'cmp', 'asc'),))
    ids = [obj_id for obj_id, obj in ordered]
    if reverse:
        ids.reverse()
    # Move every object maximally upward so the new order sticks.
    return self.moveObjectsByDelta(ids, -len(self._objects))
def tpValues(self):
    """Return a list of subobjects, used by the tree tag.

    When a ``tree_ids`` attribute exists it determines which children
    are shown (sorted when possible); otherwise all folderish children
    are returned, ordered by the configured default key/direction.
    """
    if hasattr(aq_base(self), 'tree_ids'):
        tree_ids = self.tree_ids
        try:
            tree_ids = list(tree_ids)
        except TypeError:
            pass  # not convertible to a list; use as-is
        if hasattr(tree_ids, 'sort'):
            tree_ids.sort()
        return [self._getOb(id) for id in tree_ids
                if hasattr(self, id)]
    # this part is different from the ObjectManager code: only
    # folderish children are listed, with configurable sorting
    folderish = [obj for obj in self.objectValues()
                 if getattr(obj, 'isPrincipiaFolderish', False)]
    folderish = sort(folderish,
                     ((self._default_sort_key, 'cmp', 'asc'),))
    if self._default_sort_reverse:
        folderish.reverse()
    return folderish
def _getItems(self):
    """Return the context's content values sorted per the context's
    default sorting, wrapped in a LazyFilter that skips items the
    user may not View."""
    sort_key, sort_reverse = self.context.getDefaultSorting()
    if sort_reverse:
        direction = 'desc'
    else:
        direction = 'asc'
    contents = self.context.contentValues()
    contents = sequence.sort(contents, ((sort_key, 'cmp', direction),))
    return LazyFilter(contents, skip='View')
def _get_items(self):
    """Return self.contents sorted by the configured key/direction."""
    sort_key, sort_reverse = self._get_sorting()
    if sort_reverse:
        direction = 'desc'
    else:
        direction = 'asc'
    return sequence.sort(self.contents, ((sort_key, 'cmp', direction),))
def getGoogleSearchTerms(readpath, storepath):
    """Parse a '|'-separated Google referer log at *readpath* and write
    an HTML report of the most common search terms to *storepath*.

    readpath  -- path to the pipe-delimited referer log (CSV wrapper)
    storepath -- path the generated HTML report is written to
    Returns the string "Done".
    """
    # Matches search URLs on the known national Google domains.
    # NOTE(review): the dots in the pattern are unescaped, so each '.'
    # matches any character -- overly loose, though harmless in practice.
    gdomains = re.compile(r'^http://www.google.(com|co.uk|de|se|it|fr|com.br|fi|be|nl|com.au|co.in|es|com.ar|dk|pl|ca|ie|ch)/search', re.I)
    #googledomains = ('http://www.google.com',)
    #p = csv.parser(field_sep='|')
    # NOTE(review): file() is Python-2-only and this handle is never
    # explicitly closed.
    reader = csv.reader(file(readpath))
    headers = None
    #f=open(readpath, 'r')
    lines = []
    for row in reader:
        if headers is None:
            # First row is the header line; just mark it as consumed.
            headers = 1
        elif row:
            r = ''.join(row)
            #r = row[0]
            # Protect the one '|' that is data rather than a separator.
            r = r.replace('countryUK|countryGB', 'countryUK.countryGB')
            splitted = r.split('|')
            if len(splitted) == 5:  # paranoia
                if gdomains.findall(splitted[0]):
                    lines.append(splitted)
            else:
                # A malformed row ends the data section.
                break
    #while 1:
    #    line = f.readline()
    #    if not line:
    #        break
    #    fields = p.parse(line)
    #    if not fields:
    #        continue
    #    if headers is None:
    #        headers = fields
    #    else:
    #        if gdomains.findall(fields[0]):
    #            lines.append(fields)
    #
    #f.close()
    # --------
    # Track the observed date range, starting from "impossible" extremes
    # (now, and 300 days ago) that any real entry will displace.
    first_date = DateTime()
    last_date = DateTime() - 300
    searchterms = {}  # decoded query string -> occurrence count
    for line in lines:
        qs = line[1]
        if qs.startswith('cache:'):
            # Google cache hits are not real searches; skip them.
            continue
        datestring = line[2]
        try:
            date = DateTime(datestring)
            if date < first_date:
                first_date = date
            if date > last_date:
                last_date = date
        except:
            # NOTE(review): bare except silently skips unparsable dates.
            pass
        q = decodeQS(qs)
        if searchterms.has_key(q):
            searchterms[q] += 1
        else:
            searchterms[q] = 1
    counts = []
    for k, v in searchterms.items():
        counts.append({'q': k, 'count': v})
    # Most frequent terms first.
    counts = sequence.sort(counts, (('count',),))
    counts.reverse()
    h = jaxml.HTML_document()
    # Write something about dates
    h._push()
    h.table(border=1)
    h._push()
    h.tr(bgcolor='#EFEFEF')
    h.th("Oldest search")
    h.th("Latest search")
    h.th("Difference in days")
    h.th("No searches")
    h.th("Searches per day")
    h._pop()
    h._push()
    h.tr()
    h.td(first_date.strftime('%Y/%m/%d %H:%M'), align="center")
    h.td(last_date.strftime('%Y/%m/%d %H:%M'), align="center")
    h.td(str(round(last_date - first_date, 1)), align="center")
    h.td(str(len(lines)), align="center")
    # NOTE(review): raises ZeroDivisionError when the oldest and newest
    # searches share the same timestamp (last_date - first_date == 0).
    h.td(str(round(len(lines) / (last_date - first_date), 1)), align="center")
    h._pop()
    h._pop()
    h.p(" ")
    h._push()
    h.table(border=1)
    h._push()
    h.tr(bgcolor='#EFEFEF')
    h._push()
    h.th("Term")
    h.th("Count")
    h.th("Index")
    h._pop()
    h._pop()
    for each in counts:
        h._push()
        h.tr()
        h._push()
        h.td(showAsGoogleLink(each['q']))
        h.td(each['count'])
        # Result index (position in Google's results), cached in a pickle.
        i = getSearchResultIndexPickle(each['q'])
        if i is None:
            # not found in pickle cache
            try:
                i = getSearchResultIndex(each['q'])
                setSearchResultIndex(each['q'], i)
            except:
                # NOTE(review): bare except -- any lookup failure is
                # logged and rendered below as '>10'.
                i = None
                m = "getSearchResultIndex('%s') failed" % each['q']
                LOG("getGoogleSearchTerms", ERROR, m)
        if i is None:
            i = '>10'
        else:
            i = str(i)
        h.td(i)
        h._pop()
        h._pop()
    h._pop()
    # Strip the leading "<?xml ...?>" declaration that jaxml emits.
    MostCommonHTML = str(h)[str(h).find('?>') + 2:]
    html = "<html><head><title></title></head><body>\n\n"
    html += "<h2>Google Referer Log Analysis</h2>"
    html += "<!--BEGIN COMMON-->\n"
    html += "<h3>Most Common Search terms</h3>"
    html += MostCommonHTML
    html += "<!--END COMMON-->\n"
    html += "<br />\n" * 2
    now = DateTime().strftime('%Y/%m/%d %H:%M')
    html += "<small>Generated %s</small>\n\n" % now
    html += "</body></html>"
    # -------
    f = open(storepath, 'w')
    f.write(html + '\n')
    f.close()
    return "Done"
    # NOTE(review): everything below this return is unreachable dead
    # code -- an older "Most Recent" report whose builder loop is
    # commented out, so MostRecentHTML would just re-render the same
    # document anyway.  Candidate for deletion.
    # --------
    # h = jaxml.HTML_document()
    # h._push()
    # h.table(border=1)
    # h._push()
    # h.tr(bgcolor='#EFEFEF')
    # h._push()
    # h.th("Term")
    # h._pop()
    # h._pop()
    # lines.reverse()
    # for line in lines:
    #     q = decodeQS(line[1])
    #     h._push()
    #     h.tr()
    #     h._push()
    #     h.td(showAsGoogleLink(q))
    #     h._pop()
    #     h._pop()
    # h._pop()
    MostRecentHTML = str(h)[str(h).find('?>') + 2:]
    html = "<html><head><title></title></head><body>\n\n"
    html += "<h2>Google Referer Log Analysis</h2>"
    html += "<!--BEGIN COMMON-->\n"
    html += "<h3>Most Common Search terms</h3>"
    html += MostCommonHTML
    html += "<!--END COMMON-->\n"
    html += "<br />\n" * 2
    html += "<!--BEGIN RECENT-->\n"
    html += "<h3>Most Recent Search terms</h3>"
    html += MostRecentHTML
    html += "<!--END RECENT-->\n"
    html += "<br />\n" * 2
    now = DateTime().strftime('%Y/%m/%d %H:%M')
    html += "<small>Generated %s</small>\n\n" % now
    html += "</body></html>"
    # -------
    f = open(storepath, 'w')
    f.write(html + '\n')
    f.close()
    return "Done"
def getSortedStatistics(self, sort_by='sortorder'):
    """Return the statistics sorted ascending on *sort_by*."""
    statistics = self.getStatistics()
    return sort(statistics, ((sort_by, 'cmp', 'asc'),))
def getSortedAttachments(self, sort_by='title'):
    """Return the attachments sorted ascending on *sort_by*."""
    attachments = self.getAttachments()
    return sort(attachments, ((sort_by, 'cmp', 'asc'),))
def getSortedReports(self, sort_by='title'):
    """Return the reports sorted ascending on *sort_by*."""
    reports = self.getReports()
    return sort(reports, ((sort_by, 'cmp', 'asc'),))
def getSortedWidgets(self, sort_by='sortorder'):
    """Return the widgets sorted ascending on *sort_by*."""
    widgets = self.getWidgets()
    return sort(widgets, ((sort_by, 'cmp', 'asc'),))
name, time = line.split('|') time = float(time) if all.has_key(name): all[name].append(time) else: all[name] = [time] all_calls = [] for k, timeslist in all.items(): average = sum(timeslist)/len(timeslist) all_calls.append(SQLCall(k, average, len(timeslist))) all_calls = sort(all_calls, (('time',),)) all_calls.reverse() c=0 for each in all_calls: print each c+=1 if c> LIMIT: print "...Only showing the first %s"%LIMIT break elif whattodo == 2: all = {}