def tz_date(date, fmt="%F %d %Y %H:%M"):
    """Format a naive-UTC datetime in the site's configured timezone.

    Args:
        date: a naive ``datetime`` assumed to hold UTC wall-clock time
              (it is stamped with ``tzinfo=utc`` before conversion).
        fmt: ``strftime`` format string.
             NOTE(review): ``%F`` is a glibc extension (equivalent to
             ``%Y-%m-%d``) and is not portable to all platforms — confirm
             this default renders as intended in production.

    Returns:
        The formatted string in the configured local timezone (falls back
        to UTC when ``config.timezone`` is unset/empty).

    The resolved tzinfo object is cached in memcache under the key 'tz'.
    NOTE(review): relies on module-level names ``import_wrapper``,
    ``memcache``, ``config``, ``utc`` and ``_jsontime_timezone`` defined
    elsewhere in this file — not visible in this chunk.
    """
    # without this, unpickling from memcache fails badly
    # (the dateutil package must be importable, and the ``parser`` module
    # loaded, before memcache.get() can unpickle a cached tzinfo object —
    # so the apparently-unused import below is a deliberate side effect).
    import_wrapper.load_zip('dateutil')
    from dateutil import parser
    tz = memcache.get('tz')
    if tz is None:
        # Cache miss: resolve the configured zone (or UTC) and cache it.
        zone = config.timezone
        if not zone:
            tz = utc
        else:
            tz = _jsontime_timezone(zone)
        memcache.set('tz', tz)
    return date.replace(tzinfo=utc).astimezone(tz).strftime(fmt)
def get_archive_list():
    """Return a list of the archive months and their article counts.

    Each entry is a dict with keys 'date' (first day of the month),
    'count' (number of entries published that month, capped at 1000 by
    the datastore count limit) and 'url' (the '/YYYY/MM' archive path).
    The computed list is cached in memcache under 'archive_list'; an
    empty blog yields (and caches) an empty list.
    """
    import import_wrapper
    import_wrapper.load_zip('dateutil')
    from dateutil.relativedelta import relativedelta

    # Serve the memcache'd copy when one exists.
    cached = memcache.get('archive_list')
    if cached is not None:
        return cached

    # Locate the earliest published entry; with no posts there is
    # nothing to archive.
    earliest_query = db.Query(blog.Publishable)
    earliest_query.order('pub_date')
    earliest = earliest_query.get()
    if earliest is None:
        memcache.set('archive_list', [])
        return []

    # Delta used to step forward one calendar month at a time.
    one_month = relativedelta(months=+1)

    # The span runs from the month of the earliest post up to and
    # including the current month.
    month_start = datetime.date(earliest.pub_date.year,
                                earliest.pub_date.month, 1)
    today = datetime.date.today()
    stop = datetime.date(today.year, today.month, 1) + one_month

    # Walk month by month, counting the entries published in each.
    months = []
    while month_start < stop:
        month_end = month_start + one_month
        counter = db.Query(blog.Publishable)
        counter.filter('pub_date >= ', month_start)
        counter.filter('pub_date < ', month_end)
        months.append({
            'date': month_start,
            'count': counter.count(1000),
            'url': '/%04d/%02d' % (month_start.year, month_start.month),
        })
        month_start = month_end

    memcache.set('archive_list', months)
    return months