Example #1
    def _scan(self):
        try:
            mod_time = os.stat(self.content_path)[stat.ST_MTIME]
        except OSError:
            # The content directory is missing or unreadable; nothing to scan.
            return []

        # The directory hasn't changed since the last scan; keep the existing items.
        if mod_time == cache.get('news_mod_time'):
            return

        try:
            files = os.listdir(self.content_path)
        except OSError:
            files = []

        print 'rescanning news..'

        # Rebuild the news table from scratch.
        for item in News.objects.all():
            item.delete()

        for file in files:
            # Ignore tempfiles.
            if file.startswith('.'):
                continue

            name, ext = os.path.splitext(file)

            # Only ReST sources are treated as news entries.
            if ext != '.rst':
                continue

            path = os.path.join(self.content_path, file)

            writer = NewsWriter()
            # Parse the ReST source; 'parts' holds rendered pieces such as
            # 'title' and 'fragment', 'data' holds metadata fields such as
            # 'date' and 'summary'.
            parts, data = get_parts(open(path).read().decode('utf-8'), writer)

            # Entries without both a date and a summary are skipped.
            if not ('date' in data and 'summary' in data):
                continue

            date = data['date']
            summary = data['summary'].encode('utf-8')
            body = parts['fragment'].encode('utf-8')
            title = parts['title'].encode('utf-8')
            slug = slugify(title)

            # The date field is an ISO string in the form YYYY-MM-DD.
            date = strptime(date, '%Y-%m-%d')
            date = datetime.date(date[0], date[1], date[2])

            item,created = News.objects.get_or_create(slug=slug, date=date,
                defaults={'summary': summary, 'body': body, 'title': title})

            item.save()

        # Remember the directory's mtime so unchanged content is skipped next time.
        cache.set('news_mod_time', mod_time)
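
The example above shows only the _scan method; it relies on os, stat, datetime and time.strptime from the standard library, plus project-level names (cache, News, NewsWriter, get_parts, slugify) that are not part of the snippet. A minimal, hypothetical sketch of the object that would own this method (the NewsImporter name and refresh() entry point are assumptions, not part of the original code):

import os
import stat
import datetime
from time import strptime

class NewsImporter(object):
    """Hypothetical owner of _scan(); the method only needs content_path."""

    def __init__(self, content_path):
        self.content_path = content_path

    def refresh(self):
        # Delegate to the _scan() method shown in Example #1.
        self._scan()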
Example #2
def get_pages(root_url):
    """Get all pages rooted at 'root_url'.
        
       If the modification time of the ReST file on-disk doesn't match the
       cached record's timestamp (recorded in the cache), updates the
       cached record. Returns a list of pages, or [] if no pages exists."""

    root_url = _normalize_url(root_url)
    path = os.path.join(content_root, root_url[1:])
    pages = cache.get('pages_for_' + root_url)

    try:
        mod_time = os.stat(path)[stat.ST_MTIME]
    except OSError:
        # The content directory doesn't exist; no pages at this URL.
        return []

    # Nothing changed on disk since the cached copy was built; reuse it.
    if pages and mod_time == cache.get('mod_time_for_' + path):
        return pages
    
    try:
        files = os.listdir(path)
    except OSError:
        files = []

    pages = []

    timeout = None
    # Some URL patterns also get generated pages; those aren't tracked by the
    # directory mtime, so the cache entry is kept only briefly.
    for pattern, g in generators:
        if re.match(pattern, root_url[1:]):
            pages += g(root_url)
            timeout = 2*60
            break

    # Load all pages from filesystem.
    for f in files:
        name,ext = os.path.splitext(f)

        # skip over hidden and non-ReST files
        if f.startswith('.') or ext != '.rst':
            continue

        url = _normalize_url(os.path.join(root_url, name))
        page = _load_page(os.path.join(path, f), url)
        pages.append(page)

    pages.sort(key=lambda p: (p.order, p.title))
    pages = tuple(pages) # make it immutable for the cache
    
    # Set up prev/next links between sibling pages.
    prev = None
    for p in pages:
        if prev: prev.next = p
        p.prev = prev
        prev = p
        # The template may access the DB, so better not persist too long.
        if p.template_name:
            timeout = 2*60

    cache.set('mod_time_for_' + path, mod_time, timeout=timeout)
    cache.set('pages_for_' + root_url, pages, timeout=timeout)

    return pages
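
A short usage sketch, not from the original source: get_pages takes a site-relative URL and returns the cached or freshly built page objects, each carrying the order, title and prev attributes used above. The caller below and the '/about/' URL are assumptions for illustration only.

def print_section(root_url='/about/'):
    """Hypothetical caller; only get_pages() comes from Example #2."""
    pages = get_pages(root_url)
    for page in pages:
        # page.prev is set for every page by get_pages(); it is None
        # for the first sibling.
        prev_title = page.prev.title if page.prev else None
        print page.order, page.title, prev_title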