Example 1
from collections.abc import Iterable

from veripress import app
from veripress.model import storage


def test_get_posts():
    with app.app_context():
        posts = storage.get_posts()
        assert isinstance(posts, Iterable)
        posts = list(posts)
        assert len(posts) == 3

        # drafts are returned only when explicitly requested
        posts = list(storage.get_posts(include_draft=True))
        assert len(posts) == 4
        assert posts[-1].title == 'Hello, world!'
        assert posts[-1].is_draft is True
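The first assertions rely on storage.get_posts() returning a lazy iterable rather than a list, which is why the test materializes it with list() before counting. A standalone sketch of that behaviour (not veripress code, just an illustration):

from collections.abc import Iterable

def fake_posts():
    # stand-in generator: iterable, but has no len()
    yield from ('a', 'b', 'c')

posts = fake_posts()
assert isinstance(posts, Iterable)
# len(posts) would raise TypeError; list() materializes the generator first
assert len(list(posts)) == 3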
Example 2
def feed():
    def convert_to_dict(p):
        # render the raw content and build an absolute URL for the feed entry
        post_d = p.to_dict()
        del post_d['raw_content']
        post_d['content'] = get_parser(p.format).parse_whole(p.raw_content)
        post_d['url'] = site['root_url'] + make_abs_url(p.unique_key)
        return post_d

    # only the newest FEED_COUNT published posts are included in the feed
    posts = map(
        convert_to_dict,
        islice(storage.get_posts(include_draft=False), 0,
               current_app.config['FEED_COUNT']))

    atom = AtomFeed(title=site['title'],
                    subtitle=site['subtitle'],
                    url=site['root_url'] + request.script_root,
                    feed_url=site['root_url'] + url_for('.feed'),
                    author=site.get('author'))
    # naive created/updated timestamps get the site's configured timezone attached
    for post_ in posts:
        atom.add(title=post_['title'],
                 content=post_['content'],
                 url=post_['url'],
                 id=post_['unique_key'],
                 published=post_['created'].replace(
                     tzinfo=timezone_from_str(site['timezone'])),
                 updated=post_['updated'].replace(
                     tzinfo=timezone_from_str(site['timezone'])),
                 author=post_['author'])

    response = make_response(atom.to_string())
    response.content_type = 'application/atom+xml; charset=utf-8'
    return response
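Both map() and islice() are lazy, so convert_to_dict only runs for the entries that actually make it into the feed rather than for every stored post. A minimal, self-contained sketch of that slicing pattern (the names below are illustrative and not part of veripress):

from itertools import islice

def expensive(n):
    print('parsing', n)  # printed only for the items that get sliced out
    return n * n

numbers = iter(range(1000))
feed_items = map(expensive, islice(numbers, 0, 5))
print(list(feed_items))  # expensive() runs exactly five times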
Example 3
def do_generate():
    from veripress import app
    from veripress.model import storage

    deploy_dir = get_deploy_dir()

    # copy global static folder
    dst_static_folder = os.path.join(deploy_dir, 'static')
    if os.path.isdir(app.static_folder):
        shutil.copytree(app.static_folder, dst_static_folder)

    # copy theme static files
    makedirs(dst_static_folder, mode=0o755, exist_ok=True)
    copy_folder_content(app.theme_static_folder, dst_static_folder)

    # collect all possible urls (except custom pages)
    all_urls = {'/', '/feed.xml', '/atom.xml', '/archive/'}
    with app.app_context():
        posts = list(storage.get_posts(include_draft=False))

        index_page_count = int(
            math.ceil(len(posts) / app.config['ENTRIES_PER_PAGE']))
        # skip '/page/1/'; its content is generated for '/', which is already in all_urls
        for i in range(2, index_page_count + 1):
            all_urls.add('/page/{}/'.format(i))

        for post in posts:
            all_urls.add(post.unique_key)
            all_urls.add('/archive/{}/'.format(post.created.strftime('%Y')))
            all_urls.add('/archive/{}/{}/'.format(
                post.created.strftime('%Y'), post.created.strftime('%m')))

        tags = storage.get_tags()
        for tag_item in tags:
            all_urls.add('/tag/{}/'.format(tag_item[0]))

        categories = storage.get_categories()
        for category_item in categories:
            all_urls.add('/category/{}/'.format(category_item[0]))

    with app.test_client() as client:
        # generate all possible urls
        for url in all_urls:
            resp = client.get(url)
            file_path = os.path.join(
                deploy_dir, url.lstrip('/').replace('/', os.path.sep))
            if url.endswith('/'):
                file_path += 'index.html'

            makedirs(os.path.dirname(file_path), mode=0o755, exist_ok=True)
            with open(file_path, 'wb') as f:
                f.write(resp.data)

        # generate 404 page
        resp = client.get('/post/this-is-a-page-that-never-gonna-exist'
                          '-because-it-is-a-post-with-wrong-url-format/')
        with open(os.path.join(deploy_dir, '404.html'), 'wb') as f:
            f.write(resp.data)

    if app.config['STORAGE_TYPE'] == 'file':
        generate_pages_by_file()
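Every URL ending in a slash is written out as a directory containing an index.html, mirroring how a static web server would resolve it. A standalone illustration of that URL-to-file mapping (the deploy directory name here is just an example):

import os

def url_to_file_path(deploy_dir, url):
    # '/archive/2016/03/' -> '<deploy_dir>/archive/2016/03/index.html'
    path = os.path.join(deploy_dir, url.lstrip('/').replace('/', os.path.sep))
    if url.endswith('/'):
        path += 'index.html'
    return path

print(url_to_file_path('_deploy', '/archive/2016/03/'))  # _deploy/archive/2016/03/index.html
print(url_to_file_path('_deploy', '/feed.xml'))          # _deploy/feed.xml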
Example 4
from datetime import date, datetime

from veripress import app
from veripress.model import storage


def test_get_posts_with_limits():
    with app.app_context():
        posts = storage.get_posts_with_limits(include_draft=True)
        assert posts == list(storage.get_posts(include_draft=True))

        posts = storage.get_posts_with_limits(
            include_draft=True, tags='Hello World', categories=['Default'])
        assert len(posts) == 2

        posts = storage.get_posts_with_limits(
            include_draft=True,
            created=(datetime.strptime('2016-02-02', '%Y-%m-%d'),
                     date(year=2016, month=3, day=3)))
        assert len(posts) == 1

        posts = storage.get_posts_with_limits(
            include_draft=True,
            created=(date(year=2011, month=2, day=2),
                     date(year=2014, month=2, day=2)))
        assert len(posts) == 0
Example 5
def index(page_num=1):
    if page_num <= 1 and request.path != '/':
        return redirect(url_for('.index'))  # redirect '/page/1' to '/'

    index_page = storage.get_page('index.html', include_draft=False)
    if index_page is not None:
        # a custom 'index.*' page exists, so serve it as the site index
        return page('index.html')

    all_posts = storage.get_posts(include_draft=False)

    count = current_app.config['ENTRIES_PER_PAGE']
    start = (page_num - 1) * count

    posts = []
    # slice one additional entry to check whether there are more posts
    for post_ in islice(all_posts, start, start + count + 1):
        post_d = post_.to_dict()
        del post_d['raw_content']
        post_d['preview'], post_d['has_more_content'] = get_parser(
            post_.format).parse_preview(post_.raw_content)
        post_d['url'] = make_abs_url(post_.unique_key)
        posts.append(post_d)

    # 'next' points to the lower page number, 'prev' to the higher one
    if start > 0:
        next_page_num = page_num - 1
        next_url = url_for(
            '.index', page_num=next_page_num if next_page_num != 1 else None)
    else:
        next_url = None
    if len(posts) > count:
        # the extra entry is present, so another page of posts exists
        posts = posts[:count]
        prev_url = url_for('.index', page_num=page_num + 1)
    else:
        prev_url = None

    return dict(entries=posts, next_url=next_url, prev_url=prev_url)
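The view slices one entry beyond ENTRIES_PER_PAGE so it can tell whether another page exists without counting every post. The same peek-ahead trick in isolation (function and variable names here are illustrative only):

from itertools import islice

def paginate(items, page_num, per_page):
    start = (page_num - 1) * per_page
    window = list(islice(items, start, start + per_page + 1))  # fetch one extra
    has_more_pages = len(window) > per_page
    return window[:per_page], has_more_pages

entries, has_more = paginate(iter(range(10)), page_num=2, per_page=4)
assert entries == [4, 5, 6, 7] and has_more is True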