Example #1
0
 def convert_to_dict(post_):
     """Turn a Post object into a plain dict carrying a parsed preview and URL.

     The raw (unparsed) content is stripped from the result; only the
     rendered preview, a has-more flag, and the absolute URL are exposed.
     """
     d = post_.to_dict()
     d.pop('raw_content')
     parser = get_parser(post_.format)
     d['preview'], d['has_more_content'] = parser.parse_preview(post_.raw_content)
     d['url'] = make_abs_url(post_.unique_key)
     return d
Example #2
0
 def contains_query_keyword(post_or_page):
     """Return True if the search query occurs in the title or parsed body.

     The title check is done first so that the (more expensive) full
     parse of 'raw_content' only happens when the title does not match.
     """
     if query in post_or_page.title.lower():
         return True
     rendered = get_parser(post_or_page.format).parse_whole(
         post_or_page.raw_content)
     # strip HTML tags before matching so markup never hides a hit
     return query in Markup(rendered).striptags().lower()
Example #3
0
def page(rel_url):
    """Render a custom page for the given relative URL.

    Resolution order: reject disallowed paths (403), serve an existing
    static file directly, 404 on an unrecognizable URL, redirect to the
    canonical URL if needed, then render (with a short-lived cache) the
    stored page.
    """
    if not validate_custom_page_path(rel_url):
        abort(403)

    fixed_rel_url, exists = storage.fix_page_relative_url(rel_url)
    if exists:
        # the fixed url points at a real file on disk: send it as-is
        return send_file(fixed_rel_url)
    if fixed_rel_url is None:
        # relative url is invalid
        # this is never possible when visiting this site in web browser
        abort(404)  # pragma: no cover
    if rel_url != fixed_rel_url:
        # it's not the correct relative url, so redirect
        return redirect(url_for('.page', rel_url=fixed_rel_url))

    cache_key = 'view-handler.' + rel_url
    cached = cache.get(cache_key)
    if cached is not None:
        return cached  # pragma: no cover, here just get the cached response

    page_ = storage.get_page(rel_url, include_draft=False)
    if page_ is None:
        abort(404)

    entry = page_.to_dict()
    del entry['raw_content']
    whole_content = get_parser(page_.format).parse_whole(page_.raw_content)
    entry['content'], entry['toc'], entry['toc_html'] = parse_toc(whole_content)
    entry['url'] = make_abs_url(page_.unique_key)

    resp = custom_render_template(entry['layout'] + '.html', entry=entry)
    cache.set(cache_key, resp, timeout=2 * 60)
    return resp
Example #4
0
def pages(page_path):
    """API handler returning a custom page as a dict (or a raw file).

    Raises ApiException for disallowed paths, unrecognizable paths, and
    missing pages; caches the parsed dict for two minutes.
    """
    if not validate_custom_page_path(page_path):
        raise ApiException(
            error=Error.NOT_ALLOWED,
            message='The visit of path "{}" is not allowed.'.format(page_path)
        )

    rel_url, exists = storage.fix_relative_url('page', page_path)
    if exists:
        # the resolved url is an actual static file: serve it directly
        return send_file(rel_url)
    if rel_url is None:
        # pragma: no cover
        # it seems impossible to make this happen,
        # see code of 'fix_relative_url'
        raise ApiException(
            error=Error.BAD_PATH,
            message='The path "{}" cannot be recognized.'.format(page_path)
        )

    cache_key = 'api-handler.' + rel_url
    cached = cache.get(cache_key)
    if cached is not None:
        return cached  # pragma: no cover, here just get the cached dict

    page = storage.get_page(rel_url, include_draft=False)
    if page is None:
        raise ApiException(error=Error.RESOURCE_NOT_EXISTS)

    result = page.to_dict()
    del result['raw_content']
    result['content'] = get_parser(page.format).parse_whole(page.raw_content)

    cache.set(cache_key, result, timeout=2 * 60)
    return result
Example #5
0
def parse_content_of_models(obj):
    """
    Parse the whole 'raw_content' attribute of a Post or Page or Widget object (in template files).

    :param obj: a Post or Page or Widget object
    :return: parsed whole content
    """
    parser = get_parser(obj.format)
    return parser.parse_whole(obj.raw_content)
Example #6
0
def widgets():
    """Return published widgets at the requested position as parsed dicts.

    Yields None instead of an empty list when nothing matches, matching
    the API convention used by the other handlers.
    """
    position = request.args.get('position')
    collected = []
    for widget in storage.get_widgets(position=position, include_draft=False):
        d = widget.to_dict()
        del d['raw_content']
        d['content'] = get_parser(widget.format).parse_whole(widget.raw_content)
        collected.append(d)
    return collected or None
Example #7
0
def post(year, month, day, post_name):
    """Render a single post page addressed by its date-based URL.

    Redirects to the canonical relative URL when needed, 404s on a
    missing post, and renders the post's layout template with the fully
    parsed content plus table of contents.
    """
    rel_url = request.path[len('/post/'):]
    fixed_rel_url = storage.fix_post_relative_url(rel_url)
    if fixed_rel_url != rel_url:
        # it's not the correct relative url, so redirect
        return redirect(request.url_root + 'post/' + fixed_rel_url)

    post_ = storage.get_post(rel_url, include_draft=False)
    if post_ is None:
        abort(404)

    entry = post_.to_dict()
    del entry['raw_content']
    whole_content = get_parser(post_.format).parse_whole(post_.raw_content)
    entry['content'], entry['toc'], entry['toc_html'] = parse_toc(whole_content)
    entry['url'] = make_abs_url(post_.unique_key)

    return custom_render_template(entry['layout'] + '.html', entry=entry)
Example #8
0
def index(page_num=1):
    """Render one page of the post index.

    Delegates to the custom 'index.html' page when one exists; otherwise
    slices the post list by ENTRIES_PER_PAGE and builds preview dicts.
    'next_url' points at newer posts, 'prev_url' at older ones.
    """
    if page_num <= 1 and request.path != '/':
        return redirect(url_for('.index'))  # redirect '/page/1' to '/'

    index_page = storage.get_page('index.html', include_draft=False)
    if index_page is not None:
        # there is an 'index.*' custom page, we use this as index.
        return page('index.html')

    all_posts = storage.get_posts(include_draft=False)

    per_page = current_app.config['ENTRIES_PER_PAGE']
    start = (page_num - 1) * per_page

    # slice one extra entry so we can tell whether an older page exists
    entries = []
    for post_ in islice(all_posts, start, start + per_page + 1):
        d = post_.to_dict()
        del d['raw_content']
        d['preview'], d['has_more_content'] = \
            get_parser(post_.format).parse_preview(post_.raw_content)
        d['url'] = make_abs_url(post_.unique_key)
        entries.append(d)

    if start > 0:
        newer_page_num = page_num - 1
        next_url = url_for(
            '.index',
            page_num=newer_page_num if newer_page_num != 1 else None)
    else:
        next_url = None

    if len(entries) > per_page:
        # the probe entry is present, so an older page exists
        entries = entries[:per_page]
        prev_url = url_for('.index', page_num=page_num + 1)
    else:
        prev_url = None

    return dict(entries=entries, next_url=next_url, prev_url=prev_url)
Example #9
0
 def convert_to_dict(p):
     """Turn a model object into a dict with fully parsed content.

     The URL is made absolute by prefixing the site's root URL; the raw
     (unparsed) content is removed from the result.
     """
     d = p.to_dict()
     d.pop('raw_content')
     d['content'] = get_parser(p.format).parse_whole(p.raw_content)
     d['url'] = site['root_url'] + make_abs_url(p.unique_key)
     return d
Example #10
0
def test_get_parser():
    """Known format names map to parser instances; unknown names give None."""
    txt_parser = get_parser('txt')
    md_parser = get_parser('markdown')
    assert isinstance(txt_parser, TxtParser)
    assert isinstance(md_parser, MarkdownParser)
    assert get_parser('non-exists') is None
Example #11
0
def posts(year: int = None, month: int = None, day: int = None,
          post_name: str = None):
    """API handler returning published posts filtered by the URL path.

    Query args are split on commas into lists and passed to
    'get_posts_with_limits' as filters; 'created'/'updated' args are
    parsed as date intervals; 'fields' selects which keys to return;
    'start'/'count' paginate the result.

    :param year: optional year path component narrowing the posts
    :param month: optional month path component
    :param day: optional day path component
    :param post_name: optional post name; when given, exactly one post
        (with full parsed content and TOC) is returned instead of a list
    :return: a single post dict, a list of post dicts (previews only),
        or None when nothing matches
    :raise ApiException: when a 'created'/'updated' interval argument is
        malformed
    """
    args = {k: [x.strip() for x in v.split(',')]
            for k, v in request.args.items()}

    for key in ('include_draft', 'start', 'count'):
        # pop out items that should not be passed into the 'get_posts' method
        # as 'limits'
        args.pop(key, None)

    # fields that the API user needs, a list or None
    fields = args.pop('fields', None)

    for key in ('created', 'updated'):
        if key in args:
            try:
                interval = args[key]
                # should be ['2017-02-13', '2017-03-13'] if it's valid
                for i in range(2):
                    # raw string: '\d' in a plain literal is an invalid
                    # escape sequence and warns on modern Python
                    y, m, d = re.match(
                        r'(\d{4})-(\d{1,2})-(\d{1,2})', interval[i]).groups()
                    interval[i] = date(year=int(y), month=int(m), day=int(d))
            except (IndexError, AttributeError, ValueError):
                # too few parts, no regex match, or out-of-range date
                raise ApiException(
                    message='The "{}" argument is invalid, and it should be '
                            'like "2017-02-13,2017-03-13".'.format(key),
                    error=Error.INVALID_ARGUMENTS
                )

    # get the post list here
    result_posts = storage.get_posts_with_limits(include_draft=False, **args)

    # build the relative-url prefix from the path components; a full
    # relative url (post_name given) selects exactly ONE post
    return_single_item = False
    rel_url_prefix = ''
    if year is not None:
        rel_url_prefix += '%04d/' % year
    if month is not None:
        rel_url_prefix += '%02d/' % month
    if day is not None:
        rel_url_prefix += '%02d/' % day
    if post_name is not None:
        rel_url_prefix += '%s/' % post_name
        # if a full relative url is given, we return just ONE post,
        # instead of a list
        return_single_item = True
    result_posts = filter(lambda p: p.rel_url.startswith(rel_url_prefix),
                          result_posts)

    # non-numeric or missing values fall back to defaults (0 / unlimited)
    start = request.args.get('start', '')
    start = int(start) if start.isdigit() else 0
    count = request.args.get('count', '')
    count = int(count) if count.isdigit() else -1

    result_posts_list = []
    for post in islice(
            result_posts, start, start + count if count >= 0 else None):
        parser = get_parser(post.format)
        post_d = post.to_dict()
        del post_d['raw_content']
        if return_single_item:
            # if a certain ONE post is needed,
            # we parse all content instead of preview
            post_d['content'] = parser.parse_whole(post.raw_content)
            post_d['content'], post_d['toc'], post_d['toc_html'] = \
                parse_toc(post_d['content'])
        else:
            # a list of posts is needed, we parse only previews
            post_d['preview'], post_d['has_more_content'] = \
                parser.parse_preview(post.raw_content)
        if fields is not None:
            # select only needed fields to return
            assert isinstance(fields, list)
            full_post_d = post_d
            post_d = {}
            for key in fields:
                if key in full_post_d:
                    post_d[key] = full_post_d[key]
        result_posts_list.append(post_d)

    if result_posts_list and return_single_item:
        return result_posts_list[0]
    else:
        return result_posts_list if result_posts_list else None