Example #1
def post(name, page=0) -> 'html':
    info = get_content(name, post_info("post_list.csv"))
    pictures = post_pictures(name)
    tags = get_tags_for_post(info)
    comm_content = get_comments(name)
    comment_pages = list_comments(len(comm_content))
    comments = page_comments_distribution(comm_content, page)
    visits(POST_VIEWS, name)  # register one more view for this post

    def favor_check():
        # The 'favor' cookie keeps the ids of favourite posts separated by "/".
        favor_cookie = request.cookies.get('favor')
        if favor_cookie:
            return favor_cookie.split("/")
        return False

    checked = favor_check()

    return render_template('post.html',
                           navbar="Вернуться к содержанию",
                           the_checked=checked,
                           the_comments=comments,
                           the_comment_pages=comment_pages,
                           the_info=info,
                           the_pictures=pictures,
                           the_tags=tags,
                           the_visits=POST_VIEWS[name],
                           the_title="%s" %
                           get_content(name, post_info("post_list.csv"))[1])
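
The handlers above lean on helpers that are not part of these excerpts (post_info, get_content, POST_VIEWS, visits and so on). A minimal sketch of the two CSV helpers, assuming post_list.csv is the "|"-delimited file that example #7 appends to and that its first column is the post id (the exact signatures are guesses):

import csv


def post_info(filename="post_list.csv"):
    # Read every row of the "|"-delimited post list.
    with open(filename, encoding='utf_8', newline='') as csv_file:
        return list(csv.reader(csv_file, delimiter='|'))


def get_content(name, rows):
    # Return the row whose first column (the post id) matches `name`.
    for row in rows:
        if row and str(row[0]) == str(name):
            return row
    return None
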
Example #2
def edit_user_post_page(id_post) -> 'html':
    info = get_content(id_post, post_info("post_list.csv"))
    pictures = post_pictures(id_post)
    return render_template('edit_page.html',
                           the_info=info,
                           the_pictures=pictures,
                           the_title="Редактирование поста")
Example #3
def favor_page(page=0) -> 'html':
    if request.cookies.get('favor'):
        favor_cookie = request.cookies.get('favor').split("/")
        content = [
            get_content(name, post_info("post_list.csv"))
            for name in favor_cookie
        ]
        pages = list_pages(len(content))
        posts = page_distribution(content, int(page))

        if posts:
            num_comments = [len(get_comments(post[0])) for post in posts]
        else:
            num_comments = False

        return render_template('favor_page.html',
                               the_posts=posts,
                               the_pages=pages,
                               the_num_comments=num_comments,
                               the_visits=POST_VIEWS,
                               the_title="Избранные посты")
    return render_template('favor_page.html',
                           the_title="Список избранных пуст")
Example #4
def content_page() -> 'html':
    contents = post_iteration()
    info = post_info()
    return render_template('post.html',
                           the_title='Конструкция Титаника',
                           the_contents=contents,
                           the_info=info)
Example #5
def add_user_post() -> 'html':
    post_id = int(post_info("post_list.csv")[-1][0]) + 1
    post_name = request.values.get("post_name")
    post_description = request.values.get("post_description")
    post_content = request.values.get("post_content")
    post_tags = request.values.get("post_tags", None)
    post_pictures = request.values.get("post_pictures", None)
    entered_post(post_id, post_name, post_description, post_content, post_tags,
                 post_pictures)
    return redirect("/post/%s" % post_id, code=302)
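
entered_post is another helper the excerpts do not show. Judging by how example #7 writes post_list.csv, it presumably appends one "|"-delimited row per post, roughly like this (column order and picture handling are assumptions):

import csv


def entered_post(post_id, name, description, content, tags, pictures):
    # Append the new post as a single "|"-delimited row, mirroring example #7.
    with open("post_list.csv", "a", encoding='utf_8', newline='') as csv_file:
        writer = csv.writer(csv_file, delimiter='|')
        writer.writerow([post_id, name, description, content, tags])
    # Picture links would presumably go to post_pictures.csv, as in example #7.
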
Example #6
def content_page(page=0) -> 'html':
    content = post_info("post_list.csv")
    pages = list_pages(len(content))
    posts = page_distribution(content, int(page))
    num_comments = [len(get_comments(post[0])) for post in posts]
    return render_template('root_page.html',
                           the_pages=pages,
                           the_posts=posts,
                           the_num_comments=num_comments,
                           the_visits=POST_VIEWS,
                           the_title="RMS Titanic")
Example #7
import csv

import requests
from bs4 import BeautifulSoup


def add_posts(N):
    """Downloads a random Wikipedia article N times and adds each one as a post."""
    page_id = int(post_info("post_list.csv")[-1][0]) + 1

    def down_pages():
        random_url = 'https://ru.wikipedia.org/wiki/%D0%A1%D0%BB%D1%83%D0%B6%D0%B5%D0%B1%D0%BD%D0%B0%D1%8F:%D0%A1%D0%BB%D1%83%D1%87%D0%B0%D0%B9%D0%BD%D0%B0%D1%8F_%D1%81%D1%82%D1%80%D0%B0%D0%BD%D0%B8%D1%86%D0%B0'
        response = requests.get(random_url)
        soup = BeautifulSoup(response.text, "html.parser")
        page_info = []
        page_info.append(page_id)
        page_info.append(soup.find(
            "h1", class_="firstHeading").text)  # extract the article title

        def description_extraction(soup):
            """Извлекает описание"""
            description = soup.find("p").text
            description = description.replace("\n", "")
            description = description.encode().decode('utf-8', 'ignore')
            return description

        page_info.append(description_extraction(soup))

        def content_extraction(soup):
            """Extracts the text for the post body."""
            block = soup.find(class_="mw-parser-output")
            paragraphs = block.findAll("p")
            content = "".join(p.get_text() for p in paragraphs)
            content = content.replace("\n", "")
            content = content.encode().decode('utf-8', 'ignore')
            return content

        page_info.append(content_extraction(soup))

        def tag_extraction(soup):
            """Extracts the post tags from the category links block."""
            block = soup.find(id="mw-normal-catlinks")
            if block is None:
                return ""  # the page has no category block
            tags = block.findAll("a")
            if len(tags) >= 2:
                # The first link is the "Категории" label, so skip it.
                tag_list = "tags"
                for tag in tags[1:]:
                    tag_list += ";" + tag.text
                return tag_list
            return ""

        page_info.append(tag_extraction(soup))

        def image_extraction(soup):
            """Извлекает ссылки картинок и записывает их в файл"""
            blocks = soup.findAll(class_="infobox-image") + soup.findAll(
                class_="thumb")
            if blocks:
                img_list = [page_id]
                for block in blocks:
                    image = block.find("img")
                    img_list.append(image.get('src'))
                with open("post_pictures.csv",
                          "a",
                          encoding='utf_8',
                          newline='') as csv_file:
                    writer = csv.writer(csv_file, delimiter='|')
                    writer.writerow(img_list)
                return img_list
            else:
                return None

        image_extraction(soup)
        return page_info

    with open("post_list.csv", "a", encoding='utf_8', newline='') as csv_file:
        writer = csv.writer(csv_file, delimiter='|')
        for i in range(N):
            data = down_pages()
            writer.writerow(data)
            page_id += 1
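
Example usage, running the scraper once to seed the post list:

if __name__ == "__main__":
    # Download three random Wikipedia articles and append them as posts.
    add_posts(3)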