Example #1
def test():
    # database.del_all("movie")
    database.del_all("comment")
    # database.insert_movie(spider.getMoviesInfor(10))
    database.insert_comment(spider.getMovieShortComments(32659890, 20, proxy=1), 32659890)
    # Do not crawl more than 10 pages with getMovieShortComments(); Douban caps the number of short-comment pages.
    # print(database.get_movies())
    # print(database.get_comments(26885074))
    pass
Example #2
def new_comment():
    data = request.get_json()
    print('Received comment "{0}" for {1} from {2}'.format(
        data['comment'], data['url'], data['user_id']))

    comment = Comment(data['user_id'],
                      data['url'],
                      data['comment'])
    database.insert_comment(g.db.cursor(), comment)
    g.db.commit()

    # TODO: Fix me. For now, respond with nothing and 204 No Content
    return ('', 204)
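A hypothetical client call that exercises this handler; the route and host below are assumptions, only the JSON field names come from the example.

import requests

# new_comment() reads 'comment', 'url' and 'user_id' from the JSON body and replies 204 No Content.
resp = requests.post("http://localhost:5000/comments",  # assumed route
                     json={"user_id": "alice", "url": "/posts/1", "comment": "Nice post"})
print(resp.status_code)  # expected: 204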
Example #3
def insert_comment():
    meeting_id = request.form["id"]
    comment = request.form["comment"]
    chat_id = database.get_chat_id(meeting_id)
    database.insert_comment(chat_id, comment)
    return "your comment was inserted"
Example #4
    # Insert the post
    database.insert_post(connection=connection,
                         post_id=post_id,
                         username=username,
                         sub_name=sub_name,
                         title=title,
                         score=score,
                         nsfw=nsfw)

    # Get top 100 comments of that post
    for comment in comments[:100]:
        try:
            username = comment.author.name
        except AttributeError:
            username = "******"

        comment_id = comment.id

        # Insert comment
        database.insert_comment(connection=connection,
                                comment_id=comment_id,
                                post_id=post_id,
                                username=username)

    # Commit the post and comments
    connection.commit()

# Close the connection at the end
connection.close()
Example #5
import database

# Set up the database file and create the tables.
con = database.set_up_db("check.db")
cur = con.cursor()
database.create_table(cur)

# Insert one comment and read everything back.
database.insert_comment(cur, "Isha", "Trying hard")
print(database.get_comments(cur))
# database.drop_all_tables(cur)

con.commit()
con.close()
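For orientation, a minimal sketch of the helper functions this example calls, assuming a plain sqlite3 backing store; the table name and column layout are assumptions, not taken from the project itself.

import sqlite3

def set_up_db(path):
    # Open (or create) the SQLite file and return the connection.
    return sqlite3.connect(path)

def create_table(cur):
    # A single comments table; this schema is assumed for illustration.
    cur.execute("CREATE TABLE IF NOT EXISTS comments (author TEXT, body TEXT)")

def insert_comment(cur, author, body):
    cur.execute("INSERT INTO comments (author, body) VALUES (?, ?)", (author, body))

def get_comments(cur):
    return cur.execute("SELECT author, body FROM comments").fetchall()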
Example #6
                area = matchObj.group(1)
                sex = matchObj2.group(1)
            except Exception as e:
                print("re Error.")
                print(e)

            commentList.append(
                {'id': commentbody['id'],
                 'text': commentbody['text'],
                 'time': commentbody['created_at'],
                 'name': commentbody['user']["screen_name"],
                 "area": area,
                 "sex": sex})

        last_max_id = jsondatum['data']['max_id']
        last_max_id_type = jsondatum['data']['max_id_type']

        page += 1
        print(page)
        time.sleep(random.randint(1, 4))

    return commentList


if __name__ == '__main__':
    login("18214888360", "6366565")
    commentlist = getComments(2000)
    print("爬到" + str(len(commentlist)) + "条")
    util_csv.save_csv(commentlist)
    database.insert_comment(commentlist)
Example #7
# coding=utf-8

# Code by SmallSquare, 2020/5.
# An example that shows how to obtain and prepare the data before wordCloudAnalysis.

import spider.spider_main as spider
import database

# Use the spider to crawl the data as follows.
database.del_all("movie")
database.del_all("comment")
database.insert_movie(spider.getMoviesInfor(10))
database.insert_comment(spider.getMovieShortComments(26885074, 2), 26885074)

# Get the data back from the database like this.
print(database.get_movies())
print(database.get_comments(26885074))
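As a hedged follow-up, the fetched comments would typically feed a word-cloud step; this sketch assumes the third-party wordcloud and jieba packages and that the comment texts have already been extracted from database.get_comments() into a plain list of strings (how to do that depends on that module's return format).

import jieba
from wordcloud import WordCloud

comment_texts = ["placeholder comment 1", "placeholder comment 2"]  # assumed list of strings
tokens = " ".join(jieba.cut(" ".join(comment_texts)))  # segment Chinese text into words
WordCloud(font_path="simhei.ttf", width=800, height=400).generate(tokens).to_file("wordcloud.png")  # font path is assumed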