def auto_remove_topic(remove_topic_url):
    """Delete a topic the user started by GET-ing its removal URL.

    :param remove_topic_url: URL that, requested with the user's cookies,
        removes the topic.
    :return: the ``requests.Response`` of the GET request.
    """
    r = requests.get(remove_topic_url, cookies=doubanutil.get_cookies())
    # Lazy %-formatting: the message is only rendered if INFO is enabled.
    doubanutil.logger.info(
        "in func auto_remove_topic(), %s, status_code: %s",
        remove_topic_url, r.status_code)
    return r
def auto_quit_group(quit_group_url):
    """Leave a group by GET-ing its quit URL with the user's cookies.

    :param quit_group_url: URL that, when requested, quits the group.
    :return: the ``requests.Response`` of the GET request.
    """
    r = requests.get(quit_group_url, cookies=doubanutil.get_cookies())
    # Lazy %-formatting instead of eager string concatenation.
    doubanutil.logger.info(
        "in func auto_quit_group(), %s, status_code: %s",
        quit_group_url, r.status_code)
    return r
def post_new_topic(group_url, topic_dict):
    """POST a new topic to the given group.

    :param group_url: the group's topic-submission URL.
    :param topic_dict: form data describing the new topic.
    :return: the ``requests.Response`` of the POST request.
    """
    r = requests.post(group_url, cookies=doubanutil.get_cookies(), data=topic_dict)
    # Lazy %-formatting instead of eager string concatenation.
    doubanutil.logger.info(
        "in func post_new_topic(), %s, status_code: %s",
        group_url, r.status_code)
    return r
def comment_topic(topic_url, comment_dict):
    """Post a reply under a topic.

    :param topic_url: the topic's comment-submission URL.
    :param comment_dict: form data for the reply.
    :return: the ``requests.Response`` of the POST request.
    """
    r = requests.post(topic_url, cookies=doubanutil.get_cookies(), data=comment_dict)
    # NOTE: logs the comment payload (not the URL), as the original did.
    # Lazy %-formatting instead of eager string concatenation.
    doubanutil.logger.info(
        "in func comment_topic(), %s, status_code: %s",
        comment_dict, r.status_code)
    return r
def comment_topic(topic_url, comment_dict):
    """Post a reply under a topic.

    :param topic_url: the topic's comment-submission URL.
    :param comment_dict: form data for the reply.
    :return: the ``requests.Response`` of the POST request.
    """
    r = requests.post(topic_url, cookies=doubanutil.get_cookies(), data=comment_dict)
    # Commented-out debug code removed; trailing space in the message kept
    # byte-for-byte.  Lazy %-formatting instead of eager concatenation.
    doubanutil.logger.info(
        "in func comment_topic(), %s| status_code: %s ",
        comment_dict, r.status_code)
    return r
import random
import time

import requests
from lxml import etree

from group import remove
from config import doubanurl
from util import doubanutil

if __name__ == "__main__":
    # Poll the user's published-topics page forever; collect new topics,
    # then let them live for a random interval before further processing.
    user_id = "170612630"
    group_url = doubanurl.DOUBAN_GROUP_MY + str(user_id) + "/publish"
    group_topics = []
    to_delete_topics_set = set()
    while True:
        doubanutil.logger.info("检测是否有新发的帖子... ...")
        # Bug fix: `time` and `random` were used below but never imported,
        # which raised NameError at runtime — both are now imported above.
        while not group_topics:  # keep polling until at least one topic shows up
            r = requests.get(group_url, cookies=doubanutil.get_cookies())
            group_topics_html = etree.HTML(r.text)
            group_topics = group_topics_html.xpath(
                "//table[@class='olt']/tr/td[@class='title']/a/@href")
            time.sleep(10)  # re-check every 10 seconds
        # Move every fetched topic into the to-delete set.
        for topic_url in group_topics:
            to_delete_topics_set.add(topic_url)
        doubanutil.logger.info("to_delete_topics_set中的帖子链接如下:")
        doubanutil.logger.info(to_delete_topics_set)
        group_topics = []  # reset so the inner poll loop runs again
        doubanutil.logger.info("group_topics中的帖子链接如下(理论上应为空):")
        doubanutil.logger.info(group_topics)
        # Sleep a random 3–5 minutes so the topics stay alive for a while.
        random_sleep = random.randint(180, 300)
        doubanutil.logger.info("sleep for " + str(random_sleep) + " seconds...让帖子存活一段时间")
        time.sleep(random_sleep)
# -*- coding: utf-8 -*-
import random
import time

import requests
from lxml import etree

from group import comment
from config import doubanurl
from util import doubanutil
from baiconfig import headers

if __name__ == "__main__":
    # Fetch the group's topic list, skip the first three (pinned) entries,
    # then pick a random canned reply per topic.
    group_id = "699571"
    group_url = "https://www.douban.com/group/" + group_id + "/"
    r = requests.get(group_url, cookies=doubanutil.get_cookies(), headers=headers)
    group_topics_html = etree.HTML(r.text)
    group_topics = group_topics_html.xpath(
        "//table[@class='olt']/tr[@class='']/td[@class='title']/a/@href")
    group_topics = group_topics[3:]
    print(group_topics)

    # Perf fix: the reply file is loop-invariant, so read and strip it once
    # here instead of re-reading it for every topic as the original did.
    with open("D:\\douban\\DoubanAuto-master\\DoubanAuto-master\\data\\reponse.txt",
              "r", encoding="utf-8") as fr:
        replies = [line.strip() for line in fr]  # strip the trailing \n per line

    for topic_url in group_topics:
        comment_topic_url = topic_url + "/add_comment"
        print(replies)
        # Pick a random reply index for this topic.
        random_index = random.randint(0, len(replies) - 1)
        print(random_index)
# -*- coding: utf-8 -*-
import random
import time

import requests
from lxml import etree

from group import comment
from config import doubanurl
from util import doubanutil

if __name__ == "__main__":
    # Comment on the newest topic of the given group.
    group_id = "stage1st"
    group_url = doubanurl.DOUBAN_GROUP + group_id + "/"
    print(group_url)
    r = requests.get(group_url, cookies=doubanutil.get_cookies(),
                     headers=doubanutil.get_headers())
    group_topics_html = etree.HTML(r.text)
    group_topics = group_topics_html.xpath(
        "//table[@class='olt']/tr/td[@class='title']/a/@href")
    # Bug fix: `[group_topics[0]]` raised IndexError when the XPath matched
    # nothing; `[:1]` is identical for non-empty results and safe when empty.
    group_topics = group_topics[:1]
    print(group_topics)
    for topic_url in group_topics:
        comment_topic_url = topic_url + "/add_comment#last"
        comment_str = "什么东西。。。"
        comment_dict = comment.make_comment_dict(topic_url, comment_str)
        comment.comment_topic(comment_topic_url, comment_dict)