def add(url, torrentUrl, pathCookies):
    """Download a .torrent file and add it to Transmission via RPC.

    Args:
        url: Transmission RPC endpoint, passed through to send().
        torrentUrl: Direct download URL of the .torrent file.
        pathCookies: Cookie-jar path loaded into the HTTP session.

    Returns:
        The hash string of the added torrent; if Transmission already
        knows the torrent, the hash of the existing duplicate.
    """
    session = requests.Session()
    session.verify = False  # tracker cert is not validated on this box
    util.loadCookies(session, pathCookies)
    util.setUserAgent(session, 'chrome')

    # Spool the torrent next to this module through xbmcvfs (Kodi VFS layer).
    file = os.path.dirname(os.path.realpath(__file__)) + '/' + 'temp.torrent'
    import xbmcvfs
    output = xbmcvfs.File(file, 'w')
    try:
        r = session.get(torrentUrl)
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:  # skip keep-alive chunks
                output.write(chunk)
    finally:
        # always release the VFS handle, even if the download fails mid-way
        output.close()

    try:
        with open(file, "rb") as f:
            encoded_string = base64.b64encode(f.read())
    finally:
        os.remove(file)  # temp file is removed even if reading raises

    data = {
        "method": "torrent-add",
        "arguments": {"metainfo": encoded_string}
    }
    reply = send(url, data)
    # Transmission reports an already-known torrent under 'torrent-duplicate'.
    if 'torrent-duplicate' in reply["arguments"]:
        return reply["arguments"]["torrent-duplicate"]["hashString"]
    return reply["arguments"]["torrent-added"]["hashString"]
def initSession(pathCookies=None):
    """Build a requests session with SSL verification disabled.

    Cookies are loaded only when a cookie-jar path is supplied.
    NOTE(review): a second initSession later in this module redefines
    this name, so callers importing the module get that one.
    """
    s = requests.Session()
    s.verify = False
    if pathCookies is not None:
        util.loadCookies(s, pathCookies)
    util.setUserAgent(s, 'chrome')
    return s
def getMagnet(pathCookies, id):
    """Fetch a rutracker topic page and return its magnet link.

    The topic title is appended as the magnet's display name (dn=).
    """
    session = requests.Session()
    session.verify = False
    util.loadCookies(session, pathCookies)
    util.setUserAgent(session, 'chrome')

    topic_url = 'https://rutracker.org/forum/viewtopic.php?t=' + id
    page = BeautifulSoup(session.get(topic_url).content, "html.parser")

    magnet = page.find(class_='magnet-link')['href']
    name = page.find(class_='topic-title-' + id).get_text()
    return magnet + '&' + urllib.urlencode({'dn': name})
def initSession(pathCookies=None):
    """Create a browser-like requests session aimed at kinopoisk.ru.

    NOTE(review): this redefines the earlier initSession in this module;
    this definition is the one callers actually get.
    """
    s = requests.Session()
    s.verify = False
    if pathCookies is not None:
        util.loadCookies(s, pathCookies)
    util.setUserAgent(s, 'chrome')
    # Mimic a real browser's request headers so the site serves full pages.
    browser_headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US,en;q=0.9,he-IL;q=0.8,he;q=0.7,ru-RU;q=0.6,ru;q=0.5',
        'Upgrade-Insecure-Requests': '1',
        'Referer': 'https://www.kinopoisk.ru/',
    }
    s.headers.update(browser_headers)
    return s
def get_verify_code_pic(session, url):
    """Fetch the page at *url* and extract the captcha image URL and id.

    Returns a (pic_url, pic_id) pair, or ("", "") when the request does
    not come back with HTTP 200.
    """
    resp = session.get(url, cookies=util.loadCookies())
    if resp.status_code != 200:
        print(str(url) + ", status_code: " + str(resp.status_code))
        return "", ""
    pic_url, pic_id = get_image_and_id(resp.text)
    print(str(pic_url))
    return pic_url, pic_id
def reject_user(session, groupid, requestid):
    """Reject a pending join request for a douban group."""
    endpoint = 'https://www.douban.com/j/group/' + groupid + '/requests/reject'
    # NOTE(review): 'MIME Type' looks like a header mistakenly placed in the
    # form body; kept as-is because the server evidently tolerates it.
    payload = {
        'MIME Type': 'application/x-www-form-urlencoded',
        'req_item': requestid,
        "ck": util.get_ck_from_cookies(session)
    }
    print(payload)
    response = session.post(endpoint, payload, cookies=util.loadCookies())
    print(response)
def get_verify_code_pic_doumail(session, url):
    """Scrape the doumail reply form for its captcha image URL and id.

    Returns (pic_url, pic_id), or ("", "") when the page fails to load
    or no captcha block is present in the form script.
    """
    resp = session.get(url, cookies=util.loadCookies())
    if resp.status_code == 200:
        # The captcha id/image pair lives in an inline JS assignment.
        pattern = "REPLY_FORM_DATA.captcha = {\n id: \'(.*)\',\n image: \'(.*)\'"
        match = re.search(pattern, resp.text, re.M | re.I)
        if match:
            pic_id = match.group(1)
            pic_url = match.group(2)
            print(pic_id, pic_url)
            return pic_url, pic_id
    print(str(url) + ", status_code: " + str(resp.status_code))
    return "", ""
def initial_session():
    """Return a ReqWrapper session dressed up as a desktop Chrome browser."""
    wrapper = RequestWrapper.ReqWrapper()
    session = wrapper.session
    browser_headers = {
        'Host': 'www.douban.com',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8',
    }
    session.headers.update(browser_headers)
    session.cookies.update(util.loadCookies())
    return session
def search(pathCookies, str):
    """Search rutracker and return result rows that have at least one seeder.

    Args:
        pathCookies: Cookie-jar path for the authenticated session.
        str: Search query (shadows the builtin; name kept for callers).

    Returns:
        List of dicts with keys id, name, url, seeds, size — one per torrent.
    """
    session = requests.Session()
    session.verify = False  # tracker cert is not validated on this box
    util.loadCookies(session, pathCookies)
    util.setUserAgent(session, 'chrome')

    url = 'https://rutracker.org/forum/tracker.php'
    dataGet = {
        # 'f': '100,101,1102,1120,1214,1235,1359,1363,1531,1576,1666,185,187,189,1900,1936,208,209,2090,2091,2092,2093,2100,212,2198,2199,22,2200,2201,2220,2221,235,2366,242,2459,2540,271,312,313,315,376,387,4,505,521,539,7,721,819,822,842,9,905,911,921,93,930,934,941',
        'nm': str
    }
    dataPost = {'o': 10, 's': 2, 'oop': 1}
    data = BeautifulSoup(
        session.post(url=url + '?' + urllib.urlencode(dataGet),
                     data=dataPost).content,
        "html.parser")

    result = []
    tbody = data.find('tbody')
    if tbody is None:
        # No result table at all (e.g. empty or error page): nothing to parse.
        return result
    for tag in tbody.find_all(class_="tCenter"):
        try:
            seeds = tag.find(class_="seedmed").get_text()
        except AttributeError:
            # No seeder cell in this row -> treat as zero seeds.
            seeds = '0'
        if seeds == '0':
            continue
        subtag = tag.find(class_="tLink")
        name = subtag.get_text()
        result.append({
            'id': subtag['data-topic_id'],
            'name': name,
            'url': 'https://rutracker.org/forum/dl.php?t=' + subtag['data-topic_id'],
            'seeds': seeds,
            'size': tag.find(class_="tor-size").find('u').get_text()
        })
    return result
def sent_doumail(session, user_id, content):
    """Send a douban mail ("doumail") to a user, solving the captcha if shown.

    Args:
        session: Authenticated requests session.
        user_id: Recipient's douban user id.
        content: Message body text.

    Raises:
        RuntimeError: when the captcha could not be solved after 3 attempts
            (an operator email notification is sent first).
    """
    url = 'https://www.douban.com/j/doumail/send'
    user_url = 'https://www.douban.com/doumail/%s/' % user_id
    # The compose page may be captcha-protected; fetch the captcha (if any).
    pic_url, pic_id = captcha_util.get_verify_code_pic_doumail(
        session, user_url)
    verify_code = ""
    # pic_url, pic_id="",""
    if len(pic_url):
        retry_count = 0
        while retry_count < 3:
            # Download the captcha image, OCR it, then delete the local copy.
            pic_path = captcha_util.save_pic_to_disk(pic_url, session)
            verify_code = captcha_util.get_word_in_pic(pic_path)
            os.remove(pic_path)
            retry_count = retry_count + 1
            time.sleep(5)
            if verify_code == '':
                # OCR failed: fetch a fresh captcha and retry.
                pic_url, pic_id = captcha_util.get_verify_code_pic_doumail(
                    session, user_url)
                verify_code = ""
            else:
                break
        # BUGFIX: the original raised whenever retry_count == 3, which also
        # aborted a captcha solved successfully on the 3rd attempt. Only
        # fail when all attempts are exhausted AND no code was recognized.
        if retry_count == 3 and verify_code == '':
            util.email_notify()
            raise RuntimeError('Captcha failed')
    param_dict = {
        "ck": util.get_ck_from_cookies(session),
        'to': user_id,
        "m_text": content,
        "m_image": '',
        "captcha-solution": verify_code,
        "captcha-id": pic_id
        #'m_submit': '%E5%A5%BD%E4%BA%86%EF%BC%8C%E5%AF%84%E5%87%BA%E5%8E%BB'#encoded '好了,寄出去'
    }
    print(param_dict)
    response = session.post(url, param_dict, cookies=util.loadCookies())
    print(response)
    logging.info(str(param_dict) + str(response))
def get_all_applied_user(session, groupid):
    """Collect pending join requests for a group and persist them to disk.

    Writes applications/user_request_dict.json mapping user_id ->
    {'request_id': ..., 'content': ...} and returns the number of entries.
    """
    url = 'https://www.douban.com/group/' + groupid + '/requests/applied'
    print(url)
    doc = pq(url, headers=session.headers, cookies=util.loadCookies())
    listing = doc('.group-request-list')

    # Three parallel lists scraped from the request listing markup.
    request_id = [pq(node).attr("value") for node in listing('input')]
    user_id = [pq(node).attr("href").split('/')[-2]
               for node in listing('.douban-home-page')]
    content = [pq(node).text() for node in listing('.inq')]

    user_request_dict = {}
    for k in range(len(user_id)):
        print(user_id[k], request_id[k], content[k])
        user_request_dict[user_id[k]] = {
            'request_id': request_id[k],
            'content': content[k]
        }

    with open('applications/user_request_dict.json', 'w') as f:
        json.dump(user_request_dict, f)
    return len(user_request_dict)
util.random_sleep(60, 90, 'processed message') else: # 已经回复过消息 if mail['user_id'] in wait_to_reply: wait_to_reply.remove(mail['user_id']) with open('resources/wait_to_reply.json', 'w') as f: json.dump(wait_to_reply, f) content = '🤖消息已收到,记得发送"申请入组"四个字开始自动审核' sent_doumail(session, mail['user_id'], content) util.random_sleep(60, 90, 'processed message') logging.info(mail['preview'] + ' sent by id =' + mail['user_id']) return len(doumails) if __name__ == '__main__': import RequestWrapper req_wrapper = RequestWrapper.ReqWrapper() s = req_wrapper.session s.headers.update({ 'Host': 'www.douban.com', 'Connection': 'keep-alive', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en-US,en;q=0.5', }) s.cookies.update(util.loadCookies()) #sent_doumail(s,'','testtest') process_new_doumail(s, group_id, newonly=False, force_range=[5, 0])