def RunCleaner():
    """Periodic housekeeping: purge temp files and expire old download logs.

    Runs only when at least ``Clean_Intevral`` seconds have passed since
    ``Last_TimeStamp``. For every file in the log directory it looks up the
    matching DB record; orphaned log files (no record, lookup returns -1) are
    deleted, and records older than 24h are marked "outdated" with their log
    file and output folder removed. Best-effort: failures are logged, never
    raised.
    """
    now_timestamp = int(time.time())
    last_timestamp = int(get_value("Last_TimeStamp"))
    intevral = int(get_value("Clean_Intevral"))
    if now_timestamp >= last_timestamp + intevral:
        # Imported lazily to avoid a circular import with mod_mysql.
        from proj_manga.mod_mysql import SetLogStatus
        from proj_manga.mod_mysql import GetLogSingle
        logdir = get_value("Log_Dir")
        tempdir = get_value("Temp_Dir")
        outdir = get_value("Output_Dir")
        delfolder(tempdir, fileonly=True)
        try:
            set_value("Last_TimeStamp", now_timestamp, change=True)
            for i in os.listdir(logdir):
                # File stem is the logid (files are named "<logid>.log").
                log_info = GetLogSingle(i.split(".")[0], "System", True)
                if log_info == -1:
                    # No DB record for this file: it is an orphan, remove it.
                    delfile(os.path.join(logdir, i))
                else:
                    time_difference = (datetime.datetime.now() - log_info['time']).total_seconds()
                    if time_difference > 24 * 3600:
                        SetLogStatus(log_info['logid'], "outdated")
                        delfile(os.path.join(logdir, i))
                        delfolder(os.path.join(outdir, log_info['logid']))
        except Exception as e:
            # Cleanup is best-effort, but at least surface the failure
            # instead of swallowing it silently.
            print("RunCleaner failed: %s" % e)
def Watch_dmzj(title, chapter, url, ext, logmini, logid):
    """Download every page image of one dmzj chapter, optionally as a PDF.

    Renders the chapter page with headless Chrome (the page list is built by
    JavaScript), collects the per-page image URLs from the <option> elements,
    downloads each image into ``Temp_Dir/<title>_<chapter>/NNN``, and when
    ``ext == "pdf"`` merges the folder via :func:`folder2pdf`.

    On any failure the log status is set to "uncompleted".
    """
    driver = None  # tracked so the error path never touches an unopened/closed driver
    try:
        tempdir = get_value("Temp_Dir")
        outdir = get_value("Output_Dir")
        folderpath = ""
        oriurl = url  # keep the chapter URL for the HTTP referer; loop rebinds `url`
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        logmini.info("尝试获取网页数据,这可能需要较长的时间")
        driver = webdriver.Chrome(options=chrome_options)
        driver.get(url)
        html = driver.execute_script(
            'return document.documentElement.outerHTML')
        soup = BeautifulSoup(html, "html.parser")
        urls = soup.find_all("option")
        page = 0
        # Close the browser as soon as the DOM is captured to free memory.
        driver.close()
        driver = None
        for url in urls:
            page += 1
            logmini.info(url.getText())
            # Option values are protocol-relative ("//..."), so prefix https:.
            imgurl = "https:" + url['value']
            logmini.info(imgurl)
            folderpath = title + "_" + chapter
            try:
                if not os.path.exists(tempdir):
                    os.mkdir(tempdir)
                if not os.path.exists(outdir):
                    os.mkdir(outdir)
                if not os.path.exists(tempdir + folderpath):
                    os.mkdir(tempdir + folderpath)
            except Exception as e:
                logmini.warning(e)
            # Zero-padded page number keeps lexicographic order == page order.
            filepath = str(page).zfill(3)
            Downpic(ua.random, oriurl, imgurl,
                    tempdir + folderpath + "/" + filepath, logmini, logid)
        if ext == "pdf":
            folder2pdf(folderpath, logmini, logid)
    except Exception as e:
        logmini.error("%s_%s 下载失败:%s" % (title, chapter, e))
        SetLogStatus(logid, "uncompleted")
        # Only close a driver that is still open; a close() failure here must
        # not mask the original error.
        if driver is not None:
            try:
                driver.close()
            except Exception:
                pass
def logger_init():
    """Make sure the configured log directory exists before logging starts.

    Creation failures are reported to stdout rather than raised, so a bad
    log path does not abort startup.
    """
    log_directory = get_value("Log_Dir")
    try:
        directory_missing = not os.path.exists(log_directory)
        if directory_missing:
            os.mkdir(log_directory)
    except Exception as err:
        print(err)
def printerrorlist():
    """Write the accumulated global ``errorlist`` to a timestamped text file.

    The file is created in ``Log_Dir`` as "ErrorList YYYY-MM-DD HH-MM-SS.txt"
    (note: ``Log_Dir`` is assumed to end with a path separator — it is
    concatenated, not joined). Returns 0 whether or not anything was written.
    """
    if not errorlist:
        return 0
    logdir = get_value("Log_Dir")
    filename = (logdir + "ErrorList " +
                time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time())) +
                ".txt")
    # Context manager guarantees the handle is closed even if a write fails
    # (the original leaked the handle on error).
    with open(filename, "w+") as file:
        for item in errorlist:
            print(item, file=file)
    return 0
def GetLog(logid, token, fromsystem=False):
    """Return the log text for ``logid``, enforcing per-user access control.

    A user may read only their own logs unless they are an administrator
    (authorization "管理员") or the call is made by the system
    (``fromsystem=True``). Returns a Chinese error string when the log does
    not exist or access is denied, otherwise the full log file contents.
    """
    db = MySQLdb.connect(Mysql_host, Mysql_user, Mysql_pass, Mysql_db,
                         charset='utf8')
    try:
        cursor = db.cursor()
        # Parameterized query: the original interpolated logid directly into
        # the SQL string, which allowed SQL injection.
        sql = "SELECT * FROM MANGA_DOWNLOAD WHERE LOGID = %s"
        cursor.execute(sql, (logid,))
        result = cursor.fetchone()
    finally:
        # Close the connection even if execute() raises.
        db.close()
    if result is None:
        return "没有找到这个日记"
    user = GetUser(GetUsername(token))
    # result[0] is the owning username — TODO confirm against table schema.
    if (result[0] != user['username']) and (user['authorization'] != "管理员") and (not fromsystem):
        return "您没有权限访问其他人的日记"
    with open(get_value('Log_Dir') + logid + ".log", "r") as log:
        text = log.read()
    return text
import _thread from proj_manga.mod_imports import * from proj_manga.mod_safety import pass_hash, s_passencrypt from proj_manga.mod_settings import get_value, set_value Mysql_host = get_value("Mysql_host") Mysql_pass = get_value("Mysql_pass") Mysql_db = get_value("Mysql_db") Mysql_user = get_value("Mysql_user") temprory_token_list = {} def UpdateUser(username, password, email, s_host, s_pass, chara, s_port, kindleemail): # 更新用户 ori = email ori = ori.encode("utf8") emailmd5 = hashlib.md5(ori).hexdigest() user = GetUser(username) if password == "" and user['username'] != None: password = GetUser(username)['pass_hash'] else: password = pass_hash(password) s_pass = str(s_passencrypt(s_pass), encoding="utf-8") db = MySQLdb.connect(Mysql_host, Mysql_user, Mysql_pass, Mysql_db, charset='utf8') cursor = db.cursor() if user['username'] == None: sql = """INSERT INTO MANGA_USER(UUID, USERNAME, EMAIL, PASS, KINDLEEMAIL, S_HOST, S_PORT, S_PASS, CHARA, EMAILMD5) VALUES (uuid(), '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')""" % ( username, email, password, kindleemail, s_host, s_port, s_pass, chara, emailmd5) else:
def Analyze_dmzj(url, ext, downloadlist, downloadall, logid, sendmail, merge, token):
    """Main dmzj download task: scrape a work's chapter list and download it.

    Fetches the work page at ``url``, walks every chapter list entry, and for
    each selected chapter (its 1-based position ``sid`` is in ``downloadlist``,
    or ``downloadall`` is set) calls :func:`Watch_dmzj`. Optionally merges the
    resulting PDFs (``merge``) and emails them to the user's Kindle address
    (``sendmail``, using SMTP settings from the user record of ``token``).

    Acts as a global single-task queue: it busy-waits on the shared
    ``Task_Running`` flag, holds it for the duration, and always releases it
    in ``finally``. Progress and errors are written to a per-task HTML log
    and mirrored into the DB status via SetLogStatus
    (running / complete / failed).
    """
    # Wait until no other task is running, then take the lock.
    while get_value("Task_Running") == True:
        time.sleep(1)
    set_value("Task_Running", True, local=True)
    SetLogStatus(logid, "running")
    max_threads = multiprocessing.cpu_count()
    logmini = html_logclass(get_value("Log_Dir") + logid + ".log")
    try:
        tempdir = get_value("Temp_Dir")
        outdir = get_value("Output_Dir")
        logmini.info("正在下载地址" + url)
        headers = {"User-Agent": ua.random}
        logmini.info("尝试获取网页数据")
        response = requests.get(url=url, headers=headers, timeout=20)
        if response.status_code != 200:
            logmini.error("您输入的URL地址不合法!")
            logmini.error("响应状态" + str(response.status_code))
            SetLogStatus(logid, "failed")
            return
        html = response.text
        response.close()
        rooturl = url.split('/')[2]  # host part, used to absolutize chapter links
        soup = BeautifulSoup(html, "html.parser")
        title = soup.find("span", class_="anim_title_text")
        logmini.info("作品名称:" + title.getText())
        category = soup.find_all("div", class_="cartoon_online_border")
        id = 0
        threads = []
        for subcategory in category:
            id += 1
            logmini.info("第" + str(id) + "页")
            list = subcategory.find_all("li")
            sid = 0
            for item in list:
                # sid is the chapter's 1-based position across this sub-list;
                # downloadlist is matched against it — TODO confirm callers
                # pass positions, not chapter numbers.
                sid += 1
                logmini.info("Sid:" + str(sid) + " " + item.find("a").getText())
                referlink = "https://" + rooturl + item.a['href']
                logmini.info("链接:" + referlink)
                if (sid in downloadlist) or (downloadall):
                    # Multithreading was abandoned here; chapters download
                    # sequentially.
                    # n_thread = thread_watch(title.getText(), item.find("a").getText(), referlink, ext, logmini, logid)
                    # while True:
                    #     if (len(threading.enumerate()) < int(max_threads / 2)):
                    #         # divided by 2 to keep downloads running and avoid deadlock
                    #         break
                    # n_thread.start()
                    # threads.append(n_thread)
                    Watch_dmzj(title.getText(), item.find("a").getText(),
                               referlink, ext, logmini, logid)
        # for t in threads:
        #     t.join()
        if merge == True:
            # Merge all chapter PDFs into a single file; individual files
            # are removed by the merge helper.
            logmini.info("自动合并被设置为开,正在合并文件(注意:合并后单独文件将会被删除)。")
            path = get_value("Output_Dir") + logid + "/"
            if not downloadall:
                result = mergefiles(
                    path,
                    title.getText() + "_第%s到第%s话.pdf" % (str(downloadlist[0]), str(downloadlist[-1])),
                    logmini)
            else:
                result = mergefiles(path, title.getText() + "_全部下载.pdf", logmini)
            if result == 0:
                logmini.info("合并成功。")
            else:
                logmini.info("合并失败。")
                SetLogStatus(logid, "failed")
                # Abort the task via the outer handler.
                raise Exception
        if sendmail == True:
            logmini.info("自动发送kindle被设置为开,正在发送信件。")
            user = GetUser(GetUsername(token))
            s_host = user['s_host']
            s_port = user['s_port']
            s_pass = user['s_pass']
            s_email = user['email']
            kindle_email = user['kindle_email']
            # All SMTP fields must be configured, otherwise skip sending.
            valid = (s_host != "") and (s_port != "") and (s_pass != "") and (
                s_email != "") and (kindle_email != "")
            try:
                if not valid:
                    raise Exception
                path = get_value("Output_Dir") + logid
                filelist = os.listdir(path)
                from proj_manga.mod_email import SendEmail_File
                for file in filelist:
                    logmini.info("正在发送文件 %s" % file)
                    path = os.path.join(
                        os.getcwd(), get_value("Output_Dir")) + logid + "/" + file
                    mail_result = SendEmail_File(s_email, kindle_email, s_host,
                                                 s_port, s_pass, path, file)
                    if mail_result == 0:
                        logmini.info("发送文件 %s 成功" % file)
                    else:
                        logmini.error("发送文件 %s 失败" % file)
            except Exception as e:
                # Mail failure does not fail the whole task.
                logmini.error("发送文件失败")
        logmini.info("任务完成。")
        SetLogStatus(logid, "complete")
    except Exception as e:
        logmini.error("任务失败。%s" % e)
        SetLogStatus(logid, "failed")
    finally:
        # Always release the global task lock.
        set_value("Task_Running", False, local=True)
def folder2pdf(folderpath, logmini, logid):
    """Combine the images in ``Temp_Dir/folderpath`` into one PDF.

    Pictures (files whose lowercased name contains jpg/png/jpeg) are sorted
    by name and saved as ``Output_Dir/<logid>/<folderpath>.pdf``. When the
    ``GenerateBookMark`` setting is "True", a one-bookmark-per-page outline
    is added via :func:`pdfbookmark`. Honors ``CleanOriPDF`` /
    ``CleanOriPic`` settings for cache cleanup. Returns 0.
    """
    outdir = get_value("Output_Dir")
    tempdir = get_value("Temp_Dir")
    pdf_name = folderpath
    path = tempdir + folderpath
    # Match picture files case-insensitively (substring match kept from the
    # original behavior).
    file_list = [item.lower() for item in os.listdir(path)]
    pic_name = [x for x in file_list
                if "jpg" in x or 'png' in x or 'jpeg' in x]
    pic_name.sort()
    if not pic_name:
        # Guard: the original crashed with IndexError on an empty folder.
        logmini.error("没有找到可用的图片文件")
        return 0
    im1 = Image.open(os.path.join(path, pic_name[0]))
    # PDF cannot embed alpha/palette images; the original converted every
    # page except the first, which broke RGBA/P cover pages.
    if im1.mode in ("RGBA", "P"):
        im1 = im1.convert('RGB')
    pic_name.pop(0)
    im_list = []
    for i in pic_name:
        img = Image.open(os.path.join(path, i))
        if img.mode in ("RGBA", "P"):
            img = img.convert('RGB')
        im_list.append(img)
    try:
        if not os.path.exists(outdir + logid):
            os.mkdir(outdir + logid)
    except Exception as e:
        logmini.error("无法创建文件夹!%s" % e)
    if get_value("GenerateBookMark") == "True":
        try:
            # Save an intermediate PDF, then rewrite it with bookmarks.
            im1.save(tempdir + pdf_name + "_ori.pdf", "PDF", resolution=100.0,
                     save_all=True, append_images=im_list)
            logmini.info("PDF初步创建完成:" + tempdir + pdf_name + "_ori.pdf,生成书签...")
            # One bookmark per page, titled with the 1-based page number.
            bookmarks = []
            for i in range(0, len(pic_name) + 1):
                bookmark = {"ID": i, "Title": str(i + 1), "Page": i, "Parent": -1}
                bookmarks.append(bookmark)
            result = pdfbookmark(tempdir + pdf_name + "_ori.pdf",
                                 outdir + logid + "/" + pdf_name + ".pdf",
                                 bookmarks, logmini)
            if result == 0:
                logmini.info("成功生成PDF")
            else:
                logmini.warning("PDF生成失败 %s" % (result))
            if get_value("CleanOriPDF") == "True":
                logmini.info("清理PDF缓存")
                result = delfile(tempdir + pdf_name + "_ori.pdf")
                if result != 0:
                    logmini.warning("清理PDF缓存失败 %s" % (result))
        except Exception as e:
            print(e)
    else:
        try:
            im1.save(outdir + logid + "/" + pdf_name + ".pdf", "PDF",
                     resolution=100.0, save_all=True, append_images=im_list)
            # Fixed: the original passed a second positional argument to
            # info(), which raised and made a successful save log as a
            # failure.
            logmini.info("PDF创建完成:" + pdf_name + ".pdf")
        except Exception as e:
            logmini.error(e)
    if get_value("CleanOriPic") == "True":
        logmini.info("清理图片缓存")
        result = delfolder(path)
        if result != 0:
            logmini.warning("清理图片缓存失败 %s" % (result))
    return 0