def setConfig(self, db_config):
    """Set the database configuration and initialize the connection.

    :param db_config: dict of MySQLdb connection parameters.
    :return: the result of ``self.init()`` on success, or ``None`` when
        the argument is not a dict (the failure is logged).
    """
    # BUG FIX: the original tested ``if isinstance(db_config, dict)`` and
    # bailed out, i.e. it rejected every valid dict config and accepted
    # everything else.  The check must be negated.
    if not isinstance(db_config, dict):
        log.log_exception(50, "初始化数据库类失败,缺乏配置字典。")
        return None
    self.__MySQLConfig = db_config
    return self.init()
def request(url, data, method='POST', from_url=""):
    """Send an HTTP request through the shared session and return the
    decoded JSON body (NOT the raw response object, despite the old doc).

    :param url: request URL.
    :param data: payload; sent as JSON body for POST, query params for GET.
    :param method: 'POST' or 'GET' (default 'POST').
    :param from_url: if non-empty, added to the session headers.
    :return: parsed JSON dict, or ``None`` when every attempt failed.
    """
    response = None
    # Re-create the shared session when it has been idle past its lifetime.
    if time.time() - last_init_session > session_time:
        init_session()
    if from_url:
        # NOTE(review): this sets a literal "from_url" header; the intent
        # was probably the standard "Referer" header -- confirm with the
        # target server before renaming it, as behavior would change.
        ses.headers.update(from_url=from_url)
    for i in range(retry_count):
        try:
            if method == 'POST':
                response = ses.post(url, json=data)
            else:
                response = ses.get(url, params=data)
            if response.status_code == 200:
                break
        except Exception as e:
            # BUG FIX: the original compared ``i < retry_count``, which is
            # always true since i runs from 0 to retry_count - 1, so the
            # level-50 "all retries failed" branch was unreachable.
            if i < retry_count - 1:
                log.log_exception(
                    30, "请求网址【%s】时发生了错误%s,尝试%d次失败。即将初始会话" % (url, e, retry_count))
                init_session()
            else:
                log.log_exception(
                    50, "请求网址【%s】时发生了错误%s,尝试%d次失败。" % (url, e, retry_count))
    # BUG FIX: if no attempt ever produced a response, the original
    # crashed with AttributeError on ``None.json()``.  Return None so
    # callers can handle the failure.
    if response is None:
        return None
    return response.json()
def POST(self):
    """Handle a vote request.

    Reads ``track_id`` (digits; defaults to the last played track when
    absent) and ``token`` from the request input, records the vote, and
    returns a JSON-serializable status dict.
    """
    try:
        args = web.input(track_id="", token=None)
        logging.debug("Vote request: %s" % args)
        sender = auth.get_id_by_token(args.token)
        if sender is None:
            raise web.forbidden("Bad token.")
        if args.track_id.isdigit():
            track_id = int(args.track_id)
        else:
            track_id = tracks.get_last_track_id()
        weight = tracks.add_vote(track_id, sender, self.vote_value)
        if weight is None:
            return {"status": "error", "message": "No such track."}
        database.commit()
        message = 'OK, current weight of track #%u is %.04f.' % (track_id, weight)
        return {
            "status": "ok",
            "message": message,
            "id": track_id,
            "weight": weight,
        }
    except web.Forbidden:
        # Propagate auth failures so the framework renders 403.
        raise
    except Exception as e:
        # FIX: replaced the Python-2-only "except Exception, e" syntax
        # with the "as e" form used everywhere else in this file.
        log.log_exception(str(e), e)
        return {"status": "error", "message": str(e)}
def get_news_list(param):
    """Parse a news-list HTML page and return the extracted entries.

    :param param: sequence of (page_html, observed_url).
    :return: list of ``{'date', 'url', 'title'}`` dicts on success, or
        ``None`` when the list is empty or parsing fails (both logged).
    """
    ob_url = param[1]
    page = param[0]
    doc = pq(page)
    try:
        trs = doc('div.border_tr td:eq(1) tr:eq(1) tr').items()
        news_list = []
        for tr in trs:
            news = {}
            # News date, stripped of its surrounding parentheses.
            date = tr('td:eq(2)').text().replace('(', '').replace(')', '').strip()
            news['date'] = date
            # Absolute news URL (relative links resolved against the site root).
            url = tr('td:eq(1) a').make_links_absolute(
                'http://cmee.nuaa.edu.cn/').attr.href
            news['url'] = url
            # News title.
            title = tr('td:eq(1) a').text()
            news['title'] = title
            news_list.append(news)
        if news_list:
            log.log_debug('fetch:解析列表成功' + str(news_list))
            return news_list
        else:
            log.log_error('fetch:新闻列表为空' + ob_url + '网站规则可能发生变化')
    except Exception:
        # FIX: narrowed the original bare ``except:`` (which also
        # swallowed SystemExit/KeyboardInterrupt) to ``except Exception``.
        log.log_exception('fetch:解析列表错误' + ob_url + '网站规则可能发生变化')
def close(self):
    """Close the database connection (also invoked from ``__del__``)."""
    try:
        # Nothing to do when no connection was ever opened.
        if not self.__isInit:
            return
        log.log_exception(10, "关闭数据连接。")
        self.__cursor.close()
        self.__db.commit()
        self.__db.close()
        self.__isInit = False
    except Exception as e:
        log.log_exception(30, "数据库类关闭连接失败。%s" % e)
def execute(self, sql):
    """Execute an SQL statement, commit, and return the full result set.

    :param sql: the SQL statement to execute.
    :raises: re-raises any database error after logging it at level 30.
    """
    try:
        log.log_exception(10, "执行SQL语句:" + sql)
        self.__cursor.execute(sql)
        result = self.__cursor.fetchall()
        self.__db.commit()
        return result
    except Exception as e:
        log.log_exception(30, "数据库类执行查询失败%s,SQL语句:%s" % (e, sql))
        raise e
def update_token(self, wechat_id, token):
    """Store a refreshed WeChat access token and its expiry timestamp.

    :param wechat_id: primary key of the account row to update.
    :param token: the new access-token string.
    :return: result of :meth:`execute`, or ``None`` when it fails.
    """
    # NOTE(review): SQL built by string interpolation; token/wechat_id
    # come from the WeChat API and our own DB, but parameterized queries
    # would still be safer -- requires extending ``execute`` first.
    sql_update = (
        """UPDATE `%saccount` SET `access_token` = '%s' , `token_time` = %s WHERE `id` = %s"""
        % (self.prefix, token, round(time.time() + 7000), wechat_id))
    try:
        return self.execute(sql_update)
    except Exception as e:
        log.log_exception(
            40, "update_token数据库更新失败%s,"
            "SQL语句:%s" % (e, sql_update))
def init(self):
    """(Re)initialize the MySQL connection and cursor.

    Any previous connection is torn down first.  Returns ``self`` on
    success so calls can be chained; on failure the error is logged at
    level 50 and ``None`` is returned.
    """
    if self.__isInit:
        try:
            # Best-effort teardown of the stale connection.
            self.__cursor.close()
            self.__db.close()
        except Exception:
            # FIX: narrowed the original bare ``except: pass`` so that
            # KeyboardInterrupt/SystemExit are not swallowed here.
            pass
        finally:
            self.__isInit = False
    try:
        self.__db = MySQLdb.connect(**(self.__MySQLConfig))
        self.__cursor = self.__db.cursor()
        self.__isInit = True
        # Table-name prefix used by every query helper in this class.
        self.prefix = setting.ini.get("database", "prefix")
        return self
    except Exception as e:
        log.log_exception(50, "数据库连接失败,%s" % e)
def insert(self, table_name, keys, values):
    """Insert one or many rows.

    :param table_name: table name.
    :param keys: column names -- a space/comma separated string, or a tuple.
    :param values: tuple of string values for one row, or a tuple of
        tuples for many rows (uses ``executemany``).
    :return: the cursor result on success, ``None`` on invalid input or
        database error.
    """
    if not self.__isInit or not isinstance(values, tuple):
        return None
    if len(values) < 1:
        return None
    if type(keys) == str:
        keys = keys.replace(" ", ",")
        keys = tuple(keys.split(","))
    # Escape quotes and control characters in every value; a row may be
    # a nested tuple when inserting multiple rows at once.
    values = list(values)
    for key in range(len(values)):
        if isinstance(values[key], tuple):
            tmp_list = list(values[key])
            for a1 in range(len(tmp_list)):
                tmp_list[a1] = tmp_list[a1].replace("'", "\\'")
                tmp_list[a1] = tmp_list[a1].replace('"', '\\"')
                tmp_list[a1] = tmp_list[a1].replace("\r", "")
                tmp_list[a1] = tmp_list[a1].replace("\n", "\\n")
                tmp_list[a1] = tmp_list[a1].replace("\t", "\\t")
            values[key] = tuple(tmp_list)
        else:
            values[key] = values[key].replace("'", "\\'")
            values[key] = values[key].replace('"', '\\"')
            values[key] = values[key].replace("\r", "")
            values[key] = values[key].replace("\n", "\\n")
            values[key] = values[key].replace("\t", "\\t")
    values = tuple(values)
    # NOTE(review): ``%s`` renders the keys tuple with single quotes,
    # e.g. ('a', 'b'); MySQL normally rejects quoted column names in a
    # column list -- confirm this actually works against the server.
    # Also the driver's own %s quoting combined with the manual escaping
    # above looks like double-escaping -- verify stored values.
    sql = "INSERT INTO `%s` %s VALUES (%s)" % (table_name, keys,
                                               (r'"%s",' * len(keys))[:-1])
    # BUG FIX: ``ret`` was unbound when execution raised, so the final
    # ``return ret`` crashed with UnboundLocalError instead of
    # returning None.
    ret = None
    try:
        log.log_exception(10, "插入数据,SQL语句:\n%s" % (sql % values))
        if isinstance(values[0], tuple):
            ret = self.__cursor.executemany(sql, values)
        else:
            ret = self.__cursor.execute(sql, values)
        self.__db.commit()
    except Exception as e:
        log.log_exception(40, "insert数据库类插入数据失败%s,SQL语句:%s" % (e, sql))
    return ret
def send(sub, meg):
    """Send an HTML notification mail via QQ's SMTP-over-SSL server.

    :param sub: mail subject.
    :param meg: HTML body; when empty, nothing is sent and an error is
        logged instead.
    """
    if meg:
        try:
            msg = MIMEText(meg, 'html', 'UTF-8')
            # Sender display name and account.
            msg['From'] = formataddr(["南航更新了", my_sender])
            # Recipient display name and account.
            msg['To'] = formataddr(["wzx", my_user])
            # Mail subject line.
            msg['Subject'] = sub
            # QQ mail SMTP server, SSL port 465.
            server = smtplib.SMTP_SSL("smtp.qq.com", 465)
            server.login(my_sender, my_pass)
            server.sendmail(my_sender, [
                my_user,
            ], msg.as_string())
            server.quit()
            log.log_info('notify:发送"' + sub + '"')
        except Exception:
            # FIX: narrowed the original bare ``except:`` so Ctrl-C and
            # SystemExit are not swallowed while sending.
            log.log_exception('notify:发送邮件出现问题')
    else:
        log.log_error('notify:更新内容为空')
def resetToken(appid, appsecret):
    """Request a fresh WeChat access token.

    :param {str} appid: appid
    :param {str} appsecret: appsecret
    :return: the token string, or ``None`` on missing credentials or a
        failed/errored API call.
    """
    if not appid or not appsecret:
        return None
    url = "https://api.weixin.qq.com/cgi-bin/token"
    payload = {
        'grant_type': 'client_credential',
        'appid': appid,
        'secret': appsecret
    }
    retData = request(url, payload, 'GET')
    # Guard against a request that failed completely.
    if not retData:
        return None
    if retData.get('errcode'):
        # BUG FIX: the original passed a spurious ``Exception()`` as the
        # first argument; every other call site in this file uses the
        # (level, message) signature.  Also str() the errmsg so a missing
        # key cannot raise TypeError during concatenation.
        log.log_exception(
            50, 'resetToken:重置token失败,请检查微信公众号设置里是否把本机IP纳入安全地址。 ' +
            str(retData.get('errmsg')))
    return retData.get('access_token')
def select_msg(self):
    """Fetch one pending message and mark it as sent.

    :return: a dict mapping field names to the row's values, or ``None``
        when the queue is empty or the query fails (failure is logged).
    """
    try:
        sql_select = (
            """SELECT a.`id`, a.`openid`, a.`title`, a.`color`, a.`keyword1`, a.`keyword2`, a.`keyword3`, a.`url`, a.`remark`, a.`wechat_id`, c.`template_id`, b.`appid`, b.`appsecret`, b.`access_token`, b.`token_time` FROM `%smsg` a,`%saccount` b ,`%stemplate` c WHERE a.`delete_time` IS NULL AND a.`is_send`=0 AND a.`settime`< %s AND a.`wechat_id`=b.`id` And a.`template_id`=c.`id` ORDER BY a.`id` LIMIT 1"""
            % (self.prefix, self.prefix, self.prefix, round(time.time())))
        sql_update = (
            """UPDATE `%smsg` SET `is_send`=1,`send_time`=now() WHERE `id`= %%s"""
            % self.prefix)
        rows = self.execute(sql_select)
        if not rows:
            return None
        # Flag the row as sent before handing it to the caller.
        self.execute(sql_update % rows[0][0])
        field_name = ('id', 'openid', 'title', 'color', 'keyword1',
                      'keyword2', 'keyword3', 'url', 'remark', 'wechat_id',
                      'template_id', 'appid', 'appsecret', 'access_token',
                      'token_time')
        return dict(zip(field_name, rows[0]))
    except Exception as e:
        log.log_exception(
            40, "select_msg数据库查询失败%s,"
            "SQL语句1:%s,SQL语句2:%s" % (e, sql_select, sql_update))
def spider_list():
    """Main dispatch loop.

    Polls the message queue and pushes WeChat template messages,
    refreshing tokens when expired and handling the common WeChat API
    error codes.  Stops once the consecutive-error limit is reached.
    """
    db = Db_connect()
    msg = db.select_msg()
    err_count = 0
    max_error_num = int(setting.ini.get('system', 'max_error_num', 100))
    while True:
        if not msg:
            log.log_exception(20, "消息队列中没有数据。")
            time.sleep(1)
            msg = db.select_msg()
            continue
        if not msg.get('appid') or not msg.get('appsecret'):
            log.log_exception(
                30, '消息%s未设置appid或appsecret,因此未发送' % msg.get('id'))
            # BUG FIX: the original ``continue`` kept the same message and
            # spun forever on it; fetch the next message first.
            msg = db.select_msg()
            continue
        appid = msg.get('appid')
        appsecret = msg.get('appsecret')
        # Refresh the access token when the stored one has expired.
        if msg.get('token_time') < round(time.time()):
            token = resetToken(appid, appsecret)
            db.update_token(msg.get('wechat_id'), token)
        else:
            token = msg.get('access_token')
        data = {
            "touser": msg.get('openid'),
            "template_id": msg.get('template_id'),
            "url": msg.get('url'),
            "data": {
                "first": {
                    "value": msg.get('title'),
                    "color": msg.get('color'),
                },
                "keyword1": {
                    "value": msg.get('keyword1'),
                    "color": "#000000"
                },
                "keyword2": {
                    "value": msg.get('keyword2'),
                    "color": "#000000"
                },
                "keyword3": {
                    "value": msg.get('keyword3'),
                    "color": "#000000"
                },
                "remark": {
                    "value": msg.get('remark'),
                    "color": "#000000"
                }
            }
        }
        url = 'https://api.weixin.qq.com/cgi-bin/message/template/send?access_token=' + token
        retData = request(url, data=data, method='POST')
        # BUG FIX: request() may return None when every retry failed;
        # treat that as a send error instead of crashing on .get().
        errcode = retData.get('errcode') if retData else None
        if errcode == 0:
            log.log_exception(10, '信息%s发送成功' % msg.get('id'))
            msg = db.select_msg()
            err_count = 0
            continue
        err_count += 1
        if err_count >= max_error_num:
            log.log_exception(50, '连续错误次数超过设定值,程序已终止')
            # BUG FIX: actually stop, as the log message promises; the
            # original logged "terminated" but kept looping.
            break
        if errcode == 40003:  # invalid OpenID
            log.log_exception(30, '信息%s发送失败,原因:不合法的openid' % msg.get('id'))
            msg = db.select_msg()
        elif errcode == 42001 or errcode == 41001:  # access_token expired
            token = resetToken(appid, appsecret)
            db.update_token(msg.get('wechat_id'), token)
            log.log_exception(
                30, '信息%s发送失败,现在重试,原因:access_token 超时' % msg.get('id'))
        elif errcode == 48001:  # API not authorized for this account
            log.log_exception(
                50,
                '信息%s发送失败,原因:api功能未授权,请确认公众号已获得该接口,可以在公众平台官网 - 开发者中心页中查看接口权限'
                % msg.get('id'))
        elif errcode == 48004:  # API banned
            log.log_exception(50, '信息%s发送失败,原因:api接口被封禁' % msg.get('id'))
        else:
            log.log_exception(
                30, '信息%s发送失败,原因:%s' %
                (msg.get('id'), retData.get('errmsg') if retData else None))
            msg = db.select_msg()
def backup_files(new_files, changed_files, remove_files):
    """
    The files provided in the given lists will be backed up. This will first
    delete all the files in the remove_files list, then copy over all the files
    in the new_files list, then modify each file in the changed_files list.
    :param new_files: A list of new files to backup generated by mark_files().
    :param changed_files: A list of changed files to backup generated by mark_files().
    :param remove_files: A list of files to delete from the backup generated by mark_files().
    :return: The number of errors that occurred.
    """
    # If there's no changes to make, display a message
    if len(new_files) == 0 and len(changed_files) == 0 and len(
            remove_files) == 0:
        print("No changes are needed.", end="\r", flush=True)
    # Prepare values that will track the progress of each section of the backup
    num_errors = 0
    count = 0
    limit = NUM_FILES_DELETED
    # Delete every file in the remove list
    for file_tuple in remove_files:
        # Each tuple is (absolute output path, size); only the path is needed here
        delete_file_path = file_tuple[0]
        # Use the correct delete function based on if it's a file or folder
        try:
            set_status("Deleting {}".format(
                os.path.split(delete_file_path)[1]))
            if os.path.isdir(delete_file_path):
                # Directory: util.rmtree reports how many files it removed,
                # so the progress counter advances once per contained file
                deleted_file_count = util.rmtree(delete_file_path)
                for _ in range(deleted_file_count):
                    count += 1
                    print("Deleting old files: {}/{}".format(count, limit) +
                          ' ' * 20,
                          end="\r",
                          flush=True)
                    increment_backup_progress()
            else:
                os.remove(delete_file_path)
                increment_backup_progress()
                count += 1
                print("Deleting old files: {}/{}".format(count, limit) +
                      ' ' * 20,
                      end="\r",
                      flush=True)
            log.log("DELETED - " + delete_file_path)
        except PermissionError:
            # Log the exception and indicate that an error occurred
            log.log_exception(delete_file_path, "DELETING")
            num_errors += 1
    # Reset the counter values and copy over every file in the new list
    count = 0
    limit = len(new_files)
    for file_tuple in new_files:
        # Each tuple is (input path, size, output path)
        new_file = file_tuple[0]
        output_path = file_tuple[2]
        try:
            set_status("Copying over {} ({})".format(
                os.path.split(new_file)[1],
                util.bytes_to_string(os.path.getsize(new_file), 2)))
            # copy2 preserves file metadata along with the contents
            shutil.copy2(new_file, output_path)
            log.log("NEW - " + output_path)
        except PermissionError:
            # Write the full error to the log file and record that an error occurred
            log.log_exception(output_path, "CREATING")
            num_errors += 1
        count += 1
        increment_backup_progress()
        print("Copying over new files: {}/{}".format(count, limit) + ' ' * 20,
              end="\r",
              flush=True)
    # Reset the counter values and overwrite every file in the changed list
    count = 0
    limit = len(changed_files)
    for file_tuple in changed_files:
        # Each tuple is (input path, input size, output path, output size)
        new_file = file_tuple[0]
        output_path = file_tuple[2]
        try:
            set_status("Updating {}, ({})".format(
                os.path.split(new_file)[1],
                util.bytes_to_string(os.path.getsize(new_file), 2)))
            shutil.copy2(new_file, output_path)
            log.log("UPDATED - " + output_path)
        except PermissionError:
            # Write the full error to the log file and record that an error occurred
            log.log_exception(output_path, "UPDATING")
            num_errors += 1
        count += 1
        increment_backup_progress()
        print("Updating existing files: {}/{}".format(count, limit) +
              ' ' * 20,
              end="\r",
              flush=True)
    return num_errors
def mark_files(input_path, output_path, config, input_number, depth=0):
    """
    This is the file preparation stage of the backup process. The directory to
    be backed up is walked through, and all new files, changed files, and files
    that should be deleted are compiled into their respective lists, essentially
    "marking" those files for later. While the directory is walked, a directory
    skeleton structure is created in the output, so any directories that will
    have files sent to them later will exist in the output.
    :param input_path: The file or directory to backup.
    :param output_path: The file or directory in the drive to backup to.
    :param config: The current configuration.
    :param input_number: The index of the entry currently being worked with,
        starting from 1.
    :param depth: The depth of the recursive search. Will be 0 if not specified.
    :return: A tuple of three lists is returned. First is a list of new files.
        Each element of this list is a tuple of three values: first the absolute
        file path, second that file's size in bytes, and third the absolute file
        path from the output. Second is a list of changed files. Each element of
        this list is a tuple of four values: first the absolute file path from
        the input, second the size of the file from the input, third the
        absolute file path from the output, and fourth the size of the file from
        the output. Third is a list of files to delete. Each element of this
        list is a tuple of two values: first the absolute file path from the
        output, and second that file's size in bytes.
    """
    global THREAD_START_DEPTH
    # Don't continue down this path if it should be excluded
    if config.get_entry(input_number).should_exclude(input_path, output_path):
        log.log("EXCLUDED - " + input_path)
        return [], [], []
    # If this is a file, check what to do with it and increment counters as necessary
    if os.path.isfile(input_path):
        file_size = os.path.getsize(input_path)
        if os.path.exists(output_path):
            if not util.file_compare(input_path, output_path):
                # The file has changed and will be added to the update list
                mark_file_processed(file_size, modified=True)
                return [], [(input_path, file_size, output_path,
                             os.path.getsize(output_path))], []
            else:
                # The file needs no attention
                mark_file_processed(file_size)
                return [], [], []
        else:
            # The file is new and will be added to the new list
            mark_file_processed(file_size, is_new=True)
            return [(input_path, file_size, output_path)], [], []
    # Otherwise, it's a directory, so recurse on each child of the directory
    else:
        new_files = []
        changed_files = []
        remove_files = []
        # If this directory doesn't exist in the output, make it
        if not os.path.exists(output_path):
            try:
                os.mkdir(output_path)
                # Mirror the source directory's permission bits
                shutil.copymode(input_path, output_path)
            except PermissionError:
                # Log the exception and return so we don't process any of this directory's children
                log.log_exception(output_path, "CREATING DIRECTORY")
                increment_error()
                return [], [], []
        # Initialize values that will help in efficiently gathering names of files to remove
        input_dir_files = os.listdir(input_path)
        output_dir_files = os.listdir(output_path)
        output_dir_idx = 0
        len_output_dir = len(output_dir_files)
        param_list = []
        # Start by sorting the file lists so we can index them and compare them side by side
        input_dir_files.sort()
        output_dir_files.sort()
        try:
            # Check every file in the input
            for input_dir_idx in range(len(input_dir_files)):
                filename = input_dir_files[input_dir_idx]
                new_input = os.path.join(input_path, filename)
                new_output = os.path.join(output_path, filename)
                # Loop to check if this file exists in the output as well by
                # looping through output files (two-pointer walk over the two
                # sorted listings)
                while output_dir_idx < len_output_dir:
                    # If it does, index over it and leave the loop, leaving the file in the output alone
                    if filename == output_dir_files[output_dir_idx]:
                        output_dir_idx += 1
                        break
                    # If this output file isn't the current input file, add it to the remove list
                    else:
                        # Stop checking if we are beyond where this file would alphabetically be
                        if filename < output_dir_files[output_dir_idx]:
                            break
                        else:
                            output_filename = os.path.join(
                                output_path, output_dir_files[output_dir_idx])
                            # Only add this to the list if it's not the old confirmation file
                            if not output_dir_files[output_dir_idx] == CONFIRMATION_FILENAME or \
                                    not input_path == config.get_entry(input_number).input:
                                if os.path.isdir(output_filename):
                                    # Count every file inside the doomed directory
                                    delete_size, delete_files = util.directory_size(
                                        output_filename)
                                    for _ in range(delete_files):
                                        mark_file_processed(deleted=True)
                                else:
                                    mark_file_processed(deleted=True)
                                remove_files.append(
                                    (output_filename,
                                     os.path.getsize(output_filename)))
                            output_dir_idx += 1
                # If this is a directory, save parameters to spawn a thread later
                if os.path.isdir(new_input):
                    param_list.append([
                        new_input, new_output, config, input_number, depth + 1
                    ])
                # Otherwise, recurse and process this file here
                else:
                    temp_new, temp_changed, temp_remove = mark_files(
                        new_input, new_output, config, input_number, depth + 1)
                    new_files.extend(temp_new)
                    changed_files.extend(temp_changed)
                    remove_files.extend(temp_remove)
            # In spawn_threads mode, execute each list of parameters on a separate thread and combine the results
            with concurrent.futures.ThreadPoolExecutor() as executor:
                futures = []
                # Don't spawn a new thread if only one set of parameters is in the list
                if len(param_list) == 1:
                    temp_new, temp_changed, temp_remove = mark_files(
                        *param_list[0])
                    new_files.extend(temp_new)
                    changed_files.extend(temp_changed)
                    remove_files.extend(temp_remove)
                else:
                    for params in param_list:
                        # Only spawn new threads around the same depth in the file hierarchy
                        if ACTIVE_THREADS < MAX_THREADS and (
                                depth <= THREAD_START_DEPTH + 1
                                or THREAD_START_DEPTH == -1):
                            # Record the depth at which threading began
                            if THREAD_START_DEPTH == -1:
                                THREAD_START_DEPTH = depth
                            edit_thread_count(1)
                            futures.append(executor.submit(
                                mark_files, *params))
                        else:
                            temp_new, temp_changed, temp_remove = mark_files(
                                *params)
                            new_files.extend(temp_new)
                            changed_files.extend(temp_changed)
                            remove_files.extend(temp_remove)
                # Gather results from all spawned threads and release their slots
                results = [f.result() for f in futures]
                edit_thread_count(-1 * len(results))
                for (temp_new, temp_changed, temp_remove) in results:
                    new_files.extend(temp_new)
                    changed_files.extend(temp_changed)
                    remove_files.extend(temp_remove)
            # If there's still more files in the output that weren't looped over, add them all to the remove list
            if output_dir_idx < len_output_dir:
                for end_output_idx in range(output_dir_idx,
                                            len(output_dir_files)):
                    output_filename = os.path.join(
                        output_path, output_dir_files[end_output_idx])
                    # Only add this to the list if it's not the old confirmation file
                    if not output_dir_files[end_output_idx] == CONFIRMATION_FILENAME or \
                            not input_path == config.get_entry(input_number).input:
                        if os.path.isdir(output_filename):
                            delete_size, delete_files = util.directory_size(
                                output_filename)
                            for _ in range(delete_files):
                                mark_file_processed(deleted=True)
                        else:
                            mark_file_processed(deleted=True)
                        remove_files.append((output_filename,
                                             os.path.getsize(output_filename)))
        except FileNotFoundError as error:
            # Display a warning if long paths need to be enabled on Windows
            if len(input_path) >= 260:
                print("FileNotFoundError: Unable to access " + input_path)
                print(
                    "This is likely because the file path is longer than 260 characters."
                )
                print(
                    "If you are running this on Windows, set LongPathsEnabled to 1 in your registry."
                )
            else:
                print(error)
            # Abort the whole backup on an unreadable path
            exit(1)
        # Show the current progress and return
        progress_str = "{} files found, {} ({} new, {} changed, {} to remove)".format(
            NUM_FILES_PROCESSED, util.bytes_to_string(TOTAL_SIZE_PROCESSED, 2),
            NUM_FILES_NEW, NUM_FILES_MODIFIED, NUM_FILES_DELETED)
        print(progress_str + ' ' * 10, end="\r", flush=True)
        return new_files, changed_files, remove_files