async def change_email(request):
    """
    Change the user's email address.
    :param request:
    :return:
        : -1  session expired, user needs to log in again
        :  0  failed to update email
        :  1  email updated successfully
    """
    user = request['session'].get('user', None)
    data = parse_qs(str(request.body, encoding='utf-8'))
    if user:
        try:
            email = data.get('email', None)[0]
            motor_db = motor_base.get_db()
            await motor_db.user.update_one({'user': user}, {'$set': {'email': email}})
            LOGGER.info('Email updated successfully')
            return json({'status': 1})
        except Exception as e:
            LOGGER.exception(e)
            return json({'status': 0})
    else:
        return json({'status': -1})

async def owllook_delete_bookmark(request):
    """
    Delete a bookmark.
    :param request:
    :return:
        : -1  session expired, user needs to log in again
        :  0  failed to delete bookmark
        :  1  bookmark deleted successfully
    """
    user = request['session'].get('user', None)
    data = parse_qs(str(request.body, encoding='utf-8'))
    bookmarkurl = data.get('bookmarkurl', '')
    if user and bookmarkurl:
        bookmark = unquote(bookmarkurl[0])
        try:
            motor_db = motor_base.get_db()
            await motor_db.user_message.update_one(
                {'user': user},
                {'$pull': {'bookmarks': {'bookmark': bookmark}}})
            LOGGER.info('Bookmark deleted successfully')
            return json({'status': 1})
        except Exception as e:
            LOGGER.exception(e)
            return json({'status': 0})
    else:
        return json({'status': -1})

async def owllook_delete_book(request):
    """
    Remove a book from the user's bookshelf.
    :param request:
    :return:
        : -1  session expired, user needs to log in again
        :  0  failed to remove the book
        :  1  book removed successfully
    """
    user = request['session'].get('user', None)
    data = parse_qs(str(request.body, encoding='utf-8'))
    if user:
        if data.get('book_url', None):
            book_url = data.get('book_url', None)[0]
        else:
            novels_name = data.get('novels_name', '')
            chapter_url = data.get('chapter_url', '')
            book_url = "/chapter?url={chapter_url}&novels_name={novels_name}".format(
                chapter_url=chapter_url[0], novels_name=novels_name[0])
        try:
            motor_db = motor_base.get_db()
            await motor_db.user_message.update_one(
                {'user': user},
                {'$pull': {'books_url': {'book_url': unquote(book_url)}}})
            LOGGER.info('Book removed from bookshelf')
            return json({'status': 1})
        except Exception as e:
            LOGGER.exception(e)
            return json({'status': 0})
    else:
        return json({'status': -1})

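These handlers take a Sanic-style request and return JSON, so they are presumably registered on a blueprint or app elsewhere. A minimal sketch of such wiring, assuming Sanic; the blueprint name and URL paths below are illustrative, not taken from the source.

# Hypothetical registration sketch; blueprint name and paths are assumptions.
from sanic import Blueprint

operate_bp = Blueprint('operate', url_prefix='/operate')
operate_bp.add_route(owllook_delete_book, '/delete_book', methods=['POST'])
operate_bp.add_route(owllook_delete_bookmark, '/delete_bookmark', methods=['POST'])
operate_bp.add_route(change_email, '/change_email', methods=['POST'])
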
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-i', '--input-data-dir',
        type=str,
        default='data/train-img/',
        help='Path to the input folder.')
    parser.add_argument(
        '-o', '--train-data-dir',
        type=str,
        default='data/train-img-cropped/',
        help='Path to the output folder.')
    parser.add_argument(
        '-m', '--model-path',
        type=str,
        help='Path to the model.')

    # parse the arguments once and reuse them
    args = parser.parse_args()
    input_data_dir = args.input_data_dir
    output_data_dir = args.train_data_dir
    model_path = args.model_path

    files = glob.glob(os.path.join(input_data_dir, '*'))
    files_crop = [os.path.join(output_data_dir, os.path.basename(file)) for file in files]

    preprocessor = TensorflowPreprocessor(model_path)
    for file, file_crop in zip(files, files_crop):
        LOGGER.info('running file: {}'.format(file))
        preprocessor.run(file, file_crop)

def load_state(self, appendix: str = "",): """Load a previously saved model. `appendix` can contain any valid string, that will be appended to the default load directory name when loading the model. For example: if `appendix` is `model_1`, and the network type is `PreUpsamplingNetwork` then the function will try to load the model checkpoints from "./checkpoints/preupsamplingnetwork_model_1" directory. Arguments: appendix: string, to append to the directory name (default: ""). save_best_only: if `save_best_only=True`, the latest best model according monitor: quantity to monitor, a NetworkState attribute (default: train_loss). mode: one of {min, max}. If `save_best_only=True`, the decision to overwrite the current save file is made based on either the maximization or the minimization of the monitored quantity. For `val_acc`, this should be `max`, for `val_loss` this should be `min`, etc. save_freq: number of epochs between each checkpoint (default: 10). """ dir_name = str.lower(self.__class__.__name__) + appendix # make sure the save directory exists checkpoint_dir_path = ROOT_PATH.joinpath(Network._CHECKPOINTS_DIR) model_dir_path = checkpoint_dir_path.joinpath(dir_name) if not model_dir_path.exists() or not model_dir_path.is_dir(): raise FileExistsError( f"Model directory {model_dir_path} does not exist. Couldn't find any checkpoints.") # load the layer's weights from Tensorflow SavedModel format self.model.load_weights(filepath=str(model_dir_path.joinpath("weights"))) # also load the previous state of the model state_file_path = model_dir_path.joinpath("state.dat") with open(str(state_file_path), "rb") as f: self._current_state = pickle.load(f) # update the saved state status self._saved_state = copy.deepcopy(self._current_state) LOGGER.info(f"Loaded state with: {chr(10)}{self.state}")
async def auth_param(request, *args, **kwargs):
    # inner wrapper of a parameter-validation decorator:
    # `func` and `keys` come from the enclosing scope
    request_params = {}
    # POST / DELETE requests carry a JSON body
    if request.method == 'POST' or request.method == 'DELETE':
        try:
            post_data = json_loads(str(request.body, encoding='utf-8'))
        except Exception as e:
            LOGGER.exception(e)
            return response_handle(request, UniResponse.PARAM_PARSE_ERR, status=400)
        else:
            request_params.update(post_data)
            params = [key for key, value in post_data.items() if value]
    elif request.method == 'GET':
        request_params.update(request.args)
        params = [key for key, value in request.args.items() if value]
    else:
        # TODO
        return response_handle(request, UniResponse.PARAM_UNKNOWN_ERR, status=400)
    if set(keys).issubset(set(params)):
        try:
            kwargs['request_params'] = request_params
            response = await func(request, *args, **kwargs)
            return response
        except Exception as e:
            LOGGER.exception(e)
            return response_handle(request, UniResponse.SERVER_UNKNOWN_ERR, 500)
    else:
        return response_handle(request, UniResponse.PARAM_ERR, status=400)

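`func` and `keys` are free variables in the wrapper above, so this coroutine is evidently the inner part of a decorator factory. A minimal sketch of what that enclosing factory could look like; the name `auth_params` and its calling convention are assumptions for illustration only.

# Hypothetical enclosing decorator; only the inner wrapper appears in the source.
from functools import wraps

def auth_params(*keys):
    def decorator(func):
        @wraps(func)
        async def auth_param(request, *args, **kwargs):
            ...  # body as above: parse params, require `keys`, then await `func`
        return auth_param
    return decorator
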
async def save_session(request, response):
    if request.path == '/operate/login' and request['session'].get('user', None):
        await app.session_interface.save(request, response)
        response.cookies['novel_sid']['expires'] = datetime.datetime.now() + datetime.timedelta(days=30)
    elif request.path == '/register':
        try:
            response.cookies['reg_index'] = str(request['session']['index'][0])
        except KeyError as e:
            LOGGER.error(e)

async def save_session(request, response):
    # after each request save the session,
    # pass the response to set client cookies
    # await app.session_interface.save(request, response)
    if request.path == '/operate/login' and request['session'].get('user', None):
        await app.session_interface.save(request, response)
        import datetime
        response.cookies['lhz_sid']['expires'] = datetime.datetime.now() + datetime.timedelta(days=30)
    elif request.path == '/register':
        try:
            response.cookies['reg_index'] = str(request['session']['index'][0])
        except KeyError as e:
            LOGGER.error(e)

async def add_session_to_request(request):
    print(request.headers)
    # host = request.headers.get('host', None)
    user_agent = request.headers.get('user-agent', None)
    if user_agent:
        user_ip = request.headers.get('X-Forwarded-For')
        LOGGER.info('user ip is: {}'.format(user_ip))
        if user_ip in CONFIG.FORBITDDEN:
            return html('<h3>The site is under maintenance...</h3>')
        if CONFIG.WEBSITE['IS_RUNNING']:
            await app.session_interface.open(request)
        else:
            return html('<h3>The site is under maintenance...</h3>')
    else:
        return html('<h3>The site is under maintenance...</h3>')

async def owllook_add_book(request):
    """
    Add a book to the user's bookshelf.
    :param request:
    :return:
        : -1  session expired, user needs to log in again
        :  0  failed to add the book
        :  1  book added successfully
    """
    user = request['session'].get('user', None)
    data = parse_qs(str(request.body, encoding='utf-8'))
    novels_name = data.get('novels_name', '')
    chapter_url = data.get('chapter_url', '')
    last_read_url = data.get('last_read_url', '')
    if user and novels_name and chapter_url:
        url = "/chapter?url={chapter_url}&novels_name={novels_name}".format(
            chapter_url=chapter_url[0], novels_name=novels_name[0])
        time = get_time()
        try:
            motor_db = motor_base.get_db()
            res = await motor_db.user_message.update_one(
                {'user': user},
                {'$set': {'last_update_time': time}},
                upsert=True)
            if res:
                await motor_db.user_message.update_one(
                    {'user': user, 'books_url.book_url': {'$ne': url}},
                    {'$push': {
                        'books_url': {
                            'book_url': url,
                            'add_time': time,
                            'last_read_url': unquote(last_read_url[0])
                        }
                    }})
                LOGGER.info('Book added to bookshelf')
                return json({'status': 1})
        except Exception as e:
            LOGGER.exception(e)
            return json({'status': 0})
    else:
        return json({'status': -1})

def requests_target_fetch(url):
    """
    Fetch a page synchronously with requests, decoding with the detected charset.
    :param url:
    :return:
    """
    try:
        headers = {'user-agent': get_random_user_agent()}
        response = requests.get(url=url, headers=headers, verify=False)
        response.raise_for_status()
        content = response.content
        charset = cchardet.detect(content)
        text = content.decode(charset['encoding'])
        return text
    except Exception as e:
        LOGGER.exception(e)
        return None

def init_cache(app, loop):
    LOGGER.info("Starting aiocache")
    app.config.from_object(CONFIG)
    REDIS_DICT = CONFIG.REDIS_DICT
    aiocache.settings.set_defaults(
        class_="aiocache.RedisCache",
        endpoint=REDIS_DICT.get('REDIS_ENDPOINT', 'localhost'),
        port=REDIS_DICT.get('REDIS_PORT', 6379),
        db=REDIS_DICT.get('CACHE_DB', 0),
        password=REDIS_DICT.get('REDIS_PASSWORD', None),
        loop=loop,
    )
    LOGGER.info("Starting redis pool")
    redis_session = RedisSession()
    app.get_redis_pool = redis_session.get_redis_pool
    app.session_interface = RedisSessionInterface(
        app.get_redis_pool,
        cookie_name="novel_sid",
        expiry=30 * 24 * 60 * 60)

def get_html_by_requests(url, headers, timeout=15):
    """
    Fetch a page synchronously with requests, decoding with the detected charset.
    :param url:
    :param headers:
    :param timeout:
    :return:
    """
    try:
        response = requests.get(url=url, headers=headers, verify=False, timeout=timeout)
        response.raise_for_status()
        content = response.content
        charset = cchardet.detect(content)
        text = content.decode(charset['encoding'])
        return text
    except Exception as e:
        LOGGER.exception(e)
        return None

async def owllook_add_bookmark(request):
    """
    Add a bookmark.
    :param request:
    :return:
        : -1  session expired, user needs to log in again
        :  0  failed to add bookmark
        :  1  bookmark added successfully
    """
    user = request['session'].get('user', None)
    data = parse_qs(str(request.body, encoding='utf-8'))
    bookmark_url = data.get('bookmark_url', '')
    if user and bookmark_url:
        url = unquote(bookmark_url[0])
        time = get_time()
        try:
            motor_db = motor_base.get_db()
            res = await motor_db.user_message.update_one(
                {'user': user},
                {'$set': {'last_update_time': time}},
                upsert=True)
            if res:
                await motor_db.user_message.update_one(
                    {'user': user, 'bookmarks.bookmark': {'$ne': url}},
                    {'$push': {
                        'bookmarks': {
                            'bookmark': url,
                            'add_time': time
                        }
                    }})
                LOGGER.info('Bookmark added successfully')
                return json({'status': 1})
        except Exception as e:
            LOGGER.exception(e)
            return json({'status': 0})
    else:
        return json({'status': -1})

async def data_extraction_for_web_bing(client, html):
    with async_timeout.timeout(15):
        try:
            try:
                title = html.select('h2 a')[0].get_text()
                url = html.select('h2 a')[0].get('href', None)
                netloc = urlparse(url).netloc
                url = url.replace('index.html', '').replace('Index.html', '')
                if not url or 'baidu' in url or 'baike.so.com' in url or netloc in BLACK_DOMAIN or '.html' in url:
                    return None
                is_parse = 1 if netloc in RULES.keys() else 0
                is_recommend = 1 if netloc in LATEST_RULES.keys() else 0
                # time = html.select('div.b_attribution')[0].get_text()
                # time = re.findall(r'\d+-\d+-\d+', time)
                # time = time[0] if time else ''
                timestamp = 0
                time = ''
                # if time:
                #     try:
                #         time_list = [int(i) for i in time.split('-')]
                #         years = str(time_list[0])[-4:]
                #         timestamp = arrow.get(int(years), time_list[1], time_list[2]).timestamp
                #         time = years + "-" + str(time_list[1]) + "-" + str(time_list[2])
                #     except Exception as e:
                #         LOGGER.exception(e)
                #         timestamp = 0
                return {
                    'title': title,
                    'url': url,
                    'time': time,
                    'is_parse': is_parse,
                    'is_recommend': is_recommend,
                    'timestamp': timestamp,
                    'netloc': netloc
                }
            except Exception as e:
                LOGGER.exception(e)
                url, title = None, None
                return None
        except Exception as e:
            LOGGER.exception(e)
            return None

def init_cache(app, loop):
    LOGGER.info("Starting aiocache")
    app.config.from_object(CONFIG)
    REDIS_DICT = CONFIG.REDIS_DICT
    aiocache.settings.set_defaults(
        class_="aiocache.RedisCache",
        endpoint=REDIS_DICT.get('REDIS_ENDPOINT', 'localhost'),
        port=REDIS_DICT.get('REDIS_PORT', 6379),
        db=REDIS_DICT.get('CACHE_DB', 0),
        password=REDIS_DICT.get('REDIS_PASSWORD', None),
        loop=loop,
    )
    LOGGER.info("Starting redis pool")
    redis_session = RedisSession()
    # redis instance for app
    app.get_redis_pool = redis_session.get_redis_pool
    # pass the getter method for the connection pool into the session
    app.session_interface = RedisSessionInterface(
        app.get_redis_pool,
        cookie_name="owl_sid",
        expiry=30 * 24 * 60 * 60)

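`init_cache` takes `(app, loop)`, which matches the signature of a Sanic server-lifecycle listener. A hedged sketch of how it might be registered, assuming Sanic's standard `before_server_start` event; whether the project wires it exactly this way, and the app name, are assumptions.

# Hypothetical wiring; app name and registration point are assumptions.
from sanic import Sanic

app = Sanic("owllook")
app.register_listener(init_cache, "before_server_start")
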
async def change_pass(request):
    """
    Change the user's password.
    :param request:
    :return:
        : -1  session expired, user needs to log in again
        :  0  failed to change password
        :  1  password changed successfully
        : -2  original password is wrong
    """
    user = request['session'].get('user', None)
    data = parse_qs(str(request.body, encoding='utf-8'))
    if user:
        try:
            new_pass = data.get('new_pass', None)[0]
            old_pass = data.get('old_pass', None)[0]
            motor_db = motor_base.get_db()
            user_data = await motor_db.user.find_one({'user': user})
            if user_data:
                pass_first = hashlib.md5(
                    (CONFIG.WEBSITE["TOKEN"] + old_pass).encode("utf-8")).hexdigest()
                pass_second = hashlib.md5(
                    (CONFIG.WEBSITE["TOKEN"] + new_pass).encode("utf-8")).hexdigest()
                new_password = hashlib.md5(pass_second.encode("utf-8")).hexdigest()
                password = hashlib.md5(pass_first.encode("utf-8")).hexdigest()
                if password == user_data.get('password'):
                    await motor_db.user.update_one(
                        {'user': user},
                        {'$set': {'password': new_password}})
                    LOGGER.info('Password changed successfully')
                    return json({'status': 1})
                else:
                    return json({'status': -2})
        except Exception as e:
            LOGGER.exception(e)
            return json({'status': 0})
    else:
        return json({'status': -1})

async def fetch(client, url, novels_name):
    with async_timeout.timeout(20):
        try:
            headers = {
                'user-agent': await get_random_user_agent(),
                'referer': "https://www.bing.com/"
            }
            print(headers)
            params = {'q': novels_name, 'ensearch': 0}
            async with client.get(url, params=params, headers=headers) as response:
                assert response.status == 200
                LOGGER.info('Task url: {}'.format(response.url))
                try:
                    text = await response.text()
                except:
                    text = await response.read()
                return text
        except Exception as e:
            LOGGER.exception(e)
            return None

def call(self, inst):
    # don't save the model if it's not time yet
    if inst.state.epochs <= 0 or inst.state.epochs % self._save_freq != 0:
        return

    # validate parameters
    assert self._monitor in inst.state.__dict__.keys(), \
        f"Monitored quantity `{self._monitor}` is not a NetworkState attribute."
    assert self._mode in ["min", "max"]

    # don't save the model either if it doesn't meet the requirements
    if self._save_best_only is True:
        if inst._saved_state is not None:
            if self._mode == "min" and inst.state.__dict__[self._monitor] >= inst._saved_state.__dict__[self._monitor]:
                LOGGER.info(
                    f"Skipping saving. `{self._monitor}` current >= best : "
                    f"[{inst.state.__dict__[self._monitor]:.4f}] >= [{inst._saved_state.__dict__[self._monitor]:.4f}]")
                return
            elif self._mode == "max" and inst.state.__dict__[self._monitor] <= inst._saved_state.__dict__[self._monitor]:
                LOGGER.info(
                    f"Skipping saving. `{self._monitor}` current <= best : "
                    f"[{inst.state.__dict__[self._monitor]:.4f}] <= [{inst._saved_state.__dict__[self._monitor]:.4f}]")
                return

    # checks passed, save the model
    LOGGER.info(f"Saving state after {inst.state.epochs} epochs.")
    inst.save_state(self._appendix)

def call(self, inst):
    # validate parameters
    assert self._monitor in inst.state.__dict__.keys(), \
        f"Monitored quantity `{self._monitor}` is not a NetworkState attribute."
    assert self._mode in ["min", "max"]

    if self._reference_value is None:
        self._reference_value = inst.state.__dict__[self._monitor]

    if (self._mode == "min" and inst.state.__dict__[self._monitor] > self._reference_value + self._min_delta) or \
            (self._mode == "max" and inst.state.__dict__[self._monitor] < self._reference_value - self._min_delta):
        self._current_patience += 1
    else:
        self._reference_value = inst.state.__dict__[self._monitor]
        self._current_patience = 0

    if self._current_patience >= self._patience:
        LOGGER.info(
            f"`{self._monitor}` did not {'increase' if self._mode == 'max' else 'decrease'} for {self._patience} epochs.")
        # check whether there is still room for reduction
        if self._learning_rate > self._min_learning_rate + 1e-10:
            self._learning_rate *= self._factor
            self._current_patience = 0
            self._reference_value = inst.state.__dict__[self._monitor]
            LOGGER.info(f"Reducing learning rate to {self._learning_rate}")
        else:
            LOGGER.info("Learning rate is already at minimum value.")

    return self._learning_rate

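Both `call` methods take the network instance (`inst`), inspect its `state`, and either save it or return an adjusted learning rate, so they are presumably driven once per epoch from a training loop. A minimal sketch of such a loop hook, under that assumption; the callback list, optimizer handling, and function name are illustrative only.

# Hypothetical per-epoch callback hook; names outside `call`/`state` are assumptions.
def run_callbacks(network, callbacks, optimizer):
    for callback in callbacks:
        result = callback.call(network)
        # the learning-rate callback returns the (possibly reduced) rate
        if result is not None:
            optimizer.learning_rate = result
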
def _parse_predict_optionals(x: tf.Tensor, args, kwargs) -> Tuple[int, int]:
    """Helper function for parsing the optional arguments given to the `predict` function.

    :return: The determined upsampling shape as a (height, width) tuple.
    """
    default_upsampling_factor = 2
    assert len(x.shape) == 4, "`x` should be a (batch, height, width, depth) shaped array."
    x_size = (x.shape[1], x.shape[2])
    # the default size
    size = (int(x_size[0] * default_upsampling_factor), int(x_size[1] * default_upsampling_factor))

    params = [] + list(args) + list(kwargs.values())
    if len(params) == 0:
        # use the default upsampling factor
        LOGGER.info(f"Predicting using the default upsampling factor of {default_upsampling_factor}.")
    elif len(params) == 1:
        if type(params[0]) == int or type(params[0]) == float:
            LOGGER.info(f"Predicting using the supplied upsampling factor of {params[0]}.")
            size = (int(x_size[0] * params[0]), int(x_size[1] * params[0]))
        elif type(params[0]) == tuple:
            assert 2 <= len(params[0]) <= 4, \
                f"Desired output size dim should be between 2 and 4, got {len(params[0])}"
            size = (params[0][1], params[0][2]) if len(params[0]) == 4 else (params[0][0], params[0][1])
            LOGGER.info(f"Predicting using the supplied size parameter {size}.")
        else:
            raise TypeError(
                "The optional input parameter type did not match any of the acceptable types (int, float, tuple).")
    else:
        raise ValueError("Found more than 1 optional input parameter.")
    return size

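A quick illustration of the three calling conventions the parser accepts, assuming it is reachable as a module-level helper; the tensor shape is arbitrary.

# Usage sketch; input shape chosen only for illustration.
import tensorflow as tf

x = tf.zeros([1, 32, 48, 3])                      # (batch, height, width, depth)
_parse_predict_optionals(x, (), {})               # -> (64, 96), default factor of 2
_parse_predict_optionals(x, (3,), {})             # -> (96, 144), explicit factor
_parse_predict_optionals(x, ((128, 128),), {})    # -> (128, 128), explicit output size
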
def save_state(self, appendix: str = ""):
    """Save the current model.

    `appendix` can contain any valid string that will be appended to the
    default save directory name when saving the model. For example: if
    `appendix` is `model_1` and the network type is `PreUpsamplingNetwork`,
    then the model checkpoints will be saved under the
    "./checkpoints/preupsamplingnetwork_model_1" directory.

    Arguments:
        appendix: string to append to the directory name (default: "").
    """
    dir_name = str.lower(self.__class__.__name__) + appendix

    # make sure the save directory exists
    checkpoint_dir_path = ROOT_PATH.joinpath(Network._CHECKPOINTS_DIR)
    if not checkpoint_dir_path.exists() or not checkpoint_dir_path.is_dir():
        LOGGER.warning(f"Checkpoints directory {checkpoint_dir_path} does not exist. Creating it.")
        checkpoint_dir_path.mkdir(parents=False, exist_ok=False)

    # save the layer's weights and optimizer state in the TensorFlow SavedModel format
    model_dir_path = checkpoint_dir_path.joinpath(dir_name)
    if not model_dir_path.exists() or not model_dir_path.is_dir():
        LOGGER.warning(f"Model directory {model_dir_path} does not exist. Creating it.")
        model_dir_path.mkdir(parents=False, exist_ok=False)
    self.model.save_weights(filepath=str(model_dir_path.joinpath("weights")), overwrite=True, save_format="tf")

    # save the state of the model
    state_file_path = model_dir_path.joinpath("state.dat")
    with open(str(state_file_path), "wb") as f:
        pickle.dump(self.state, f)

    # update the saved state status
    self._saved_state = copy.deepcopy(self._current_state)
    LOGGER.info("Saved state.")

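A hedged usage sketch of the save/load pair, assuming a concrete `Network` subclass named `PreUpsamplingNetwork` as in the docstring; the constructor call and the appendix value are assumptions.

# Hypothetical usage; constructor arguments are assumptions.
net = PreUpsamplingNetwork()
net.save_state(appendix="_v1")       # writes ./checkpoints/preupsamplingnetwork_v1/

restored = PreUpsamplingNetwork()
restored.load_state(appendix="_v1")  # reloads the weights and the pickled NetworkState
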
async def fetch_url(self, url, params, headers):
    """
    Shared fetch helper.
    :param url:
    :param params:
    :param headers:
    :return:
    """
    with async_timeout.timeout(15):
        try:
            async with aiohttp.ClientSession() as client:
                async with client.get(url, params=params, headers=headers, ssl=False) as response:
                    assert response.status == 200
                    LOGGER.info('Task url: {}'.format(response.url))
                    try:
                        text = await response.text()
                    except:
                        text = await response.read()
                    return text
        except Exception as e:
            LOGGER.exception(e)
            return None

async def target_fetch(url, headers, timeout=15):
    """
    :param url: target url
    :return: text
    """
    with async_timeout.timeout(timeout):
        try:
            async with aiohttp.ClientSession() as client:
                async with client.get(url, headers=headers) as response:
                    assert response.status == 200
                    LOGGER.info('Task url: {}'.format(response.url))
                    try:
                        text = await response.text()
                    except:
                        try:
                            text = await response.read()
                        except aiohttp.ServerDisconnectedError as e:
                            LOGGER.exception(e)
                            text = None
                    return text
        except Exception as e:
            LOGGER.exception(str(e))
            return None

async def target_fetch(client, url):
    """
    :param client: aiohttp client
    :param url: target url
    :return: text
    """
    with async_timeout.timeout(30):
        try:
            headers = {'user-agent': get_random_user_agent()}
            async with client.get(url, headers=headers) as response:
                assert response.status == 200
                LOGGER.info('Task url: {}'.format(response.url))
                try:
                    text = await response.text()
                except:
                    try:
                        text = await response.read()
                    except aiohttp.ServerDisconnectedError as e:
                        LOGGER.exception(e)
                        text = None
                return text
        except Exception as e:
            LOGGER.exception(e)
            return None

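A minimal sketch of driving one of these fetchers from an event loop, assuming the `target_fetch(client, url)` variant above; the URL and the demo coroutine name are placeholders.

# Hypothetical driver; example URL is a placeholder.
import asyncio
import aiohttp

async def demo():
    async with aiohttp.ClientSession() as client:
        text = await target_fetch(client, "https://example.com/")
        print(text[:200] if text else "fetch failed")

asyncio.run(demo())
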
async def author_notification(request):
    """
    Subscribe to new-book notifications for an author.
    :param request:
    :return:
        : -1  session expired, user needs to log in again
        :  2  no information found for this author
        :  3  author already added
        :  4  limit of tracked authors reached
        :  0  operation failed
        :  1  operation succeeded
    """
    user = request['session'].get('user', None)
    user_data = parse_qs(str(request.body, encoding='utf-8'))
    if user:
        try:
            motor_db = motor_base.get_db()
            all_authors = await motor_db.user_message.find_one(
                {'user': user},
                {'author_latest': 1, '_id': 0})
            count = len(all_authors.get('author_latest', []))
            if count == CONFIG.WEBSITE.get("AUTHOR_LATEST_COUNT", 5):
                return json({'status': 4})
            author_name = user_data.get('author_name', None)[0]
            data = []
            author_cursor = motor_db.all_books.find(
                {'author': author_name},
                {'name': 1, 'url': 1, '_id': 0})
            async for document in author_cursor:
                data.append(document)
            if data:
                time = get_time()
                res = await motor_db.user_message.update_one(
                    {'user': user},
                    {'$set': {'last_update_time': time}},
                    upsert=True)
                is_exist = await motor_db.user_message.find_one({
                    'user': user,
                    'author_latest.author_name': author_name
                })
                if is_exist:
                    return json({'status': 3})
                if res:
                    await motor_db.user_message.update_one(
                        {'user': user, 'author_latest.author_name': {'$ne': author_name}},
                        {'$push': {
                            'author_latest': {
                                'author_name': author_name,
                                'add_time': time
                            }
                        }})
                    is_author_exist = await motor_db.author_message.find_one({'name': author_name})
                    if not is_author_exist:
                        author_data = {
                            "author_name": author_name,
                            "nums": len(data),
                            "updated_time": get_time(),
                        }
                        await motor_db.author_message.save(author_data)
                    LOGGER.info('Author added successfully')
                    return json({'status': 1})
                else:
                    return json({'status': 2})
            else:
                return json({'status': 2})
        except Exception as e:
            LOGGER.exception(e)
            return json({'status': 0})
    else:
        return json({'status': -1})