def get_detail(self, detail_page_url: str):
    url = self._base_url + detail_page_url
    logger.info(f"Parsing detail page: {url}")
    resp = self.get(url)
    if resp.status_code != 200:
        logger.warning(f"Response error: {resp.status_code} {url}")
        return AnimeDetailInfo()

    body = self.xpath(resp.text, '//div[@class="fire l"]')[0]
    anime_detail = AnimeDetailInfo()
    anime_detail.title = body.xpath("./div/h1/text()")[0]
    anime_detail.category = " ".join(body.xpath('.//div[@class="sinfo"]/span[3]/a/text()'))
    anime_detail.desc = body.xpath('.//div[@class="info"]/text()')[0].replace("\r\n", "").strip()
    anime_detail.cover_url = body.xpath('.//div[@class="thumb l"]/img/@src')[0]

    vc = VideoCollection()
    vc.name = "播放列表"
    video_blocks = body.xpath('.//div[@class="movurl"]//li')
    for block in video_blocks:
        video = Video()
        video.name = block.xpath("./a/text()")[0]
        video.raw_url = block.xpath("./a/@href")[0]  # e.g. '/v/3849-162.html'
        video.handler = "YHDMVideoHandler"
        vc.append(video)
    anime_detail.append(vc)
    return anime_detail
def main():
    application = make_app()
    http_server = HTTPServer(application)
    http_server.listen(LISTEN_PORT)
    logger.info('server is running....!!')
    print('server is running....!!')
    tornado.ioloop.IOLoop.current().start()
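# Hedged sketch of the pieces main() above relies on: a make_app() factory returning a
# tornado.web.Application and a LISTEN_PORT constant. The handler, route and port below
# are illustrative assumptions, not the project's real values.
import tornado.ioloop
import tornado.web
from tornado.httpserver import HTTPServer

LISTEN_PORT = 8888  # hypothetical default port

class PingHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("pong")

def make_app() -> tornado.web.Application:
    return tornado.web.Application([(r"/ping", PingHandler)])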
def post(self, request, *args, **kwargs):
    """
    post handles the endpoint that allows a group admin to create
    and send an invite to a user to join his group
    endpoint : api/groups/:id/members/invite/
    """
    group = self.get_object()
    to_be_member = request.data.get("username")
    try:
        user = User.objects.get(username=to_be_member)
    except User.DoesNotExist:
        return Response(
            data={"message": "The user you wanted to add as a member doesn't exist"},
            status=status.HTTP_400_BAD_REQUEST)

    membership = Membership.objects.create(group=group, user=user, status='I')
    encrypted_details = encrypt(str(membership.id), SECRET_KEY)
    url = "{}/api/groups/members/invite/accept/{}/".format(DOMAIN, encrypted_details.decode())
    group_invite = (
        "<h3>Dear {}, you have been invited by {} to join the esusu group {} on "
        "<a href='{}'>Esusu confam</a>!</h3><br />"
        "Please kindly click on the url to accept and activate your membership. "
        "May the force be with you!"
    ).format(user.username, group.admin.username, group.name, url)
    logger.info("invite details: {}".format({
        "invite url": url,
        "user": user.username,
        "email": user.email
    }))
    mail_subject = "Esusu Group Invite"
    text_heading = "Hello, Dear"
    mail_resp = send_mail(mail_subject, user, text_heading, group_invite)
    logger.debug("email response : {}".format(mail_resp.json()))
    return Response(data={"url": url}, status=status.HTTP_201_CREATED)
def send_conf_tar():
    logger.info("{} 下载客户端主程序".format(request.remote_addr))
    file_dir = os.getcwd() + "/ThirPath/suricata/"
    api.web.make_tar()
    response = make_response(
        send_from_directory(file_dir, "marioips.tar.gz", as_attachment=True))
    return response
def enable_engine(self, engine: str) -> bool:
    """Enable an anime search engine."""
    if engine in self._engines:
        logger.info(f"Anime Engine {engine} has already loaded")
        return True  # already enabled
    self._load_engine(engine)  # dynamically load the engine
    return GLOBAL_CONFIG.enable_engine(engine)  # update the config file
def response_change(self, request, obj):
    if "_activate-user" in request.POST:
        usr = User.objects.filter(id=obj.user.id).first()
        if usr:
            if usr.is_active is False:
                email_context = {'frontend_url': settings.FRONTEND_URL}
                send_notification(
                    'Your account has been approved',
                    [usr.email],
                    render_to_string(
                        'email/registration/outside-email-success.html',
                        email_context),
                    f'Approved account successfully - {usr.username}')
                usr.is_active = True
                usr.save()
            else:
                logger.info(f'User {usr.username} was already active')
        else:
            logger.info(f'There is no User record with the ID: {obj.user.id}')
        return HttpResponseRedirect(".")
    return super().response_change(request, obj)
def search(self, keyword: str):
    logger.info(f"Searching for: {keyword}")
    ret = []
    resp = self.get(self._search_api, params={
        "kw": keyword,
        "per_page": 100,
        "page": 1
    })  # take the first 100 results
    if resp.status_code != 200 or resp.json()["code"] != 1:
        logger.warning(f"Response error: {resp.status_code} {self._search_api}")
        return ret

    data = resp.json()
    anime_meta_list = data.get("data").get("data") if data else []
    for meta in anime_meta_list:
        anime = AnimeMetaInfo()
        anime.title = meta["name"]
        anime.cover_url = meta["pic"]
        anime.category = meta["type"]
        anime.detail_page_url = str(meta["vid"])
        anime.desc = meta["label"]
        ret.append(anime)
    return ret
def evaluate_line(self, line):
    if isinstance(line, str):
        raw_test_comments = [line]
    elif isinstance(line, (list, tuple)):
        raw_test_comments = [question for question, entity_dict in line]
    else:
        raise ValueError('【格式错误】question 字段值应该为字符串或列表!')

    processed_test_comments = []
    for comment in raw_test_comments:
        processed_test_comments.append(preprocess_text(comment))
    test_sequences = self.tokenizer.texts_to_sequences(processed_test_comments)
    final_test_data = pad_sequences(test_sequences, maxlen=150)
    rets = self.model.predict(x=final_test_data, batch_size=1)

    ret = []
    for pred, question in zip(rets, raw_test_comments):
        # argsort returns the indices that would sort the array in ascending order
        sort_index = pred.argsort()
        pred_ret = [{
            'question': question,
            'intent': self.id2label[_index],
            'score': float(pred[_index])
        } for _index in sort_index[-5:][::-1]]
        ret.append(pred_ret)
        # label = self.id2label[pred.argmax()]
        # score = float(pred.max())
        # ret.append([{'question': question, 'intent': label, 'score': score}])
    logger.info("问句`{}`实体识别的结果:{}".format(line, ret))
    return ret
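# Standalone illustration of the top-5 selection used above: argsort() sorts ascending,
# so the last five indices, reversed, pick out the five highest-scoring labels
# (the scores here are made up).
import numpy as np

pred = np.array([0.05, 0.40, 0.10, 0.30, 0.15])
top5 = pred.argsort()[-5:][::-1]
print(top5)  # -> [1 3 4 2 0], i.e. best score first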
def parse_one_page(self, keyword: str, page: int):
    """Parse all anime summaries on one result page."""
    logger.info(f"Searching for: {keyword}, page: {page}")
    resp = self.get(self._search_api, params={'query': keyword, 'page': page})
    if resp.status_code != 200 or "0纪录" in resp.text:
        logger.info(f"No search result for {keyword}")
        return [], ""

    ret = []
    anime_meta_list = self.xpath(
        resp.text,
        '//div[@class="cell blockdiff2"] | //div[@class="cell blockdiff"]')
    for meta in anime_meta_list:
        anime = AnimeMetaInfo()
        anime.title = meta.xpath('.//a[@class="cell_imform_name"]/text()')[0]
        anime.cover_url = "https:" + meta.xpath('.//a[@class="cell_poster"]/img/@src')[0]
        # relative path, otherwise the expression would match against the whole document
        anime.category = meta.xpath('.//div[@class="cell_imform_kv"][7]/span[2]/text()')[0]
        anime.detail_page_url = meta.xpath("a/@href")[0]  # e.g. "/detail/20170172"
        ret.append(anime)
    return ret, resp.text
def evetomongo(eve_file=None):
    if 'mongo_url' not in config:
        start()
    if clean_status['clean_db'] == "waiting process":
        logger.info("数据库清理程序触发")
        clean_status['clean_db'] = "running"
        del_stats()
        clean_mongo()
        clean_status['clean_db'] = "ready"

    myclient = pymongo.MongoClient(config['mongo_url'], connect=False)
    mydb = myclient["mariodb"]
    num = 0
    if eve_file:
        eve_lines = eve_file
        for eve_line in eve_lines:
            try:
                eve_line = json.loads(eve_line.decode('utf-8'))
            except AttributeError:  # already a str, no decoding needed
                eve_line = json.loads(eve_line)
            mycol = mydb[eve_line["event_type"]]
            mydict = eve_line
            try:
                mydict['client_ip'] = config['client_ip']
            except KeyError:
                pass
            mycol.insert_one(mydict)
            num += 1
    running_status['total'] += num
    logger.info("新增数据{}条".format(num))
    myclient.close()
    api.analyze.analyze_suricata_alert()
    return num
def enable_danmaku(self, danmaku: str) -> bool:
    """Enable a danmaku (bullet-comment) search engine."""
    if danmaku in self._danmaku_engine:
        logger.info(f"Danmaku Engine {danmaku} has already loaded")
        return True  # already enabled
    self._load_danmaku(danmaku)  # dynamically load the engine
    return GLOBAL_CONFIG.enable_danmaku(danmaku)  # update the config file
def get_clientrules():
    logger.info("{} 更新了防御策略".format(request.remote_addr))
    with open('./ThirPath/suricata/marioips/rules/local.rules', 'r') as clientrules:
        rulesfile = clientrules.read()
    return rulesfile
def disable_engine(self, engine: str) -> bool:
    """Disable an anime search engine, engine: api.engines.xxx"""
    if engine not in self._engines:
        logger.info(f"Anime Engine {engine} has already disabled")
        return True  # it was never enabled
    self._engines.pop(engine)
    logger.info(f"Disabled Anime Engine: {engine}")
    return GLOBAL_CONFIG.disable_engine(engine)
def upload_evefile():
    if request.method == 'POST':
        config['client_ip'] = request.remote_addr
        filename = request.files['clientfile'].filename
        file = request.files['clientfile'].readlines()
        evetomongo(eve_file=file)
        logger.info("{} 提交了日志 {}".format(request.remote_addr, filename))
        return "upload eve.json success"
def set_proxy_headers(self):
    # Some videos are hosted on the Chaoxing (超星学习通) net-disk; they must be
    # requested with the client UA, otherwise the server returns 403.
    real_url = self._get_real_url()
    if "chaoxing.com" in real_url:
        logger.info(f"Set proxy headers for {real_url}")
        return {
            "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 8.1.0; 16th Build/OPM1.171019.026)"
        }
def enable_danmaku(self, danmaku: str) -> bool:
    """Enable a danmaku engine."""
    if danmaku in self.get_all_danmaku():
        logger.info(f"Danmaku {danmaku} enabled")
        self._dict["danmaku"][danmaku] = True
        self._save()
        return True
    return False
def enable_engine(self, engine: str) -> bool:
    """Enable an engine."""
    if engine in self.get_all_engines():
        logger.info(f"Engine {engine} enabled")
        self._dict["engines"][engine] = True
        self._save()
        return True
    return False
def __init__(self):
    self._file = os.path.dirname(__file__) + os.sep + "config.json"
    self._dict = {}
    logger.info(f"Loading config from {self._file}")
    with open(self._file, "r") as f:
        self._dict = json.load(f)
def del_stats():
    myclient = pymongo.MongoClient(config['mongo_url'], connect=False)
    mydb = myclient["mariodb"]
    for colname in ["stats", "flow"]:
        mycol = mydb[colname]
        mycol.delete_many({})
    logger.info("流量日志状态更新")
    myclient.close()
def disable_danmaku(self, danmaku: str) -> bool:
    """Disable a danmaku search engine, engine: api.danmaku.xxx"""
    if danmaku not in self._danmaku_engine:
        logger.info(f"Danmaku Engine {danmaku} has already disabled")
        return True  # it was never enabled
    self._danmaku_engine.pop(danmaku)
    logger.info(f"Disabled Danmaku Engine: {danmaku}")
    return GLOBAL_CONFIG.disable_danmaku(danmaku)
def change_client_rules():
    if request.method == 'POST':
        change_info = json.loads(request.get_data().decode('utf-8'))
        change_id = change_info['id']
        change_type = change_info['type']
        api.web.change_rules(change_id, change_type)
        logger.info("{} 更改防御方式为 {}".format(change_id, change_type))
        return "{} changed to type {}".format(change_id, change_type)
def get_readonly_users(self):
    readonly = []
    for u in User.objects.filter(is_staff=True).filter(is_superuser=False):
        if u.groups.filter(name__icontains='read only'):
            readonly.append(u)
            logger.info("Read only: " + u.get_full_name())
    return readonly
def push_table_to_index(self, model):
    query = model.objects.all()
    data = [self.convert_for_bulk(s) for s in list(query)]
    created, errors = bulk(client=ES_CLIENT, actions=data)
    logger.info('Created %s records' % created)
    if len(errors):
        logger.error('Produced the following errors:')
        logger.error('[%s]' % ', '.join(map(str, errors)))
def update_es_index(instance):
    ''' Updates the Elasticsearch index from the record instance '''
    if ES_CLIENT and ES_PAGE_NAME:  # to make sure it doesn't run for tests
        updated, errors = bulk(client=ES_CLIENT, actions=[construct_es_data(instance)])
        logger.info(f'Updated {updated} records')
        log_errors(errors)
def check_ingest_issues(self, having_ingest_issue):
    # having_ingest_issue = CronJob.objects.raw('SELECT * FROM api_cronjob WHERE status=' + str(CronJobStatus.ERRONEOUS.value))
    ingest_issue_id = having_ingest_issue[0].id if len(having_ingest_issue) > 0 else -1
    ingestor_name = having_ingest_issue[0].name if len(having_ingest_issue) > 0 else ''
    if len(having_ingest_issue) > 0:
        # Would be better in an ENV variable:
        send_notification(
            'API monitor – ingest issues!',
            ['*****@*****.**'],
            'Ingest issue(s) occurred, one of them is ' + ingestor_name +
            ', via CronJob log record id: https://' + settings.BASE_URL +
            '/api/cronjob/' + str(ingest_issue_id) + '. Please fix it ASAP.')
        logger.info('Ingest issue occurred, e.g. by ' + ingestor_name +
                    ', via CronJob log record id: ' + str(ingest_issue_id) +
                    ', notification sent to IM team')
def _get_stream_with_range(self):
    """Fetch the video stream starting at the Range offset requested by the client."""
    byte_start = 0
    range_header = request.headers.get("Range", None)
    logger.info(f"Client header: Range={range_header}")
    if range_header:
        result = re.search(r"(\d+)-\d*", range_header)
        if result:
            byte_start = int(result.group(1))  # stream start offset requested by the client
    return self._get_stream_from_server(byte_start)
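# Minimal, self-contained sketch of the Range-header parsing above: only the first
# start byte is extracted, and a missing or malformed header falls back to offset 0.
import re

def parse_range_start(range_header):
    if not range_header:
        return 0
    match = re.search(r"(\d+)-\d*", range_header)
    return int(match.group(1)) if match else 0

assert parse_range_start("bytes=1024-") == 1024
assert parse_range_start(None) == 0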
def mongo_connect():
    if connect("mongodb://*****:*****@0.0.0.0:27017/") != False:
        config['mongo_url'] = "mongodb://*****:*****@0.0.0.0:27017/"
        logger.info("数据库连接成功")
    elif connect("mongodb://*****:*****@34.64.197.155:27017/") != False:
        config['mongo_url'] = "mongodb://*****:*****@34.64.197.155:27017/"
        logger.info("数据库连接成功")
    else:
        logger.error("请检查数据库设置")
        sys.exit()
def evaluate_line(self, line: str):
    inputs = input_from_line(line, self.char_to_id)
    lengths, scores = self.model.run_step(self.sess, False, inputs)
    batch_paths = self.model.decode(scores, lengths, self.trans)
    tags = [self.id_to_tag[idx] for idx in batch_paths[0]]
    # print('tags={}'.format(tags))
    ret = self.result_to_json(inputs[0][0], tags)
    # result = self.model.evaluate_line(self.sess, input_from_line(line, self.char_to_id), self.id_to_tag)
    logger.info("问句`{}`实体识别的结果:{}".format(line, ret))
    return ret
def _load_danmaku(self, danmaku: str):
    """Load a danmaku engine according to the config.
    @danmaku: api.danmaku.xxx
    """
    module = import_module(danmaku)
    for _, cls in getmembers(module, isclass):
        if issubclass(cls, DanmakuEngine) and cls != DanmakuEngine:
            # 'api.danmaku.xxx': <class 'api.danmaku.xx.xxEngine'>
            self._danmaku_engine.setdefault(cls.__module__, cls)
            logger.info(f"Loading DanmakuEngine {cls.__module__}.{cls.__name__}: {cls}")
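# Generic sketch of the plugin-discovery pattern used by _load_danmaku: import a module
# by its dotted path and collect every subclass of a given base class. Names here are
# illustrative; the real engines live under api.danmaku / api.engines.
from importlib import import_module
from inspect import getmembers, isclass

def discover_subclasses(module_path: str, base_cls: type) -> dict:
    module = import_module(module_path)
    return {
        cls.__module__: cls
        for _, cls in getmembers(module, isclass)
        if issubclass(cls, base_cls) and cls is not base_cls
    }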
def push_table_to_index(self, model):
    if model.__name__ == 'Event':
        query = model.objects.filter(parent_event__isnull=True)
    elif model.__name__ == 'Country':
        query = model.objects.filter(in_search=True)
    else:
        query = model.objects.all()
    data = [construct_es_data(s, is_create=True) for s in list(query)]
    created, errors = bulk(client=ES_CLIENT, actions=data)
    logger.info('Created %s records' % created)
    if len(errors):
        logger.error('Produced the following errors:')
        logger.error('[%s]' % ', '.join(map(str, errors)))
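# Hedged sketch of the action dicts that convert_for_bulk / construct_es_data are assumed
# to produce for elasticsearch.helpers.bulk. The index name and fields are illustrative
# only, not the project's real mapping.
def example_bulk_action(record_id, title):
    return {
        "_op_type": "create",
        "_index": "pages",        # hypothetical index name
        "_id": record_id,
        "_source": {"title": title},
    }

# bulk(client=ES_CLIENT, actions=[example_bulk_action(1, "Sample page")])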