def networkDisk_export(level):
    """Export network-disk records of the given suspect level to a CSV file.

    Args:
        level: suspect-level filter; 0 exports all records, matching the
            behaviour of the sibling ``*_export`` helpers (previously this
            function filtered on ``suspect_level == 0`` instead).

    Returns:
        A Flask file-download response on success, or a JSON 500 payload
        when the CSV file was not written.
    """
    import datetime

    headers = [
        '序号', '标题', '文件列表', '分享人', '资料来源', '链接地址',
        '疑似程度', '归属地', '归属公司', '备注', 'url_md5'
    ]
    # CONSISTENCY FIX: level 0 means "no filter" in wechat_export /
    # sourceCode_export / app_export / doc_export; mirror that here.
    if level == 0:
        networkDisks = NetworkDisk.query.all()
    else:
        networkDisks = NetworkDisk.query.filter(
            NetworkDisk.suspect_level == level).all()
    path = os.path.join(
        config['dev'].CACHE_TMP,
        f'networkDisk-{level_conversion(level, True)}-'
        f'{datetime.datetime.now().strftime("%Y%m%d-%H%M%S")}.csv')
    with open(path, 'w', newline='', encoding='utf_8_sig') as f:
        f_csv = csv.writer(f)
        f_csv.writerow(headers)
        f_csv.writerows([[
            one.id, one.name, one.file_list, one.share_people, one.source,
            one.url, level_conversion(one.suspect_level), one.location,
            one.company, one.remark, one.url_md5
        ] for one in networkDisks])
    if os.path.isfile(path):
        return send_file(path, as_attachment=True)
    else:
        return jsonify({'code': '500', 'msg': '文件不存在:' + path})
def wechat_export(level):
    """Write WeChat account records to a CSV in the cache directory and
    send it back as a download.

    Args:
        level: suspect-level filter; 0 exports every record.

    Returns:
        A file-download response, or a JSON 500 payload if the CSV is
        missing after the write.
    """
    import csv
    import datetime

    columns = [
        '序号', '公众号', '微信认证', '最近文章', '更新时间', '链接地址',
        '疑似级别', '归属地', '归属公司', '备注', 'url_md5'
    ]
    query = Wechat.query if level == 0 else Wechat.query.filter(
        Wechat.suspect_level == level)
    records = query.all()
    stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    csv_path = os.path.join(
        config['dev'].CACHE_TMP,
        f'wechat-{level_conversion(level, True)}-{stamp}.csv')
    with open(csv_path, 'w', newline='', encoding='utf_8_sig') as handle:
        writer = csv.writer(handle)
        writer.writerow(columns)
        for rec in records:
            writer.writerow([
                rec.id, rec.name, rec.authentication, rec.recent_article,
                rec.recent_article_update_time, rec.url,
                level_conversion(rec.suspect_level), rec.location,
                rec.company, rec.remark, rec.url_md5
            ])
    if not os.path.isfile(csv_path):
        return jsonify({'code': '500', 'msg': '文件不存在:' + csv_path})
    return send_file(csv_path, as_attachment=True)
def sourceCode_export(level):
    """Write source-code records at the given suspect level (0 means all)
    to a CSV file and return it as a download.

    Returns a JSON 500 payload instead if the file did not get written.
    """
    import csv
    import datetime

    header_row = [
        '序号', '标题', '源码介绍', '作者', '源码来源', '链接地址',
        '疑似程度', '归属地', '归属公司', '备注', 'url_md5'
    ]
    records = (SourceCode.query.all() if level == 0 else
               SourceCode.query.filter(
                   SourceCode.suspect_level == level).all())
    now_tag = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    csv_path = os.path.join(
        config['dev'].CACHE_TMP,
        f'sourceCode-{level_conversion(level, True)}-{now_tag}.csv')
    with open(csv_path, 'w', newline='', encoding='utf_8_sig') as handle:
        writer = csv.writer(handle)
        writer.writerow(header_row)
        for rec in records:
            writer.writerow([
                rec.id, rec.name, rec.introduction, rec.author, rec.source,
                rec.url, level_conversion(rec.suspect_level), rec.location,
                rec.company, rec.remark, rec.url_md5
            ])
    if not os.path.isfile(csv_path):
        return jsonify({'code': '500', 'msg': '文件不存在:' + csv_path})
    return send_file(csv_path, as_attachment=True)
def app_export(level):
    """Write mobile-app records at the given suspect level (0 means all)
    to a CSV file and return it as a download.

    Returns a JSON 500 payload instead if the file did not get written.
    """
    import csv
    import datetime

    header_row = [
        '序号', '名称', 'app摘要', 'app信息', '开发者', '链接地址',
        '疑似程度', '归属地', '归属公司', '备注', '来源', 'url_md5'
    ]
    apps = (MobileApp.query.all() if level == 0 else
            MobileApp.query.filter(MobileApp.suspect_level == level).all())
    stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    csv_path = os.path.join(
        config['dev'].CACHE_TMP,
        f'app-{level_conversion(level, True)}-{stamp}.csv')
    with open(csv_path, 'w', newline='', encoding='utf_8_sig') as handle:
        writer = csv.writer(handle)
        writer.writerow(header_row)
        for app in apps:
            # Download count, size and version are packed into the single
            # "app信息" column.
            info = f"{app.download_count}; app大小: {app.app_size}; 版本:{app.version}"
            writer.writerow([
                app.id, app.name, app.introduction, info, app.developer,
                app.url, level_conversion(app.suspect_level), app.location,
                app.company, app.remark, app.source, app.url_md5
            ])
    if not os.path.isfile(csv_path):
        return jsonify({'code': '500', 'msg': '文件不存在:' + csv_path})
    return send_file(csv_path, as_attachment=True)
def doc_export(level):
    """Export document records of the given suspect level to a CSV file.

    Args:
        level: suspect-level filter; 0 exports every document.

    Returns:
        A Flask file-download response on success, or a JSON 500 payload
        when the CSV file was not written.
    """
    headers = [
        '序号', '标题', '文章摘要', '上传人', '上传时间', '文档信息', '链接地址',
        '疑似程度', '归属地', '归属公司', '备注', '来源', 'url_md5'
    ]
    if level == 0:
        document = Document.query.all()
    else:
        document = Document.query.filter(Document.suspect_level == level).all()
    # CONSISTENCY FIX: use the same "%Y%m%d-%H%M%S" timestamp as every
    # other *_export helper (this one previously omitted the dash).
    path = os.path.join(
        config['dev'].CACHE_TMP,
        f'document-{level_conversion(level, True)}-'
        f'{datetime.datetime.now().strftime("%Y%m%d-%H%M%S")}.csv')
    with open(path, 'w', newline='', encoding='utf_8_sig') as f:
        f_csv = csv.writer(f)
        f_csv.writerow(headers)
        f_csv.writerows([[
            one.id, one.name, one.introduction, one.upload_people,
            one.upload_time, one.doc_remark, one.url,
            level_conversion(one.suspect_level), one.location, one.company,
            one.remark, one.source, one.url_md5
        ] for one in document])
    if os.path.isfile(path):
        return send_file(path, as_attachment=True)
    else:
        return jsonify({'code': '500', 'msg': '文件不存在:' + path})
def website_export(level):
    """Export website records of the given suspect level to a CSV file.

    Args:
        level: suspect-level filter; 0 exports every website record,
            matching the sibling ``*_export`` helpers (previously this
            function filtered on ``suspect_level == 0`` instead —
            presumably 0 is the "all" sentinel, as elsewhere in the file).

    Returns:
        A Flask file-download response on success, or a JSON 500 payload
        when the CSV file was not written.
    """
    headers = [
        '序号', '标题', '链接地址', '来源', '疑似级别', 'ip归属地址',
        '归属公司', '备注', 'url_md5'
    ]
    # CONSISTENCY FIX: level 0 means "no filter", as in the other exports.
    if level == 0:
        websites = Website.query.all()
    else:
        websites = Website.query.filter(Website.suspect_level == level).all()
    path = os.path.join(
        config['dev'].CACHE_TMP,
        f'websites-{level_conversion(level, True)}-'
        f'{datetime.datetime.now().strftime("%Y%m%d-%H%M%S")}.csv')
    with open(path, 'w', newline='', encoding='utf_8_sig') as f:
        f_csv = csv.writer(f)
        f_csv.writerow(headers)
        f_csv.writerows([[
            w.id, w.title, w.url, w.source, level_conversion(w.suspect_level),
            w.location, w.company, w.remark, w.url_md5
        ] for w in websites])
    if os.path.isfile(path):
        return send_file(path, as_attachment=True)
    else:
        return jsonify({'code': '500', 'msg': '文件不存在:' + path})
def export_result():
    """Export every record matching the requested keyword groups as a zip
    of six CSV files (websites, documents, network disks, source code,
    mobile apps, WeChat accounts).

    Query string:
        groups: comma-separated keyword-group names; missing/empty
            yields a JSON ``{"code": 500}`` response.

    Returns:
        A ``keywords.zip`` download built in memory, or the JSON 500
        payload described above.
    """
    groups = request.args.get("groups", '')
    if not groups:
        return jsonify(code=500)

    # One OR-filter list per exported model, built from every keyword that
    # belongs to any of the requested groups.
    website_param = []
    document_param = []
    networkDisk_param = []
    sourceCode_param = []
    mobileApp_param = []
    wechat_param = []
    for group in groups.split(','):
        print('group:', group)
        keywords = Keyword.query.filter(Keyword.group == group).all()
        for keyword in keywords:
            website_param.append(Website.keyword == keyword.keyword)
            document_param.append(Document.keyword == keyword.keyword)
            # BUGFIX: the four filters below previously compared
            # Document.keyword, so they never restricted their own tables.
            # NOTE(review): assumes these models expose .keyword like
            # Website/Document — confirm against the model definitions.
            networkDisk_param.append(NetworkDisk.keyword == keyword.keyword)
            sourceCode_param.append(SourceCode.keyword == keyword.keyword)
            mobileApp_param.append(MobileApp.keyword == keyword.keyword)
            wechat_param.append(Wechat.keyword == keyword.keyword)

    websites = Website.query.filter(or_(*website_param)).all()
    documents = Document.query.filter(or_(*document_param)).all()
    networkDisks = NetworkDisk.query.filter(or_(*networkDisk_param)).all()
    sourceCodes = SourceCode.query.filter(or_(*sourceCode_param)).all()
    mobileApps = MobileApp.query.filter(or_(*mobileApp_param)).all()
    wechats = Wechat.query.filter(or_(*wechat_param)).all()

    def _write_csv(prefix, headers, rows):
        # Write one timestamped CSV into the cache dir; return its path.
        path = os.path.join(
            CACHE_TMP,
            f'{prefix}-{datetime.now().strftime("%Y%m%d-%H%M%S")}.csv')
        with open(path, 'w', newline='', encoding='utf_8_sig') as f:
            f_csv = csv.writer(f)
            f_csv.writerow(headers)
            f_csv.writerows(rows)
        return path

    website_path = _write_csv(
        'websites',
        ['序号', '标题', '链接地址', '来源', '疑似级别', 'ip归属地址',
         '归属公司', '备注', 'url_md5', '关键字'],
        [[w.id, w.title, w.url, w.source, level_conversion(w.suspect_level),
          w.location, w.company, w.remark, w.url_md5, w.keyword]
         for w in websites])
    document_path = _write_csv(
        'document',
        ['序号', '标题', '文章摘要', '上传人', '上传时间', '文档信息', '链接地址',
         '疑似程度', '归属地', '归属公司', '备注', '来源', 'url_md5'],
        [[one.id, one.name, one.introduction, one.upload_people,
          one.upload_time, one.doc_remark, one.url,
          level_conversion(one.suspect_level), one.location, one.company,
          one.remark, one.source, one.url_md5] for one in documents])
    networkDisk_path = _write_csv(
        'networkDisk',
        ['序号', '标题', '文件列表', '分享人', '资料来源', '链接地址',
         '疑似程度', '归属地', '归属公司', '备注', 'url_md5'],
        [[one.id, one.name, one.file_list, one.share_people, one.source,
          one.url, level_conversion(one.suspect_level), one.location,
          one.company, one.remark, one.url_md5] for one in networkDisks])
    sourceCode_path = _write_csv(
        'sourceCode',
        ['序号', '标题', '源码介绍', '作者', '源码来源', '链接地址',
         '疑似程度', '归属地', '归属公司', '备注', 'url_md5'],
        [[one.id, one.name, one.introduction, one.author, one.source,
          one.url, level_conversion(one.suspect_level), one.location,
          one.company, one.remark, one.url_md5] for one in sourceCodes])
    app_path = _write_csv(
        'app',
        ['序号', '名称', 'app摘要', 'app信息', '开发者', '链接地址',
         '疑似程度', '归属地', '归属公司', '备注', '来源', 'url_md5'],
        [[one.id, one.name, one.introduction,
          f"{one.download_count}; app大小: {one.app_size}; 版本:{one.version}",
          one.developer, one.url, level_conversion(one.suspect_level),
          one.location, one.company, one.remark, one.source, one.url_md5]
         for one in mobileApps])
    wechat_path = _write_csv(
        'wechat',
        ['序号', '公众号', '微信认证', '最近文章', '更新时间', '链接地址',
         '疑似级别', '归属地', '归属公司', '备注', 'url_md5'],
        [[one.id, one.name, one.authentication, one.recent_article,
          one.recent_article_update_time, one.url,
          level_conversion(one.suspect_level), one.location, one.company,
          one.remark, one.url_md5] for one in wechats])

    file_list = [
        website_path, document_path, networkDisk_path, sourceCode_path,
        app_path, wechat_path
    ]
    memory_file = BytesIO()
    with zipfile.ZipFile(memory_file, "w", zipfile.ZIP_DEFLATED) as zf:
        for _file in file_list:
            with open(_file, 'rb') as fp:
                # BUGFIX: archive entries previously embedded the full
                # cache path; store only the base file name.
                zf.writestr(os.path.basename(_file), fp.read())
    memory_file.seek(0)
    # NOTE(review): attachment_filename was renamed download_name in
    # Flask 2.0; kept as-is to match the Flask version this file targets.
    return send_file(memory_file,
                     attachment_filename='keywords.zip',
                     as_attachment=True)