def getReg_Exp_sample():
    """Return a one-row regular-expression import template as an xlsx download.

    The 'sample' form field is read for parity with the other *_sample
    endpoints but is otherwise unused.
    """
    # form.get() returns None when the field is absent; the old code then
    # crashed in None.strip() with AttributeError (HTTP 500).
    sample = (request.form.get('sample') or '').strip()
    result = OrderedDict()
    result['Regular_Exp_Name'] = list()
    result['Regular_Exp'] = list()
    result['Description'] = list()
    # One example row so the downloaded template shows the expected format.
    result['Regular_Exp_Name'].append("Network")
    result['Regular_Exp'].append("32")
    result['Description'].append("35")
    return excel.make_response_from_dict(result, "xlsx", file_name="export_data")
def getCrawlingBatchExcel_sample():
    """Return a one-row CSV template for batch crawling registration.

    The 'sample' form field is read for parity with the other *_sample
    endpoints but is otherwise unused.
    """
    # form.get() returns None when the field is absent; guard the .strip()
    # call that previously raised AttributeError.
    sample = (request.form.get('sample') or '').strip()
    result = OrderedDict()
    result['URL'] = list()
    result['Depth'] = list()
    result['Comment'] = list()
    result['Register_from'] = list()
    # One example row so the downloaded template shows the expected format.
    result['URL'].append("http://www.daum.net")
    result['Depth'].append("1")
    result['Comment'].append(u"User Input")
    result['Register_from'].append(u"International")
    return excel.make_response_from_dict(result, "csv", file_name="export_data")
def getDnaAnalysisResultExcel():
    """Export DNA link-analysis results from Elasticsearch as an xlsx download.

    src_ip/dst_ip are appended first so they become the leading columns;
    every other _source field then gets its own column.
    """
    columns = list()
    es = Elasticsearch([{'host': app.config['ELASTICSEARCH_URI'],
                         'port': app.config['ELASTICSEARCH_PORT']}])
    doc = dna_result.getAnalysisResult(request)
    res = es.search(index="gsp-link_result", doc_type="dna_result", body=doc,
                    request_timeout=60)
    esResult = res['hits']['hits']
    # Elasticsearch caps result windows at 10000 documents by default.
    total = min(int(res['hits']['total']), 10000)
    result = OrderedDict()
    result['src_ip'] = list()
    result['dst_ip'] = list()
    for _item in esResult:
        result['src_ip'].append(_item['_source']['src_ip'])
        result['dst_ip'].append(_item['_source']['dst_ip'])
    for _item in esResult:
        # items() works on both py2 and py3 (iteritems() is py2-only).
        for _key, _val in _item['_source'].items():
            if _key == "@timestamp" or _key == 'src_ip' or _key == "dst_ip":
                continue
            # `in` replaces dict.has_key(), which was removed in py3.
            if _key not in result:
                result[_key] = list()
            if isinstance(_val, dict):
                # Nested objects only carry their 'sector' value in the export.
                _val = _val['sector']
            result[_key].append(_val)
    return excel.make_response_from_dict(result, "xlsx", file_name="export_data")
def getWhiteListExcel():
    """Export white-list IP rules matching the current keyword search as xlsx."""
    page_size = int(request.form.get('perpage'))
    offset = int(request.form.get('start'))
    search_term = request.form.get('search_keyword').strip()

    query = Rules_White_IP.query
    if search_term != "":
        query = query.filter(Rules_White_IP.ip.like('%' + search_term + '%'))

    page_no = int(offset / page_size) + 1
    page = query.order_by(Rules_White_IP.cre_dt.desc()).paginate(
        page_no, page_size, error_out=False)

    result = OrderedDict()
    for column in ('날짜', 'ip', 'mask', 'description', 'type'):
        result[column] = list()
    for row in page.items:
        result['날짜'].append(row.cre_dt)
        result['ip'].append(row.ip)
        result['mask'].append(row.mask)
        result['description'].append(row.description)
        result['type'].append(row.type)
    return excel.make_response_from_dict(result, "xlsx", file_name="export_data")
def getCustomer_Category_ListExcel_url():
    """Export customer categories matching the current search as xlsx."""
    page_size = int(request.form.get('perpage'))
    offset = int(request.form.get('start'))
    term = request.form.get('search_keyword').strip()
    term_type = request.form.get('search_keyword_type')

    query = Integrated_Customer_Category.query
    if term != "" and term_type == "Customer_Category":
        query = query.filter(
            Integrated_Customer_Category.Customer_Category.like('%' + term + '%'))
    if term != "" and term_type == "Description":
        query = query.filter(
            Integrated_Customer_Category.Description.like('%' + term + '%'))

    page_no = int(offset / page_size) + 1
    # The page size equals the full row count, so one page holds everything.
    total_rows = query.count()
    page = query.order_by(Integrated_Customer_Category.seq.asc()).paginate(
        page_no, total_rows, error_out=False)

    result = OrderedDict()
    result['Customer_Category'] = list()
    result['Description'] = list()
    for row in page.items:
        result['Customer_Category'].append(row.Customer_Category)
        result['Description'].append(row.Description)
        # result['수정일'].append(row.mod_dt)
    return excel.make_response_from_dict(result, "xlsx", file_name="export_data")
def getElement_Customer_sample():
    """Return a one-row customer-element import template as an xlsx download.

    The 'sample' form field is read for parity with the other *_sample
    endpoints but is otherwise unused.
    """
    # form.get() returns None when the field is absent; guard the .strip()
    # call that previously raised AttributeError.
    sample = (request.form.get('sample') or '').strip()
    result = OrderedDict()
    result['Customer_Category'] = list()
    result['Customer_Name'] = list()
    result['IP_Address'] = list()
    result['Branch'] = list()
    result['Description'] = list()
    # One example row so the downloaded template shows the expected format.
    result['Customer_Category'].append("Network")
    result['Customer_Name'].append("name")
    result['IP_Address'].append("Address")
    result['Branch'].append("Branch")
    result['Description'].append("32")
    return excel.make_response_from_dict(result, "xlsx", file_name="export_data")
def download_workflow_csv(workflow_id):
    """Stream a workflow's data dict as a CSV download named <workflow_id>.csv.

    404s when the workflow does not exist.
    """
    workflow = WorkflowModel.query.get_or_404(workflow_id)
    excel.init_excel(app)
    file_type = "csv"
    download_name = workflow_id + "." + file_type
    return excel.make_response_from_dict(workflow.data,
                                         file_type=file_type,
                                         file_name=download_name)
def getIpCollectionExcel():
    """Export IP-collection rules matching the keyword search as xlsx."""
    page_size = int(request.form.get('perpage'))
    offset = int(request.form.get('start'))
    term = request.form.get('search_keyword').strip()

    query = Rules_IP_Collection.query
    if term != "":
        query = query.filter(Rules_IP_Collection.ip.like('%' + term + '%'))

    page_no = int(offset / page_size) + 1
    # The page size equals the total row count, so one page covers everything.
    total_rows = query.count()
    page = query.order_by(Rules_IP_Collection.cre_dt.desc()).paginate(
        page_no, total_rows, error_out=False)

    result = OrderedDict()
    for column in ('날짜', 'ip', 'mask', 'detection_point', 'etc',
                   'description', 'use_yn'):
        result[column] = list()
    for row in page.items:
        result['날짜'].append(row.cre_dt)
        result['ip'].append(row.ip)
        result['mask'].append(row.mask)
        result['detection_point'].append(row.detection_point)
        result['etc'].append(row.etc)
        result['description'].append(row.description)
        result['use_yn'].append(row.use_yn)
    return excel.make_response_from_dict(result, "xlsx", file_name="export_data")
def exp_excel(e_id):
    """Return a patent record as an .xls download.

    :param e_id: id of the record to export (currently unused — the record
        below is a fixed placeholder for the eventual database lookup).
    :return: Excel file response.
    """
    record = {
        "专利名称": "邱哥钓鱼",
        "专利号": "1024",
        "专利类别": "发明",
        "日期": "2019.02.28",
    }
    return excel.make_response_from_dict(
        record,
        column_names=list(record.keys()),
        file_type='xls',
        file_name='专利.xls',
    )
def upload_array(struct_type):
    """Parse the uploaded 'file' field per struct_type and echo it back as .xls.

    Unknown struct_type values fall through and return None (Flask then
    responds with a 500), matching the original behavior.
    """
    if struct_type == "array":
        data = request.get_array(field_name='file')
        return excel.make_response_from_array(data, 'xls',
                                              sheet_name='test_array')
    if struct_type == "dict":
        data = request.get_dict(field_name='file')
        return excel.make_response_from_dict(data, 'xls',
                                             sheet_name='test_array')
    if struct_type == "records":
        data = request.get_records(field_name='file')
        return excel.make_response_from_records(data, 'xls',
                                                sheet_name='test_array')
    if struct_type == "book":
        return excel.make_response(request.get_book(field_name='file'), 'xls')
    if struct_type == "book_dict":
        data = request.get_book_dict(field_name='file')
        return excel.make_response_from_book_dict(data, 'xls')
def getWhiteListExcel_sample():
    """Return a one-row white-list import template as an xlsx download.

    The 'sample' form field is read for parity with the other *_sample
    endpoints but is otherwise unused.
    """
    # form.get() returns None when the field is absent; guard the .strip()
    # call that previously raised AttributeError.
    sample = (request.form.get('sample') or '').strip()
    result = OrderedDict()
    result['type'] = list()
    result['ip'] = list()
    result['mask'] = list()
    result['url'] = list()
    result['description'] = list()
    # One example row so the downloaded template shows the expected format.
    result['type'].append("Network")
    result['ip'].append("111.112.33.54")
    result['mask'].append("32")
    result['url'].append("http://www.daum.net")
    result['description'].append("자동 등록")
    return excel.make_response_from_dict(result, "xlsx", file_name="export_data")
def get_excel_list():
    """Export non-deleted DNA elements matching the keyword search as xlsx."""
    page_size = int(request.form.get('perpage'))
    offset = int(request.form.get('start'))
    term = request.form.get('search_keyword').strip()

    query = DNA_Element.query.filter(DNA_Element.del_yn == 'N')
    if term != "":
        query = query.filter(DNA_Element.dna_name.like('%' + term + '%'))

    page_no = int(offset / page_size) + 1
    page = query.order_by(DNA_Element.cre_dt.desc()).paginate(
        page_no, page_size, error_out=False)

    result = OrderedDict()
    result['DNA 명'] = list()
    result['섹터목록'] = list()
    result['등록일'] = list()
    result['수정일'] = list()
    for row in page.items:
        result['DNA 명'].append(row.dna_name)
        result['섹터목록'].append(row.operate_function)
        result['등록일'].append(row.cre_dt)
        result['수정일'].append(row.mod_dt)
    return excel.make_response_from_dict(result, "xlsx", file_name="export_data")
def getMaliciousFileLogListExcel():
    """Export malicious-file analysis logs from Elasticsearch as an xlsx download.

    Counts the matching documents first, then re-queries with that count as
    the window size so every matching row lands in the export.
    """
    logList = None
    # start_idx = int(request.form['start'])
    MaxWindowValue = int(request.form['max_window_value'])
    es = Elasticsearch([{
        'host': app.config['ELASTICSEARCH_URI'],
        'port': app.config['ELASTICSEARCH_PORT']
    }])
    query_type = "analysis_info"
    documentCount = getMaliciousCodeLogDataCount(request, query_type, per_pageP=None)
    resCountDoc = es.count(index="gsp*" + "", doc_type="analysis_info",
                           body=documentCount, request_timeout=600)
    # Use the count as the search window size to fetch everything at once.
    doc = getMaliciousCodeLogData(request, query_type, resCountDoc['count'])
    res = es.search(index="gsp*" + "", doc_type="analysis_info", body=doc)
    esResult = res['hits']['hits']
    resultList = []
    # C&C type list
    # type_list = CommonCode.query.filter_by(GroupCode='RULE_CNC_TYPE').all()
    for row in esResult:
        resultRow = dict()
        resultRow['_id'] = row['_id']
        resultRow['_index'] = row['_index']
        resultRow['_source'] = row['_source']
        resultList.append(resultRow)
    result = OrderedDict()
    result['Date'] = list()
    result['fullurl'] = list()
    result['url'] = list()
    result['subpath'] = list()
    result['uri'] = list()
    result['IP'] = list()
    result['Country'] = list()
    result['File'] = list()
    result['MD5'] = list()
    result['Detection_source'] = list()
    result['Detection_name'] = list()
    result['URI_analysis_result'] = list()
    result['File_analysis_result'] = list()
    result['Comments'] = list()
    for _item in resultList:
        result['Date'].append(_item['_source']['kor_timestamp'])
        result['fullurl'].append(_item['_source']['url'])
        # Split the URL into scheme://host, the path, and the path minus
        # its trailing file name (used for the 'uri' column below).
        urlparsed = urlparse(_item['_source']['url'])
        url_fore_part = str(urlparsed.scheme) + "://" + str(urlparsed.netloc)
        pureUrl = urlparsed.path
        fileName = os.path.basename(pureUrl)
        urlInMiddle = list()
        if fileName != "":
            # rsplit on the last occurrence so only the trailing file name
            # is removed; urlInMiddle[0] is the directory part of the path.
            urlInMiddle = pureUrl.rsplit(fileName, 1)
        else:
            urlInMiddle.append("")
        result['url'].append(url_fore_part)
        result['subpath'].append(urlparsed.path)
        result['uri'].append(url_fore_part + "/" + urlInMiddle[0])
        result['IP'].append(_item['_source']['dst_ip'])
        result['Country'].append(_item['_source']['dst_country_code1'])
        result['File'].append(_item['_source']['file_name'])
        result['MD5'].append(_item['_source']['md5'])
        result['Detection_source'].append(_item['_source']['data_type'])
        result['Detection_name'].append(_item['_source']['malware_comment'])
        # Detection ratios rendered as "detected/total".
        URI_Analysis_Result = str(
            _item['_source']['detect_cnt_url']) + "/" + str(
                _item['_source']['total_cnt_url'])
        File_Analysis_Result = str(
            _item['_source']['detect_cnt_file']) + "/" + str(
                _item['_source']['total_cnt_file'])
        result['URI_analysis_result'].append(URI_Analysis_Result)
        result['File_analysis_result'].append(File_Analysis_Result)
        # Null comments export as an empty string rather than "None".
        result['Comments'].append(
            _item['_source']['comment']
            if _item['_source']['comment'] is not None else "")
    return excel.make_response_from_dict(result, "xlsx", file_name="export_data")
def getAnalysisResultExcel():
    """Export link-DNA analysis results as CSV, ordered by packet dispersion.

    Multi-dimensional metric arrays are first aggregated (day-of-week or
    hour-of-day sums, per the requested chart type) and then flattened so
    each metric fits a spreadsheet column.
    """
    columns = list()
    es = Elasticsearch([{
        'host': app.config['ELASTICSEARCH_URI'],
        'port': app.config['ELASTICSEARCH_PORT']
    }])
    doc = Link_Element_List.getAnalysisResult(request)
    res = es.search(index="gsp-link_dna", doc_type="link_dna", body=doc,
                    request_timeout=60)
    esResult = res['hits']['hits']
    # Elasticsearch caps result windows at 10000 documents by default.
    total = min(int(res['hits']['total']), 10000)

    search_chart_type = request.form.get("search_chart_type")
    search_type = request.form.get("search_type")
    colList = request.form.getlist('requestColumnList')

    # Pass 1: reduce multi-dimensional arrays to per-day-of-week or
    # per-hour-of-day statistics. Values are mutated in place; no keys are
    # added, so iterating the dict while assigning is safe.
    for _row in esResult:
        for _key in _row['_source']:
            if _key not in colList:
                continue
            if np.array(_row['_source'][_key]).ndim > 1:
                if search_chart_type == "DOW":
                    _row['_source'][_key] = getDayOfWeekSum(
                        _row['_source'][_key])
                elif search_chart_type == "HOD":
                    _row['_source'][_key] = getHoursOfDay(
                        _row['_source'][_key])

    # Pass 2: flatten any remaining array-valued fields.
    for _row in esResult:
        for _key in _row['_source']:
            if _key not in colList:
                continue
            if np.array(_row['_source'][_key]).ndim > 0:
                _row['_source'][_key] = getFlattenList(_row['_source'][_key])

    result = {}
    # BUG FIX: the original called sorted(esResult, ...) and discarded the
    # returned list, so the export order never matched the view it was meant
    # to mirror. Sort in place instead.
    esResult.sort(key=lambda k: k['_source']['pkts-dispersion'], reverse=True)
    for _item in esResult:
        # items() works on both py2 and py3 (iteritems() is py2-only);
        # `in` replaces dict.has_key(), which was removed in py3.
        for _key, _val in _item['_source'].items():
            if _key not in result:
                result[_key] = list()
            if isinstance(_val, list):
                # Copy so later mutation of the source list can't leak in;
                # None entries export as empty strings.
                temp_val = NoneToEmptyString(_val)[:]
            else:
                temp_val = _val
            result[_key].append(temp_val)
    return excel.make_response_from_dict(result, "csv", file_name="export_data")
def getFileLogListExcel():
    """Export file-analysis logs from Elasticsearch as an xlsx download.

    Counts the matching documents first, then re-queries with that count as
    the window size so every matching row lands in the export.
    """
    logList = None
    # region search option
    #per_page = int(request.form['perpage'])
    start_idx = int(request.form['start'])
    # endregion
    es = Elasticsearch([{'host': app.config['ELASTICSEARCH_URI'],
                         'port': app.config['ELASTICSEARCH_PORT']}])
    query_type = "file"
    documentCount = getCncLogQueryCountFileAnalysisStatus(request, query_type)
    resCountDoc = es.count(index="gsp*" + "", doc_type="analysis_results",
                           body=documentCount)
    doc = getCncLogQuery(request, query_type, resCountDoc['count'])
    res = es.search(index="gsp*" + "", doc_type="analysis_results", body=doc)
    esResult = res['hits']['hits']
    total = int(res['hits']['total'])
    resultList = []
    # search_keyword_type = request.form['search_keyword_type']
    # search_keyword = request.form['search_keyword']
    # sortedESresult = list()
    # if search_keyword and search_keyword_type:
    #     for aitem in esResult:
    #         if search_keyword in aitem['_source'][search_keyword_type]:
    #             sortedESresult.append(aitem)
    #
    # if sortedESresult:
    #     esResult = sortedESresult
    ## Search feature has been added in Python implementation##
    # total = len(esResult)
    # C&C type list
    type_list = CommonCode.query.filter_by(GroupCode='RULE_CNC_TYPE').all()
    for row in esResult:
        resultRow = dict()
        times = parser.parse(row['_source']['@timestamp'])
        resultRow['timestamp'] = times.strftime("%Y.%m.%d %H:%M:%S")
        resultRow['data_from'] = row['_source'].get('data_from')
        resultRow['file_name'] = row['_source'].get('file_name')
        resultRow['md5'] = row['_source'].get('md5')
        # "zombie zero" devices report the URI/category under different
        # field names than the other analysis sources.
        if row['_source'].get('data_from') == "zombie zero":
            resultRow['collect_uri'] = row['_source'].get('uri')
            resultRow['category'] = row['_source'].get('malware_info')
        else:
            resultRow['collect_uri'] = row['_source'].get('collect_uri')
            resultRow['category'] = row['_source'].get('category')
        # resultRow['collect_seed_uri'] = row['_source'].get('collect_seed_uri')
        # security_level at or above the configured threshold means malicious
        # ('악성'); below means clean ('정상').
        if int(row['_source']['security_level']) >= int(app.config['ANALYSIS_RESULTS_SECURITY_LEVEL_MIN']):
            resultRow['result'] = '악성'
        else:
            resultRow['result'] = '정상'
        resultList.append(resultRow)
    result = OrderedDict()
    result['날짜'] = list()
    result['파일명'] = list()
    result['해시 값'] = list()
    result['다운로드 경로'] = list()
    # NOTE(review): '시드 URI 경로' is declared but never appended below, so
    # that column exports empty — presumably left behind when the
    # collect_seed_uri field above was commented out; confirm intended.
    result['시드 URI 경로'] = list()
    result['카테고리'] = list()
    result['분석 장비'] = list()
    result['분석 결과'] = list()
    for _item in resultList:
        result['카테고리'].append(_item['category'])
        result['다운로드 경로'].append(_item['collect_uri'])
        result['해시 값'].append(_item['md5'])
        result['파일명'].append(_item['file_name'])
        result['분석 장비'].append(_item['data_from'])
        result['날짜'].append(_item['timestamp'])
        result['분석 결과'].append(_item['result'])
    return excel.make_response_from_dict(result, "xlsx", file_name="export_data")
def getCncLogListExcel():
    """Export C&C URI analysis logs from Elasticsearch as an xlsx download."""
    logList = None
    es = Elasticsearch([{'host': app.config['ELASTICSEARCH_URI'],
                         'port': app.config['ELASTICSEARCH_PORT']}])
    query_type = "uri"
    # Count first, then search with the count as the window size so every
    # matching row lands in the export.
    countDoc = getCncLogQueryURLCount(request, query_type)
    documentAmount = es.count(index="gsp*" + "", doc_type="analysis_results",
                              body=countDoc)
    doc = getCncLogQueryURL(request, query_type, documentAmount['count'])
    res = es.search(index="gsp*" + "", doc_type="analysis_results", body=doc)
    hits = res['hits']['hits']
    total = int(res['hits']['total'])
    resultList = []
    sortedESresult = list()
    # Read (and thereby require) the search fields; filtering itself happens
    # inside the ES query builders above.
    search_keyword = request.form['search_keyword']
    search_keyword_type = request.form['search_keyword_type']
    # C&C type list (kept for parity with the list view).
    type_list = CommonCode.query.filter_by(GroupCode='RULE_CNC_TYPE').all()
    for hit in hits:
        src = hit['_source']
        row = dict()
        row['timestamp'] = parser.parse(
            src['@timestamp']).strftime("%Y.%m.%d %H:%M:%S")
        uri_type_name = src['data_from']
        # security_level at or above the threshold means malicious ('악성').
        threshold = int(app.config['ANALYSIS_RESULTS_SECURITY_LEVEL_MIN'])
        row['result'] = '악성' if int(src['security_level']) >= threshold else '정상'
        row['data_from'] = src.get('data_from')
        row['category'] = src.get('category')
        row['uri'] = src.get('uri')
        resultList.append(row)
    result = OrderedDict()
    for column in ('날짜', '분석장비', '분석 URI', '카테고리', '분석 결과'):
        result[column] = list()
    for row in resultList:
        result['날짜'].append(row['timestamp'])
        result['분석장비'].append(row['data_from'])
        result['분석 URI'].append(row['uri'])
        result['카테고리'].append(row['category'])
        result['분석 결과'].append(row['result'])
    return excel.make_response_from_dict(result, "xlsx", file_name="export_data")
def getMalListExcel():
    """Export malicious-info rows in the selected date range as xlsx."""
    per_page = int(request.form.get('perpage'))
    # draw = int(request.form.get('draw'))
    start_idx = int(request.form.get('start'))
    # search_source = request.form.get('search_source')
    keyword = request.form.get('search_keyword').strip()
    # search_type = request.form.get('search_keyword_type').strip()
    # typeStr = list()
    # typeStr = [str(item.EName) for item in CommonCode.query.filter_by(GroupCode='an_data_from').all() if
    #            item.Code == search_type]
    str_dt = ""
    end_dt = ""
    search_keyword_type = str(request.form['search_keyword_type'])
    if request.form.get(
            'timeFrom') is not None and request.form.get('timeFrom') != "":
        str_dt = parser.parse(request.form['timeFrom']).isoformat()
    if request.form.get('timeTo') is not None and request.form['timeTo'] != "":
        end_dt = parser.parse(request.form['timeTo']).isoformat()
    query = malicious_info.query.filter(
        malicious_info.cre_dt.between(str_dt, end_dt))
    # if search_type != '':
    #     query = query.filter_by(rule_type = search_type)
    # if search_source != '':
    #     query = query.filter_by(source = search_source)
    # if keyword != "" and not search_keyword_type or typeStr:
    #     if not typeStr:
    #         typeStr = [""]
    # query = Rules_White_IP_URL.query
    # NOTE(review): the guarding `if` lines below are commented out but their
    # filter bodies appear to still execute unconditionally, i.e. every export
    # is filtered by ip AND url AND country_code AND file_name at once (empty
    # keyword degenerates to LIKE '%%'). Looks unintended — confirm against
    # the md5/detect_info/collect_point branches further down, which kept
    # their guards.
    # if keyword != "" and search_keyword_type == "ip":
    query = query.filter(malicious_info.ip.like('%' + str(keyword) + '%'))
    # if keyword != "" and search_keyword_type == "url":
    query = query.filter(malicious_info.url.like('%' + keyword + '%'))
    # if keyword != "" and search_keyword_type == "country_code":
    query = query.filter(
        malicious_info.country_code.like('%' + keyword + '%'))
    # if keyword != "" and search_keyword_type == "file_name":
    query = query.filter(malicious_info.file_name.like('%' + keyword + '%'))
    if keyword != "" and search_keyword_type == "md5":
        query = query.filter(malicious_info.md5.like('%' + keyword + '%'))
    if keyword != "" and search_keyword_type == "detect_info":
        query = query.filter(
            malicious_info.detect_info.like('%' + keyword + '%'))
    if keyword != "" and search_keyword_type == "collect_point":
        query = query.filter(
            malicious_info.collect_point.like('%' + keyword + '%'))
    curpage = int(start_idx / per_page) + 1
    # The page size equals the total row count, so one page covers everything.
    rowCount = query.count()
    cncList = query.order_by(malicious_info.cre_dt.desc()).paginate(
        curpage, rowCount, error_out=False)
    result = OrderedDict()
    result['creation_date'] = list()
    result['url'] = list()
    result['ip'] = list()
    result['country_code'] = list()
    result['file_name'] = list()
    result['md5'] = list()
    result['detection_info'] = list()
    result['collection_point'] = list()
    result['comment'] = list()
    result['stix'] = list()
    for _item in cncList.items:
        result['creation_date'].append(_item.cre_dt)
        result['url'].append(_item.url)
        result['ip'].append(_item.ip)
        result['country_code'].append(_item.country_code)
        result['file_name'].append(_item.file_name)
        result['md5'].append(_item.md5)
        result['detection_info'].append(_item.detect_info)
        result['collection_point'].append(_item.collect_point)
        result['comment'].append(_item.comment)
        result['stix'].append(_item.stix)
        # result['category'].append(_item.category)
        # result['pattern_uri'].append(_item.pattern_uri)
        # result['analysis_device'].append(_item.analysis_device)
        # result['analysis_result'].append(_item.analysis_result)
        # result['cre_dt'].append(_item.cre_dt)
        # result['source_name'].append(_item.source)
        # result['description'].append(_item.description)
    return excel.make_response_from_dict(result, "xlsx", file_name="export_data")
def getListExcel():
    """Export C&C rules in the selected date range as an xlsx download."""
    per_page = int(request.form.get('perpage'))
    start_idx = int(request.form.get('start'))
    search_source = request.form.get('search_source')
    keyword = request.form.get('search_keyword').strip()
    search_type = request.form.get('search_type').strip()
    typeStr = list()
    # Map the device-type code to its English name; empty list when no match.
    typeStr = [
        str(item.EName)
        for item in CommonCode.query.filter_by(GroupCode='an_data_from').all()
        if item.Code == search_type
    ]
    str_dt = ""
    end_dt = ""
    search_keyword_type = str(request.form['search_keyword_type'])
    if request.form.get(
            'timeFrom') is not None and request.form.get('timeFrom') != "":
        str_dt = parser.parse(request.form['timeFrom']).isoformat()
    if request.form.get('timeTo') is not None and request.form['timeTo'] != "":
        end_dt = parser.parse(request.form['timeTo']).isoformat()
    query = Rules_CNC.query.filter(Rules_CNC.cre_dt.between(str_dt, end_dt))
    # if search_type != '':
    #     query = query.filter_by(rule_type = search_type)
    if search_source != '':
        query = query.filter_by(source=search_source)
    # NOTE(review): `and` binds tighter than `or`, so this condition reads as
    # (keyword != "" and not search_keyword_type) or typeStr — the filter also
    # fires whenever a device type matched, regardless of keyword. Confirm
    # that is the intended precedence before changing it.
    if keyword != "" and not search_keyword_type or typeStr:
        if not typeStr:
            typeStr = [""]
        query = query.filter(
            Rules_CNC.pattern_uri.like('%' + keyword + '%'),
            Rules_CNC.analysis_device.like('%' + typeStr[0] + '%'),
            Rules_CNC.cre_dt.between(str_dt, end_dt))
    curpage = int(start_idx / per_page) + 1
    # The page size equals the total row count, so one page covers everything.
    rowCount = query.count()
    cncList = query.order_by(Rules_CNC.cre_dt.desc()).paginate(curpage,
                                                               rowCount,
                                                               error_out=False)
    result = OrderedDict()
    result['category'] = list()
    result['pattern_uri'] = list()
    result['analysis_device'] = list()
    result['analysis_result'] = list()
    result['cre_dt'] = list()
    result['source_name'] = list()
    result['description'] = list()
    for _item in cncList.items:
        result['category'].append(_item.category)
        result['pattern_uri'].append(_item.pattern_uri)
        result['analysis_device'].append(_item.analysis_device)
        result['analysis_result'].append(_item.analysis_result)
        result['cre_dt'].append(_item.cre_dt)
        result['source_name'].append(_item.source)
        result['description'].append(_item.description)
    return excel.make_response_from_dict(result, "xlsx", file_name="export_data")
def download_reporte_pronostico(year=None, month=None, description=None,
                                Excel_file=True):
    """Build the monthly demand-forecast report.

    Defaults to the previous month (of the current year) and the national
    demand description. Daily forecast frames are downloaded once and cached
    on disk; the concatenated frame is returned either as an xlsx response
    (Excel_file=True) or as the raw DataFrame.
    """
    # style="default"
    import calendar as cld
    dt_now = datetime.datetime.now()
    if year is None:
        year = dt_now.year
    if month is None:
        # BUG FIX: in January the old default (dt_now.month - 1) was 0, which
        # made calendar.monthrange() raise. Roll back to December — of the
        # previous year when the year was also defaulted.
        if dt_now.month == 1:
            month = 12
            if year == dt_now.year:
                year = dt_now.year - 1
        else:
            month = dt_now.month - 1
    if description is None:
        description = "Demanda Nacional del Ecuador"
    # monthrange -> (weekday of day 1, number of days in the month).
    first_weekday, lst_day = cld.monthrange(int(year), int(month))
    datetime_ini = datetime.datetime.strptime(
        str(year) + "-" + str(month) + "-" + "1", "%Y-%m-%d")
    datetime_fin = datetime.datetime.strptime(
        str(year) + "-" + str(month) + "-" + str(lst_day), "%Y-%m-%d")
    date_range = pd.date_range(start=datetime_ini, end=datetime_fin, freq="D")
    valid_range = [datetime_ini, datetime_fin + datetime.timedelta(days=30)]

    # Daily downloads are expensive; cache the concatenated frame on disk and
    # reuse it while it is still valid.
    tmp_name = "download_reporte_pronostico_" + str(date_range[0]) + str(
        date_range[-1]) + ".pkl"
    tmp_file = tmp.retrieve_file(tmp_name, datetime_ini)
    if tmp_file is None:
        df_result = pd.DataFrame()
        for d in date_range:
            df_i = download_pronostico_file(description, str(d._date_repr),
                                            "23:30:00", Excel_file=False)
            df_result = df_result.append(df_i)
        tmp.save_variables(tmp_name, df_result, valid_range)
    else:
        df_result = tmp_file

    dict_result = df_result.to_dict('list')
    dict_result['0_Fecha'] = [str(x) for x in df_result.index]
    # Rename columns; the numeric prefixes fix the column order in the export.
    columns = ['Despacho programado', 'min', 'max', 'expected', 'real time']
    ind = [
        '1_Despacho programado', '7_Dmin estimada', '6_Dmax estimada',
        '5_Demanda esperada', '2_Demanda real'
    ]
    for ix, col in zip(ind, columns):
        dict_result[ix] = dict_result.pop(col)
    name_file = "pron_" + datetime_ini.strftime(
        "%Y-%m-%d") + "_" + datetime_fin.strftime("%Y-%m-%d")
    if Excel_file:
        return excel.make_response_from_dict(dict_result,
                                             file_type="xlsx",
                                             status=200,
                                             file_name=name_file)
    else:
        return df_result
def download_pronostico_file(description, date=None, hour=None,
                             Excel_file=True):
    """Build the intra-day demand-forecast report for one date up to `hour`.

    Looks up the HMM model/tag configured for `description`, computes the
    expected demand band, joins it with the programmed national dispatch and
    derives deviation columns. Returns an xlsx response (Excel_file=True) or
    the raw DataFrame.
    """
    # style="default"
    if date is None:
        date = datetime.datetime.now().strftime("%Y-%m-%d")
    if hour is None:
        hour = datetime.datetime.now().strftime("%H:%M:%S")
    try:
        datetime_ini = datetime.datetime.strptime(date, '%Y-%m-%d')
        datetime_fin = datetime.datetime.strptime(date + " " + hour,
                                                  '%Y-%m-%d %H:%M:%S')
    except Exception as e:
        msg = "Ingrese fecha en el siguiente formato (/yyy-mm-dd/H:M:S) Ejemplo: /2018-01-01/16:32:12"
        print(e, msg)
        # BUG FIX: `layout` was a bare (undefined) name, so this error path
        # raised NameError instead of returning the empty payload. Quote it.
        return {'graph': {}, 'layout': {}}
    df_config = pd.read_excel("./hmm_application/config.xlsx")
    df_config.set_index("description", inplace=True)
    model_name = df_config.at[description, 'model_name']
    tag_name = df_config.at[description, 'tag']
    data_name = model_name.replace("hmm_", "")
    hmm_modelPath_file = hmm_modelPath + model_name
    file_dataPath_file = file_dataPath + data_name
    str_date_ini = datetime_ini.strftime("%Y-%m-%d")
    result = hmm_ap.obtain_expected_area(
        hmm_modelPath_file, file_dataPath_file, tag_name, str_date_ini,
        datetime_fin.strftime("%Y-%m-%d %H:%M:%S"))
    df_despacho = hmm_ap.despacho_nacional_programado(str_date_ini)
    df_result = result["df_expected_area"]
    # Deviation of real demand from the programmed dispatch, absolute and %.
    df_result["3.1_Desvio Demanda "] = df_result["real time"] - df_despacho[
        'Despacho programado']
    df_error = df_result["3.1_Desvio Demanda "] / df_result["real time"]
    df_error = df_error.dropna().abs()
    df_result["3.2_Desvio Demanda (%)"] = df_error * 100
    df_result["3.2_Desvio Demanda (%)"] = df_result[
        "3.2_Desvio Demanda (%)"].round(2)
    df_result["4_"] = ""
    df_result = pd.concat([df_despacho, df_result], axis=1)
    # Keep only timestamps present in the dispatch frame.
    mask = df_result.index.isin(df_despacho.index)
    df_result = df_result[mask]
    # print(df_result.columns)
    dict_result = df_result.to_dict('list')
    dict_result['0_Fecha'] = [str(x) for x in df_result.index]
    # Rename columns; the numeric prefixes fix the column order in the export.
    columns = ['Despacho programado', 'min', 'max', 'expected', 'real time']
    ind = [
        '1_Despacho programado', '7_Dmin estimada', '6_Dmax estimada',
        '5_Demanda esperada', '2_Demanda real'
    ]
    for ix, col in zip(ind, columns):
        dict_result[ix] = dict_result.pop(col)
    name_file = "pron_" + str_date_ini
    if Excel_file:
        return excel.make_response_from_dict(dict_result,
                                             file_type="xlsx",
                                             status=200,
                                             file_name=name_file)
    else:
        return df_result
def getWhiteListExcel_url():
    """Export white-list IP/URL rules matching the current search, sorted by
    the requested column, as a CSV download."""
    per_page = int(request.form.get('perpage'))
    start_idx = int(request.form.get('start'))
    keyword = request.form.get('search_keyword').strip()
    search_keyword_type = request.form.get('search_keyword_type')
    columnIndex = request.form.get('columnIndex')
    sort_style = request.form.get('sort_style')

    query = Rules_White_IP_URL.query
    if keyword != "" and search_keyword_type == "ip":
        query = query.filter(Rules_White_IP_URL.ip.like('%' + keyword + '%'))
    if keyword != "" and search_keyword_type == "mask":
        # BUG FIX: the "mask" branch filtered on the ip column (copy-paste),
        # so mask searches silently searched IPs instead.
        query = query.filter(Rules_White_IP_URL.mask.like('%' + keyword + '%'))
    if keyword != "" and search_keyword_type == "url":
        query = query.filter(Rules_White_IP_URL.url.like('%' + keyword + '%'))
    if keyword != "" and search_keyword_type == "type":
        query = query.filter(Rules_White_IP_URL.type.like('%' + keyword + '%'))
    if keyword != "" and search_keyword_type == "description":
        query = query.filter(
            Rules_White_IP_URL.description.like('%' + keyword + '%'))
    if keyword != "" and search_keyword_type == "cre_dt":
        query = query.filter(
            Rules_White_IP_URL.cre_dt.like('%' + keyword + '%'))
    if keyword != "" and search_keyword_type == "mod_dt":
        query = query.filter(
            Rules_White_IP_URL.mod_dt.like('%' + keyword + '%'))

    curpage = int(start_idx / per_page) + 1
    # The page size equals the total row count, so one page covers everything.
    rowCount = query.count()
    per_page = rowCount

    # Map each sortable column name to its model attribute; this replaces the
    # original 60-line if/elif ladder. Unknown columns fall back to cre_dt
    # descending, exactly as before.
    sortable = {
        'url': Rules_White_IP_URL.url,
        'ip': Rules_White_IP_URL.ip,
        'mask': Rules_White_IP_URL.mask,
        'type': Rules_White_IP_URL.type,
        'description': Rules_White_IP_URL.description,
        'cre_dt': Rules_White_IP_URL.cre_dt,
        'mod_dt': Rules_White_IP_URL.mod_dt,
    }
    if columnIndex in sortable:
        col = sortable[columnIndex]
        ordering = col.desc() if sort_style == 'desc' else col.asc()
    else:
        ordering = Rules_White_IP_URL.cre_dt.desc()
    cncList = query.order_by(ordering).paginate(curpage, per_page,
                                                error_out=False)

    result = OrderedDict()
    result['registerDate'] = list()
    result['type'] = list()
    result['ip'] = list()
    result['mask'] = list()
    result['description'] = list()
    result['url'] = list()
    result['modifyDate'] = list()
    for _item in cncList.items:
        result['registerDate'].append(_item.cre_dt)
        result['type'].append(_item.type)
        result['ip'].append(_item.ip)
        result['mask'].append(_item.mask)
        result['url'].append(_item.url)
        result['description'].append(_item.description)
        result['modifyDate'].append(_item.mod_dt)
    return excel.make_response_from_dict(result, "csv", file_name="export_data")
def getpcc():
    """Query the pccgis table and return matching features.

    Query-string parameters (all optional):
        keyword     - space-separated words OR-matched against data->>'title'
        matches     - comma-separated YYYY[MMDD] values or lo-hi ranges
                      filtered against data->>'date' (default "2000-2099")
        order       - sort direction, "ASC" or "DESC" (default "DESC")
        limit       - maximum row count (default 3000)
        type        - "geojson" returns a FeatureCollection dict; any other
                      value produces a file download with that extension
        requireGeom - when present/truthy, only rows with a non-NULL geom

    Returns:
        A GeoJSON FeatureCollection dict, or a flask-excel file response.

    SECURITY: the original interpolated keyword/order/limit/matches straight
    into the SQL text. They are now bound or validated before use.
    """
    args = flask.request.args
    keyword = args.get('keyword', "")
    matches = args.get('matches', "2000-2099")
    order = args.get('order', "DESC")
    limit = args.get('limit', 3000)
    out_type = args.get('type', "geojson")          # was `type` (shadowed builtin)
    check_geom = args.get('requireGeom', False)

    # ORDER BY direction: whitelist only — never interpolate raw user text.
    order = "ASC" if str(order).upper() == "ASC" else "DESC"
    # LIMIT: force to an integer; fall back to the default on junk input.
    try:
        limit = int(limit)
    except (TypeError, ValueError):
        limit = 3000
    # Title filter is passed as a bound parameter (SIMILAR TO :kw), not inlined.
    keyword_param = "%(" + "|".join(keyword.split(" ")) + ")%"
    print("keyword_param", keyword_param)

    # Build the date filter. Every fragment is digit-checked before it is
    # placed into the SQL text, so no unescaped user text reaches the query.
    if len(matches) > 0:
        matches = matches.split(",")  # list form is also reused in the filename below
        clauses = []
        for part in matches:
            if "-" in part:
                lo, _, hi = part.partition("-")
                # Short values are years (and optionally a month): pad to full dates.
                if len(lo) <= 5:
                    lo += "0101"
                if len(hi) <= 5:
                    hi += "1231"
                if lo.isdigit() and hi.isdigit():
                    clauses.append(
                        f" ((data ->> 'date')::int >= {lo}"
                        f" and (data ->> 'date')::int <= {hi}) ")
            elif part.isdigit():
                clauses.append(f" (data ->> 'date') like '{part}%' ")
        search_sql = "or".join(clauses) if clauses else "TRUE"
    else:
        search_sql = "TRUE"
    print("search_sql", search_sql)

    feature_collection = {"type": "FeatureCollection", "features": []}  # was `dict` (shadowed builtin)

    geom_sql = "geom IS NOT NULL and " if check_geom is not False else ""
    head = f"select ST_AsGeoJSON(geom),data from pccgis where {geom_sql}"
    tail = f" ORDER BY (data ->> 'date') {order} limit {limit}"
    if len(keyword) > 1:
        rs = db.session.execute(
            head + f"(data ->> 'title') SIMILAR TO :kw and ({search_sql})" + tail,
            {"kw": keyword_param})
    else:
        rs = db.session.execute(head + f"({search_sql})" + tail)

    for row in rs:
        feature = {
            "type": "Feature",
            "geometry": {"type": "Point", "coordinates": []},
            "properties": {},
        }
        if row['st_asgeojson'] is not None:
            feature['geometry'] = json.loads(row['st_asgeojson'])
        if row['data'] is None:
            continue  # rows without properties are dropped entirely
        feature['properties'] = row['data']
        feature_collection['features'].append(feature)

    if out_type == "geojson":
        return feature_collection

    # File download: pivot feature properties into column -> list-of-values.
    features = feature_collection['features']
    exdata = {}
    if features:  # guard: original raised IndexError on an empty result set
        for col in features[0]['properties'].keys():
            exdata[col] = [f['properties'][col] for f in features]
    excel.init_excel(app)
    filename = f"大河小溪_{keyword}{matches}.{out_type}"
    return excel.make_response_from_dict(exdata,
                                         file_type=out_type,
                                         file_name=filename)
def getBlackListExcel():
    """Export filtered Rules_BlackList rows as an xlsx download.

    Reads the search form (paging, source, keyword, keyword type, analysis
    type and a cre_dt time window), applies the matching filters, and returns
    every matching row — paginate() is called with per_page == row count so
    the whole result set lands on one page — via flask-excel.
    """
    per_page = int(request.form.get('perpage'))
    start_idx = int(request.form.get('start'))
    search_source = request.form.get('search_source')
    keyword = request.form.get('search_keyword').strip()
    search_type = request.form.get('search_type').strip()

    # Map the search_type code to its English name(s) via the common-code table.
    # (Dead `typeStr = list()` pre-assignment from the original removed.)
    typeStr = [
        str(item.EName)
        for item in CommonCode.query.filter_by(GroupCode='an_data_from').all()
        if item.Code == search_type
    ]

    str_dt = ""
    end_dt = ""
    if request.form.get(
            'timeFrom') is not None and request.form.get('timeFrom') != "":
        str_dt = parser.parse(request.form['timeFrom']).isoformat()
    if request.form.get('timeTo') is not None and request.form['timeTo'] != "":
        end_dt = parser.parse(request.form['timeTo']).isoformat()
    search_keyword_type = str(request.form['search_keyword_type'])

    query = Rules_BlackList.query.filter(
        Rules_BlackList.cre_dt.between(str_dt, end_dt))
    if search_source != '':
        query = query.filter_by(source=search_source)

    # NOTE: original precedence preserved —
    # (keyword != "" and search_keyword_type) or typeStr.
    if keyword != "" and search_keyword_type or typeStr:
        if not typeStr:
            typeStr = [""]  # wildcard analysis_device match when no type mapped
        # The cre_dt.between filter is already on the base query, so the
        # duplicate copy the original re-applied here has been dropped.
        if search_keyword_type == 'md5':
            query = query.filter(
                Rules_BlackList.md5.like('%' + keyword + '%'),
                Rules_BlackList.analysis_device.like('%' + typeStr[0] + '%'))
        elif search_keyword_type == 'collect_uri':
            query = query.filter(
                Rules_BlackList.uri.like('%' + keyword + '%'),
                Rules_BlackList.analysis_device.like('%' + typeStr[0] + '%'))

    curpage = int(start_idx / per_page) + 1
    rowCount = query.count()
    cncList = query.order_by(Rules_BlackList.cre_dt.desc()).paginate(
        curpage, rowCount, error_out=False
    )  # page=None, per_page=None, error_out=False, max_per_page=None

    # One column list drives both the OrderedDict layout and the row copy,
    # replacing nine hand-written append lines (unused sortedESresult /
    # resultList locals from the original removed).
    columns = [
        'rule_name', 'uri', 'md5', 'mal_file_name', 'description',
        'analysis_result', 'cre_dt', 'detection_source', 'analysis_device',
    ]
    result = OrderedDict((col, []) for col in columns)
    for _item in cncList.items:
        for col in columns:
            result[col].append(getattr(_item, col))
    return excel.make_response_from_dict(result,
                                         "xlsx",
                                         file_name="export_data")