def result():
    """Flask view: look up the descriptions for a submitted numeric code and
    render them as a chat-style result page.

    On POST, form field 'code' selects description rows; they are serialized
    into the JavaScript object literal expected by result.html.  On any other
    method the page is rendered with wrongway=True.
    """
    Logger.info("Result Chat Page")
    if request.method == 'POST':
        code = request.form['code']
        # Parameterized query: the previous f-string interpolation of a
        # user-supplied value was an SQL-injection vector.
        cur = g.db.cursor().execute(
            '''SELECT B.DESCRIPT FROM NUMBER_CODE A
               LEFT JOIN DESCRIPTION B ON B.COM_ID = A.COM_ID
               WHERE A.CODE = ?
               ORDER BY A.COM_NUMBER, B.DESC_ID''',
            (code,))
        rows = cur.fetchall()
        tempString = ''
        if rows:
            # Join message objects in one pass instead of quadratic +=.
            messages = ','.join(
                "{message: '" + str(row[0]) + "',sender: false}"
                for row in rows)
            tempString = ("[{name: 'TEST_NAME',avatar: null,messages: ["
                          + messages
                          + ",{message: '',sender: false}]}];")
        return render_template("result.html", resultString=tempString,
                               wrongway=False)
    else:
        return render_template("result.html", wrongway=True)
def send(self, data):
    """Send pending QR records plus the newly captured one to the XML-RPC
    server; on failure, append the new record to the store file for retry.

    The store file accumulates records that could not be sent earlier due to
    connectivity errors; they are flushed first, then the file is truncated.
    """
    store_path = "/home/gustavo/workspace/QRDecodedApp/store/store.txt"
    try:
        proxy = xmlrpclib.Server('http://localhost:8000/xmlrpc')
        # Read all pending lines.  A context manager guarantees the handle
        # closes even on error (the original leaked it).
        with open(store_path) as f:
            pending = f.readlines()
        # BUG FIX: the original did `for i in l: ... del l[0]`, mutating the
        # list while iterating it, which skipped roughly every other pending
        # line — and then truncated the file anyway, losing those records.
        for line in pending:
            pprint(proxy.Interpreter.readData(line))
        # Everything was sent: truncate the store file.  (The original wrote
        # N empty strings in 'w' mode, which is just truncation, obscured.)
        with open(store_path, "w"):
            pass
        # Finally forward the freshly captured record.
        pprint(proxy.Interpreter.readData(data))
    except Exception:
        # Narrowed from a bare except; keep the record for a later retry.
        Logger.info("Error de conexion al servidor")
        with open(store_path, "a") as f:
            f.write(data + "\n")
def manager1():
    """Flask admin view 1: list each number code with its ordered
    compositions, plus all composition names as form options."""
    # 숫자코드 - 구분 등록/삭제 (number-code category register/delete page)
    Logger.info("Entered manager1")
    cur = g.db.cursor().execute(
        'SELECT A.CODE, A.COM_NUMBER, C.COM_NM FROM NUMBER_CODE A LEFT JOIN COMPOSITION C ON A.COM_ID = C.COM_ID ORDER BY A.CODE'
    )
    rows = cur.fetchall()

    # Group the flat join rows into one CodeData per distinct code,
    # preserving first-seen order (for/else replaces the old `check` flag).
    datas = []
    for r in rows:
        for d in datas:
            if r[0] == d.code:
                d.addComposition(r[1], r[2])
                break
        else:
            codeData = CodeData(r[0])
            codeData.addComposition(r[1], r[2])
            datas.append(codeData)

    # Removed dead code: a result_data list of d.getSpreadData() values was
    # built here but never used.

    cur = g.db.cursor().execute(
        'SELECT COM_NM FROM COMPOSITION ORDER BY COM_ID')
    options = [r[0] for r in cur.fetchall()]

    return render_template("admin1.html", datas=datas, options=options,
                           layout=1)
def manager2():
    """Flask admin view 2: list each composition with its ordered
    descriptions."""
    Logger.info("Entered manager2")
    cur = g.db.cursor().execute(
        'SELECT C.COM_NM, A.DESC_ID, A.DESCRIPT FROM DESCRIPTION A LEFT JOIN COMPOSITION C ON A.COM_ID = C.COM_ID ORDER BY C.COM_ID'
    )
    rows = cur.fetchall()

    # Group the flat join rows into one CompositionData per composition name
    # (for/else replaces the old `check` flag).
    datas = []
    for r in rows:
        for d in datas:
            if r[0] == d.composition:
                d.addDescription(r[1], r[2])
                break
        else:
            composData = CompositionData(r[0])
            composData.addDescription(r[1], r[2])
            datas.append(composData)

    # Removed dead code: a result_data list of d.getSpreadData() values was
    # built here but never used.

    return render_template("admin2.html", datas=datas, layout=2)
def registrationList(self, dataList):
    """Register each CarriageData in dataList and report the outcome in a
    message box; failures are collected and listed for the operator.

    :param dataList: iterable of CarriageData objects to register
    """
    Logger.info("CarriageData List Registraion")
    Logger.debug("regist Data List : " + str(dataList))
    check_resp = []
    # Removed unused local `c = False` (dead code); unpack the
    # (success, detail) tuple returned by registration() explicitly.
    for d in dataList:
        success, detail = self.registration(d)
        if not success:
            check_resp.append(
                str(detail) + "data : " + str(d.CUST_DES) + " : " +
                str(d.PROD_DES))

    msg = QMessageBox()
    if len(check_resp) > 0:
        msg.setWindowTitle("판매 등록 실패")
        msg.setIcon(QMessageBox.Critical)
        msg.setText("판매 등록에 실패 했습니다. 아래 리스트를 확인해주세요.\n" +
                    "\n".join(check_resp))
    else:
        msg.setWindowTitle("판매 등록 성공")
        msg.setIcon(QMessageBox.Information)
        msg.setText("판매 등록에 성공 했습니다.")
    msg.setDefaultButton(QMessageBox.Escape)
    msg.exec_()
def ecountLogin(self):
    """Log in to the ecount OAPI: resolve the ZONE for our company code,
    then obtain a SESSION_ID; both are stored on self."""
    Logger.info("ecountLogin")
    url = 'https://oapi.ecounterp.com/OAPI/V2/Zone'
    Logger.debug("COM_CODE : " + self.config.ecountComCode)
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    post = {'COM_CODE': self.config.ecountComCode}
    try:
        response = requests.post(url, data=json.dumps(post), headers=headers)
    except requests.exceptions.RequestException:
        # Narrowed from a bare except: only network-level failures abort
        # (a bare except also swallowed SystemExit/KeyboardInterrupt).
        Logger.error("ecount 로그인 중 네트워크 연결에 문제가 있습니다. ")
        sys.exit()
    Logger.debug("response" + response.text)
    body = response.json()  # parse once instead of three times
    Logger.debug("Data : " + body["Data"]["ZONE"])
    self.ZONE = body["Data"]["ZONE"]

    url = 'https://oapi{ZONE}.ecounterp.com/OAPI/V2/OAPILogin'.format(
        ZONE=self.ZONE)
    post = {
        'COM_CODE': self.config.ecountComCode,
        'USER_ID': self.config.ecountId,
        'API_CERT_KEY': self.config.ecountApiKey,
        'LAN_TYPE': 'ko-KR',
        'ZONE': self.ZONE
    }
    response = requests.post(url, data=json.dumps(post), headers=headers)
    self.SESSION_ID = response.json()["Data"]["Datas"]["SESSION_ID"]
def _risk_assessment_helper(self, experiment_class, exp_path, debug=False,
                            other=None):
    """Run model selection, then retrain/test the winning configuration
    several times and persist the averaged hold-out scores as JSON.

    :param experiment_class: experiment type to instantiate
    :param exp_path: folder for this experiment's artifacts
    :param debug: forwarded to model selection (no multiprocessing)
    :param other: opaque extra payload forwarded to the experiment
    """
    dataset_getter = DatasetGetter(None)

    best_config = self.model_selector.model_selection(
        dataset_getter, experiment_class, exp_path, self.model_configs,
        debug, other)

    # Retrain with the best configuration and test
    experiment = experiment_class(best_config['config'], exp_path)

    # Set up a log file for this experiment (I am in a forked process)
    logger = Logger(str(os.path.join(experiment.exp_path, 'experiment.log')),
                    mode='a')

    dataset_getter.set_inner_k(None)

    # Mitigate bad random initializations by averaging over several runs.
    # Hoisted: the run count was previously hard-coded as 3 in three places.
    num_runs = 3
    training_scores, test_scores = [], []
    for i in range(num_runs):
        training_score, test_score = experiment.run_test(
            dataset_getter, logger, other)
        print(f'Final training run {i + 1}: {training_score}, {test_score}')
        training_scores.append(training_score)
        test_scores.append(test_score)

    training_score = sum(training_scores) / num_runs
    test_score = sum(test_scores) / num_runs

    logger.log('TR score: ' + str(training_score) + ' TS score: ' +
               str(test_score))

    with open(os.path.join(self._HOLDOUT_FOLDER,
                           self._ASSESSMENT_FILENAME), 'w') as fp:
        json.dump({'best_config': best_config,
                   'HOLDOUT_TR': training_score,
                   'HOLDOUT_TS': test_score}, fp)
def __init__(self):
    """Load LOZEN and ECOUNT credentials from the INI file at
    self.configFilePath into instance attributes, logging any failure."""
    config = configparser.ConfigParser()
    try:
        config.read(self.configFilePath)
        if (self.lozenHeader in config):
            self.lozenId = config[self.lozenHeader][self.lozenIdKey]
            self.lozenPwd = config[self.lozenHeader][self.lozenPwdKey]
        else:
            Logger.error("LOZEN 로그인 정보 불러오기 실패 : " + self.configFilePath +
                         " 설정을 불러오는데 실패했습니다.")
        Logger.info("ecount login")
        # BUG FIX: the original called config.read(self.ecountId), passing a
        # user id where a file path belongs (configparser silently ignores
        # missing files, masking the mistake).  The config file at
        # self.configFilePath is already loaded above, so no re-read is
        # needed at all.
        if (self.ecountHeader in config):
            self.ecountId = config[self.ecountHeader][self.ecountIdKey]
            self.ecountPwd = config[self.ecountHeader][self.ecountPwdKey]
            self.ecountComCode = config[self.ecountHeader][self.ecountComKey]
            self.ecountApiKey = config[self.ecountHeader][
                self.ecountApiKeyKey]
            Logger.debug("apikey: " + self.ecountApiKey)
        else:
            Logger.error("ECOUNT 로그인 정보 불러오기 실패 : " + self.configFilePath +
                         " 설정을 불러오는데 실패했습니다.")
    except Exception:
        # Narrowed from a bare except (which also caught SystemExit etc.).
        Logger.error("로그인 정보 불러오기 실패 : " + self.configFilePath +
                     " 설정을 불러오는데 실패했습니다.")
def __init__(self, ZONE, SESSION_ID):
    """Store the zone/session credentials and bake them into the
    registration and inquiry endpoint URL templates."""
    Logger.info("CarriageRegister.init")
    self.ZONE = ZONE
    self.SESSION_ID = SESSION_ID
    # Both class-level URL templates take the same two placeholders.
    params = dict(ZONE=ZONE, SESSION_ID=SESSION_ID)
    self.registrationUrl = self.registrationUrl.format(**params)
    self.inquiryUrl = self.inquiryUrl.format(**params)
def progressing(self):
    """Display a progress dialog while data crawling is in flight."""
    Logger.info("startProgressing")
    self.is_progressing = True
    dialog = Ui_Form()
    dialog.setupUi()
    dialog.progLabel.setText("데이터 크롤링 중..")
    # process = False
    Logger.info("stopProgressing")
def deleteDesc():
    """Flask endpoint: placeholder for deleting a NUMBER_CODE row.

    The actual DELETE statement is still commented out, so this handler
    only logs the form and redirects back to the manager page.
    """
    Logger.info("deleteDesc")
    Logger.info(str(request.form))
    # Combined guard: only POSTs with a non-empty 'code' would act, and the
    # action itself is intentionally disabled for now.
    if request.method == 'POST' and request.form['code']:
        # g.db.cursor().execute(f'DELETE FROM NUMBER_CODE WHERE CODE = {code}')
        # g.db.commit()
        pass
    return redirect("/manager2")
def run(self):
    """Perform both logins and return the combined credentials.

    Returns (True, lozenData1, lozenData2, ZONE, SESSION_ID) when every
    credential was obtained, otherwise the bare boolean False.
    NOTE(review): the failure value is `False`, not a tuple — callers that
    index the result will raise on failure; confirm this is intended.
    """
    Logger.info("run")
    self.lozenLogin()
    self.ecountLogin()
    credentials = (self.lozenLoginData1, self.lozenLoginData2,
                   self.ZONE, self.SESSION_ID)
    # Any empty credential means at least one login did not complete.
    if "" in credentials:
        return False
    return (True,) + credentials
def checkCode():
    """AJAX endpoint: report whether a NUMBER_CODE row exists for the
    submitted 'code' form field.

    :return: JSON {'check': bool} with HTTP 200
    """
    Logger.info("checkCode")
    Logger.info(str(request.form))
    check = False
    if request.method == 'POST':
        code = request.form['code']
        # Parameterized query closes the SQL-injection hole left by the
        # previous f-string interpolation of user input.
        cur = g.db.cursor().execute(
            'select * from NUMBER_CODE where CODE = ?', (code,))
        if cur.fetchall():
            check = True
    return jsonify({'check': check}), 200
def notify(abnormal, hostname, ip_address, options, subject):
    """Fan an abnormal-process alert out to the channels named in
    `options` — any combination of "mail", "syslog" and "db".

    :param abnormal: list of offending process names
    :param hostname: reporting host's name
    :param ip_address: reporting host's address
    :param options: iterable of channel names to notify
    :param subject: mail subject suffix
    """
    log = Logger().get_logger()

    if "mail" in options:
        ps_names = "<br>".join(abnormal)
        Mail().send_mail("<>", get_emails(), [],
                         "[" + ip_address + "] " + subject, ps_names, None)
        log.info("[mail] %s %s %s %s" %
                 (get_emails(), ip_address, subject, ps_names))

    if "syslog" in options:
        ps_names = ",".join(abnormal)
        message = '%shostname=%s\tprocess=%s\t' % (
            make_header(ip_address), hostname, ps_names)
        log.info('[syslog] %shostname=%s\tprocess=%s\t' % (
            make_header(ip_address), hostname, ps_names))
        send_qradar(message)

    if "db" in options:
        ps_names = ",".join(abnormal)
        message = 'hostname=%s\tip=%s\tprocess=%s\t' % (
            hostname, ip_address, ps_names)
        log.info('[db] hostname=%s\tip=%s\tprocess=%s\t' % (
            hostname, ip_address, ps_names))
        # insert_db is resolved lazily, as in the original.
        importlib.import_module("insert_db").insert_db(message)
def __init__(self, tool_name=None, path_for_log_file='tmp/', parameters=None):
    """Initialize the tool.

    @param tool_name: name used for debugging/identification purposes
    @@type tool_name: string
    @param path_for_log_file: directory where this tool's logs are written
    @@type path_for_log_file: string
    """
    # The logger is keyed on the tool's name and log directory.
    self.log = Logger(tool_name, path_for_log_file)
    self.tool_name = tool_name
def decoded(zbar, data):
    """Callback fired when the zbar widget decodes a barcode.

    Writes a greeting to the results text box for QR codes, or an error
    line (plus a log entry) for any other symbology.
    """
    buf = results.props.buffer
    end = buf.get_end_iter()
    # The payload is prefixed with its symbology, e.g. "QR-Code:...".
    qrType = data.split(":")[0]
    if qrType != "QR-Code":
        buf.insert(end, "Codigo incorrecto.\n")
        Logger.info("Codigo invalido: " + data)
        return
    cd = ContentData(data)
    buf.insert(end, "Bienvenido " + cd.name + " " + cd.surname + ".\n")
    results.scroll_to_iter(end, 0)
def reflash(self):
    """Crawl carriage data between the two date pickers and display it."""
    Logger.info("reflash")
    #TODO: start progress
    # pg = Thread(target=self.progressing)
    # pg.start()

    # Crawl with the stored login credentials/session.
    crawler = Parser(self.lozenLoginData1, self.lozenLoginData2,
                     self.lozenLoginSession)

    def as_yyyymmdd(qdate):
        # QDate -> "YYYYMMDD" string expected by the crawler.
        return str(qdate.toPyDate()).replace("-", "")

    self.dataList = crawler.parse(as_yyyymmdd(self.fromDateEdit.date()),
                                  as_yyyymmdd(self.toDateEdit.date()))
    self.spreadData(self.dataList)
class AbstractTool(object):
    """Abstract base class that all tools derive from.

    Concrete tools must implement execute_agent and execute_model.
    """

    def __init__(self, tool_name=None, path_for_log_file='tmp/',
                 parameters=None):
        """Initialize the tool.

        @param tool_name: name used for debugging/identification purposes
        @@type tool_name: string
        @param path_for_log_file: directory where this tool's logs go
        @@type path_for_log_file: string
        """
        self.log = Logger(tool_name, path_for_log_file)
        self.tool_name = tool_name

    def execute_agent(self, agent):
        """Run the tool's main method against one agent.

        @param agent: the agent the method should be executed on.
        @@type agent: class Agent
        """
        raise NotImplementedError(
            "This method is abstract and must be implemented in derived classes."
        )

    def execute_model(self, model):
        """Run the tool's main method against one model.

        @param model: the model the method should be executed on.
        @@type model: class derived from tools.AbstractModel class
        """
        raise NotImplementedError(
            "This method is abstract and must be implemented in derived classes."
        )

    def _create_log(self, data):
        """Persist `data` through the tool's logger."""
        print('Saving log...')
        self.log.log(data)
class FileUtil(object):
    """
    File-system helpers; __logger is a private class-level logger.
    """
    __logger = Logger(sys.modules['__main__'])

    def getFileListByExtension(self, dirPath, extension):
        """Return names of files in dirPath whose extension matches.

        @param dirPath: directory to scan
        @param extension: extension name without the leading dot
        """
        suffix = '.' + extension
        # Comprehension over os.listdir; comparison stays case-sensitive.
        return [name for name in os.listdir(dirPath)
                if os.path.splitext(name)[1] == suffix]

    def writeToFile(self, filePath, content):
        """Append `content` to filePath using UTF-8 encoding.

        The context manager guarantees the handle closes even if write()
        raises (the original leaked it on error).
        """
        with codecs.open(filePath, 'a', encoding='utf8') as fh:
            fh.write(content)

    def readFromFile(self, filePath):
        """Return the UTF-8 content of filePath, or None on failure."""
        content = None
        try:
            with codecs.open(filePath, 'r', encoding='utf8') as fh:
                content = fh.read()
        except Exception:
            # Same best-effort contract as before: log and return None.
            self.__logger.error('read file ' + filePath + ' failure')
        return content

    def getFilePath(self, fileName):
        """Build a path relative to the parent of sys.path[0], e.g.
        os.path.dirname(sys.path[0]) + '/conf/teste.txt'."""
        return os.path.dirname(sys.path[0]) + fileName
def clickRegistrationButton(self):
    """Collect table rows whose check column is not "0", mark them as
    selected ("2"), and send them to the ecount register."""
    Logger.info("pressed RegistryButton")
    register = Register(self.ZONE, self.SESSION_ID)
    # model = self.tableView.model()
    # print("model : ", model)
    data = []
    print("!@#!@# rowCount : ", self.tableView.rowCount())
    if self.tableView.rowCount() > 0:
        for row in range(self.tableView.rowCount()):
            print("!@#!@# check . ", row, ":",
                  self.tableView.item(row, 0).text())
            # Column 0 holds the check state; "0" means unselected.
            if self.tableView.item(row, 0).text() == "0":
                print("!@#!@# continue")
                continue
            print("!@#!@# self.dataList[row] : ", self.dataList[row])
            self.dataList[row].checkValue = "2"
            data.append(self.dataList[row])
    register.registrationList(data)
def _model_selection_helper(self, dataset_getter, experiment_class, config,
                            exp_config_name, other=None):
    """Evaluate one configuration on the hold-out split and persist its
    training/validation scores as JSON in the experiment folder.

    :param dataset_getter: provides the data splits
    :param experiment_class: experiment type to instantiate
    :param config: the configuration under evaluation
    :param exp_config_name: folder for this configuration's artifacts
    :param other: opaque extra payload forwarded to run_valid
    :return:
    """
    # The experiment object drives this specific configuration.
    experiment = experiment_class(config, exp_config_name)

    # Per-experiment log file (this runs in a separate process).
    logger = Logger(str(os.path.join(experiment.exp_path, 'experiment.log')),
                    mode='a')
    logger.log('Configuration: ' + str(experiment.model_config))

    config_filename = os.path.join(experiment.exp_path,
                                   self._CONFIG_FILENAME)

    dataset_getter.set_inner_k(None)  # need to stay this way

    tr_score, vl_score = experiment.run_valid(dataset_getter, logger, other)

    # Results dictionary written to disk for later aggregation.
    selection_dict = {
        'config': experiment.model_config.config_dict,
        'TR_score': float(tr_score),
        'VL_score': float(vl_score),
    }

    logger.log('TR Accuracy: ' + str(tr_score) +
               ' VL Accuracy: ' + str(vl_score))

    with open(config_filename, 'w') as fp:
        json.dump(selection_dict, fp)
def lozenLogin(self):
    """Log in to the SmartLogen service and keep the two session tokens
    found in the Ξ-delimited response body (positions 1 and 3)."""
    Logger.info("lozenLogin")
    url = 'http://203.247.141.92:8080/SmartLogen/UserLogin'
    post = {'userid': self.config.lozenId, 'userpw': self.config.lozenPwd}
    try:
        response = self.login_session.post(url, data=post,
                                           headers=self.headers_common)
    except Exception:
        # Narrowed from a bare except, which also swallowed
        # SystemExit/KeyboardInterrupt.
        Logger.error("lozen 로그인 중 네트워크 연결에 문제가 있습니다. ")
        sys.exit()
    Logger.debug("response" + response.text)
    login_data = response.text.split('Ξ')
    # Tokens sit at fixed positions of the delimited payload.
    self.lozenLoginData1 = login_data[1]
    self.lozenLoginData2 = login_data[3]
def insertDesc():
    """Flask endpoint: upsert up to 10 description rows for a composition,
    creating the composition row first when it does not exist yet."""
    Logger.info("insertDesc")
    Logger.info(str(request.form))
    if request.method == 'POST':
        compos = request.form['compos']
        desc_list = []
        for i in range(1, 11):
            try:
                desc_list.append(request.form['desc' + str(i)])
            except KeyError:
                Logger.info("[WARN] It Does not exists key")
        if compos and not '' == compos:
            for i, desc in enumerate(desc_list):
                if desc != '':
                    # All statements below use bound parameters; the previous
                    # f-string interpolation of form input was SQL-injectable.
                    cur = g.db.cursor().execute(
                        'select COM_ID FROM COMPOSITION WHERE COM_NM=?',
                        (compos,))
                    row = cur.fetchall()
                    if row:
                        com_id = row[0][0]
                        g.db.cursor().execute(
                            'insert or replace into DESCRIPTION(COM_ID, DESC_ID, DESCRIPT) values(?,?,?)',
                            (com_id, i + 1, desc))
                    else:
                        # Composition is new: allocate the next COM_ID.
                        cur = g.db.cursor().execute(
                            'select MAX(COM_ID) FROM COMPOSITION')
                        row = cur.fetchall()
                        if row:
                            max_id = row[0][0]
                            g.db.cursor().execute(
                                'insert into COMPOSITION(COM_ID, COM_NM) values(?,?)',
                                (max_id + 1, compos))
                            g.db.cursor().execute(
                                'insert or replace into DESCRIPTION(COM_ID, DESC_ID, DESCRIPT) values(?,?,?)',
                                (max_id + 1, i + 1, desc))
                        else:
                            # No composition rows at all: fall back to id 1.
                            g.db.cursor().execute(
                                'insert or replace into DESCRIPTION(COM_ID, DESC_ID, DESCRIPT) values(?,?,?)',
                                (1, i + 1, desc))
        g.db.commit()
    return redirect("/manager2")
def insertCode():
    """Flask endpoint: upsert up to 10 composition slots for a number code."""
    Logger.info("insertCode")
    Logger.info(str(request.form))
    if request.method == 'POST':
        code = request.form['code']
        compos_list = []
        for i in range(1, 11):
            try:
                compos_list.append(request.form['compos' + str(i)])
            except KeyError:
                Logger.info("[WARN] It Does not exists key")
        if code and not '' == code:
            for i, compos in enumerate(compos_list):
                if compos != '':
                    # Bound parameters close the SQL-injection hole left by
                    # the previous f-string interpolation of form input.
                    g.db.cursor().execute(
                        'insert or replace into NUMBER_CODE(CODE, COM_NUMBER, COM_ID) '
                        'values(?,?,(select COM_ID FROM COMPOSITION WHERE COM_NM=?))',
                        (code, i + 1, compos))
            g.db.commit()
    return redirect("/manager1")
from log.Logger import Logger from common import JSON_TEMPLATE, get_answer_object import copy import json import traceback logger = Logger() class JSONGenerator(object): def __init__(self): pass @staticmethod def write_response_to_file(response_content, file_path): try: with open(file_path, 'w') as fp: json.dump(response_content, fp) msg_string = 'saved file content in filepath - {}\n'.format(file_path) logger.info(msg_string) return file_path except Exception: logger.error(traceback.format_exc()) @staticmethod def create_response(response_object): questions_map = response_object.get('question_map', dict()) ques_to_altq_map = response_object.get('altq_map', dict()) tag_term_map = response_object.get('tag_term_map', dict()) response = {'faqs': [], 'synonyms': response_object.get('graph_synonyms', dict())} try:
import re import logging #from share.config.ConfigManager import ConfigManager import datetime #from share.language.StopWords import StopWords from share.language.Lemmatize import Lemmatizer import requests from textblob import TextBlob from log.Logger import Logger # config_manager = ConfigManager() # qna_conf = config_manager.load_config(key='qna') # conf = config_manager.load_config(key='ontology_analyzer') # remote_config = config_manager.load_config(key="remote_config") oa_logger = Logger() #logging.getLogger('ont_analyzer') NODE_ID = 0 NODE_NAME = 1 SYNONYMS = 2 HAS_FAQS = 3 IS_MANDATORY = 4 class OntologyAnalyzer: def __init__(self): self.kt_id = None self.language = None self.doc_id = None # self.db_manager = DBManager() # self.ont_analyzer_db_manager = OADBManager()
class WeiBuClawer(object):
    '''
    Crawler for the threat-intelligence site https://x.threatbook.cn.
    '''
    # Private class-level logger bound to the main module.
    __logger = Logger(sys.modules['__main__'])

    def clawDomain(self, domain):
        '''
        Fetch the threat-intelligence page for a domain.
        :param domain: domain name to query
        :return: BeautifulSoup-parsed response body
        '''
        self.__logger.info('claw the site ' + domain)
        cdomain = "https://x.threatbook.cn/domain/" + domain
        response = requests.get(cdomain)
        soup = BeautifulSoup(response.text)
        return soup

    def clawIp(self, ip):
        '''
        Fetch the threat-intelligence page for an IP address.
        :param ip: IP address to query
        :return: BeautifulSoup-parsed response body
        '''
        self.__logger.info('claw the ip : ' + ip)
        cIp = "https://x.threatbook.cn/ip/" + ip
        response = requests.get(cIp)
        soup = BeautifulSoup(response.text)
        return soup

    def parseIpGeroInfo(self, soup):
        '''
        Extract the geographic-location string for an IP from the page.
        :param soup: parsed page returned by clawIp
        :return: geo string with internal whitespace removed ('' if absent)
        '''
        self.__logger.info('parse the html to get ip gero info')
        geroInfo = ''
        infoTab = soup.find(
            "table", "table table-condensed table-borderless pull-left res-brief")
        trList = infoTab.find_all('tr')
        trSize = len(trList)
        if trSize > 2:
            # The second row of the brief table carries the location cell.
            geroTrObj = trList[1]
            # NOTE(review): .string can be None, which would raise on
            # .strip() before the None check below — confirm against real
            # pages before relying on this path.
            geroInfo = geroTrObj.find('td').string.strip()
            if geroInfo is not None:
                geroInfo = ''.join(geroInfo.split())
        return geroInfo

    def parseWeibuThread(self, soup):
        '''
        Extract ThreatBook's own intelligence tags as a comma-joined string.
        :param soup: parsed page
        :return: tag names joined with ','
        '''
        self.__logger.info('parse the html to get weibu thread')
        threadStrs = ""
        # threadTab = soup.find(id="tag_td")
        # spanTabList = threadTab.find_all("span","tag non-clickable-tag")
        spanTabList = soup.find_all("span", "tag non-clickable-tag")
        slistSize = len(spanTabList)
        limitSize = slistSize - 1
        for ii in range(slistSize):
            threadStrs = threadStrs + spanTabList[ii].string
            if ii < limitSize:
                # Comma between entries, none after the last one.
                threadStrs = threadStrs + ","
        return threadStrs

    def parseComunityThread(self, soup):
        '''
        Extract community-voted intelligence tags as a comma-joined string.
        :param soup: parsed page
        :return: tag names joined with ','
        '''
        self.__logger.info('parse the html to get comunity thread')
        threadStrs = ""
        voteList = soup.find_all("span", "vb4-tag voted")
        slistSize = len(voteList)
        limitSize = slistSize - 1
        for ii in range(slistSize):
            threadStrs = threadStrs + voteList[ii].string
            if ii < limitSize:
                threadStrs = threadStrs + ","
        return threadStrs

    def parseThreads(self, soup):
        '''
        Extract threat entries from the intelligence table (#intelli_table).
        :param soup: parsed page
        :return: entries joined with ','
        '''
        self.__logger.info('parse the html to get threads')
        threadStrs = ""
        threads = soup.find(id="intelli_table")
        threadList = threads.find_all('td')
        listSize = len(threadList)
        limitSize = listSize - 1
        for ii in range(listSize):
            # Only every third <td> carries the value of interest.
            if (ii + 1) % 3 == 0:
                if threadList[ii].string is None:
                    continue
                else:
                    threadStrs = threadStrs + threadList[ii].string
                    if ii < limitSize:
                        threadStrs = threadStrs + ","
        return threadStrs
def _model_selection_helper(self, dataset_getter, experiment_class, config,
                            exp_config_name, other=None):
    """Evaluate one configuration across all inner folds (k-fold model
    selection) and persist per-fold plus mean/std TR/VL scores as JSON.

    :param dataset_getter: provides data splits; its inner fold index is
        advanced per iteration below
    :param experiment_class: experiment type instantiated once per fold
    :param config: the configuration under evaluation
    :param exp_config_name: folder that receives the log and result file
    :param other: opaque extra payload forwarded to run_valid
    """
    # Set up a log file for this experiment (run in a separate process)
    logger = Logger(str(os.path.join(exp_config_name, 'experiment.log')),
                    mode='a')
    logger.log('Configuration: ' + str(config))

    config_filename = os.path.join(exp_config_name, self._CONFIG_FILENAME)

    # ------------- PREPARE DICTIONARY TO STORE RESULTS -------------- #
    k_fold_dict = {
        'config': config,
        'folds': [{} for _ in range(self.folds)],
        'avg_TR_score': 0.,
        'avg_VL_score': 0.,
        'std_TR_score': 0.,
        'std_VL_score': 0.
    }

    for k in range(self.folds):
        # Point the provider at inner fold k; each fold gets its own folder.
        dataset_getter.set_inner_k(k)

        fold_exp_folder = os.path.join(exp_config_name, 'FOLD_' + str(k + 1))

        # Create the experiment object which will be responsible for
        # running a specific experiment
        experiment = experiment_class(config, fold_exp_folder)

        training_score, validation_score = experiment.run_valid(
            dataset_getter, logger, other)

        logger.log(
            str(k + 1) + ' split, TR Accuracy: ' + str(training_score) +
            ' VL Accuracy: ' + str(validation_score))

        k_fold_dict['folds'][k]['TR_score'] = training_score
        k_fold_dict['folds'][k]['VL_score'] = validation_score

    # Aggregate mean and standard deviation over the folds.
    tr_scores = np.array(
        [k_fold_dict['folds'][k]['TR_score'] for k in range(self.folds)])
    vl_scores = np.array(
        [k_fold_dict['folds'][k]['VL_score'] for k in range(self.folds)])

    k_fold_dict['avg_TR_score'] = tr_scores.mean()
    k_fold_dict['std_TR_score'] = tr_scores.std()
    k_fold_dict['avg_VL_score'] = vl_scores.mean()
    k_fold_dict['std_VL_score'] = vl_scores.std()

    logger.log('TR avg is ' + str(k_fold_dict['avg_TR_score']) +
               ' std is ' + str(k_fold_dict['std_TR_score']) +
               ' VL avg is ' + str(k_fold_dict['avg_VL_score']) +
               ' std is ' + str(k_fold_dict['std_VL_score']))

    # NOTE(review): numpy scalar types may not be JSON-serializable
    # depending on the scores' dtype — confirm run_valid returns Python
    # floats before relying on this dump.
    with open(config_filename, 'w') as fp:
        json.dump(k_fold_dict, fp)
def run_final_model(self, outer_k, debug):
    """Retrain and test the winning configuration of outer fold `outer_k`
    multiple times (self.final_training_runs) to mitigate bad random
    initializations.

    Runs either as ray remote jobs or synchronously when `debug` is True;
    each run's result is saved as a .torch file, and an existing result
    file makes the run a no-op (cheap resume support).

    :param outer_k: zero-based outer fold index
    :param debug: when True, run in-process and aggregate final runs now
    """
    outer_folder = osp.join(self._ASSESSMENT_FOLDER,
                            self._OUTER_FOLD_BASE + str(outer_k + 1))
    config_fname = osp.join(outer_folder, self._SELECTION_FOLDER,
                            self._WINNER_CONFIG)

    # Load the configuration selected for this outer fold.
    with open(config_fname, 'r') as f:
        best_config = json.load(f)

    dataset_getter_class = s2c(self.model_configs.dataset_getter)
    dataset_getter = dataset_getter_class(
        self.model_configs.data_root, self.splits_folder,
        s2c(self.model_configs.dataset_class),
        self.model_configs.dataset_name, self.outer_folds, self.inner_folds,
        self.model_configs.num_dataloader_workers,
        self.model_configs.pin_memory)

    # Tell the data provider to take data relative
    # to a specific OUTER split
    dataset_getter.set_outer_k(outer_k)
    dataset_getter.set_inner_k(None)

    # Mitigate bad random initializations
    for i in range(self.final_training_runs):

        final_run_exp_path = osp.join(outer_folder, f"final_run{i+1}")
        final_run_torch_path = osp.join(final_run_exp_path,
                                        f'run_{i+1}_results.torch')

        # Retrain with the best configuration and test
        # Set up a log file for this experiment (run in a separate process)
        logger = Logger(osp.join(final_run_exp_path, 'experiment.log'),
                        mode='a')
        logger.log(
            json.dumps(dict(outer_k=dataset_getter.outer_k,
                            inner_k=dataset_getter.inner_k,
                            **best_config),
                       sort_keys=False, indent=4))

        if not debug:

            # One remote task per run; the closure captures this
            # iteration's paths/logger and is launched immediately below.
            @ray.remote(num_cpus=1, num_gpus=self.gpus_per_task)
            def foo():
                if not osp.exists(final_run_torch_path):
                    experiment = self.experiment_class(
                        best_config['config'], final_run_exp_path)
                    res = experiment.run_test(dataset_getter, logger)
                    torch.save(res, final_run_torch_path)
                return outer_k, i

            # Launch the job and append to list of final runs jobs
            future = foo.remote()
            self.final_runs_job_list.append(future)
            self.progress_manager.update_state(
                dict(type='START_FINAL_RUN', outer_fold=outer_k, run_id=i))
        else:
            # Debug mode: run synchronously in this process.
            if not osp.exists(final_run_torch_path):
                experiment = self.experiment_class(best_config['config'],
                                                   final_run_exp_path)
                training_score, test_score = experiment.run_test(
                    dataset_getter, logger)
                torch.save((training_score, test_score),
                           final_run_torch_path)

    if debug:
        self.process_final_runs(outer_k)
def model_selection(self, kfold_folder, outer_k, debug):
    """
    Performs model selection by launching each configuration in parallel,
    unless debug is True. Each process trains the same configuration for
    each inner fold.
    :param kfold_folder: The root folder for model selection
    :param outer_k: the current outer fold to consider
    :param debug: whether to run the procedure in debug mode (no
        multiprocessing)
    """
    SELECTION_FOLDER = osp.join(kfold_folder, self._SELECTION_FOLDER)

    # Create the dataset provider
    dataset_getter_class = s2c(self.model_configs.dataset_getter)
    dataset_getter = dataset_getter_class(
        self.model_configs.data_root, self.splits_folder,
        s2c(self.model_configs.dataset_class),
        self.model_configs.dataset_name, self.outer_folds, self.inner_folds,
        self.model_configs.num_dataloader_workers,
        self.model_configs.pin_memory)

    # Tell the data provider to take data relative
    # to a specific OUTER split
    dataset_getter.set_outer_k(outer_k)

    if not osp.exists(SELECTION_FOLDER):
        os.makedirs(SELECTION_FOLDER)

    # if the # of configs to try is 1, simply skip model selection
    if len(self.model_configs) > 1:

        # Launch one job for each inner_fold for each configuration
        for config_id, config in enumerate(self.model_configs):
            # I need to make a copy of this dictionary
            # It seems it gets shared between processes!
            cfg = deepcopy(config)

            # Create a separate folder for each configuration
            config_folder = osp.join(
                SELECTION_FOLDER, self._CONFIG_BASE + str(config_id + 1))
            if not osp.exists(config_folder):
                os.makedirs(config_folder)

            for k in range(self.inner_folds):
                # Create a separate folder for each fold for each config.
                fold_exp_folder = osp.join(
                    config_folder, self._INNER_FOLD_BASE + str(k + 1))
                fold_results_torch_path = osp.join(
                    fold_exp_folder, f'fold_{str(k+1)}_results.torch')

                # Tell the data provider to take data relative
                # to a specific INNER split
                dataset_getter.set_inner_k(k)

                # Per-fold log file records the exact split and config.
                logger = Logger(osp.join(fold_exp_folder, 'experiment.log'),
                                mode='a')
                logger.log(
                    json.dumps(dict(outer_k=dataset_getter.outer_k,
                                    inner_k=dataset_getter.inner_k,
                                    **config),
                               sort_keys=False, indent=4))

                if not debug:

                    # One remote task per (config, inner fold); skips work
                    # when the result file already exists (resume support).
                    @ray.remote(num_cpus=1, num_gpus=self.gpus_per_task)
                    def foo():
                        if not osp.exists(fold_results_torch_path):
                            experiment = self.experiment_class(
                                config, fold_exp_folder)
                            res = experiment.run_valid(
                                dataset_getter, logger)
                            torch.save(res, fold_results_torch_path)
                        return dataset_getter.outer_k, dataset_getter.inner_k, config_id

                    # Launch the job and append to list of outer jobs
                    future = foo.remote()
                    self.outer_folds_job_list.append(future)
                    self.progress_manager.update_state(
                        dict(type='START_CONFIG',
                             outer_fold=outer_k,
                             inner_fold=k,
                             config_id=config_id))
                else:  # debug mode
                    if not osp.exists(fold_results_torch_path):
                        experiment = self.experiment_class(
                            config, fold_exp_folder)
                        training_score, validation_score = experiment.run_valid(
                            dataset_getter, logger)
                        torch.save((training_score, validation_score),
                                   fold_results_torch_path)

            if debug:
                self.process_config(config_folder, deepcopy(config))
        if debug:
            self.process_inner_results(SELECTION_FOLDER, config_id)
    else:
        # Performing model selection for a single configuration is useless
        with open(osp.join(SELECTION_FOLDER, self._WINNER_CONFIG),
                  'w') as fp:
            json.dump(dict(best_config_id=0, config=self.model_configs[0]),
                      fp, sort_keys=False, indent=4)
class CsvUtil(object):
    """CSV read/write helpers (reads decode UTF-8)."""
    # Private class-level logger bound to the main module.
    __logger = Logger(sys.modules['__main__'])

    def readCsvRntnList(self, filePath):
        '''
        Read a CSV file and return its rows as a list of lists.
        :param filePath: path of the CSV file
        :return: list of row lists
        '''
        # `with` closes the handle even if the reader raises (the original
        # leaked it on error); `fh` no longer shadows the builtin `file`.
        with open(filePath, 'r', encoding="utf-8") as fh:
            return [line for line in csv.reader(fh)]

    def readCsvRntnDictList(self, filePath):
        '''
        Read a CSV file and return List[dict], one dict per data row keyed
        by the header row.
        :param filePath: path of the CSV file
        :return: list of dicts
        '''
        rntnDictList = []
        # BUG FIX: the original never closed this file at all.
        with open(filePath, 'r', encoding="utf-8") as fh:
            reader = csv.reader(fh)
            headRow = next(reader)
            totalColumns = len(headRow)
            for row in reader:
                # `record` replaces the original name `dict`, which
                # shadowed the builtin.
                record = {}
                for ii in range(totalColumns):
                    record[headRow[ii]] = row[ii]
                rntnDictList.append(record)
        return rntnDictList

    def writeContent2Csv(self, filePath, valueList):
        '''
        Write rows to a CSV file.
        :param filePath: destination path
        :param valueList: iterable of row iterables
        '''
        with open(filePath, 'w', newline='') as fh:
            csv.writer(fh).writerows(valueList)

    def writeList2Csv(self, filePath, valueList, headList):
        '''
        Write a header row followed by data rows to a CSV file.
        :param filePath: destination path
        :param valueList: iterable of row iterables
        :param headList: header row
        '''
        with open(filePath, 'w', newline='') as fh:
            writer = csv.writer(fh)
            writer.writerow(headList)
            writer.writerows(valueList)

    def writeDictList2Csv(self, filePath, dictList, headList):
        '''
        Write dict rows in headList column order, preceded by the header.
        :param filePath: destination path
        :param dictList: iterable of dicts keyed by headList entries
        :param headList: header row / column order
        '''
        with open(filePath, 'w', newline='') as fh:
            writer = csv.writer(fh)
            writer.writerow(headList)
            for record in dictList:
                writer.writerow([record[key] for key in headList])
def registration(self, data):
    """Register one CarriageData sale with the ecount OAPI.

    Builds the SaleList JSON payload by string templating — one entry per
    product code in data.PROD_CD — and POSTs it to self.registrationUrl.

    :param data: CarriageData-like object; PROD_CD/PROD_DES/QTY are
        parallel lists, the remaining fields scalars — TODO confirm.
    :return: (True, success_count) when every line registered, otherwise
        (False, error_detail_string).
    """
    Logger.info("CarriageData Registraion")
    print("!@#!@# data : ", data)
    post = """{ "SaleList": ["""
    for i in range(len(data.PROD_CD)):
        # NOTE(review): the template leaves a trailing comma after
        # "U_MEMO5" — strict JSON parsers reject that; the ecount endpoint
        # apparently tolerates it. Confirm before changing.
        post += """ {{ "Line": "0", "BulkDatas": {{ "IO_DATE": "{IO_DATE}", "UPLOAD_SER_NO": "", "CUST": "{CUST}", "CUST_DES": "{CUST_DES2}", "WH_CD": "00002", "PROD_CD": "{PROD_CD}", "PROD_DES": "{PROD_DES}", "QTY": "{QTY}", "U_MEMO3": "{CUST_DES1} / {PHONE}", "U_MEMO4": "{ADDRESS}", "U_MEMO5": "{ECT}", }} }} """.format(
            IO_DATE=data.IO_DATE,
            CUST=data.CUST,
            # Special customer code TRA2008008 gets the fixed label below.
            CUST_DES2=data.CUST_DES
            if str(data.CUST) != "TRA2008008" else "택배발송",
            # UPLOAD_SER_NO=data.UPLOAD_SER_NO
            CUST_DES1=data.CUST_DES,
            PROD_CD=data.PROD_CD[i],
            PROD_DES=data.PROD_DES[i],
            QTY=data.QTY[i],
            PHONE=data.phoneNumber,
            ADDRESS=data.address,
            ECT="")
        # Comma between sale entries, none after the last.
        if (i != len(data.PROD_CD) - 1):
            post += """, """
    post += """] }"""
    post = post.encode("utf-8")
    Logger.debug("post: " + str(post))
    response = requests.post(self.registrationUrl, data=post,
                             headers=self.headers)
    Logger.debug("response : " + response.text)
    # The API reports HTTP-like status inside the JSON body.
    status = response.json()["Status"]
    success_cnt = ""
    fail_cnt = ""
    error_msg = ""
    if (status == "200"):
        success_cnt = response.json()["Data"]["SuccessCnt"]
        fail_cnt = response.json()["Data"]["FailCnt"]
        if (fail_cnt == 0):
            return (True, success_cnt)
        else:
            # At least one line failed; surface the first error detail.
            return (
                False,
                response.json()["Data"]["ResultDetails"][0]["TotalError"])
    else:
        error_msg = response.json()["Error"]["Message"]
        return (False, error_msg)