def log_validation_results(trainer):
    """Run the evaluator over the validation loader and log accuracy / NLL."""
    evaluator.run(valid_data_loader)
    metrics = evaluator.state.metrics
    logger.info(
        "Validation Results - Epoch: [{}/{}] Avg accuracy: {:.6f} Avg loss: {:.6f}".format(
            trainer.state.epoch,
            trainer.state.max_epochs,
            metrics['accuracy'],
            metrics['nll'],
        )
    )
def comm_close(self):
    """Close the HMT_S48 serial port, logging (not raising) on failure."""
    try:
        self.S48.serial.close()
    except Exception:
        # Was a bare ``except:`` — that also swallowed SystemExit and
        # KeyboardInterrupt. Narrowed to Exception; failure stays best-effort.
        logger.info("HMT_S48 com port %s addr %s close failed!" % (self.port, self.addr))
def on_SC_start_clicked(self): """ Slot documentation goes here. """ # TODO: not implemented yet # raise NotImplementedError logger.info("test start!") self.xls_name = str(self.save_path.text() + '/'+ self.sample_id.text() + '-'+ time.strftime("%m%d%H%M")+ ".xls") self.xtime = 0 self.meas_mode = 0 self.test_times = 1 self.c_index = 0 self.dataC = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9] self.Sens = [] self.dataS = [] self.dataX = [] self.dataY = [] self.dataX.append([]) self.dataY.append([]) self.Rt_MPL.clear_curve() self.SC_MPL.clear_curve() self.get_inst_conf() if self.vi_mode: Keithley2400.measure_voltage(self.src_curr, self.vlimit, self.meas_type) else: Keithley2400.measure_current(self.src_vol, self.ilimit, self.meas_type) # self.ref_time = int(time.time()) self.timer.start(1000)
def removeFinishedTasks(self, threadedTasks, queuedTasks):
    """Drop tasks whose DB status is no longer 1 (running).

    All finished tasks are removed from ``threadedTasks``; at most ONE
    finished task is removed from ``queuedTasks`` per call (matching the
    original early-break behaviour). Both lists are mutated in place.
    """
    # Scan every threaded task, logging and dropping each finished one.
    still_running = []
    for task in threadedTasks:
        if self.dbConn.selectDialTaskById(
                task.get('task_id')).get('task_status') != 1:
            logger.info("task %s removed from threadedTasks" % (task.get('task_id')))
        else:
            still_running.append(task)
    threadedTasks[:] = still_running

    # Remove only the first finished queued task, if any.
    for idx, task in enumerate(queuedTasks):
        if self.dbConn.selectDialTaskById(
                task.get('task_id')).get('task_status') != 1:
            logger.info("task %s removed from queuedTasks" % (task.get('task_id')))
            queuedTasks.pop(idx)
            break
def on_SC_stop_clicked(self):
    """Qt slot: stop the running SC measurement."""
    logger.info("test stop!")
    self.meas_stop()
def run(self):
    """Writer loop: pull position records off ``data_queue`` and upsert them.

    Exits on a ``None`` sentinel, or when ``upsert`` reports that every
    region's quota is filled (break_flag == 1).
    """
    # tools.lock_
    logger.info(f'启动数据库写入线程<{self.name}>')
    while True:
        try:
            position_info = self.data_queue.get()
            if position_info is None:
                break
            # Hold the count lock so the quota check and the insert are
            # atomic; an interleaving here would corrupt the final tally.
            with tools.db_count_lock:
                break_flag = self.upsert(position_info)
            # All regions reached their target count -> stop the thread.
            if break_flag == 1:
                break
            # This record's region is already full -> skip just this record.
            if break_flag == 2:
                continue
        except Exception as e:
            logger.error(f'<{self.name}>:\n'
                         f'- 插入记录{position_info}到数据库失败\n'
                         f'- 错误信息: {e}')
        finally:
            self.data_queue.task_done()
    # tools.lock_
    logger.info(f'结束数据库写入线程<{self.name}>')
def get_session(self, retry_times: int = 0):
    """Create a requests session and prime it with cookies from HOME_URL.

    Retries (recursively) up to ``settings.RETRY_TIMES`` times. Returns
    True on success, False once the retry budget is exhausted.
    """
    if retry_times > settings.RETRY_TIMES:
        # tools.lock_
        logger.error(
            f'<{self.name}>: \n- '
            f'访问次数 {settings.HOME_URL} 超过{settings.RETRY_TIMES}次,'
            f'获取cookies失败!')
        return False
    else:
        try:
            # tools.lock_
            logger.info(f'尝试访问 {settings.HOME_URL} 获得cookies')
            # HOME_HEADERS / SEARCH_HEADERS use fake_useragent; re-read them
            # here so each worker thread pins a fixed value.
            home_headers = settings.HOME_HEADERS
            self.headers = settings.SEARCH_HEADERS
            self.session = requests.Session()
            self.session.get(settings.HOME_URL, headers=home_headers)
            return True
        except Exception as e:
            # tools.lock_
            logger.warning(f'<{self.name}>: \n- '
                           f'尝试访问 {settings.HOME_URL} 获取cookies失败,将重试'
                           f'\n- 错误信息: {e}')
            time.sleep(settings.REQUEST_GAP)
            return self.get_session(retry_times + 1)
def __init__(self, page_queue: Queue, data_queue: Queue):
    """Bind the page/data queues and announce thread creation."""
    super().__init__()
    self.data_queue = data_queue
    self.page_queue = page_queue
    logger.info(f'创建页面解析线程<{self.name}>')
def post(self, msgdic, encode, rand):
    """POST *msgdic* as JSON to the SMS gateway and log the decoded reply."""
    headers = {'content-type': f"application/json;charset={encode}"}
    url = (f'{self.adminInfo.sgwDic.get("sgwurl")}'
           f'?sdkappid={self.adminInfo.sgwDic.get("sgwaccount")}'
           f'&random={rand}')
    logger.info("url:" + url)
    payload = json.dumps(msgdic)
    res = requests.post(url, data=payload, headers=headers)
    logger.info(json.loads(res.text))
def load_from_data(self, data):
    """Build a networkx graph from node-link ``dict`` data.

    Returns the graph on success; returns ``None`` (after logging) when
    *data* is not a dict or when ``node_link_graph`` raises.
    """
    # isinstance instead of `type(data) != dict`: idiomatic, and accepts
    # dict subclasses (e.g. OrderedDict) that are perfectly loadable.
    if not isinstance(data, dict):
        logger.info('%s object can not be load as a graph, please load a dict'
                    % str(type(data)))
        return
    try:
        self.graph = nx.node_link_graph(data)
    except Exception as e:
        logger.error(e)
    else:
        return self.graph
def on_log_state_toggled(self, checked):
    """Qt slot: mirror the checkbox state into ``log_flag`` (1/0) and log it."""
    self.log_flag = 1 if checked else 0
    logger.info(self.log_flag)
def __init__(self, urls_queue: Queue, page_queue: Queue):
    """Store the url/page queues; session and headers are filled in later."""
    super().__init__()
    # Populated by get_session() before the first request is made.
    self.session = None
    self.headers = None
    self.urls_queue = urls_queue
    self.page_queue = page_queue
    logger.info(f'创建页面请求线程<{self.name}>')
def on_ST_save_clicked(self): """ Slot documentation goes here. """ # TODO: not implemented yet logger.info("test save!") self.xls_write() self.xls.save(self.xls_name) # pic_path = self.xls_name[:-4] + ".png" # self.Rt_MPL.save_curve(str(pic_path)) QtGui.QMessageBox.information(self,u'提示', (u'测试数据%s保存成功!' % self.xls_name))
def get_gpibport(self):
    """Scan VISA resources and return the last GPIB port found ('' if none)."""
    GPIB_port = ""
    # The Python-2-only debug statement `print rm_list` was removed: it was
    # a leftover that dumped the resource list to stdout and made the block
    # a syntax error under Python 3.
    rm_list = self.rm.list_resources()
    for io in rm_list:
        if 'GPIB' in io:
            GPIB_port = io
    if GPIB_port != "":
        logger.info("Find GPIB port:%s" % GPIB_port)
    else:
        logger.warning("CAN NOT FOUND GPIB PORT!")
    return GPIB_port
def on_ST_start_clicked(self): """ Slot documentation goes here. """ # TODO: not implemented yet # raise NotImplementedError logger.info("test start!") self.xls_name = str(self.save_path.text() + '/'+ self.sample_id.text() + '-'+ time.strftime("%m%d%H%M")+ ".xls") self.xtime = 0 self.meas_mode = 0 self.temp_index = 0 self.test_times = 1 self.temp_flag = 0 self.stable_time = 0 self.t1_set_flag = 0 self.t2_set_flag = 0 self.t3_set_flag = 0 self.t4_set_flag = 0 self.Sens = [] self.dataS = [] self.dataX = [] self.dataY = [] self.dataX.append([]) self.dataY.append([]) self.Rt_MPL.clear_curve() self.RT_MPL.clear_curve() self.ST_MPL.clear_curve() self.get_inst_conf() if self.get_gas_conf() == False: return if self.RT_test: if len(qmdz_const.temp_list) > 0: temp_start = ai518p_api.get_now_temp() + 0.5 temp_end = qmdz_const.temp_list[-1] ai518p_api.set_518p_constmode(temp_start,temp_end,self.up_slot,self.down_slot) else: if len(qmdz_const.temp_list) > 0: temp_now = ai518p_api.get_now_temp() + 0.5 self.exp_temp = qmdz_const.temp_list[0] ai518p_api.set_518p_constmode(temp_now,self.exp_temp,self.up_slot,self.down_slot) if self.vi_mode: Keithley2400.measure_voltage(self.src_curr, self.vlimit, self.meas_type) else: Keithley2400.measure_current(self.src_vol, self.ilimit, self.meas_type) self.ref_time = int(time.time()) self.timer.start(1000)
def dir_process(db, collection_number):
    """Load every JSON file under data<collection_number> into MongoDB.

    Each file is named ``<id>.json``; the numeric id becomes the document's
    ``_id``. Progress is logged every 100 files.
    """
    collection_name = 'Medicine_author_data_%d' % collection_number
    db_collection = db[collection_name]
    path = os.path.join('C:/', 'Users', 'xzk09', 'Desktop', 'Med_author_d&p',
                        'data', 'data%d' % collection_number)
    file_list = os.listdir(path)
    for n, f in enumerate(file_list):
        f_id = int(f.split('.')[0])
        file_path = os.path.join(path, f)
        # Explicit utf-8: JSON is UTF-8 by spec, while the platform default
        # on this Windows box (cp125x) would mis-decode non-ASCII content.
        with open(file_path, 'r', encoding='utf-8') as fr:
            temp = json.load(fr)
        temp['_id'] = f_id
        db_collection.insert_one(temp)
        if n % 100 == 0:
            logger.info('INDEX: %d \t FILE: %d/%d' % (collection_number, n, len(file_list)))
def connect_mongo():
    """Connect to MongoDB using the credentials in ``settings``.

    Returns the client on success, ``None`` on failure.
    """
    try:
        logger.info('正在连接数据库...')
        client = pymongo.MongoClient(
            host=settings.MONGODB_HOST,
            port=settings.MONGODB_PORT,
            username=settings.MONGODB_USER,
            password=settings.MONGODB_PSW,
            # Authenticate against the application DB, not 'admin'.
            authSource=settings.MONGODB_DB,
        )
        # NOTE(review): MongoClient connects lazily, so reaching this log
        # line does not prove the server is reachable — confirm if needed.
        logger.info('数据库连接成功!')
        return client
    except Exception as e:
        logger.error(f'数据库连接失败! \n- 错误信息: {e}')
        return None
def get_gas_conf(self):
    """Read gas-phase timing parameters from the ST config file.

    Returns False (after warning the user) when manual mode is selected but
    the t1 gas list has not been configured yet; True otherwise.
    """
    def _gas_int(key):
        # All GAS entries are stored as strings in the config file.
        return int(read_config(qmdz_const.ST_CONF_PATH, 'GAS', key))

    self.res_offset = _gas_int('res_offset')
    self.res_hold = _gas_int('res_hold')
    self.t1 = _gas_int('t1')
    self.t2 = _gas_int('t2')
    self.t3 = _gas_int('t3')
    self.t4 = _gas_int('t4')
    logger.info("GAS SET PARA:%d %d %d %d %d" % (qmdz_const.ST_GAS_MODE,self.t1,self.t2,self.t3,self.t4))
    # Manual (non-RT) mode needs the gas list configured before starting.
    if self.RT_test == 0 and not qmdz_const.t1_gas:
        QtGui.QMessageBox.warning(self,u'警告', u'请先设置气体参数!')
        return False
    return True
def run(self):
    """Producer loop: feed url descriptor dicts into ``urls_queue``.

    Stops early once the DB already holds the target number of positions,
    or if putting to the queue fails.
    """
    # tools.lock_
    logger.info(f'启动生产url线程<{self.name}>')
    for urls_info in self.urls_info:
        # Stop producing once the global target count is reached.
        if tools.check_db_position_count():
            break
        try:
            self.urls_queue.put(urls_info)
        except Exception as e:
            # tools.lock_
            logger.error(
                f'<{self.name}>:'
                f'\n- 生产url出错,将退出生产线程'
                f'\n- 错误信息: {e}')
            break
    # tools.lock_
    logger.info(f'生产url线程线程<{self.name}>结束')
def gen_urls(self):
    """Yield a url descriptor for every (page, city) combination.

    Each yielded dict carries the fully built search url, the city it
    targets, and a retry counter initialised to zero.
    """
    for page_no in range(1, settings.MAX_PAGE + 1):
        for city in settings.CITIES:
            query = '&'.join(
                f'{key}={value}'
                for key, value in (
                    ('city', city),
                    ('positionName', settings.POSITION),
                    ('pageNo', page_no),
                    ('pageSize', settings.PAGE_SIZE),
                ))
            full_url = settings.SEARCH_URL + '?' + query
            # tools.lock_
            logger.info(f'<{self.name}>:\n- 生成url: {full_url}')
            yield {'url': full_url, 'city': city, 'retry_times': 0}
def buildTXsmsMsg(self, param, tpl_id, phone, rand):
    """Build the Tencent SMS request body, including its sha256 signature."""
    now_time = int(time.time())
    data = "appkey=%s&random=%s&time=%s&mobile=%s" % (
        self.adminInfo.sgwDic.get("sgwpwd"), rand, now_time, phone)
    logger.info("data:" + data)
    payload = {
        "ext": "",
        "extend": "",
        "params": param,
        "sig": hashlib.sha256(data.encode('utf-8')).hexdigest(),
        "tel": {"mobile": phone, "nationcode": "86"},
        "time": now_time,
        "tpl_id": tpl_id,
    }
    logger.info(payload)
    return payload
def run():
    """Import every ``*.gz`` tarball in the co-author dump into MongoDB.

    Each tarball becomes a collection named after the file's stem.
    """
    my_client = pymongo.MongoClient("mongodb://localhost:27017/")
    db = my_client["Med_co-author_0_data"]
    dir_path = os.path.join('E:/', '数据备份', '学者知识图谱', '学者信息(data_paper_MS)',
                            '1_co-author', 'Medicine co-author', 'med_co-author_0_data')
    for entry in os.listdir(dir_path):
        # str.split never raises, so the old try/except around it was dead
        # code; renaming also stops shadowing the (py2) `file` builtin.
        if entry.split('.')[-1] != 'gz':
            continue
        tar_path = os.path.join(dir_path, entry)
        logger.info(entry)
        db_collection = db[entry.split('.')[0]]
        tar_process(tar_path, db_collection)
def tar_process(tar_path, db_collection):
    """Insert every JSON member of *tar_path* into *db_collection*.

    The member's file stem (second path component) becomes the Mongo
    ``_id``. Progress is logged every 100 members; insert errors are
    logged and skipped.
    """
    with tarfile.open(tar_path, "r") as tar:
        names = tar.getnames()
        total = len(names)
        for index, member_name in enumerate(names):
            handle = tar.extractfile(tar.getmember(member_name))
            if not handle:
                continue
            record = json.load(handle)
            if not isinstance(record, dict):
                continue
            stem = member_name.split('/')[1]
            record['_id'] = int(stem.split('.')[0])
            try:
                db_collection.insert_one(record)
            except Exception as e:
                logger.error(e)
            if index % 100 == 0:
                logger.info('COL: %s \t FILE: %d/%d' % (db_collection.name, index, total))
def run(self):
    """Requester loop: pull url descriptors, fetch pages, re-queue failures."""
    # tools.lock_
    logger.info(f'启动页面请求线程<{self.name}>')
    # Obtain a cookied session first; bail out of the loop entirely if that fails.
    if not self.get_session():
        # tools.lock_
        logger.error(f'<{self.name}>:\n- 获取会话失败,将退出线程')
    else:
        while True:
            try:
                urls_info = self.urls_queue.get()
                # None sentinel: all pages together may not reach the target count.
                if urls_info is None:
                    break
                retry_times = urls_info['retry_times']
                url = urls_info['url']
                city = urls_info['city']
                break_flag = self.request(url, city, retry_times)
                # 1: global target reached -> stop this thread.
                if break_flag == 1:
                    break
                # 2: this city's quota is filled -> skip this url.
                if break_flag == 2:
                    continue
            except Exception as e:
                # tools.lock_
                logger.warning(
                    f'<{self.name}>:'
                    f'\n- 请求页面 {url} 失败,后续将重试'
                    f'\n- 错误信息: {e}')
                # Re-queue with an incremented retry counter, up to the limit.
                # NOTE(review): if queue.get() itself raised, `url`/`city`
                # would be unbound here — confirm that cannot happen.
                if retry_times <= settings.RETRY_TIMES:
                    self.urls_queue.put({
                        'url': url,
                        'city': city,
                        'retry_times': retry_times + 1})
            finally:
                self.urls_queue.task_done()
        # Close the session once the work loop ends.
        self.session.close()
    # tools.lock_
    logger.info(f'页面请求线程<{self.name}>结束')
def cleanUp(self, locks, col): txnInfos = dict( ) #: : type txnInfos:dict[int, int], dict[startTS]=commitTS startKey = None endKey = None for l in locks: # : :type l: kvrpcpb.LockInfo if l.lock_version not in txnInfos: commitTS = self.GetTxnStatus(l.primary_lock, l.lock_version) txnInfos[l.lock_version] = commitTS key = l.key if startKey is None or key < startKey: startKey = key if endKey is None or key > endKey: endKey = key logger.debug('CleanUp Start, db=%s,startKey=%s,endKey=%s', col, startKey, endKey) db = self.store.GetMvccDB(col) err = db.BatchResolveLock(startKey, endKey + '\1', txnInfos) logger.info("CleanUp End, db=%s, txnInfos=%s, err=%s", col, txnInfos, err) return False if err else True
def parser(self, content):
    """Extract position records from one search-result response.

    Returns 1 when the global quota is already filled, 2 when this
    response's city quota is filled, 0 after queueing the page's records.
    """
    if tools.check_db_position_count():
        return 1
    json_page = content.json()
    city = json_page['content']['data']['custom']['city']
    if tools.check_db_position_count(city):
        return 2
    # tools.lock_
    logger.info(f'<{self.name}>: \n-'
                f' 解析页面 {unquote_plus(content.request.url)} ')
    infos = json_page['content']['data']['page']['result']
    for info_tag in infos:
        # Keep only the fields the DB writer needs.
        position_info = {
            'position_name': info_tag['positionName'],
            'city': info_tag['city'],
            'salary': info_tag['salary'],
        }
        self.data_queue.put(position_info)
    return 0
def sendMsgOutTime(self, phone, taskid):
    """Send an overdue-task SMS alert to every configured admin user.

    Skips users inside their do-not-disturb window or messaged within the
    last ``self.interval`` minutes. No-op when the SMS gateway is disabled.
    """
    if self.adminInfo.getSgw() == 0:
        return
    self.adminInfo.getAdminInfo()
    rand = (int)(random.random() * 1000000000)
    #self.adminInfo.printAdminInfo()
    for v in self.adminInfo.userList:
        # Send only outside the prohibited time slot, and when the last
        # message is older than self.interval minutes (0 = never sent).
        if not self.cpt.isTimeSlotProhibit(v.get("time")) and (v.get("lastSendTime") == 0 or \
                (time.time() - v.get("lastSendTime") >= self.interval*60)):
            v["lastSendTime"] = time.time()
            param = []
            param.append(v.get("name"))
            param.append(time.strftime("%H:%M:%S", time.localtime()))
            param.append(phone)
            param.append(taskid)
            # 182702 — SMS template id; TODO confirm against the gateway console.
            self.post(
                self.buildTXsmsMsg(param, 182702, v.get("phone"), rand),
                "utf-8", rand)
        else:
            logger.info("sendMsg check false: inslot:%d lastSendTime:%d now:%d phone:%s"%\
                (self.cpt.isTimeSlotProhibit(v.get("time")), v.get("lastSendTime"), \
                time.time(), v.get("phone")))
def request(self, url: str, city: str, retry_times: int):
    """Download one search page, retrying with fresh cookies on failure.

    Returns 1 when the global quota is filled, 2 when *city*'s quota is
    filled, 0 otherwise.
    """
    if tools.check_db_position_count():
        return 1
    if tools.check_db_position_count(city):
        return 2
    if retry_times <= settings.RETRY_TIMES:
        # tools.lock_
        logger.info(f'<{self.name}>: \n- 开始下载 {url} ...')
        content = self.session.get(
            url=url,
            headers=self.headers,
            cookies=self.session.cookies,
            # proxies=settings.PROXY
        )
        # Throttle to keep the request rate polite.
        time.sleep(settings.REQUEST_GAP)
        if content.json()['state'] == 1:
            # tools.lock_
            logger.info(f'<{self.name}>: \n- 页面{url}下载完成!')
            self.page_queue.put(content)
        else:
            # tools.lock_
            logger.warning(
                f'<{self.name}>: '
                f'\n- 未获取到目标页面 {url},正在更换cookies重试')
            self.change_cookie()
            # Fix: re-queue the plain descriptor dict. The old code wrapped
            # it in a (0, dict) tuple, which the consuming run() loop cannot
            # index with urls_info['retry_times'].
            self.urls_queue.put({
                'url': unquote_plus(content.request.url),
                'city': city,
                'retry_times': retry_times + 1})
    else:
        # tools.lock_
        logger.warning(
            f'<{self.name}>:'
            f'\n- 重复请求 {url} 超过{settings.RETRY_TIMES}次,'
            f'放弃该页面')
    return 0
def run(self):
    """Parser loop: pull downloaded pages off the queue and parse them."""
    # tools.lock_
    logger.info(f'启动页面解析线程<{self.name}>')
    # Loop until the collected data reaches the target count (or sentinel).
    while True:
        try:
            page_info = self.page_queue.get()
            # None sentinel: all pages together may not reach the target.
            if page_info is None:
                break
            break_flag = self.parser(page_info)
            # 1: global collection complete -> exit this thread.
            if break_flag == 1:
                break
            # 2: current city's quota filled -> skip this page.
            if break_flag == 2:
                continue
        except Exception as e:
            # tools.lock_
            logger.error(f'<{self.name}>: \n- 解析页面出错: {e}')
        finally:
            self.page_queue.task_done()
    # tools.lock_
    logger.info(f'页面解析线程<{self.name}>结束')
def Commit(self, connID=0):
    '''
    execute a 2pc commit to kv-server. if 2pc failed, cleanup txn.
    @param connID: int
    @return: BaseError
             None : success
             ErrInvalidTxn: txn is invalid.
             ErrKeyExists: if mutation op is Insert and the key already exists.
             ErrRetry: suggests that client may restart the txn again, and this txn is abort.
    '''
    logger.debug('*** 2PC Start *** con=%d,startTS=%d', connID, self.startTS)
    err = self.twoPhaseCommit(connID)
    if err is not None:
        err1 = self.cleanup()
        # Fix: previously compared against the undefined name `nil` (a
        # Go-ism), which raised NameError whenever cleanup ran.
        if err1 is not None:
            logger.error("con:%d 2PC cleanup err: %s, tid: %d" % (connID, err1.ERROR(), self.startTS))
        else:
            logger.info("con:%d 2PC clean up done, tid: %d", connID, self.startTS)
    self.Close()
    logger.debug('*** 2PC End *** con=%d,startTS=%d,commitTS=%d,err=%s',
                 connID, self.startTS, self.commitTS, err)
    return err
def Rt_meas_manul(self):
    """Drive the four-phase manual gas-timing cycle (t1..t4).

    Called once per timer tick. Each phase opens its configured gas flow
    exactly once (latched by the t*_set_flag flags); after t4 elapses the
    flows stop, this cycle's sensitivity is recorded, and the cycle
    restarts from t1.
    """
    if(self.now_time <= self.ref_time + self.t1 and self.t1_set_flag==0):
        # Phase t1: early response.
        logger.info("******t1******")
        self.sys_state.setText(u"响应前期试验中。。。")
        self.open_flow(qmdz_const.t1_gas)
        self.t1_set_flag = 1
    elif(self.now_time > self.ref_time + self.t1 and self.now_time <= self.ref_time + self.t1 + self.t2 and self.t2_set_flag==0):
        # Phase t2: late response.
        logger.info("******t2******")
        self.sys_state.setText(u"响应后期试验中。。。")
        self.open_flow(qmdz_const.t2_gas)
        self.t2_set_flag = 1
    elif(self.now_time > self.ref_time + self.t1 + self.t2 and self.now_time <= self.ref_time + self.t1 + self.t2 + self.t3 and self.t3_set_flag == 0):
        # Phase t3: early recovery.
        logger.info("******t3******")
        self.sys_state.setText(u"恢复前期试验中。。。")
        self.open_flow(qmdz_const.t3_gas)
        self.t3_set_flag = 1
    elif(self.now_time > self.ref_time + self.t1 + self.t2 + self.t3 and self.now_time <= self.ref_time + self.t1 + self.t2 + self.t3 + self.t4 and self.t4_set_flag == 0):
        # Phase t4: late recovery.
        logger.info("******t4******")
        self.sys_state.setText(u"恢复后期试验中。。。")
        self.open_flow(qmdz_const.t4_gas)
        self.t4_set_flag = 1
    elif(self.now_time > self.ref_time + self.t1 + self.t2 + self.t3 + self.t4):
        # Cycle complete: shut off gas, rearm all phase latches, restart
        # timing from "now", and record this cycle's sensitivity.
        self.stop_all_flow()
        self.ref_time = int(time.time())
        self.t1_set_flag = 0
        self.t2_set_flag = 0
        self.t3_set_flag = 0
        self.t4_set_flag = 0
        self.test_times += 1
        # Sensitivity = final resistance / resistance at end of response
        # phase (sample index t1+t2).
        # NOTE(review): assumes dataY is sampled once per second so that
        # t1+t2 is a valid index — confirm with the timer setup.
        self.res_high = self.dataY[self.temp_index][-1]
        self.res_low = self.dataY[self.temp_index][self.t1 + self.t2]
        self.Sens.append(self.res_high/self.res_low)
        print "Senstive:",self.Sens
    self.get_resistance()
def get_inst_info(self):
    """Query the instrument's *IDN? identification string; cache and return it."""
    self.devinfo = self.inst.query("*IDN?")
    logger.info(self.devinfo)
    return self.devinfo
return line if task_type == 'single': file_name = config.get('single_file_conf','in_file') if file_name is '' : print 'please config in_file name!' logger.error('conf error: in_file name is null') sys.exit(1) in_file = data_dir+'/'+file_name print 'in_file path:'+in_file logger.info('in_file path:'+in_file) print func_read(in_file,separator) if task_type == 'multi': if file_pattern is '' : print 'please config file_pattern!' logger.error('conf error: pattern error') sys.exit(1) all_files = [i for i in os.listdir(data_dir)] file_pattern = '^'+file_pattern+'$' files = [i.string for i in [re.match(file_pattern,j) for j in all_files ] if i] #if i ? files.sort() cnt = 0 for fl in files: cnt += cnt
from mylog import logger

# Smoke-test the shared logger at three severity levels.
for emit, message in (
        (logger.warning, 'I warn you.'),
        (logger.info, 'Please be informed that this is just a test.'),
        (logger.error, 'Found error on your smile.'),
):
    emit(message)
def on_log_state_toggled(self, checked): """ Slot documentation goes here. """ # TODO: not implemented yet logger.info("log!")
def on_f3_open_toggled(self, checked):
    """Qt slot: log the f3 valve checkbox state."""
    logger.info("f3 open" if checked else "f3 close")
def on_f2_open_toggled(self, checked):
    """Qt slot: log the f2 valve checkbox state."""
    logger.info("f2 open" if checked else "f2 close")
def on_f1_open_toggled(self, checked):
    """Qt slot: log the f1 valve checkbox state."""
    logger.info("f1 open" if checked else "f1 close")