def voiceRecord():
    rate = 44100
    duration = 5
    targetAudio = recorder.rec(int(duration * rate), samplerate=rate, channels=2)
    recorder.wait()
    write(audioName, rate, targetAudio)
    encodedAudioString = encodeBase64(audioName)
    # audioAnalyse()
    log = "[ Date: " + getDateAndTime("date") \
          + ", Time: " + getDateAndTime("time") \
          + ", ID: " + stringGenerator(9) \
          + ", EncodedAudioString: " + asString(encodedAudioString, "double") \
          + " ]"
    logTitle = "audio_analyse_" + getDateAndTime("date") + "_" + getDateAndTime("time") + ".log"
    return createLogFile("audio_analyse_logs", logTitle, log)
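# voiceRecord() above relies on helpers defined elsewhere (recorder/write, audioName, encodeBase64,
# asString, createLogFile). As a rough, hypothetical sketch of what encodeBase64(audioName) is
# assumed to do -- read the recorded WAV file and return its base64 text -- it could look like the
# following; the project's real helper may differ.
import base64

def encodeBase64_sketch(fileName):
    # read the raw bytes of the recorded file and base64-encode them as an ASCII string
    with open(fileName, "rb") as audioFile:
        return base64.b64encode(audioFile.read()).decode("ascii")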
def Logging():
    log = "[ Date: " + getDateAndTime("date") \
          + ", Time: " + getDateAndTime("time") \
          + ", IPAddress: " + getHostIPaddress() \
          + ", ID: " + stringGenerator(9) + " ]"
    return storeData("record.log", log)
def PostWord():
    datas = request.json
    data = datas[0]
    # get word
    targetWord = str(data["word"])
    # get voice
    voice = data["currentVoice"]
    if voice == "0":
        DefaultTTS(data)
    else:
        TTS(targetWord)
    space = " "
    # save log
    log = "[\n" + space + "Date: " + asString(getDateAndTime("date"), "double") \
          + ", \n" + space + "Time: " + asString(getDateAndTime("time"), "double") \
          + ", \n" + space + "IPAddress: " + getHostIPaddress() \
          + ", \n" + space + "ID: " + asString(stringGenerator(9), "double") \
          + ", \n" + space + "ServerHost: " + asString(request.host, "double") \
          + ", \n" + space + "RequestedAPI: " + asString(request.path, "double") \
          + ", \n" + space + "RequestMethod: " + asString(request.method, "double") \
          + ", \n" + space + "Origin: " + asString(request.origin, "double") \
          + ", \n" + space + "TargetMessage: " + asString(targetWord, "double") \
          + ", \n" + space + "Voice: " + asString(voice, "double") \
          + "\n]"
    resp = jsonify(success=True)
    StoreHistory(targetWord)
    return str(resp.status_code) + ": " + storeData("api_request.log", log)
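# The log strings above wrap several values with asString(value, "double"). Judging from how
# StoreHistory() below quotes values by hand ('"' + word + '"'), asString presumably just wraps
# the value in the requested quote character. A minimal, assumed sketch (not the project's actual
# helper) could be:
def asString_sketch(value, quote="double"):
    # wrap the value in double or single quotes so it can be embedded in a log/JSON-like string
    mark = '"' if quote == "double" else "'"
    return mark + str(value) + mark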
def OnFinishRegister(self):
    self.new_register.Enable(True)
    # self.finish_register.Enable(False)
    self.cap.release()
    self.bmp.SetBitmap(wx.Bitmap(self.pic_index))
    # folder that holds this person's captured face images
    dir = PATH_FACE + '{}_{}_{}_{}'.format(str(self.id), self.unit, self.name, self.gender)
    if self.flag_registed:
        # the face is already registered: delete the captured images and their folder
        for file in os.listdir(dir):
            os.remove(dir + '/' + file)
            print('已删除已录入人脸的图片', dir + '/' + file)
        os.rmdir(dir)
        print('已删除已录入人脸的姓名文件夹', dir)
        self.initData()
        return
    if self.pic_num > 0:
        # face_descriptor = prepare_one_facebank(conf,
        #                                        learner.model,
        #                                        mtcnn,
        #                                        PATH_FACE + '{}_{}_{}_{}'.format(str(self.id), self.unit, self.name, self.gender))
        # insert to database
        self.insertRow([self.id, self.unit, self.name, self.gender], 1)
        self.infoText.AppendText('[{}]数据已保存:{}\r\n'.format(
            getDateAndTime(hms=1), self.name))
        self.OnUpdateRegister()
    else:
        # no image was captured: remove the empty folder
        os.rmdir(dir)
        print('已删除空文件夹', dir)
    self.initData()
def OnUpdateRegister(self):
    self.initDatabase(update=1)
    self.targets, self.names = prepare_facebank(conf, learner.model, mtcnn, True)
    for i in range(len(self.targets)):
        id_, unit, name, gender = self.names[i + 1].split('_')
        self.insertRow([id_, unit, name, gender], 1)
    print('人脸数据更新成功')
    self.infoText.AppendText('[%s]' % getDateAndTime(hms=1) + '人脸数据库已更新\r\n')
    self.loadDataBase(1)
def initInfoText(self):
    # without these two lines, setting the infoText background colour fails, oddly
    resultText = wx.StaticText(parent=self, pos=(10, 20), size=(90, 60))
    resultText.SetBackgroundColour('red')
    self.info = '\r\n' + '[%s]' % getDateAndTime(hms=1) + '初始化成功\r\n'
    # the style flags add a horizontal scroll bar
    self.infoText = wx.TextCtrl(parent=self,
                                size=(320, 500),
                                style=(wx.TE_MULTILINE | wx.HSCROLL | wx.TE_READONLY))
    # foreground colour, i.e. the font colour
    self.infoText.SetForegroundColour('ORANGE')
    self.infoText.SetLabel(self.info)
    # API: https://www.cnblogs.com/wangjian8888/p/6028777.html
    # there is no such overloaded constructor ('par is not a key word'), so use the Set* methods instead
    font = wx.Font()
    font.SetPointSize(12)
    font.SetWeight(wx.BOLD)
    font.SetUnderlined(True)
    self.infoText.SetFont(font)
    self.infoText.SetBackgroundColour('TURQUOISE')
def StoreHistory(word):
    now = datetime.datetime.now()
    # zero-pad hour and minute to two digits
    hr = str(now.hour).zfill(2)
    min = str(now.minute).zfill(2)
    historyData = '{' \
                  + '"message": ' + '"' + word + '"' \
                  + ', "date": ' + asString(getDateAndTime("date"), "double") \
                  + ', "time": ' + '"' + hr + ":" + min + '"' \
                  + '},'
    with open("history.log", "a") as sh:
        sh.write(historyData)
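# StoreHistory() appends comma-terminated JSON-ish objects to history.log, so the file itself is
# not valid JSON. A hypothetical reader (not part of the original code) could recover the entries
# by wrapping the content in brackets and dropping the trailing comma:
import json

def loadHistory_sketch(path="history.log"):
    with open(path) as fh:
        raw = fh.read().rstrip().rstrip(",")
    # "[{...},{...}]" parses as a JSON array of history entries
    return json.loads("[" + raw + "]") if raw else []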
def main(arguments):
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-l', '--launch_directory', action=readable_dir, default="")
    parser.add_argument('-f', '--ftp',
                        action=readable_dir,
                        default="/ebi/ftp/pub/databases/metabolights/compounds/",
                        help="FTP directory")
    args = parser.parse_args(arguments)
    global destinationDirectory
    global ftp
    workingDirectory = args.launch_directory
    ftp = args.ftp
    if workingDirectory == "":
        workingDirectory = os.getcwd()

    # log file configuration
    st = utils.getDateAndTime()
    randomInt = str(randint(1, 1000))
    logDirectory = workingDirectory + "/logs/exporter_" + st
    if not os.path.exists(logDirectory):
        os.makedirs(logDirectory)
    logging.basicConfig(filename=logDirectory + "/log_" + randomInt + ".log",
                        level=logging.DEBUG)
    utils.init(logging)
    logging.info("-----------------------------------------------")
    logging.info('# Run started -' + utils.getDateAndTime())

    metabolightsFlagsJSONFile = ftp + "ml_flags.json"
    with open(metabolightsFlagsJSONFile) as flags_file:
        metabolightsFlagsData = json.load(flags_file)

    query = ""
    for metabolite in metabolightsFlagsData:
        # each flag is serialised as a 0/1 literal for the SQL update
        has_species = int(str(metabolightsFlagsData[metabolite]["flags"]["hasSpecies"]).lower() == "true")
        has_pathways = int(str(metabolightsFlagsData[metabolite]["flags"]["hasPathways"]).lower() == "true")
        has_reactions = int(str(metabolightsFlagsData[metabolite]["flags"]["hasReactions"]).lower() == "true")
        has_nmr = int(str(metabolightsFlagsData[metabolite]["flags"]["hasNMR"]).lower() == "true")
        has_ms = int(str(metabolightsFlagsData[metabolite]["flags"]["hasMS"]).lower() == "true")
        has_literature = int(str(metabolightsFlagsData[metabolite]["flags"]["hasLiterature"]).lower() == "true")
        query += "update mmimtbldev.isatab.ref_metabolite set has_species = " + str(has_species) \
                 + ", has_pathways = " + str(has_pathways) \
                 + ", has_reactions = " + str(has_reactions) \
                 + ", has_nmr = " + str(has_nmr) \
                 + ", has_ms = " + str(has_ms) \
                 + ", has_literature = " + str(has_literature) \
                 + " where acc = '" + metabolite.strip() + "';" + "\n"

    with open(workingDirectory + "/query.txt", "w") as file:
        file.write(query)
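# The exporter above turns each JSON flag into the 0/1 literal used in the SQL update through
# int(str(value).lower() == "true"). A quick standalone check of that conversion (plain Python,
# no project code involved):
for flag_value in (True, False, "true", "True", "false", None):
    print(flag_value, "->", int(str(flag_value).lower() == "true"))
# prints 1 for True/"true"/"True" and 0 for everything else, which matches how the query is built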
def OnChangeTimeClicked(self, event):
    # cur_hour = datetime.datetime.now().hour
    # print(cur_hour)
    # if cur_hour >= 8 or cur_hour < 6:
    #     wx.MessageBox(message='''您错过了今天的签到时间,请明天再来\n
    #                             每天的签到时间是:6:00~7:59''', caption='警告')
    #     return
    # pass
    pw = wx.GetPasswordFromUser('请输入管理密码',
                                caption="修改",
                                default_value="",
                                parent=self.bmp)
    if pw == '':
        print('取消修改')
    elif pw != PASSWORD:
        wx.MessageBox(message='密码错误,退出修改', caption='警告')
    else:
        # enter the new time: empty input cancels the change, a badly formatted input prompts again
        flag = 1
        time_type = ['签到时间', '签退时间']
        index = -1
        while flag and index == -1:
            # choose which time (check-in / check-out) to change
            choice_time_type = wx.SingleChoiceDialog(
                parent=self.bmp,
                message='请选择您要修改的时间',
                caption='修改',
                choices=time_type,
            )
            if choice_time_type.ShowModal() == wx.ID_CANCEL:
                print('取消修改')
                flag = 0
                break
            else:
                index = choice_time_type.GetSelection()
                # break
        # enter the new time
        # TODO: confirm whether today's check-in records should also be updated
        while flag and index != -1:
            change_time = wx.GetTextFromUser(
                message='请输入修改的时间(格式HH:MM:SS)',
                caption='修改{}'.format(time_type[index]),
                default_value=self.puncard_time[index],
                parent=self.bmp)
            if change_time.strip() == '' or change_time == self.puncard_time[index]:
                self.infoText.AppendText('[{}]取消修改(上一次为:{})\r\n'.format(
                    getDateAndTime(hms=1), self.puncard_time[index]))
                print('取消修改')
                index = -1
                break
            else:
                try:
                    # parse both times and make sure check-in stays earlier than check-out
                    changed = datetime.datetime.strptime(change_time, '%H:%M:%S')
                    original = datetime.datetime.strptime(self.puncard_time[1 - index], '%H:%M:%S')
                    if (changed > original if index else changed < original):
                        self.puncard_time[index] = change_time
                        np.save(conf.data_path / 'time.npy', self.puncard_time)
                        self.infoText.AppendText('[{}]{}已修改:{}\r\n'.format(
                            getDateAndTime(hms=1), time_type[index], self.puncard_time[index]))
                        print('修改成功%s' % self.puncard_time[index])
                        index = -1
                        break
                    else:
                        print('修改错误:签到时间需早于签退时间')
                        wx.MessageBox(message='签到时间需早于签退时间,请重新输入', caption='警告')
                except ValueError:
                    print('格式错误(格式为HH:MM:SS)')
                    wx.MessageBox(message='格式错误(格式为HH:MM:SS)', caption='警告')
def punchcard_cap(self, event):
    self.cap = cv2.VideoCapture(0)
    # check whether the capture device initialised successfully
    flag_print_info = [1, 1]
    flag_repeat = 0
    while self.cap.isOpened() and not self.start_punchcard.IsEnabled():
        flag, im_rd = self.cap.read()
        # detect faces
        try:
            image = Image.fromarray(im_rd[..., ::-1])  # bgr to rgb
            bboxes, faces = mtcnn.align_multi(image, 2, conf.min_face_size)
            bboxes = bboxes[:, :-1]  # shape: [10, 4], only keep the 10 highest-possibility faces
            bboxes = bboxes.astype(int)
            bboxes = bboxes + [-1, -1, 1, 1]  # personal choice
            results, score = learner.infer(conf, faces, self.targets, True)
            # print(results[0])
        except:
            continue
        # cv2.putText(im_rd, getDateAndTime(), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2,
        #             cv2.LINE_AA)
        # img_height, img_width = im_rd.shape[:2]
        # image_ = cv2.cvtColor(im_rd, cv2.COLOR_BGR2RGB)
        # pic = wx.Bitmap.FromBuffer(img_width, img_height, image_)
        # self.bmp.SetBitmap(pic)
        # print('请对准摄像头')
        #
        # take the face with the largest area
        biggest_face = bboxes[0]
        if results[0] == -1:
            cv2.rectangle(im_rd, (biggest_face[0], biggest_face[1]),
                          (biggest_face[2], biggest_face[3]), (0, 0, 255), 2)
            cv2.putText(im_rd, 'Unknown', (biggest_face[0], biggest_face[1]),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(im_rd, getDateAndTime(), (10, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
            img_height, img_width = im_rd.shape[:2]
            image_ = cv2.cvtColor(im_rd, cv2.COLOR_BGR2RGB)
            pic = wx.Bitmap.FromBuffer(img_width, img_height, image_)
            # show the frame on the panel
            self.bmp.SetBitmap(pic)
        else:
            for j, logcat_id in enumerate(self.logcat_id):
                # same name and same date counts as a check-out
                # print(logcat_id == self.knew_id[results[0]])
                # print(getDateAndTime(ymd=True) == self.logcat_datetime_in[j][0:self.logcat_datetime_in[j].index(' ')])
                same_id = logcat_id == self.knew_id[results[0]]
                same_day = getDateAndTime(ymd=1) == self.logcat_datetime_in[j][
                    0:self.logcat_datetime_in[j].index(' ')]
                if same_id and same_day:
                    delta_seconds = (datetime.datetime.now()
                                     - datetime.datetime.strptime(
                                         self.logcat_datetime_in[j], '%Y-%m-%d %H:%M:%S')).seconds
                    if delta_seconds > 3 * 3:
                        # only allow check-out after a short interval
                        after_check_out_time = int(getDateAndTime(hms=1).replace(':', '')) >= int(
                            self.puncard_time[1].replace(':', ''))
                        self.updateRow([
                            getDateAndTime(),
                            '否' if after_check_out_time else '是',
                            self.logcat_datetime_in[j],
                            self.knew_name[results[0]]
                        ], 1)
                        if flag_print_info[1]:
                            self.infoText.AppendText('[{}]已签退:{}\r\n'.format(
                                getDateAndTime(hms=1), self.knew_name[results[0]]))
                            flag_print_info[1] = 0
                    else:
                        if flag_print_info[0]:
                            self.infoText.AppendText('[{}]重复签到:{}\r\n'.format(
                                getDateAndTime(hms=1), self.knew_name[results[0]]))
                            flag_print_info[0] = 0
                        flag_repeat = 1
                    # color = (0, 255, 0) if self.logcat_late[j] == '否' else (0, 0, 255)
            if not flag_repeat:
                # check whether the current time is still before the check-in deadline
                before_check_in_time = int(getDateAndTime(hms=1).replace(':', '')) <= int(
                    self.puncard_time[0].replace(':', ''))
                self.infoText.AppendText('[{}]{}: {}\r\n'.format(
                    getDateAndTime(hms=1),
                    '已签到' if before_check_in_time else '迟到了',
                    self.knew_name[results[0]]))
                self.insertRow([
                    self.knew_id[results[0]], self.knew_unit[results[0]],
                    self.knew_name[results[0]],
                    getDateAndTime(), '否' if before_check_in_time else '是', '-', '-'
                ], 2)
                # color = (0, 255, 0) if condition else (0, 0, 255)
                # flag_print_info = 0
            # rectangle(): draw the bounding box
            # putText(): image, text, position, font, scale, colour, thickness
            cv2.rectangle(im_rd, (biggest_face[0], biggest_face[1]),
                          (biggest_face[2], biggest_face[3]), (0, 0, 255), 2)
            cv2.putText(im_rd, 'No. %d' % self.knew_id[results[0]],
                        (biggest_face[0], biggest_face[1]),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(im_rd, getDateAndTime(), (10, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
            img_height, img_width = im_rd.shape[:2]
            image_ = cv2.cvtColor(im_rd, cv2.COLOR_BGR2RGB)
            pic = wx.Bitmap.FromBuffer(img_width, img_height, image_)
            # show the frame on the panel
            self.bmp.SetBitmap(pic)
    self.loadDataBase(2)
    self.cap.release()
    self.bmp.SetBitmap(wx.Bitmap(self.pic_index))
    _thread.exit()
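# punchcard_cap() compares clock times by stripping the colons from "HH:MM:SS" strings and
# comparing the resulting integers. A small standalone illustration of that trick (assuming
# getDateAndTime(hms=1) returns a zero-padded "HH:MM:SS" string, as the code above implies):
def is_before_sketch(now_hms, deadline_hms):
    # "07:59:30" -> 75930, "08:00:00" -> 80000; zero-padded HH:MM:SS keeps the ordering intact
    return int(now_hms.replace(':', '')) <= int(deadline_hms.replace(':', ''))

print(is_before_sketch("07:59:30", "08:00:00"))  # True  -> counted as an on-time check-in
print(is_before_sketch("08:00:01", "08:00:00"))  # False -> counted as late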
def register_cap(self, event):
    self.cap = cv2.VideoCapture(0)
    # check whether the capture device initialised successfully
    while self.cap.isOpened():
        # cap.read() returns two values:
        # a bool indicating whether the frame was read successfully / whether the video has ended,
        # and the frame itself as a 3-D image matrix
        flag, im_rd = self.cap.read()
        # delay 1 ms per frame; a delay of 0 reads a static frame
        cv2.waitKey(1)
        if flag:
            # detect a face
            detect = 0
            try:
                image = Image.fromarray(im_rd[..., ::-1])  # bgr to rgb
                bboxes, faces = mtcnn.align_multi(image, 1, conf.min_face_size)
                bboxes = bboxes[:, :-1]  # shape: [10, 4], only keep the 10 highest-possibility faces
                bboxes = bboxes.astype(int)
                bboxes = bboxes + [-1, -1, 1, 1]  # personal choice
                #
                # take the face with the largest area
                biggest_face = bboxes[0]
                # draw the bounding box
                cv2.rectangle(im_rd, (biggest_face[0], biggest_face[1]),
                              (biggest_face[2], biggest_face[3]), (0, 0, 255), 3)
                cv2.putText(im_rd, getDateAndTime(), (10, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2, cv2.LINE_AA)
                img_height, img_width = im_rd.shape[:2]
                image_ = cv2.cvtColor(im_rd, cv2.COLOR_BGR2RGB)
                pic = wx.Bitmap.FromBuffer(img_width, img_height, image_)
                # show the frame on the panel
                self.bmp.SetBitmap(pic)
                detect = 1
            except:
                print('检测错误')
            # recognition
            if detect:
                # try:
                results, score = learner.infer(conf, faces, self.targets, True)
                print('识别结果:{}({})'.format(self.knew_name, results[0]))
                if results[0] != -1:
                    self.infoText.AppendText('[{}]人脸已注册:{}\r\n'.format(
                        getDateAndTime(hms=1), self.knew_name[results[0]]))
                    self.flag_registed = True
                    self.OnFinishRegister()
                    _thread.exit()
                cv2.imencode('.jpg', np.array(faces[0])[..., ::-1])[1].tofile(
                    PATH_FACE + '{}_{}_{}_{}'.format(str(self.id), self.unit, self.name, self.gender)
                    + '/{}.jpg'.format(self.pic_num))
                # cv2.imwrite(PATH_FACE + self.name + '/{}.jpg'.format(self.pic_num), np.array(faces[0])[..., ::-1])
                self.pic_num += 1
                print('写入本地:', PATH_FACE + self.name + '/{}.jpg'.format(self.pic_num))
                self.infoText.AppendText('[{}]数据已采集:{}{}张图像\r\n'.format(
                    getDateAndTime(hms=1), self.name, self.pic_num))
                cv2.waitKey(5)
                # except:
                #     print('请对准摄像头')
        if self.new_register.IsEnabled():
            _thread.exit()
        if self.pic_num == 5:
            self.OnFinishRegister()
            _thread.exit()
def main(arguments):
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-l', '--launch_directory', action=readable_dir, default="")
    parser.add_argument('-w', '--destination', action=readable_dir,
                        help="Output directory",
                        default="/nfs/www-prod/web_hx2/cm/metabolights/prod/reference/")
    parser.add_argument('-c', '--compound', help="- MetaboLights Compound Identifier", default="all")
    parser.add_argument('-s', '--stars', help="- Compounds import level", default="0")
    parser.add_argument('-f', '--ftp', action=readable_dir,
                        default="/ebi/ftp/pub/databases/metabolights/compounds/",
                        help="FTP directory")
    parser.add_argument('-p', '--process', action=readable_dir, default="false",
                        help="Use parallel threads")
    args = parser.parse_args(arguments)
    global workingDirectory
    global destinationDirectory
    global requestedCompound
    global ftp
    global importLevel
    batch = 10
    workingDirectory = args.launch_directory
    destinationDirectory = args.destination
    requestedCompound = args.compound.replace('"', '')
    ftp = args.ftp
    importLevel = args.stars
    parallelProcessing = args.process
    if workingDirectory == "":
        workingDirectory = os.getcwd()

    # log file configuration
    st = utils.getDateAndTime()
    randomInt = str(randint(1, 1000))
    logDirectory = workingDirectory + "/logs/" + st
    if not os.path.exists(logDirectory):
        os.makedirs(logDirectory)
    logging.basicConfig(filename=logDirectory + "/log_" + randomInt + ".log", level=logging.DEBUG)
    utils.init(logging)
    logging.info("-----------------------------------------------")
    logging.info('# Run started -' + utils.getDateAndTime())

    logging.info('Reading MetaboLights Study - Compound Mapping file')
    global mlSCMappingFile
    mlSCMappingFile = ftp + "mapping.json"
    logging.info('Reading Reactome data')
    global reactomeJSONFile
    reactomeJSONFile = ftp + "reactome.json"
    with open(reactomeJSONFile) as reactome_file:
        global reactomeData
        reactomeData = json.load(reactome_file)
    with open(mlSCMappingFile) as mapping_file:
        global mlMapping
        mlMapping = json.load(mapping_file)

    if requestedCompound != "all":
        # metabolightsFlagsJSONFile = ftp + "ml_flags.json"
        # with open(metabolightsFlagsJSONFile) as flags_file:
        #     metabolightsFlagsData = json.load(flags_file)
        # for metabolite in metabolightsFlagsData:
        #     print metabolite
        #     logging.info("-----------------------------------------------")
        #     logging.info("Fetching compound: " + metabolite)
        #     if metabolightsFlagsData[metabolite]['rating'] <= int(importLevel):
        utils.fetchCompound(requestedCompound.strip(), workingDirectory,
                            destinationDirectory, reactomeData, mlMapping)
    else:
        requestCompoundsList = utils.fetchMetaboLightsCompoundsList()
        for compound in requestCompoundsList:
            logging.info("-----------------------------------------------")
            logging.info("Fetching compound: " + compound)
            try:
                utils.fetchCompound(compound.strip(), workingDirectory,
                                    destinationDirectory, reactomeData, mlMapping)
            except:
                logging.info("Error: " + compound)
                pass
def main(arguments):
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-l', '--launch_directory', action=readable_dir, default="")
    parser.add_argument('-w', '--destination', action=readable_dir,
                        help="Output directory",
                        default="/nfs/www-prod/web_hx2/cm/metabolights/prod/reference/")
    parser.add_argument('-f', '--ftp', action=readable_dir,
                        default="/ebi/ftp/pub/databases/metabolights/compounds/",
                        help="FTP directory")
    args = parser.parse_args(arguments)
    global workingDirectory
    global destinationDirectory
    global ftp
    global globalReport
    workingDirectory = args.launch_directory
    destinationDirectory = args.destination
    ftp = args.ftp
    if workingDirectory == "":
        workingDirectory = os.getcwd()

    # log file configuration
    st = utils.getDateAndTime()
    randomInt = str(randint(1, 1000))
    logDirectory = workingDirectory + "/logs/exporter_" + st
    if not os.path.exists(logDirectory):
        os.makedirs(logDirectory)
    logging.basicConfig(filename=logDirectory + "/log_" + randomInt + ".log",
                        level=logging.DEBUG)
    utils.init(logging)
    logging.info("-----------------------------------------------")
    logging.info('# Run started -' + utils.getDateAndTime())

    fieldnames = [
        "MetabolightsId", "InChIKey", "HasPathways", "KEGGPathways",
        "WikiPathways", "ReactomePathways"
    ]
    rows = []
    requestCompoundsList = utils.fetchMetaboLightsCompoundsList()
    i = 0
    for compound in requestCompoundsList:
        i = i + 1
        print(str(i) + "-" + str(len(requestCompoundsList)))
        compoundURL = "http://www.ebi.ac.uk/metabolights/webservice/beta/compound/" + compound
        response = urllib2.urlopen(compoundURL)
        compoundData = json.loads(response.read())
        row = {}
        chebiString = compound.replace("MTBLC", "CHEBI:")
        row["MetabolightsId"] = compound
        row["InChIKey"] = compoundData['inchiKey']
        row["HasPathways"] = int(compoundData["flags"]["hasPathways"] == 'true')
        row["KEGGPathways"] = ""
        row["WikiPathways"] = ""
        row["ReactomePathways"] = ""
        try:
            if compoundData["flags"]["hasPathways"]:
                for resource in compoundData["pathways"]:
                    if resource == "WikiPathways":
                        for species in compoundData["pathways"][resource]:
                            for pathway in compoundData["pathways"][resource][species]:
                                if row[resource] != "":
                                    row[resource] = row[resource] + "," + pathway["id"]
                                else:
                                    row[resource] = pathway["id"]
                    elif resource == "ReactomePathways":
                        for species in compoundData["pathways"][resource]:
                            for pathway in compoundData["pathways"][resource][species]:
                                if row[resource] != "":
                                    row[resource] = row[resource] + "," + pathway["reactomeId"]
                                else:
                                    row[resource] = pathway["reactomeId"]
                    else:
                        for pathway in compoundData["pathways"]["KEGGPathways"]:
                            if row[resource] != "":
                                row[resource] = row[resource] + "," + pathway["KO_PATHWAY"]
                            else:
                                row[resource] = pathway["KO_PATHWAY"]
        except:
            logging.error(compound)
        rows.append(row)

    with open('data/metabolites_inchikey.tsv', 'w') as csvfile:
        writer = csv.DictWriter(csvfile, delimiter='\t', fieldnames=fieldnames)
        writer.writeheader()
        for row in rows:
            writer.writerow(row)
def main(arguments):
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-l', '--launch_directory', action=readable_dir, default="")
    parser.add_argument('-w', '--destination', action=readable_dir,
                        help="Output directory",
                        default="/nfs/www-prod/web_hx2/cm/metabolights/prod/reference/")
    parser.add_argument('-f', '--ftp', action=readable_dir,
                        default="/ebi/ftp/pub/databases/metabolights/compounds/",
                        help="FTP directory")
    args = parser.parse_args(arguments)
    global workingDirectory
    global destinationDirectory
    global ftp
    global globalReport
    workingDirectory = args.launch_directory
    destinationDirectory = args.destination
    ftp = args.ftp
    if workingDirectory == "":
        workingDirectory = os.getcwd()

    # log file configuration
    st = utils.getDateAndTime()
    randomInt = str(randint(1, 1000))
    logDirectory = workingDirectory + "/logs/exporter_" + st
    if not os.path.exists(logDirectory):
        os.makedirs(logDirectory)
    logging.basicConfig(filename=logDirectory + "/log_" + randomInt + ".log",
                        level=logging.DEBUG)
    utils.init(logging)
    logging.info("-----------------------------------------------")
    logging.info('# Run started -' + utils.getDateAndTime())

    requestCompoundsList = utils.fetchMetaboLightsCompoundsList()
    for compound in requestCompoundsList:
        logging.info("-----------------------------------------------")
        try:
            logging.info("Exporting: " + compound)
            # default report; checkIfFileEmptyOrNotExist zeroes the rating when the data file is missing or empty
            tempCompoundReport = {
                "rating": 5,
                "flags": {
                    "hasInchiKey": False,
                    "hasLiterature": False,
                    "hasReactions": False,
                    "hasNMR": False,
                    "hasSpecies": False,
                    "hasMS": False,
                    "hasPathways": False,
                    "has3d": False
                }
            }
            filePath = destinationDirectory + compound + "/" + compound + "_data.json"
            tempCompoundReport = checkIfFileEmptyOrNotExist(filePath, tempCompoundReport)
            if tempCompoundReport["rating"] != 0:
                tempCompoundReport = setFlags(filePath, tempCompoundReport)
            else:
                logging.warning("WARNING: Missing data - " + compound)
            globalReport[compound] = tempCompoundReport
        except:
            logging.warning("Error: " + compound)
            pass
    utils.writeDataToFile(ftp + "ml_flags.json", globalReport)
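# For reference, each entry that the loop above stores in globalReport (and that the flag-import
# script further up reads back from ml_flags.json) follows the shape initialised in
# tempCompoundReport. An illustrative, made-up entry (the accession and flag values are invented
# purely for the example):
example_entry = {
    "MTBLC15355": {
        "rating": 5,
        "flags": {
            "hasInchiKey": True, "hasLiterature": False, "hasReactions": False,
            "hasNMR": True, "hasSpecies": True, "hasMS": False,
            "hasPathways": False, "has3d": False
        }
    }
}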