def __init__(self, isOpen=True, saveInfo=True, saveWarning=True, saveError=True, logDirName="log"):
    # Logger initializer: remembers which severities to persist and builds
    # per-severity log file paths under <cwd>/<logDirName>/.
    self.saveInfo = saveInfo
    self.saveError = saveError
    self.saveWarning = saveWarning
    # Get the current "run" directory (translated from the original Chinese comment).
    logDir = os.getcwd() + "/" + logDirName + "/"
    if not os.path.exists(logDir):
        os.mkdir(logDir)
    timeFormat = "%Y_%m_%d_%H_%M_%S"
    timeStr = time.strftime(timeFormat, time.localtime())
    # Base the log file names on the caller's source file name (no extension).
    fileNameStr = self.__get_parent_filename()
    fileNameStr = os.path.basename(fileNameStr)
    fileNameStr, ext = os.path.splitext(fileNameStr)
    # Timestamped per-run files plus stable "now" names per severity.
    self.infoFilename = logDir + "INFO_" + fileNameStr + "_" + timeStr + ".log"
    self.infoNowName = logDir + "INFO_" + fileNameStr + ".log"
    self.errorFilename = logDir + "ERROR_" + fileNameStr + "_" + timeStr + ".log"
    self.errorNowName = logDir + "ERROR_" + fileNameStr + ".log"
    self.warningFilename = logDir + "WARNING_" + fileNameStr + "_" + timeStr + ".log"
    self.warningNowName = logDir + "WARNING_" + fileNameStr + ".log"
    self.fileHelper = FileHelper()
    self.isOpen = isOpen
    return
def run_process(self):
    """Remove temporary working files when cleanup is enabled in config.

    Honours the ``cleanuplogs`` flag: when it is off, ``.log`` files are
    excluded from removal. Any failure is reported through ``errorlog``
    rather than raised.
    """
    if not self._config.cleanup:
        return
    prefixes = ["_ags", "Context_BP_"]
    try:
        if self._config.cleanuplogs:
            FileHelper().remove_all_temp_files(prefix_list=prefixes)
        else:
            FileHelper().remove_all_temp_files(prefix_list=prefixes, exclude=".log")
        FileHelper().remove_all_temp_files(file_ext="sde")
    except Exception as err:
        self.errorlog(str(err))
def __init__(self):
    # Open the active workbook from settings; create it first if it is missing.
    self.settings = Settings()
    try:
        # NOTE(review): '\\' path joining is Windows-only; os.path.join would be portable.
        self._excel = openpyxl.load_workbook(self.settings.my_path + '\\' + self.settings.active_xl)
        self._sheet = self._excel.active
    except IOError:
        # Need to create an excel sheet.
        FileHelper.create_current(self.settings)
        self._excel = openpyxl.load_workbook(self.settings.my_path + '\\' + self.settings.active_xl)
        self._sheet = self._excel.active
    self.gen_dates()
    # Warn the user when a brand-new sheet was just generated.
    if self._new_sheet():
        Popups.full_warning()
def publish(self):
    # Full publish pipeline: validate the CSV data first, then publish the
    # FWP, GEODB and RAMM targets in order. On any failure: log it, email
    # the log file, record the failure on the monitor, and abort.
    with ProcessMonitor() as pm:
        try:
            ReportMissingAssets().execute_validation()
        except Exception as e:
            # NOTE(review): e.message is Python-2 only.
            self.errorlog(e.message)
            SendEmail("ProcessFailure").send_email_with_files([self._logger.log_file], "CSV Validation Failure", "The CSV to GIS validation failed. Please review attached log file. The FWP has not been published")
            pm.log_failure("CSV Validation Failure", e.message)
            return
        try:
            for i in [PublishFactoryEnum.FWP, PublishFactoryEnum.GEODB, PublishFactoryEnum.RAMM]:
                PublishHelperFactory.factory(i).publish()
        except Exception as e:
            # 'i' still holds the publish stage that raised.
            self.errorlog(e.message)
            SendEmail("ProcessFailure").send_email_with_files([self._logger.log_file], "%s Publishing Failure" % i, "The %s publish failed. Please review attached log file." % i)
            pm.log_failure("%s Publishing Failure" % i, e.message)
            return
        pm.log_success("Full Publish Process")
        # once a month clean up the temp directory
        if datetime.datetime.now().day == 1:
            self.log("Time of the month to remove all _ags files.")
            FileHelper().remove_all_temp_files()
        self.log("{0} {1} {0}".format("=" * 15, "Process has completed", "=" * 15))
def divideIntoChunks(self,filenameGenerator):
    # input : a file with one input/line
    # output : chunks with one word/lines (chunks == give size)
    # Reads every file in self.files line-by-line, accumulating lines and
    # flushing to a numbered chunk file when the accumulator grows too big.
    stackOfValues = []
    counter = 0;
    for InputfileName in self.files:
        filePointer = open(InputfileName, "r")
        for line in filePointer:
            stackOfValues.append(line)
            # 536870912 bytes = 512 MiB (the original comment said "64Mo").
            # NOTE(review): sys.getsizeof measures only the list object, not
            # the strings it holds, and stackOfValues is never cleared after
            # a flush here — verify whether writeListInFile empties it.
            if(sys.getsizeof(stackOfValues)>536870912):
                FileHelper.writeListInFile(filenameGenerator(counter),stackOfValues)
                counter = counter + 1
        filePointer.close()
    if len(stackOfValues) > 0: # Just in case ...
        FileHelper.writeListInFile(filenameGenerator(counter),stackOfValues)
        counter = counter + 1;
    # Record how many chunk files were produced.
    self.nChunks = counter
    return
def publish(self):
    #get the latest RAMM data - add to file geodatabase and run model
    # TODO put the shelf into common config
    # Reads the file-geodatabase path out of a shelve store, downloads the
    # hosted RAMM FWP layer into it, builds a proximity layer, zips and
    # publishes it to ArcGIS Online, shares it, then deletes the shelf.
    s = None
    try:
        s = shelve.open(
            "%s%s%s" % (TempFileName.get_temp_directory(), "/", self._config.shelf))
    except:
        # NOTE(review): bare except hides the original cause; consider
        # catching the specific shelve/dbm error and chaining it.
        raise Exception("Cannot publish RAMM FWP due to missing data")
    file_geodb = None
    if 'fwpgeodb' in s:
        file_geodb = s['fwpgeodb']
    s.close()
    if file_geodb:
        with FileGeodatabaseHelper() as file_geodb_helper:
            file_geodb_helper.clean_up = False  # debug
            file_geodb_helper.current_file_geodb = file_geodb
            with ArcGISOnlineHelper() as agol_helper:
                agol_helper.download_hosted_feature_service_layer(
                    self._config.rammfwpdata, file_geodb, self._config.output_fc)
                with ProximityLayerCreator(file_geodb) as proximity_creator:
                    new_file_geodb = proximity_creator.create_proximity_layer(
                        self._config.output_fc)
                    zip_file = TempFileName.generate_temporary_file_name(
                        ".zip")
                    ZipArchive.ZipArchive(zip_file, new_file_geodb)
                    publishparams = json.dumps(self._config.publishparams)
                    # NOTE(review): "fileGeodatabase " carries a trailing
                    # space — confirm the AGOL API tolerates it.
                    item_id, service_id = agol_helper.publish_file_item(
                        "WDC_RAMM_FWP", "File Geodatabase", "fileGeodatabase ",
                        zip_file, "filegeodbkeywords",
                        publish_params={"publishParameters": publishparams})
                    # dont pass through a dictonary - pass through a str for the vals - use eval maybe to ensure quotes!
                    # depending on the group type we may have to do one or the other - so I'm just going to do both
                    if service_id:
                        agol_helper.share_items("%s,%s" % (item_id, service_id), groups=self._config.fwpgroup)
                    else:
                        agol_helper.share(item_id, groups=self._config.fwpgroup)
        # Publishing succeeded: the shelf has served its purpose.
        FileHelper().delete_file(
            "%s%s%s" % (TempFileName.get_temp_directory(), "/", self._config.shelf))
    else:
        raise Exception(
            "Cannot publish RAMM FWP due to missing data - missing key in shelf"
        )
def upload():
    # Bottle upload endpoint: validate the file extension, save the upload
    # to the helper-managed path, run the Excel processing over it, and
    # return a success message containing the saved path.
    try:
        file_handler = bottle.request.files.get('file')
        name, ext = os.path.splitext(file_handler.filename)
        if ext not in C.File_EXT:
            return C.ERROR_EXT
        fh = FileHelper()
        __filepath = fh.get_file_path(file_handler.filename)
        file_handler.save(__filepath)
        e_rw = ExcelRW()
        e_rw.get_result(__filepath)
        return C.SUCCESS_MESSAGE_BOTTLE.format(__filepath)
    except Exception as e:
        # NOTE(review): swallowing the exception makes the handler return
        # None to the client; consider returning an error response.
        print(str(e))
def _authenticate(self, email, password):
    # Try a cached auth blob first; fall back to a fresh login via the API.
    loaded_auth = FileHelper.load_file('auth')
    if loaded_auth is not None:
        # The signature is deliberately NOT verified — we only inspect the
        # claims (subject + expiry) to decide whether the cache is usable.
        token = jwt.decode(loaded_auth["access_token"], algorithms="HS512", options={"verify_signature": False})
        if email == token["sub"] and token["exp"] > time.time():
            print("Using saved access-token")
            return loaded_auth
        else:
            print("Saved token is invalid or expired", token)
    content = {
        'email': email,
        'password': password
    }
    # Fresh login; the response is cached for subsequent calls.
    auth = requests.post(
        BringAPI.BASE_URL + 'bringauth', content).json()
    FileHelper.save_file("auth", auth)
    return auth
def store_subreddit(self, subreddit):
    """
    Writes segue to next post to file.
    :type subreddit: str
    """
    # A segue only makes sense when more than one subreddit is covered.
    if len(self.subreddits) <= 1:
        return
    self.bot_log('Storing subreddit.')
    self.store('\nIn r/{}...'.format(subreddit))
    self.store(FileHelper.get_dictation_pause(1000), False)
def store_post(self, post):
    """
    Writes subreddit post data to file.
    :type post: Submission
    """
    self.bot_log('Storing post.')
    body = '\n***{}***\n {}\n\n'.format(post.title, post.selftext)
    attribution = '[[{} - {}]]'.format(post.author, post.score)
    self.store(body)
    self.store(attribution, False)
    self.store(FileHelper.get_dictation_pause(1000), False)
def store_comment(self, comment):
    """
    Writes comment data from subreddit post to file.
    :type comment: Comment
    """
    self.bot_log('Storing comment.')
    text = '\n* {}\n'.format(comment.body)
    meta = '[[{} - {}]]'.format(comment.author, comment.score)
    self.store(text)
    self.store(meta, False)
    self.store(FileHelper.get_dictation_pause(), False)
def globalGrouper(listSaveState,listGrouperNum,listLastCallNum,listOfDirectory,globalGrouperDirectory):
    # We read all grouper's last savestates, and put all nodefilename into a global nodefile.
    # (Python-2 code: uses dict.iteritems.)
    # Returns a dict mapping each key to its global node file name.
    globalDictListNodeFile = dict()
    globalDictFromKeyToGlobalNodeFile = dict()
    # NOTE(review): pop() consumes the four input lists from the tail and
    # empties them as a side effect — callers must not reuse them.
    for i in range(0,len(listSaveState)):
        saveStateName = listSaveState.pop()
        grouperNum = listGrouperNum.pop()
        grouperLastCallNum = listLastCallNum.pop()
        directory = listOfDirectory.pop()
        globalDictListNodeFile = Grouper.mergeDictionaries(globalDictListNodeFile,Grouper.readSaveStateIntoDictionnary(saveStateName.rstrip('\n'),grouperNum,grouperLastCallNum,directory))
    counter = 0;
    for key, listOfValues in globalDictListNodeFile.iteritems():
        counter = counter + 1;
        globalNodeFileName = Grouper.genericGlobalNodeFileName(counter,globalGrouperDirectory)
        globalDictFromKeyToGlobalNodeFile[key] = globalNodeFileName
        open(globalNodeFileName, 'w+').close(); # create empty file
        # Concatenate every per-grouper node file into the global one.
        for nodeFileName in listOfValues:
            FileHelper.appendFileInFile(nodeFileName,globalNodeFileName)
    return globalDictFromKeyToGlobalNodeFile
def store_intro(self):
    """
    Writes intro for the file about the subreddits.
    """
    self.bot_log('Storing intro.')
    intro = ''.join([
        "Here's what's happening with ",
        self.get_human_readable_subreddit_list(),
        "\nLet's get started.",
    ])
    self.store(intro)
    self.store(FileHelper.get_dictation_pause(1000), False)
def _send_email_helper(settings, excel):
    """
    Connects to SMTP email server and sends an email using the users credentials.
    :return:
    """
    try:
        server = smtplib.SMTP(settings.smtp_server, str(settings.smtp_port))
        server.starttls()  # upgrade to TLS before authenticating
        server.login(settings.user, settings.password)
        # Send to both the sender themselves and the configured destination.
        dest = [str(settings.user), str(settings.dest_addr)]
        server.sendmail(settings.user, dest, Email._set_email(settings, excel).as_string())
        server.quit()
        # Post-send bookkeeping: archive the workbook, reset the sheet, notify.
        FileHelper.archive(settings, excel)
        excel.clear_sheet()
        excel.gen_dates()
        Popups.email_sent()
    except Exception:
        # NOTE(review): this also swallows failures from the archive/reset
        # steps, not just SMTP, which makes the message misleading.
        print("Send email failed.")
class BingHelper:
    """Thin wrapper around the Bing Web Search (Cognitive Services) REST API."""
    #self.credentials = json.loads("./keys/keys.json")

    def __init__(self):
        # API credentials are read from a local JSON key file.
        self.fileHelper = FileHelper()
        self.credentials = self.fileHelper.loadJSON("./keys/keys.json")

    def search(self, search_string):
        # Issue a GET against the search endpoint; returns the raw
        # requests.Response (caller is responsible for status/JSON handling).
        api_key = self.credentials["bingSearchAPIKey"]
        endpoint = "https://knightsmartsearch.cognitiveservices.azure.com/bing/v7.0/search"
        params = {"q": search_string}
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "Ocp-Apim-Subscription-Key": api_key
        }
        return requests.get(endpoint, headers=headers, params=params)
def SaveBoard(self):
    # Persist the coordinates of all live cells plus the board's bounding
    # extents through FileHelper.SaveFile.
    cellActiveList = []
    minSizeX = 0
    minSizeY = 0
    # NOTE(review): despite the names, minSizeX/minSizeY track the MAXIMUM
    # live column/row index; the "== 0 or" clause just seeds the first hit.
    for i in range(self.Layout.VerticalLength):
        for j in range(self.Layout.HorizontalLength):
            if (self.Board[i][j] == 1):
                cellActiveList.append((j, i))  # stored as (x, y)
                if (minSizeX == 0 or minSizeX < j):
                    minSizeX = j
                if (minSizeY == 0 or minSizeY < i):
                    minSizeY = i
    path = FileHelper.SaveFile({
        "minSizeX": minSizeX,
        "minSizeY": minSizeY,
        "Active": cellActiveList
    })
def HandleInput(self, events, board):
    """Process one frame of pygame input.

    Handles quit/mouse events and keyboard shortcuts (SPACE = single step,
    R = reset, A = toggle auto-run, S = save board, O = load figure); in
    AUTO mode, advances the simulation on the configured timer.
    """
    # Fix: the counter previously did `self._debounce -= self._debounce`,
    # which zeroed it immediately and made the 10-frame SPACE debounce a
    # no-op. Decrement by one per frame instead.
    if (self._debounce > 0):
        self._debounce -= 1
    for event in events:
        if event.type == pygame.QUIT:
            self.GameState.Done = True
        if event.type == pygame.MOUSEBUTTONUP:
            self.SelectBox(pygame.mouse.get_pos(), board)
    keys = pygame.key.get_pressed()
    if keys[pygame.K_SPACE]:
        # Debounced: at most one manual step every 10 frames.
        if (self._debounce == 0):
            self._debounce = 10
            self.GameState.RunMode = RunMode.MANUAL
            self.GameState.AllowNextStep = True
    if keys[pygame.K_r]:
        self.GameState.RunMode = RunMode.MANUAL
        self.GameState.AllowNextStep = False
        self.GameState.Reset = True
    if keys[pygame.K_a]:
        # NOTE(review): this toggle is not debounced, so holding 'a'
        # flips the mode every frame — confirm whether that is intended.
        if self.GameState.RunMode == RunMode.MANUAL:
            self.GameState.RunMode = RunMode.AUTO
        else:
            self.GameState.RunMode = RunMode.MANUAL
    if keys[pygame.K_s]:
        board.SaveBoard()
    if keys[pygame.K_o]:
        path = FileHelper.OpenFile()
        board.LoadFigure(path)
    if self.GameState.RunMode == RunMode.AUTO:
        # Advance automatically once AutoRunTime seconds have elapsed.
        seconds = (pygame.time.get_ticks() - self.StartTicks) / 1000
        if seconds >= board.Layout.AutoRunTime:
            self.StartTicks = pygame.time.get_ticks()
            self.GameState.AllowNextStep = True
        else:
            self.GameState.AllowNextStep = False
def writeDictio(self,dicFromKeyToListOfValues):
    # At first, we check if all keys are ascociate to a node file.
    # If yes, we copy the old node file
    # Id not, we create an empty node file
    # Then, we write all values in a chunk.
    # At the end, we append the chunkname into the correct node file
    # Finally, all nodes ascociated with keys which are not in the actual chunk (but already seen) are copied.
    # (Python-2 code: iteritems/has_key.)
    for key, listOfValues in dicFromKeyToListOfValues.iteritems():
        # We write the list of value into a chunk (for a given key)
        self.nDifferentChunks = self.nDifferentChunks+1;
        chunkFilename = self.chunkNameGenerator(self.nDifferentChunks)
        FileHelper.writeListInFile(chunkFilename,listOfValues)
        # We check if the NodeFile has been created before.
        # If yes : we copy it into a new file
        # If not : we create a new empty file
        if self.oldDictFromKeyToNodeFile.has_key(key):
            # the node file exist
            nodeFileIdx = self.oldDictFromKeyToNodeFile[key];
            self.dictFromKeyToNodeFile[key] = nodeFileIdx;
            oldNodeFileName = self.oldNodeFileNameGenerator(nodeFileIdx);
            nodeFileName = self.nodeFileNameGenerator(nodeFileIdx);
            FileHelper.copyFile(oldNodeFileName,nodeFileName)
        else:
            self.nDifferentKeys = self.nDifferentKeys+1;
            nodeFileIdx = self.nDifferentKeys
            self.dictFromKeyToNodeFile[key] = nodeFileIdx;
            nodeFileName = self.nodeFileNameGenerator(nodeFileIdx);
            open(nodeFileName, 'w+').close(); # create empty file
        # We append the new chunk name (without the directory) into the node file
        with open(nodeFileName, 'a') as nodePointer:
            nodePointer.write(chunkFilename + "\n")
    # Carry forward node files for previously-seen keys absent from this chunk.
    for key, listOfValues in self.oldDictFromKeyToNodeFile.iteritems():
        if not dicFromKeyToListOfValues.has_key(key):
            nodeFileIdx = self.oldDictFromKeyToNodeFile[key];
            self.dictFromKeyToNodeFile[key] = nodeFileIdx;
            oldNodeFileName = self.oldNodeFileNameGenerator(nodeFileIdx);
            nodeFileName = self.nodeFileNameGenerator(nodeFileIdx);
            FileHelper.copyFile(oldNodeFileName,nodeFileName)
    return;
def saveUrls(self, dir, reportUrl, detailUrl):
    """Persist the report/detail URL pair to <dir>/urls.txt, one per line.

    Does nothing if the file already exists, so the first recorded pair wins.
    """
    filename = os.path.join(dir, "urls.txt")
    if (not os.path.isfile(filename)):
        # Fix: the separator was '\r\b' (carriage return + BACKSPACE),
        # clearly a typo for the CRLF line break '\r\n'.
        FileHelper.saveToFile(filename, '%s\r\n%s' % (reportUrl, detailUrl))
# -*- encoding:UTF-8 -*- import sys, os import ErrorCode from Log import Log from FileHelper import FileHelper if __name__ == "__main__": log = Log() log.info("start test FileHelper") testContent = "hello abc" fileHelper = FileHelper() rtn = fileHelper.write("hello.txt", testContent) log.check_rtn(rtn) rtn, content = fileHelper.read("hello.txt") log.check_rtn(rtn) log.assert_eq(testContent, content) testContentNew = "def" rtn = fileHelper.append("hello.txt", testContentNew) log.check_rtn(rtn) rtn, content = fileHelper.read("hello.txt") log.check_rtn(rtn) log.assert_eq(testContent + testContentNew, content) rtn = fileHelper.append("hello2.txt", testContentNew) log.check_rtn(rtn) rtn, content = fileHelper.read("hello2.txt") log.check_rtn(rtn) log.assert_eq(testContentNew, content) os.remove("hello.txt")
# saveProfile: scrapes one charity profile. Downloads the report page,
# pulls the header and article body, tries a battery of regexes to extract
# the reporter's name (stripping it from the text on success), strips the
# foundation-number boilerplate line, and writes the cleaned report text
# to a file. Then downloads the donation-detail page and dumps its table
# rows (no, date, amount, name) to CSV, tracking the most recent donation
# date and donor count on overallEntry.
# NOTE(review): Python-2 code (ur"" literals, basestring). Kept verbatim
# below: the collapsed formatting makes a safe restructure unverifiable.
def saveProfile(self, profileName, dir, reportUrl, detailUrl, overallEntry): assetdir = os.path.join(dir, "files" + os.sep) if (not os.path.isdir(dir)): os.makedirs(dir) if (not os.path.isdir(assetdir)): os.makedirs(assetdir) status, page = self.downloader.download_page(reportUrl, dir, assetdir, '%s_origin.htm' % (profileName), css=False, javascript=False, image=False) #self.downloader.clear_cache() if (page != None): reporter = None reportContent = "" #headers items = page.xpath(u"//*[@id='maincontent']//article/header/hgroup/*") for item in items: header = StrHelper.trim(item.text_content()) if (header != None and header.startswith(profileName)): header = StrHelper.trim(header[len(profileName):]) reportContent += header + os.linesep break reportContent += os.linesep #content reg = re.compile(ur"^基金會編號.*$", re.MULTILINE) allsymbols = ur" ,、。.?!~$%@&#*‧;︰…‥﹐﹒˙·﹔﹕‘’“”〝〞‵′〃├─┼┴┬┤┌┐╞═╪╡│▕└┘╭╮╰╯╔╦╗╠═╬╣╓╥╖╒╤╕║╚╩╝╟╫╢╙╨╜╞╪╡╘╧╛﹣﹦≡|∣∥–︱—︳╴¯ ̄﹉﹊﹍﹎﹋﹌﹏︴﹨∕╲╱\/↑↓←→↖↗↙↘〔〕【】﹝﹞〈〉﹙﹚《》(){}﹛﹜『』「」<>≦≧﹤﹥︵︶︷︸︹︺︻︼︽︾︿﹀∩∪﹁﹂﹃﹄" regReporters = [ #re.compile(ur"[。:」\s]+(.{3,4})口述.?記者(.{3,4})(?:採訪整理)?$", re.MULTILINE), re.compile(allsymbols + ur"[\s]+(.{2,4})[口筆]述\s?.?\s?記者(.{2,4})(?:採訪整理)?$", re.MULTILINE), #[\u4e00-\u9fa5] 英文字符之外的字符,包括中文漢字和全角標點 re.compile(ur"報導.攝影.(.{2,4})記者$", re.MULTILINE), re.compile(ur"報導.攝影.(.{2,4})$", re.MULTILINE), re.compile(ur"攝影.報導.(.{2,4})$", re.MULTILINE), re.compile(ur"攝影.(.{2,4})$", re.MULTILINE), re.compile(ur"報導.(.{2,4})$", re.MULTILINE), re.compile(ur"報導.(.{2,4})$", re.MULTILINE), re.compile(ur"記者(.{2,4})採訪整理$", re.MULTILINE), re.compile(ur"^【(.{2,4})╱.{2,4}報導】", re.MULTILINE), ] #preserve <br> tags as \n brs = page.xpath(u"//div[@class='articulum']//br") if (len(brs) == 0): brs = page.xpath(u"//div[@class='articulum trans']//br") for br in brs: br.tail = "\n" + br.tail if br.tail else "\n" items = page.xpath(u"//div[@class='articulum']/*") if (len(items) == 0): items = page.xpath(u"//div[@class='articulum trans']/*") for item in items: tag = item.tag.lower() 
id = self.get_attrib(item, "id", None) # if (tag == "figure"): continue # if (tag == "iframe"): break if (id == "bcontent" or id == "bhead" or id == "introid"): text = StrHelper.trim(item.text_content()) if (text == None or text == ""): continue if (id != "bhead"): for regReporter in regReporters: list = regReporter.findall(text) if (len(list) == 1): if (not isinstance(list[0], basestring)): reporter = "/".join(list[0]) else: reporter = list[0] text = StrHelper.trim(regReporter.sub('', text)) break if (reporter): overallEntry.reporter = reporter else: self.logger.warn("error: parsing reporter: %s" % reportUrl) text = StrHelper.trim(reg.sub('', text)) reportContent += text + os.linesep + os.linesep FileHelper.saveToFile(os.path.join(dir, reportFileName), reportContent) status, page = self.downloader.download_page(detailUrl, dir, assetdir, detailSrcFileName, css=False, javascript=False, image=False) if (page != None): items = page.xpath(u"//div[@id='charitysidebox3'][1]/div[@id='inquiry3']/table//tr") maxDate = None if (len(items) > 0): file = None try: file = open(os.path.join(dir, detailFileName), "wb") csvwriter = csv.writer(file) for index, item in enumerate(items): if (index > 1): cols = item.xpath(u".//td") if (len(cols) == 4): no = StrHelper.trim(cols[0].text) name = StrHelper.trim(cols[1].text) amount = StrHelper.trim(cols[2].text) dateStr = StrHelper.trim(cols[3].text) try: date = datetime.datetime.strptime(dateStr, "%Y/%m/%d") if (maxDate == None or date > maxDate): maxDate = date except Exception as ex: self.logger.warn("error date format:%s in %s" % (dateStr, detailUrl)) csvwriter.writerow([no, dateStr, amount, name]) overallEntry.enddate = maxDate.strftime("%Y/%m/%d") if maxDate != None else "" overallEntry.doners = len(items) - 2 except Exception as ex: self.logger.exception(LogHelper.getExceptionMsg(ex, "error paring detail.html")) finally: if (file): file.close()
from MapperMatrixVector import MapperMatrixVector
from ReducerMatrixVector import ReducerMatrixVector
from MapReduce import MapReduce
from FileHelper import FileHelper

# Driver script: multiplies the matrix stored in dataFiles/A by the vector
# stored in dataFiles/b using the map-reduce framework, then writes the
# resulting dictionary to outputs/matrixVectorResults.txt.

# Create instances for mapper and reducer
# Note that the vector is stored in the instance
theReducerMatrixVector = ReducerMatrixVector();
theMapperMatrixVector = MapperMatrixVector('dataFiles/b');

# the file where the matrix is stored
matrixFile = ['dataFiles/A'];

# MapReduce
theMapReducerMatrixVector = MapReduce(theMapperMatrixVector,theReducerMatrixVector,matrixFile,0,1)
resultDict = theMapReducerMatrixVector.execute();

# Write output
outFileFirectory = 'outputs/'
outfileName = 'matrixVectorResults.txt';
FileHelper.writeDictionnary(outFileFirectory+outfileName,resultDict)
# urldownloader: small Python-2 download helper built on a shared
# requests.session. Provides attribute get/set helpers for lxml nodes,
# binary/text save-to-file helpers, and saveAssetFile, which downloads an
# asset URL once, caches it in dictAssetFiles (copying the cached file when
# a different target directory is requested), and writes an empty file on
# download failure. Kept verbatim below: Python-2 syntax
# (`except Exception, ex`) and collapsed formatting make a safe
# restructure unverifiable here.
class urldownloader: def __init__(self): #logging.basicConfig(level=logging.WARN) self.logger = logging.getLogger(LogHelper.LoggerName) self.dictPools = {} self.dictAssetFiles = {} self.assetFileIndex = 0 self.session = requests.session() # def get_http_pool(self, url): # return urllib3.connection_from_url(url) # def get_http_pool(self, url): # parser = urlparse.urlparse(url) # scheme = str(parser.scheme).lower() # key = scheme + "://" + parser.netloc # if(key in self.dictPools): # return self.dictPools[key] # # if (scheme == "http"): # http_pool = urllib3.HTTPConnectionPool(parser.netloc) # elif (scheme == "https"): # http_pool = urllib3.HTTPSConnectionPool(parser.netloc) # else: # http_pool = None # if(http_pool!=None): # self.dictPools[key]=http_pool # return http_pool def get_attrib(self, node, name, default=None): if (node == None): return default return node.attrib[name] if name in node.attrib else default def set_attrib(self, node, name, value): if (node == None): return node.attrib[name] = value def saveToFile(self, filename, data): file = None try: file = open(filename, mode='wb') file.write(data) except Exception as ex: self.logger.exception(LogHelper.getExceptionMsg(ex, "unable to save file: %s" % (filename))) finally: if (file): file.close() # def saveResponseFile(self, filename, response): # file = None # try: # file = open(filename, mode='w') # while True: # data = response.read(102400) # if data is None: # break # file.write(data) # except Exception as ex: # self.logger.error("unable to save file: %s\n\t%s" % (filename, str(ex))) # finally: # if (file): # file.close() def saveTextToFile(self, filename, data): file = None try: #file = codecs.open(filename, mode='w', encoding="utf-8") #file = codecs.open(filename, mode='wb') file = open(filename, mode='w') #file.write(u'\ufeff') #codecs.BOM_UTF8 file.write(data) except Exception as ex: self.logger.exception(LogHelper.getExceptionMsg(ex, "unable to save file: %s" % filename)) finally: if (file): 
file.close() # def getNextAssetFilename(self, ext=""): # self.assetFileIndex += 1 # return "file%d%s" % (self.assetFileIndex, ext) # #return os.path.join(self.assetDir, "file%d" % (self.assetFileIndex)) def saveAssetFile(self, url, assetDir): result=False response = None if (url in self.dictAssetFiles): assetEntry = self.dictAssetFiles[url] if(assetEntry.path!=assetDir): copyfile(os.path.join(assetEntry.path, assetEntry.filename), os.path.join(assetDir, assetEntry.filename)) return try: response = self.session.get(url) except Exception, ex: self.logger.warn("url download error: %s" % url) self.logger.warn("\t %s" % str(ex)) #parser = urlparse.urlparse(url) #name, ext = os.path.splitext(parser.path) #assetFilename = self.getNextAssetFilename(ext) assetFilename = FileHelper.getValidFilename(url) filename = os.path.join(assetDir, assetFilename) if (response != None and response.status_code == 200): #self.saveTextToFile(filename, data) self.saveToFile(filename, response.content) result=True else: self.saveTextToFile(filename, "") pass self.dictAssetFiles[url] = AssetEntry(assetDir, assetFilename) #self.dictAssetFiles[url] = "file:" + urllib.pathname2url(filename) return result
def __init__(self):
    # Load API credentials from the local JSON key file via FileHelper.
    self.fileHelper = FileHelper()
    self.credentials = self.fileHelper.loadJSON("./keys/keys.json")
from MapperCountingWords import MapperCountingWords
from ReducerCountingWords import ReducerCountingWords
from MapReduce import MapReduce
from FileHelper import FileHelper

# Driver script: word-count over text files via the map-reduce framework;
# the input is first normalized to one word per line, and the resulting
# counts dictionary is written to outputs/coutingWordsResults.txt.

# Create instances for mapper and reducer
theMapper = MapperCountingWords();
theReducer = ReducerCountingWords();

# parse the file : one word/line
inFiles = ['dataFiles/text']; # we can have more than one text file
inFileParsed = 'dataFiles/textParsed';
FileHelper.transformTextIntoListOfWords(inFiles,inFileParsed)

# MapReduce
theMapReducer = MapReduce(theMapper,theReducer,[inFileParsed],silent=-1,nThreads=5)
resultDict = theMapReducer.execute()

# Write output
outFileFirectory = 'outputs/'
outfileName = 'coutingWordsResults.txt';
FileHelper.writeDictionnary(outFileFirectory+outfileName,resultDict)
# __set_sheet_data: fills the 'Pools' worksheet of the output workbook.
# One row per entity code starting at row 7: column A = code, B = name,
# then the 86 General (D..I), 86 Passive (K..P) and 86 Other (R..W)
# E&P and tax pool figures, defaulting any missing dictionary key to 0,
# and finally saves the workbook. Errors are printed, not raised.
# Kept verbatim below: ~100 near-identical collapsed branches make a
# restructure impractical to verify here.
def __set_sheet_data(self, ent, g86, p86, o86, g87, p87, o87): try: fh = FileHelper() filepath = fh.get_file_path(C.OUTPUT_FILE) self.file = filepath wb = self.__load_workbook() sheet = self.__get_sheet(wb, 'Pools') i = 7 for el in ent.EntityCode: sheet['A' + str(i)].value = el if el in ent.Name.keys(): sheet['B' + str(i)].value = ent.Name[el] # 86 General if el in g86.dic_Post_86_General_EP_959c3.keys(): sheet['D' + str(i)].value = g86.dic_Post_86_General_EP_959c3[el] else: sheet['D' + str(i)].value = 0 if el in g86.dic_Post_86_General_EP_959c2.keys(): sheet['E' + str(i)].value = g86.dic_Post_86_General_EP_959c2[el] else: sheet['E' + str(i)].value = 0 if el in g86.dic_Post_86_General_EP_959c1.keys(): sheet['F' + str(i)].value = g86.dic_Post_86_General_EP_959c1[el] else: sheet['F' + str(i)].value = 0 if el in g86.dic_Post_86_General_TAX_959c3Tax.keys(): sheet['G' + str( i)].value = g86.dic_Post_86_General_TAX_959c3Tax[el] else: sheet['G' + str(i)].value = 0 if el in g86.dic_Post_86_General_TAX_959c2Tax.keys(): sheet['H' + str( i)].value = g86.dic_Post_86_General_TAX_959c2Tax[el] else: sheet['H' + str(i)].value = 0 if el in g86.dic_Post_86_General_TAX_959c1Tax.keys(): sheet['I' + str( i)].value = g86.dic_Post_86_General_TAX_959c1Tax[el] else: sheet['I' + str(i)].value = 0 # 86 Passive if el in p86.dic_Post_86_Passive_EP_959c3.keys(): sheet['K' + str(i)].value = p86.dic_Post_86_Passive_EP_959c3[el] else: sheet['K' + str(i)].value = 0 if el in p86.dic_Post_86_Passive_EP_959c2.keys(): sheet['L' + str(i)].value = p86.dic_Post_86_Passive_EP_959c2[el] else: sheet['L' + str(i)].value = 0 if el in p86.dic_Post_86_Passive_EP_959c1.keys(): sheet['M' + str(i)].value = p86.dic_Post_86_Passive_EP_959c1[el] else: sheet['M' + str(i)].value = 0 if el in p86.dic_Post_86_Passive_TAX_959c3Tax.keys(): sheet['N' + str( i)].value = p86.dic_Post_86_Passive_TAX_959c3Tax[el] else: sheet['N' + str(i)].value = 0 if el in p86.dic_Post_86_Passive_TAX_959c2Tax.keys(): sheet['O' + str( i)].value 
= p86.dic_Post_86_Passive_TAX_959c2Tax[el] else: sheet['O' + str(i)].value = 0 if el in p86.dic_Post_86_Passive_TAX_959c1Tax.keys(): sheet['P' + str( i)].value = p86.dic_Post_86_Passive_TAX_959c1Tax[el] else: sheet['P' + str(i)].value = 0 # 86 Other if el in o86.dic_Post_86_Other_EP_959c3.keys(): sheet['R' + str(i)].value = o86.dic_Post_86_Other_EP_959c3[el] else: sheet['R' + str(i)].value = 0 if el in o86.dic_Post_86_Other_EP_959c2.keys(): sheet['S' + str(i)].value = o86.dic_Post_86_Other_EP_959c2[el] else: sheet['S' + str(i)].value = 0 if el in o86.dic_Post_86_Other_EP_959c1.keys(): sheet['T' + str(i)].value = o86.dic_Post_86_Other_EP_959c1[el] else: sheet['T' + str(i)].value = 0 if el in o86.dic_Post_86_Other_TAX_959c3Tax.keys(): sheet[ 'U' + str(i)].value = o86.dic_Post_86_Other_TAX_959c3Tax[el] else: sheet['U' + str(i)].value = 0 if el in o86.dic_Post_86_Other_TAX_959c2Tax.keys(): sheet[ 'V' + str(i)].value = o86.dic_Post_86_Other_TAX_959c2Tax[el] else: sheet['V' + str(i)].value = 0 if el in o86.dic_Post_86_Other_TAX_959c1Tax.keys(): sheet[ 'W' + str(i)].value = o86.dic_Post_86_Other_TAX_959c1Tax[el] else: sheet['W' + str(i)].value = 0 i += 1 #TODO: Save data for the next tab wb.save(self.file) except Exception as e: print(str(e))
# Log: console + file logger (Python 2: print statements). info/warning/
# error print a "[LEVEL] <caller file> <caller line>: message" line and
# append it to a per-severity, timestamped log file; __del__ re-points
# stable symlinks (INFO_<name>.log etc.) at the newest run's files.
# check_rtn and assert_eq are small test-assertion helpers; the
# __get_parent_* methods walk sys._getframe to identify the caller.
# Kept verbatim below: Python-2 syntax and collapsed formatting make a
# safe reformat unverifiable here.
class Log: def __init__(self, isOpen=True, saveInfo=True, saveWarning=True, saveError=True, logDirName="log"): self.saveInfo = saveInfo self.saveError = saveError self.saveWarning = saveWarning # 获取当前"运行"目录 logDir = os.getcwd() + "/" + logDirName + "/" if not os.path.exists(logDir): os.mkdir(logDir) timeFormat = "%Y_%m_%d_%H_%M_%S" timeStr = time.strftime(timeFormat, time.localtime()) fileNameStr = self.__get_parent_filename() fileNameStr = os.path.basename(fileNameStr) fileNameStr, ext = os.path.splitext(fileNameStr) self.infoFilename = logDir + "INFO_" + fileNameStr + "_" + timeStr + ".log" self.infoNowName = logDir + "INFO_" + fileNameStr + ".log" self.errorFilename = logDir + "ERROR_" + fileNameStr + "_" + timeStr + ".log" self.errorNowName = logDir + "ERROR_" + fileNameStr + ".log" self.warningFilename = logDir + "WARNING_" + fileNameStr + "_" + timeStr + ".log" self.warningNowName = logDir + "WARNING_" + fileNameStr + ".log" self.fileHelper = FileHelper() self.isOpen = isOpen return def __del__(self): try: os.remove(self.infoNowName) os.remove(self.errorNowName) os.remove(self.warningNowName) except: self.info("no old log file to remove") os.symlink(self.infoFilename, self.infoNowName) os.symlink(self.errorFilename, self.errorNowName) os.symlink(self.warningFilename, self.warningNowName) return # @param maeeage string # @return None def info(self, message=""): if not self.isOpen: return outputInfo = "[INFO] " + self.__get_parent_filename( ) + " " + self.__get_parent_lineno() + ": " + message print outputInfo self.fileHelper.append(self.infoFilename, outputInfo + "\n", isAbs=True) return # @param message string # @return None def warning(self, message=""): if not self.isOpen: return outputInfo = "[WARNING] " + self.__get_parent_filename( ) + " " + self.__get_parent_lineno() + ": " + message print outputInfo self.fileHelper.append(self.warningFilename, outputInfo + "\n", isAbs=True) return # @param message string # @return None def error(self, message=""): if 
not self.isOpen: return outputInfo = "[ERROR] " + self.__get_parent_filename( ) + " " + self.__get_parent_lineno() + ": " + message print outputInfo self.fileHelper.append(self.errorFilename, outputInfo + "\n", isAbs=True) return # @param errorStatus ErrorCode def check_rtn(self, errorStatus, message=""): if errorStatus != ErrorCode.Status.SUCC: print "[ERROR RTN] " + self.__get_parent_filename( ) + " " + self.__get_parent_lineno( ) + ": error " + errorStatus + " " + message def assert_eq(self, input1, input2): if input1 != input2: print "[ASSERT EQ] " + self.__get_parent_filename( ) + " " + self.__get_parent_lineno() + ": [expect] " + str( input1) + ", [actual] " + str(input2) #def __get_parent_class_name(self): # return sys._getframe().f_back.f_code def __get_parent_function_name(self): return sys._getframe().f_back.f_back.f_code.co_name def __get_parent_filename(self): filename = sys._getframe().f_back.f_back.f_code.co_filename return os.path.expanduser(filename) def __get_parent_lineno(self): return str(sys._getframe().f_back.f_back.f_lineno)
def __init__(self, dnaFileName: str, bufferSize: int):
    """Validate that the DNA file exists, then record the file name and
    read-buffer size for later use."""
    # Fail fast on a bad path before any state is set.
    FileHelper.verifyFileExist(dnaFileName)
    self.__dnaFileName, self.__bufferSize = dnaFileName, bufferSize
def clean_up_temp_csvs(self):
    """Remove any temporary ``.csv`` files left behind by processing."""
    # for python 2.7 - use glob for 3
    self.log("Cleaning up temp csvs")
    helper = FileHelper()
    helper.remove_all_temp_files("csv")
def get_catalog_locale(self):
    """Return the de-DE catalog locale JSON, cached under "locale-catalog".

    Fix: the HTTP GET used to be evaluated eagerly
    (``requests.get(...).json`` was built before load_or_lamda ran), so the
    network was hit even when the cached copy was used. Deferring the call
    inside a lambda means the request only fires on a cache miss.
    """
    return FileHelper.load_or_lamda(
        "locale-catalog",
        lambda: requests.get('https://web.getbring.com/locale/catalog.de-DE.json').json())
def get_articles_locale(self):
    """Return the de-DE articles locale JSON, cached under "locale-articles".

    Fix: as with the catalog locale, the HTTP GET was evaluated eagerly;
    wrapping it in a lambda defers the request to a cache miss.
    """
    return FileHelper.load_or_lamda(
        "locale-articles",
        lambda: requests.get('https://web.getbring.com/locale/articles.de-DE.json').json())
# get_mean_bp + training driver (the __main__ script is truncated in this
# view, so the code is kept verbatim below). get_mean_bp folds a
# two-column blood-pressure matrix into a single mean-BP vector; the
# driver concatenates feature/target arrays from every CSV via FileHelper
# and sets up 10-fold cross-validation.
# NOTE(review): the function computes (2*mat[:,0] + mat[:,1]) / 3, but the
# original Chinese comment says the columns are [SBP, DBP] and the target
# is (2*DBP + SBP)/3 — if so, the weights are on the wrong column
# (standard MAP weights DBP by 2). Confirm the column order.
def get_mean_bp(sbp_and_dbp_mat): # 获取到mbp # 给两列分别为SBP和DBP的matrix添加第三列(2dbp+sbp)/3 mbp = (2 * sbp_and_dbp_mat[:, 0] + sbp_and_dbp_mat[:, 1]) / 3 return mbp # return np.concatenate((sbp_and_dbp_mat, mbp), axis=1) if __name__ == "__main__": # a = [(0.098, 'RBW10'), (0.095, 'RBW25'), (0.089, 'RBW33'), (0.087, 'kte_delta'), (0.076, 'RBW50'), (0.069, 'pwtt_mean'), (0.067, 'RBW66'), (0.064, 'kte_skew'), (0.058, 'RBW75'), (0.058, 'DBW50'), (0.057, 'SLP2'), (0.057, 'SLP1'), (0.056, 'h_miu'), (0.055, 'kte_iqr'), (0.055, 'PRT'), (0.055, 'DBW33'), (0.053, 'DBW25'), (0.051, 'DBW10'), (0.049, 'DBW66'), (0.042, 'KVAL'), (0.042, 'DBW75'), (0.034, 'hr_delta'), (0.028, 'hr_miu'), (0.028, 'PH'), (0.027, 'ppg_fed_ar_5'), (0.027, 'RBAr'), (0.027, 'AmBE'), (0.026, 'kte_miu'), (0.026, 'PWA'), (0.021, 'DfAmBE'), (0.017, 'hr_skew'), (0.014, 'hr_iqr'), (0.013, 'ppg_fed_ar_4'), (0.009, 'ppg_fed_ar_1'), (0.009, 'h_delta'), (0.008, 'ppg_fed_ar_2'), (0.008, 'loge_delta'), (0.006, 'loge_iqr'), (0.005, 'h_iqr'), (0.002, 'ppg_fed_ar_3'), (0.001, 'h_skew'), (-0.0, 'loge_ar_5'), (-0.0, 'loge_ar_4'), (-0.0, 'loge_ar_3'), (-0.0, 'loge_ar_2'), (0.0, 'loge_ar_1')] # b = [(0.086, 'kte_delta'), (0.064, 'hr_miu'), (0.055, 'kte_iqr'), (0.054, 'RBW75'), (0.052, 'RBW66'), (0.049, 'RBW50'), (0.045, 'hr_delta'), (0.043, 'RBW33'), (0.037, 'RBW25'), (0.035, 'PWA'), (0.035, 'DBW66'), (0.034, 'kte_miu'), (0.034, 'RBW10'), (0.034, 'DBW50'), (0.033, 'AmBE'), (0.032, 'pwtt_mean'), (0.032, 'DfAmBE'), (0.028, 'RBAr'), (0.028, 'PH'), (0.028, 'DBW75'), (0.026, 'DBW33'), (0.026, 'DBW25'), (0.026, 'DBW10'), (0.024, 'ppg_fed_ar_2'), (0.023, 'ppg_fed_ar_1'), (0.022, 'kte_skew'), (0.022, 'KVAL'), (0.021, 'SLP1'), (0.017, 'hr_skew'), (0.017, 'PRT'), (0.015, 'SLP2'), (0.014, 'hr_iqr'), (0.013, 'ppg_fed_ar_5'), (0.012, 'h_miu'), (0.01, 'ppg_fed_ar_3'), (0.009, 'loge_delta'), (0.009, 'h_iqr'), (0.007, 'loge_iqr'), (0.007, 'h_delta'), (0.003, 'ppg_fed_ar_4'), (0.002, 'loge_ar_1'), (0.002, 'h_skew'), (0.001, 'loge_ar_2'), (0.0, 
'loge_ar_5'), (0.0, 'loge_ar_4'), (-0.0, 'loge_ar_3')] # disp_map(a) # print('*****************') # disp_map(b) # exit() fh = FileHelper() all_csv_names = fh.get_all_csv_names() full_set_arr = [] full_set_res = [] for csv_file_name in all_csv_names: is_valid, arr, res = fh.read_file(csv_file_name) if not is_valid: continue full_set_arr = np.concatenate((full_set_arr, arr.tolist()), 0) if len(full_set_arr) > 0 else arr.tolist() res = get_mean_bp(res) full_set_res = np.concatenate((full_set_res, res.tolist()), 0) if len(full_set_res) > 0 else res.tolist() # print '************' + fh.colsRes[0] + '***************' # print rank_features(full_set_arr, full_set_res[:, 0], fh.cols) # print '************' + fh.colsRes[1] + '***************' # print rank_features(full_set_arr, full_set_res[:, 1], fh.cols) kf = KFold(full_set_res.shape[0], 10)