def do(allCmdArgs):
    checkAndSetUpDir()
    InputReader.setWrapUpFunc(wrapUp)
    FileReader.setFileLocations(recurringTasksFile, oneOffTasksFile, confFile)
    FileWriter.setFileLocations(recurringTasksFile, oneOffTasksFile, confFile)
    options = FileReader.fetchConfOptions()
    return parseArgsAndDecide(allCmdArgs)
def deleteTask(deleterArgs):
    argAmount = len(deleterArgs)
    if argAmount >= 1 and deleterArgs[0] == 'help':
        OutputWriter.deleterHelp()
        return 0
    if argAmount >= 1:
        taskToDelName = deleterArgs[0]
        nameLocation = FileReader.findTaskLocationWithName(taskToDelName)
        if nameLocation is None:
            OutputWriter.wrongInputMessage(
                taskToDelName + ' Task name does not exist', '-d')
            return 1
    else:
        while True:
            taskToDelName = InputReader.promptForTaskName(
                "Which task would you like to delete?\n")
            nameLocation = FileReader.findTaskLocationWithName(taskToDelName)
            if nameLocation is not None:
                break
            else:
                print("Task name does not exist, try again or press Ctrl-D to exit")
    if nameLocation == recurringTasksFile:
        FileWriter.deleteRecurrWithName(taskToDelName)
    elif nameLocation == oneOffTasksFile:
        FileWriter.deleteOneOffWithName(taskToDelName)
    print("Task '{}' successfully deleted.".format(taskToDelName))
    return 0
def solve(self, out_path):
    domains = {}
    names = []
    # Develop list of possible shifts
    shift_ids = []
    for s in self.shifts:
        shift_ids.append(str(s.id))
    # Develop list of domains for people participating in shifts
    for p in self.people:
        domains[p.attributes['Name']] = fd.FiniteDomain(shift_ids)
        names.append(p.attributes['Name'])
    names = tuple(names)
    constraints = []
    # Develop list of constraints for when people can't work in shifts
    for p in self.people:
        for c in p.constraints:
            constraints.append(fd.make_expression(
                (p.attributes['Name'],),
                "%s[0] != '%s'" % (p.attributes['Name'], c.id)))
    # Submit names of workers, domains for workers, and constraints for workers to solver
    r = Repository(names, domains, constraints)
    solutions = Solver().solve(r)
    if not solutions:
        print 'No solutions found'
        return
    # Print solutions
    f = FileWriter(solutions, self.shifts)
    f.writeFile(out_path)
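# Note (assumption, not stated in the snippet above): the names fd.FiniteDomain,
# fd.make_expression, Repository and Solver match the API of the logilab-constraint
# package, so solve() appears to model shift assignment as a finite-domain CSP where
# each person's variable ranges over the available shift ids.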
def run(self):
    self.gpsPath = FileReader.parseGpsLog(self.gpsLog)
    if self.gpsPath == -1:
        return
    print self.gpsPath
    if self.coreLog is None:
        self.coreLog = self.runCamSim()
    self.pathList = FileReader.parseCoreLogs(self.coreLog,
                                             self.gpsPath[0].longitude,
                                             self.gpsPath[0].latitude)
    if self.pathList == -1:
        return
    optimalPath = self.getOptimalPath(self.gpsPath, self.pathList)
    distances = self.calculateDistances(optimalPath)
    self.calculateMetrics(optimalPath, distances)
    FileWriter.createDataSheet(self, self.totalResult, self.twentyMinuteResults)
    FileWriter.export(self, self.gpsPath, [optimalPath])
    print "Minimum Distance"
    print min(distances)
    print "Maximum Distance"
    print max(distances)
    print 'id: ', self.scenarioID
    print 'core log file: ', self.coreLog
    print 'gps log file: ', self.gpsLog
    print 'time offset: ', self.timeOffset
    print 'maximum radius: ', self.maxRadius
    print 'number of paths: ', len(self.pathList)
def sheep_killed(self, log_level):
    fw.write_to_log(
        f"The function Sheep.sheep_killed(self, log_level) invoked with argument self={self}, log_level={log_level}.",
        logging.DEBUG, log_level)
    fw.write_to_log(f"The sheep with id={self.id} has been eaten.",
                    logging.INFO, log_level)
    self.alive = False
def clickSaveMeasurementBtn():
    """ Calls for a measurement to be saved to database and as .txt. """
    global on
    if on:
        # "A measurement is running; please stop the measurement before doing this."
        tbOthers.insert(
            1.0,
            'Mätning är igång, vänligen stoppa mätningen för att utföra detta. \n \n')
        tbOthers.update()
    else:
        path = getSavePath()
        try:
            setDcToSave()
            DC.insertMeasurementToDb()
            # "The measurement has been saved to the database"
            tbOthers.insert(1.0, 'Mätningen har sparats till databasen\n \n')
            tbOthers.update()
        except:
            # "Could not save the measurement to the database"
            tbOthers.insert(1.0, 'Kunde inte spara mätningen till databasen\n \n')
            tbOthers.update()
        try:
            fileWriter = FileWriter(DC, path)
            fileWriter.createTxtFile()
            # "The measurement has been saved as a .txt file"
            tbOthers.insert(
                1.0, 'Mätningen har sparats som en .txt-fil\n' + path + '\n \n')
            tbOthers.update()
        except:
            # "Could not save the measurement as a .txt file"
            tbOthers.insert(1.0, 'Kunde inte spara mätningen som .txt-fil\n \n')
            tbOthers.update()
def get_alive_sheeps(sheeps):
    fw.write_to_log(
        f"The function get_alive_sheeps(sheeps) invoked with argument sheeps={sheeps}.",
        logging.DEBUG, log_level)
    i = 0
    for sheep in sheeps:
        if sheep.alive:
            i = i + 1
    return i
def save(self):
    if self.resultlist is None or len(self.resultlist) == 0:
        self.showToast('No Result')
        return
    filename, filetype = QFileDialog.getSaveFileName(
        self, 'save file', '/E.txt', "Text Files (*.txt)")
    if filename == "":
        return
    FileWriter.write(filename, self.resultlist)
    self.showToast('Save Success:' + filename)
def exportCSV(self, dataset):
    """ @return """
    outputDir = filedialog.askdirectory()
    if os.path.isdir(outputDir):
        FileWriter.write_csv(outputDir, dataset)
    else:
        print("did not export to:" + outputDir)
def setup(sheep_no, init_pos_lim):
    fw.write_to_log(
        f"The function setup(sheep_no, init_pos_lim) invoked with arguments sheep_no={sheep_no}, init_pos_lim={init_pos_lim}, log_level={log_level}",
        logging.DEBUG, log_level)
    sheep_list = [
        Sheep(init_pos_lim, sheep_move_dist, x, log_level)
        for x in range(sheep_no)
    ]
    wolf = Wolf(wolf_move_dist, log_level)
    return sheep_list, wolf
def __init__(self, init_pos_lim, move_range, id, log_level):
    super().__init__(move_range)
    self.position = [
        rand.uniform(-init_pos_lim, init_pos_lim),
        rand.uniform(-init_pos_lim, init_pos_lim)
    ]
    self.id = id
    self.alive = True
    fw.write_to_log(
        f"The starting position of the sheep with id={self.id} has been set to ({self.position[0]}, {self.position[1]}).",
        logging.INFO, log_level)
def log(round_count, wolf, sheeps, closest_sheep, alive_sheeps):
    fw.write_to_log(
        f"The function log(round_count, wolf, sheeps, closest_sheep, alive_sheeps) invoked with arguments round_count={round_count}, wolf={wolf}, sheeps={sheeps}, closest_sheep={closest_sheep}, alive_sheeps={alive_sheeps}.",
        logging.DEBUG, log_level)
    print(f"Round no: {round_count}")
    print(f"Wolf position: ({round(wolf.position[0], 3)}, {round(wolf.position[1], 3)})")
    if not closest_sheep.alive:
        print(f"The sheep with number {closest_sheep.id} has been eaten!")
    print(f"Sheep alive: {alive_sheeps}")
    print("------------------------------\n")
def analyse(data, key):
    filepath = FileWriter.get_file()
    splits, datas = split_into_sections(data)
    if key == ord("g"):
        simple_graphing(data, splits, filepath)
    caller_function(datas, filepath)
def __init__(self, governor):
    self.governor = governor
    self.table = {}
    self.table_metadata = {}
    self.key_index = {}
    self.data_index = {}
    self.index = {}
    self.file_writer = FileWriter.FileWriter()
def move(self, log_level):
    fw.write_to_log(
        f"The function Sheep.move(self, log_level) invoked with argument self={self}, log_level={log_level}.",
        logging.DEBUG, log_level)
    x0, y0 = self.position
    direction = rand.randint(1, 4)
    if direction == 1:
        self.position[0] += self.move_range
    elif direction == 2:
        self.position[0] -= self.move_range
    elif direction == 3:
        self.position[1] += self.move_range
    elif direction == 4:
        self.position[1] -= self.move_range
    fw.write_to_log(
        f"The sheep with id={self.id} has moved from ({x0}, {y0}) to ({self.position[0]}, {self.position[1]}).",
        logging.INFO, log_level)
def finalFileWriting():
    decade = 2010
    endYear = 2013
    setFolders()
    print "Downloading files..."
    getPrograms()
    unzipPrograms()
    getDecade(decade)
    unzipDecade(decade)
    print "Making play-by-play files..."
    FileWriter.moveTeamFiles(decade, endYear)
    FileWriter.makeRosters(decade, endYear)
    FileWriter.makeEventFiles(decade, endYear)
    print "Setting up events..."
    for letter in "abcdefghijklmnopqrstuvwxyz":
        FileWriter.makeBatterAlphabetizedFiles(letter)
        FileWriter.makePitcherAlphabetizedFiles(letter)
    print "Done!"
def createTask(creatorArgs):
    recurrProperArgs = ['recur', 'r', 'recurring']
    oneOffProperArgs = ['once', 'o', 'oneoff']
    argAmount = len(creatorArgs)
    silentMode = False
    if argAmount >= 1 and creatorArgs[0] == 'help':
        OutputWriter.creatorHelp()
        return 0
    if argAmount >= 1 and creatorArgs[0] == 's':
        silentMode = True
        creatorArgs = creatorArgs[1:]
        argAmount = argAmount - 1
    if argAmount == 0:
        isRecurring = InputReader.promptForIsRecurring()
    else:
        isRecurringCandidate = creatorArgs[0]
        if isRecurringCandidate in recurrProperArgs:
            isRecurring = True
        elif isRecurringCandidate in oneOffProperArgs:
            isRecurring = False
        else:
            OutputWriter.wrongInputMessage(creatorArgs[0], '-c')
            return 1
    taskDets = creatorArgs[1:]
    argsOk, wrongArg = InputReader.verifyAndPrepRecurrArgs(taskDets) if isRecurring \
        else InputReader.verifyAndPrepOneOffArgs(taskDets)
    if not argsOk:
        OutputWriter.wrongInputMessage(wrongArg, '-c')
        return 1
    else:
        taskToAdd = Task(True, InputReader.createRecurring(taskDets)) if isRecurring \
            else Task(False, InputReader.createOneOff(taskDets))
        if isRecurring:
            if argAmount <= 1 or (silentMode or InputReader.askToAddRecurr(taskToAdd)):
                FileWriter.addRecurring(taskToAdd)
        else:
            if argAmount <= 1 or (silentMode or InputReader.askToAddOneOff(taskToAdd)):
                FileWriter.addOneOff(taskToAdd)
    return 0
def open_file(self, filepath):
    self.resultlist.clear()
    crashDir = FileWriter.mkcrashfile(filepath)
    cgitb.enable(display=1, logdir=crashDir, format='text')
    self.inputdata(filepath)
    self.filelist.clear()
    self.filelist.append(filepath)
    self.fileindex = 0
    self.setfileIndexAandCount()
    self.settings.setValue('isOpenFile', True)
def GenerateMaps(noOfMaps, noOfStartAndEndIdx):
    for i in range(noOfMaps):
        instanceOfMap = MapData()
        instanceOfMap.runSuite()
        ReadyMapForWrite(instanceOfMap.map)
        for j in range(noOfStartAndEndIdx):
            start, end = instanceOfMap.generateStartAndEndIndices()
            FileWriter(start, end, instanceOfMap.hardToTraverseIndices,
                       instanceOfMap.map, f"Maps/map_{i}_{j}.txt")
def downloadAndSave(url, savePath):
    try:
        response = requests.get(url, proxies=proxies)
        html_source = response.text
        soup = BeautifulSoup(html_source, "html.parser")
        # get the article title
        title_entity = soup.select('head > title')[0]
        title = title_entity.text
        seed_urls = re.findall(r_seed_link_match, html_source)
        seed_save = []
        for link in seed_urls:
            print(link)
            seed_save.append(head_seed + link)
        file_task = FileWriter.FileWriteMission(
            FileWriter.FileWriteMission.MISSION_NOMAL, title, url, seed_save)
        FileWriter.add_file_write_task(file_task)
    except Exception as e:
        print(e)
        pass
def parse_config_file(file):
    fw.write_to_log(
        f"The function parse_config_file(file) invoked with arguments file={file}.",
        logging.DEBUG, log_level)
    config = ConfigParser()
    config.read(file)
    init_pos = config.get('Terrain', 'InitPosLimit')
    sheep_move = config.get('Movement', 'SheepMoveDist')
    wolf_move = config.get('Movement', 'WolfMoveDist')
    try:
        f_init_pos = float(init_pos)
        f_sheep_move = float(sheep_move)
        f_wolf_move = float(wolf_move)
    except (ValueError, TypeError):
        fw.write_to_log(
            "At least one of the variables in the chosen config file could not be converted to a float.",
            logging.CRITICAL, log_level)
        raise ValueError("An input provided in a config file must be a float.")
    if f_init_pos <= 0 or f_sheep_move <= 0 or f_wolf_move <= 0:
        fw.write_to_log(
            "At least one of the variables in the chosen config file was less than or equal to 0.",
            logging.CRITICAL, log_level)
        raise ValueError(
            "An input provided in a config file must be greater than 0.")
    return f_init_pos, f_sheep_move, f_wolf_move
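# Illustrative only (not part of the original code): an INI file that parse_config_file()
# above should accept, based on the sections and options it reads with ConfigParser.
# The numeric values are made-up examples.
#
#   [Terrain]
#   InitPosLimit = 10.0
#
#   [Movement]
#   SheepMoveDist = 0.5
#   WolfMoveDist = 1.0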
def clickSaveAsPdfBtn():
    """ Calls for a measurement to be saved to database and as .pdf. """
    global on
    if on:
        # "A measurement is running; please stop the measurement before doing this."
        tbOthers.insert(
            1.0,
            'Mätning är igång, vänligen stoppa mätningen för att utföra detta. \n \n')
        tbOthers.update()
    else:
        try:
            path = getSavePath()
            setDcToSave()
            fileWriter = FileWriter(DC, path)
            fileWriter.createPdfFile()
            # "The measurement has been saved as a .pdf file"
            tbOthers.insert(
                1.0, 'Mätningen har sparats som en .pdf-fil\n' + path + '\n \n')
            tbOthers.update()
        except:
            # "Could not save the measurement as a .pdf file"
            tbOthers.insert(1.0, 'Kunde inte spara mätningen som .pdf-fil\n \n')
            tbOthers.update()
def OnGeoExtract(self, evt):
    try:
        wikiextractor = Extractor.wikiextractor()
        data_dict = {}
        members = self.GetGeoList()
        wikiextractor.get_data_dict_from_pageid(members, data_dict, 'f')
        filewriter = FileWriter.filewriter()
        filewriter.SaveToSQLite(data_dict)
        #filewriter.SaveToExcel(data_dict)
        # Status text: "Saved successfully, please check the Excel file"
        self.statusbar.SetStatusText("保存成功,请检查excel文件", 0)
    except Exception, e:
        self.statusbar.SetStatusText(e.message, 0)
def OnExtract(self, evt):
    try:
        categoryname = self.categoryname.GetValue()
        wikiextractor = Extractor.wikiextractor()
        data_dict = {}
        if self.extractsubcategoryck.Get3StateValue() == wx.CHK_CHECKED:
            wikiextractor.parse_members(categoryname, data_dict, 't')
        else:
            wikiextractor.parse_members(categoryname, data_dict, 'f')
        filewriter = FileWriter.filewriter()
        #filewriter.SaveToSQLite(data_dict)
        filewriter.SaveToExcel(data_dict)
        # Status text: "Saved successfully, please check the Excel file"
        self.statusbar.SetStatusText(u"保存成功,请检查excel文件", 0)
    except Exception, e:
        self.statusbar.SetStatusText(e.message, 0)
def find_closest_sheep(self, sheeps, log_level) -> Sheep:
    fw.write_to_log(
        f"The function Wolf.find_closest_sheep(self, sheeps, log_level) invoked with argument self={self}, sheeps={sheeps}, log_level={log_level}.",
        logging.DEBUG, log_level)
    for sheep in sheeps:
        if sheep.alive:
            lowest_distance = self.distance_to(sheep, log_level)
            fw.write_to_log(
                f"The function Wolf.distance_to(self, animal, log_level) returned {lowest_distance}.",
                logging.DEBUG, log_level)
            closest_sheep = sheep
            break
    for sheep in sheeps:
        distance = self.distance_to(sheep, log_level)
        fw.write_to_log(
            f"The function Wolf.distance_to(self, animal, log_level) returned {distance}.",
            logging.DEBUG, log_level)
        if sheep.alive and distance < lowest_distance:
            lowest_distance = self.distance_to(sheep, log_level)
            fw.write_to_log(
                f"The function Wolf.distance_to(self, animal, log_level) returned {lowest_distance}.",
                logging.DEBUG, log_level)
            closest_sheep = sheep
    return closest_sheep
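# Sketch (not part of the original code): the same "closest living sheep" lookup can be
# written with the built-in min(), assuming at least one sheep in `sheeps` is alive and
# ignoring the per-call debug logging:
#
#   def find_closest_sheep(self, sheeps, log_level) -> Sheep:
#       alive = [s for s in sheeps if s.alive]
#       return min(alive, key=lambda s: self.distance_to(s, log_level))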
def move(self, sheep, log_level):
    fw.write_to_log(
        f"The function Wolf.move(self, sheep, log_level) invoked with argument self={self}, sheep={sheep}, log_level={log_level}.",
        logging.DEBUG, log_level)
    distance = self.distance_to(sheep, log_level)
    fw.write_to_log(
        f"The function Wolf.distance_to(self, animal, log_level) returned {distance}.",
        logging.DEBUG, log_level)
    x0, y0 = self.position
    x1, y1 = sheep.position
    self.position = [x0 + self.move_range * (x1 - x0) / distance,
                     y0 + self.move_range * (y1 - y0) / distance]
    fw.write_to_log(
        f"The wolf has moved from ({x0}, {y0}) to ({self.position[0]}, {self.position[1]}).",
        logging.INFO, log_level)
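# Note: the position update above advances the wolf by a fixed step of length move_range
# along the unit vector ((x1 - x0) / distance, (y1 - y0) / distance) that points from the
# wolf toward the targeted sheep.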
def __init__(self, n, k, time=1080):
    # initializing
    self.CPU = Resources.CPU(0.005, time, 0)
    self.CPU.set_k(k)
    self.CPU.set_elapsed_time(0)
    self.SysDisc1 = Resources.SysDisc(0.012, time, 1)
    self.SysDisc1.set_k(k)
    self.SysDisc2 = Resources.SysDisc(0.015, time, 2)
    self.SysDisc2.set_k(k)
    self.UserDiscList = [
        Resources.UserDisc(0.02, time, i + 3) for i in range(k)
    ]
    for ud in self.UserDiscList:
        ud.set_k(k)
    self.f1 = FileWriter.BlockingFileWriter("out1.txt")
    # linking resources
    self.cpu_next_res = []
    self.cpu_next_res.append(self.SysDisc1)
    self.cpu_next_res.append(self.SysDisc2)
    for ud in self.UserDiscList:
        self.cpu_next_res.append(ud)
    self.CPU.add_res(self.cpu_next_res)
    self.sys_next_res = []
    self.sys_next_res.append(self.CPU)
    for ud in self.UserDiscList:
        self.sys_next_res.append(ud)
    self.SysDisc1.add_res(self.sys_next_res)
    self.SysDisc2.add_res(self.sys_next_res)
    self.user_next_res = []
    for ud in self.UserDiscList:
        self.user_next_res.append(self.CPU)
        ud.add_res(self.user_next_res)
    # loading jobs
    for i in range(n):
        self.jobList = [Resources.Job() for _ in range(n)]
        for j in self.jobList:
            self.CPU.accept_job(j)
    print('Initialization done')
def main(directory, dest_directory, threshold, blur_radius):
    # print("Usage: directory(path) threshold(int, ~160) blur_radius(~4) light/dark(string) )")
    global avg_size_iso
    global min_size_iso
    global max_size_iso
    background_dir_name = directory
    # checks if there is a need to calculate the background
    backgournd_file = Path(os.path.join(background_dir_name, "background.png"))
    background = None
    if not backgournd_file.is_file():
        background = create_background(background_dir_name)  # black and white background
    else:
        background = sol1.read_image(os.path.join(background_dir_name, "background.png"), 1)
    cur_file_writer = FileWriter.FileWriter(dest_directory, directory)
    # find the circles - a list of (x, y, r) with x y as center of circles
    background_image_name = os.path.join(background_dir_name, "background.png")
    if Path(os.path.join(background_dir_name, "background_with_circles.png")).is_file():
        background_image_name = os.path.join(background_dir_name, "background_with_circles.png")
    circles = find_circles(background_image_name)
    tagged_circles = tag_circles(circles, directory)
    create_circled_background(circles, background, background_dir_name)
    cur_file_writer.write_background(*tagged_circles)
    scorpion_x, scorpion_y, scorpion_r = tagged_circles[0]
    # radiuses = circles[:,2]
    for filename in os.listdir(directory):
        if filename.endswith(".jpg") and not filename.startswith("detected") and not filename.startswith("background"):
            coordinates, num_iso = check_iso_loc(directory, filename, background,
                                                 threshold, blur_radius, tagged_circles[0])
            cur_file_writer.write_image_data(filename, coordinates=coordinates, num_isopods=num_iso)
    print("avg iso size is : " + str(avg_size_iso))
    print("min iso size is : " + str(min_size_iso))
    print("max iso size is : " + str(max_size_iso))
    flat_list = [item for sublist in iso_sizes for item in sublist]
    plt.hist(flat_list, bins=list(range(min_size_iso, max_size_iso + 10, 10)),
             range=(np.min(flat_list), np.max(flat_list)))
import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from Coordinate import Coordinate
from Scenario import Scenario
import FileReader
import FileWriter
import time

gps_entries = FileReader.parseGpsLog('kmlPrintTestGpsFile.log')
core_entries = FileReader.parseCoreLog('kmlPrintTestCoreFile.log')

print 'Test1: valid input'
scenario = Scenario(1, 5.0, '', sys.path[0], '')
FileWriter.export(scenario, gps_entries, core_entries)
print 'kml file printed successfully\n'

print 'Test2: no core data'
scenario = Scenario(1, 5.0, '', sys.path[0], '')
FileWriter.export(scenario, gps_entries, list())
print 'kml file printed successfully\n'

print 'Test3: no GPS data'
scenario = Scenario(1, 5.0, '', sys.path[0], '')
FileWriter.export(scenario, list(), core_entries)
print 'kml file printed successfully\n'

print 'Test4: no core or GPS data'
scenario = Scenario(1, 5.0, '', sys.path[0], '')
FileWriter.export(scenario, list(), list())
print 'kml file printed successfully\n'
def OnGeoSave(self, evt):
    filewriter = FileWriter.filewriter()
    filewriter.SaveToCSV(self.resultdict)
def __init__(self, canvas):
    super(DataThread, self).__init__()
    self.canvas = canvas
    self.file_writer = FileWriter.FileWriter()
def decoder(file_dir_name, file_name):
    # open file
    file = open(file_dir_name, "rb")
    full_content = file.read()
    length = len(full_content)
    print("Size of the packet: " + str(length))
    pointer = 0  # point to the location where the packet is parsed
    number = 0  # number of packets
    dump = b'\x00\x00\x00\x00'
    content = b''
    pointer += 24
    for i in full_content:
        if pointer >= length:
            print("^^^^^^^^^^^^^^^^^^^^^^^^^^^")
            print("Reach the end of the file!")
            print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
            break
        number += 1
        print("++++++++++++++++++++++++++++++++++++++++++++++++")
        print("Packet NO," + str(number))
        original_length = full_content[pointer + 8:pointer + 12]
        included_length = full_content[pointer + 12:pointer + 16]
        print("included: " + str(included_length))
        timestamp_seconds = full_content[pointer:pointer + 4]
        timestamp_microseconds = full_content[pointer + 4:pointer + 8]
        data_length = FileWriter.little_endian_to_int(included_length)
        print("\tData length: " + str(data_length))
        packet_data = full_content[pointer + 20:pointer + 20 + data_length]
        dup_len = struct.pack('<H', data_length)
        print("\tDuplex length: " + str(dup_len))
        if number == 1:
            orgin_time = timestamp_seconds
            time_stamp = timestamp_seconds
        time_base = FileWriter.little_endian_to_int(timestamp_seconds) * 1000000 + \
            FileWriter.little_endian_to_int(timestamp_microseconds)
        time_to_add = time_base - FileWriter.little_endian_to_int(orgin_time) * 1000000
        print("\tTime to add: " + str(time_to_add))
        time_plus = FileWriter.int_to_little_endian(time_to_add)
        content += time_plus + dump + dup_len + dup_len + dump * 7 + packet_data
        pointer = pointer + data_length + 20
    print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
    pkt_counter = FileWriter.int_to_little_endian(number)
    temp_length = len(content)
    content_length = FileWriter.int_to_little_endian(temp_length + 128)
    print("\tContent Length: " + str(FileWriter.little_endian_to_int(content_length)))
    print("\tExpected Length: " + str(FileWriter.little_endian_to_int(b'\x59\x48\x00\x00')))
    print("\tOrigin Time: " + str(FileWriter.little_endian_to_int(time_stamp)))
    package = [file_name, time_stamp, pkt_counter, content_length, content]
    FileWriter.file_writer(package)
    return True
def GenBackendFromFiles(inputFiles, outputPath, backend, optimizeAlpha, optimizeBeta):
    # read raw solution times
    psTimesRaw = {}
    for inputFile in inputFiles:
        print "TensileGen: Reading " + os.path.basename(inputFile)
        FileReader.getSolutionsFromXML(inputFile, psTimesRaw, optimizeAlpha, optimizeBeta)
    # print "status: created dictionary - " + str(psTimes)

    # structures needed to write backend
    psTimes = {}
    kernelSet = set()
    solutionSet = set()
    for deviceProfile, exactMatches in psTimesRaw.iteritems():
        psTimes[deviceProfile] = {}
        #print "DeviceProfile: " + str(deviceProfile)
        for exactMatch, problems in exactMatches.iteritems():
            rangeProblems = problems[0]
            exactProblems = problems[1]
            #print len(rangeProblems), len(exactProblems)
            psTimes[deviceProfile][exactMatch] = [[], []]
            #print "ExactMatch: " + str(exactMatch)
            #print len(problems)
            for rangeProblem, solutionCandidates in rangeProblems.iteritems():
                for solution, solutionBenchmark in solutionCandidates.iteritems():
                    avgTime = 1e100
                    if len(solutionBenchmark.times) > 0 and solutionBenchmark.validationStatus != -1:
                        avgTime = sum(solutionBenchmark.times) / len(solutionBenchmark.times)
                    psTimes[deviceProfile][exactMatch][0].append([rangeProblem, solution, avgTime])
            for exactProblem, solutionCandidates in exactProblems.iteritems():
                for solution, solutionBenchmark in solutionCandidates.iteritems():
                    avgTime = 1e100
                    if len(solutionBenchmark.times) > 0 and solutionBenchmark.validationStatus != -1:
                        avgTime = sum(solutionBenchmark.times) / len(solutionBenchmark.times)
                    psTimes[deviceProfile][exactMatch][1].append([exactProblem, solution, avgTime])
            # if this exact match didn't have any psps with times, remove
            if len(psTimes[deviceProfile][exactMatch][0]) < 1 and len(psTimes[deviceProfile][exactMatch][1]) < 1:
                print "TensileGenBackend: ExactMatch %s has no benchmark times; removing." % str(exactMatch)
                psTimes[deviceProfile].pop(exactMatch, None)
        # if this device profile didn't have any exact matches with times, remove
        if len(psTimes[deviceProfile]) < 1:
            print "TensileGenBackend: Device Profile %s has no benchmark times; removing." % str(deviceProfile)
            psTimes.pop(deviceProfile, None)

    # kernelSet.remove(None)

    fileWriter = FileWriter.FileWriter(outputPath, backend, False)
    fileWriter.writeBackendFiles(psTimes)
def getData(files):
    list = files
    # data returned for the lower-left file list
    Datas = []
    length = len(list)
    if length <= 0:
        return Datas
    ## indentation
    m_index = cf.getHeightColumn()
    ## applied
    v_index = cf.getFdColumn()
    ## time
    s_index = cf.getTimeColumn()
    m_unit = cf.getHeightUnitNo()
    v_unit = cf.getFDUnitNo()
    s_unit = cf.getTimeUnitNo()
    timeCoefficient = np.array(cf.getTimeCoefficient(), dtype=np.float64)
    appliedCoefficient = np.array(cf.getAppliedCoefficient(), dtype=np.float64)
    indentationCoefficient = np.array(cf.getIndentationCoefficient(), dtype=np.float64)
    for n in range(len(list)):
        fname = list[n]
        fileinfo = 'file: ' + os.path.basename(fname) + '\n'
        fileinfo = fileinfo + 'settings: ' + '\n'
        fileinfo = fileinfo + "%-20s\t\t%-20s\t\t%-20s\t\t%-20s\n" % (
            "type", "index", "Coefficient", "unit") + "-" * 50
        fileinfo = fileinfo + '\n'
        fileinfo = fileinfo + "%-20s\t\t%-20s\t\t%-20s\t\t%-20s\n" % (
            "time", str(s_index + 1), str(timeCoefficient), str(cf.getTimeUnit()))
        fileinfo = fileinfo + "%-20s\t\t%-20s\t\t%-20s\t\t%-20s\n" % (
            "applied", str(v_index + 1), str(appliedCoefficient), str(cf.getFdUnit()))
        fileinfo = fileinfo + "%-20s\t\t%-20s\t\t%-20s\t\t%-20s\n" % (
            "indentation", str(m_index + 1), str(indentationCoefficient), str(cf.getHeightUnit()))
        spstr = cf.getDefalutSplit()
        if spstr == ' ':
            spstr = 'Space'
        elif spstr == '\t':
            spstr = 'Tab'
        fileinfo = fileinfo + '\nsplit by: ' + spstr + '\n\n\n'
        try:
            retracttime = -1
            last_retract_index = -1
            T_back_list = []
            m_back_list = []
            V_back_list = []
            sensitivity = -1
            springConstant = -1
            if fname.endswith('.txt'):
                lines = linecache.getlines(fname)
                line_len = len(lines)
                for i in range(line_len):
                    linedata = lines[i].lower()
                    if sensitivity == -1 and 'sensitivity' in linedata:
                        sensitivity = getFirstNumber(linedata)
                    if springConstant == -1 and 'springconstant' in linedata:
                        springConstant = getFirstNumber(linedata)
                    values = linedata.split(cf.getDefalutSplit())
                    try:
                        m_back_data = np.array(values[m_index], dtype=np.float64) * indentationCoefficient
                        V_back_data = np.array(values[v_index], dtype=np.float64) * appliedCoefficient
                        T_back_data = np.array(values[s_index], dtype=np.float64) * timeCoefficient
                        m_back_list.append(m_back_data * m_unit)
                        V_back_list.append(V_back_data * v_unit)
                        T_back_list.append(T_back_data * s_unit)
                    except Exception as e1:
                        fileinfo = fileinfo + 'line ' + str(i + 1) + ' ' + lines[i] + ' cause:' + str(e1) + '\n'
            elif fname.endswith('.xlsx') or fname.endswith('.xls'):
                df = pd.read_excel(fname, header=None, usecols=[m_index, v_index, s_index])
                data = df.apply(pd.to_numeric, errors='coerce').dropna(how='any')
                m_back_list = np.array(data[m_index] * m_unit, dtype=np.float64) * indentationCoefficient
                V_back_list = np.array(data[v_index] * v_unit, dtype=np.float64) * appliedCoefficient
                T_back_list = np.array(data[s_index] * s_unit, dtype=np.float64) * timeCoefficient
            Datas.append(
                SuccessData(n, retracttime, sensitivity, springConstant,
                            m_back_list, V_back_list, T_back_list, last_retract_index))
        except Exception as e:
            print(e)
            fileinfo = fileinfo + str(e)
        FileWriter.write_log_info(fname, fileinfo)
    return Datas
def decoder(file_dir_name, file_name):
    # open file
    file = open(file_dir_name, "rb")
    full_content = file.read()
    length = len(full_content)
    print("Size of the packet: " + str(length))
    pointer = 0  # point to the location where the packet is parsed
    counter = 0  # count the number of the block in the packet
    number = 0  # number of packets
    dump = b'\x00\x00\x00\x00'
    content = b''
    # parsing the packet
    for b in full_content:
        print("----------------------------")
        if pointer >= length:
            # in case of reaching the end of the file
            print("Reach the end of the file!!!")
            break
        block_type = full_content[pointer:pointer + 4]  # block type
        counter += 1
        print("No." + str(counter) + " Block: ")
        flag = block_checker(block_type)  # whether the block is useful
        block_length = int(
            FileWriter.little_endian_to_int(full_content[pointer + 4:pointer + 8]))  # the length of the block
        print("\tBlock Length: " + str(block_length))
        capture_len = FileWriter.little_endian_to_int(full_content[pointer + 20:pointer + 24])
        print("\tCapture length: " + str(capture_len))
        time_stamp = full_content[pointer + 12:pointer + 20]
        # check the parsing result is correspond to what we expected
        if full_content[pointer + 4:pointer + 8] != full_content[pointer + block_length - 4:pointer + block_length]:
            print("Block parse error!")
        else:
            print("Block parse succeed!")
        # output useful information
        if flag == 1:
            number += 1
            if number == 1:
                orig_time = time_stamp
                print("Origin Time: ")
                print(bit_64_hex_to_int(orig_time))
            pkt_content = full_content[pointer + 28:pointer + 28 + capture_len]
            dup_len = struct.pack('<h', capture_len)
            print("Duplex length: " + str(dup_len))
            time_base = orig_time
            time_to_add = bit_64_hex_to_int(time_stamp) - bit_64_hex_to_int(time_base)
            time_plus = FileWriter.int_to_little_endian(time_to_add)
            print("Time to add: " + str(time_to_add))
            print("Time stamp: " + str(bit_64_hex_to_int(time_stamp)))
            content += orig_time + dump + dup_len + dup_len + dump * 7 + pkt_content
            print("Timestamp is: " + str(time_stamp))
            print("Collected position: " + str(pointer + block_length))
        # move pointer
        pointer += block_length
        print("Pointer now: " + str(pointer))
    temp_length = len(content)
    content_length = FileWriter.int_to_little_endian(temp_length + 128)
    print("The number of packets: " + str(number))
    print("Content Length: " + str(FileWriter.little_endian_to_int(content_length)))
    print("Target Length: " + str(FileWriter.little_endian_to_int(b'\x59\x48\x00\x00')))
    pkt_counter = FileWriter.int_to_little_endian(number)
    package = [file_name, orig_time, pkt_counter, content_length, content]
    FileWriter.file_writer(package)
    print("TESTTTTT: ")
    print(bit_8_hex_to_int(b'\xff'))
    return True
def caller_function(datas, filepath):
    import FileWriter
    areas = get_areas(datas)
    maxes = get_maxes(datas)
    FileWriter.write_to_file(areas, maxes, filepath)
def decoder(file_dir_name, file_name):
    file = open(file_dir_name, "rb")
    full_content = file.read()
    length = len(full_content)
    orig_time = full_content[24:28]
    print("Size of the packet: " + str(length))
    time_stamp = orig_time
    print("Timestamp is: ")
    print(time_stamp)
    tag = b'\x48\x74\x74\x70\x2d\x50\x6f\x72\x74\x3a'
    print(tag)
    pointer = 24
    counter = 1
    content = b''
    dump = b'\x00\x00\x00\x00'
    for i in full_content:
        print("************************************************")
        if pointer >= length:
            print("Reach the end of the file!")
            break
        print("No." + str(counter) + " packet: ")
        ts_sec = full_content[pointer:pointer + 4]
        print("\tTime sec is: " + str(ts_sec))
        print(FileWriter.little_endian_to_int(ts_sec))
        ts_usec = full_content[pointer + 4:pointer + 8]
        print("\tTime microsec is: " + str(ts_usec))
        print(FileWriter.little_endian_to_int(ts_usec))
        incl_len = full_content[pointer + 8:pointer + 12]
        print("\tNumber of octects of packet saved in file: " + str(incl_len))
        orig_len = full_content[pointer + 12:pointer + 16]
        print("\tActual length of packet: " + str(orig_len))
        dup_len = full_content[pointer + 8:pointer + 10]
        if FileWriter.little_endian_to_int(incl_len) > FileWriter.little_endian_to_int(orig_len):
            print("Packet Length Exception!")
            break
        pkt_length = FileWriter.little_endian_to_int(incl_len)
        print("\tPacket Length is: " + str(pkt_length))
        pkt_content = full_content[pointer + 16:pointer + 16 + pkt_length]
        pointer = pointer + 16 + pkt_length
        counter += 1
        time_base = FileWriter.little_endian_to_int(ts_sec) * 1000000 + FileWriter.little_endian_to_int(ts_usec)
        time_to_add = time_base - FileWriter.little_endian_to_int(orig_time) * 1000000
        print("\tTime to add: " + str(time_to_add))
        time_plus = FileWriter.int_to_little_endian(time_to_add)
        print(time_plus)
        content += time_plus + dump + dup_len + dup_len + dump * 7 + pkt_content
    pkt_counter = FileWriter.int_to_little_endian(counter)
    print("\tPacket Number: ")
    print(pkt_counter)
    temp_length = len(content)
    content_length = FileWriter.int_to_little_endian(temp_length + 128)
    print("\tContent Length: ")
    print(FileWriter.little_endian_to_int(content_length))
    print(FileWriter.little_endian_to_int(b'\x59\x48\x00\x00'))
    print("\tOrigin Time: " + str(FileWriter.little_endian_to_int(time_stamp)))
    package = [file_name, time_stamp, pkt_counter, content_length, content]
    FileWriter.file_writer(package)
    return True
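# Context for the slicing offsets in decoder() above: in the classic libpcap (.pcap)
# format the global header is 24 bytes, and each packet record begins with a 16-byte
# header of ts_sec (4 bytes), ts_usec (4 bytes), incl_len (4 bytes) and orig_len
# (4 bytes), followed by incl_len bytes of packet data. That layout corresponds to the
# reads at pointer, pointer + 4, pointer + 8 and pointer + 12 and to the
# "pointer + 16 + pkt_length" advance.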
def distance_to(self, animal, log_level) -> float:
    fw.write_to_log(
        f"The function Animal.distance_to(self, animal) invoked with arguments self={self}, animal={animal}, log_level={log_level}.",
        logging.DEBUG, log_level)
    return math.sqrt((self.position[0] - animal.position[0])**2 +
                     (self.position[1] - animal.position[1])**2)
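# Suggestion (not in the original code): math.hypot computes the same Euclidean distance
# and avoids intermediate overflow for large coordinates:
#
#   return math.hypot(self.position[0] - animal.position[0],
#                     self.position[1] - animal.position[1])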
def clean(self):
    newproxies = []
    infile = open('ProxyList.raw', 'r')
    outfile = FileWriter('ProxyList', 100, False)
    unused = 0
    toTest = []
    latencies = {}
    ## reads proxies from raw file
    for line in infile:
        ## proxy address is the first item in the row
        try:
            proxy = ((line.split())[0]).strip()
            toTest.append(proxy)
        except IndexError:
            infile.close()
            outfile.close()
            print 'no proxies'
            return
    toTest = list(set(toTest))
    ntested = len(toTest)
    ## multi-threaded calls to twitter api rate limit api page
    pool = eventlet.GreenPool()
    starttime = clock()
    for proxytuple in pool.imap(self.fetch, toTest):
        if proxytuple == '':
            continue
        proxydata, proxy = proxytuple
        try:
            proxydata = (((((proxydata.split('\"remaining_hits\":'))[1]).split(','))[0]).split('}'))[0]
        except:
            if self.verbose:
                print '\t', proxy + ' parse error, removing'
            continue
        ## writes proxy iff successful lookup and > 50 API calls remaining
        if int(proxydata) > 50:
            newproxies.append(proxy)
            self.timeout -= self.TIMEOUT_ADJ
            if self.verbose:
                print '\t', proxy + ' has ' + proxydata + ' hits'
            latencies[clock() - starttime] = proxy
        else:
            if self.verbose:
                print '\t', proxy + ' is spent, removing'
    ## writes best 250 working proxies to file
    times = latencies.keys()
    times.sort()
    if len(times) > 250:
        bestTimes = times[0:250]
    else:
        bestTimes = times
    newproxies = []
    for thisTime in bestTimes:
        newproxies.append(latencies[thisTime])
    unused = len(newproxies)
    for proxy in newproxies:
        outfile.write(proxy + '\n')
    infile.close()
    outfile.close()
    ##
    if self.verbose:
        print 'proxies: ', unused, 'timeout: ', self.timeout
import sys
from Scenario import Scenario
from MetricsResult import MetricsResult
import FileWriter

totalResult = MetricsResult(23*3600 + 00*60 + 00.000, 00*3600 + 13*60 + 01.000, 60, 4, 2.1, 7.5, 5.35, 33.333)
twentyMinuteResults = list()
twentyMinuteResults.append(MetricsResult(23*3600 + 00*60 + 00.000, 23*3600 + 19*60 + 59.999, 66.666, 5.3, 6.6, 7.5, 5.35, 33.333))
twentyMinuteResults.append(MetricsResult(23*3600 + 20*60 + 00.000, 23*3600 + 39*60 + 59.999, 0, 99999, -1, -1, -1, -1))
twentyMinuteResults.append(MetricsResult(23*3600 + 40*60 + 00.000, 23*3600 + 59*60 + 59.999, 100, 2.1, 7.5, 4.8, 1, 50))
twentyMinuteResults.append(MetricsResult(00*3600 + 13*60 + 01.000, 00*3600 + 13*60 + 01.000, 66.666, 4.3, 6.3, 5.3, 1, 50))

print 'Test1: Valid input'
scenario = Scenario(1, 5.0, sys.path[0], '', 'example_gps.log', 'example_core.log', 4)
FileWriter.createDataSheet(scenario, totalResult, twentyMinuteResults)
print 'Results file printed successfully\n'

"""
EXPECTED: taken from first successful test

Scenario ID: 1
Date: 2014-10-27 02:44:24.171000
Time taken: 0:00:00
GPS log files used: example_gps.log
Asterisk file used: example_core.log
Time offset: 4
Maximum radius of detection: 5.0 meters

Overall testing results :
Detection percentage: 60%