def custom_task(uuid, box, actions_list):
    ret = False
    try:
        folder = path.join(box["temp"], uuid)
        virtual_machine = VirtualBox().find_machine(box["vm"])
        with virtual_machine.create_session() as session:
            session.unlock_machine()
            file_recording(uuid, box["vm"], path.join(folder, box["screen_recorder"]))
            proc = virtual_machine.launch_vm_process(session, "headless", "")
            proc.wait_for_completion(timeout=-1)
            # Do not pass timeout_ms=5*1000 to create_session; some VMs need longer.
            with session.console.guest.create_session(box["user"], box["pass"]) as gs:
                # Fix display issues on Linux guests.
                if box["os"] == "Linux":
                    gs.environment_schedule_set("DISPLAY", ":0")
                sleep(1)
                session.machine.recording_settings.enabled = True
                sleep(1)
                parse_actions(uuid, box, folder, session, gs, actions_list)
                sleep(1)
                session.machine.recording_settings.enabled = False
                take_screenshot(uuid, box["vm"], path.join(folder, box["screenshot"]))
                sleep(1)
                ret = True
            session.console.power_down()
    except Exception as e:
        log_string(uuid, "custom_task Failed {}".format(e), "Red")
    return ret
def save_embedding(column, dp, uid, save_path):
    print("Saving embedding")
    # Drop the ragged final minibatch so the arrays tile evenly.
    effective_examples = dp.get_n_examples() - dp.get_n_examples() % dp.shape()[0]
    embedding = np.zeros((effective_examples, column.top_shape()[-1]))
    labels = np.zeros((effective_examples), dtype=np.uint32)
    i = 0
    mb_size = dp.shape()[0]
    for mb in dp.get_mb():
        embedding[i:i + mb_size, :] = column.encode_mb(mb[0])
        labels[i:i + mb_size] = mb[1]
        i += mb_size  # advance the write offset to the next minibatch
    np.save(path.join(save_path, "embedding_level" + str(uid)), embedding)
    np.save(path.join(save_path, "embedding_labels_level" + str(uid)), labels)
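# Hedged usage sketch (not from the original source): np.save appends ".npy",
# so the arrays written above can be reloaded as shown; "save_path" and the
# level id 0 are illustrative, not values from the original project.
import numpy as np
from os import path

save_path = "./embeddings"  # hypothetical output directory
emb = np.load(path.join(save_path, "embedding_level0.npy"))
lab = np.load(path.join(save_path, "embedding_labels_level0.npy"))
assert emb.shape[0] == lab.shape[0]  # one label per embedded example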
def save(self, video, format='txt'):
    for case in switch(format):
        if case('xls'):
            import xlwt
            if len(video['comments']) > 0:
                wbk = xlwt.Workbook()
                sheet = wbk.add_sheet(tech.validate(video['title']))
                bigs = wbk.add_sheet('Bigrams')
                tris = wbk.add_sheet('Trigrams')
                context = wbk.add_sheet('Context')
                for_nlp = tech.flatten(video['comments'][0])
                # xlwt indexing is zero-based, row then column.
                for idx, comment in enumerate(video['comments'][0]):
                    sheet.write(idx, 0, ' '.join(comment))
                for idx, bigram in enumerate(tech.bigrams(for_nlp, self.term)):
                    bigs.write(idx, 0, ' '.join(bigram))
                for idx, trigram in enumerate(tech.trigrams(for_nlp, self.term)):
                    tris.write(idx, 0, ' '.join(trigram))
                for idx, con in enumerate(tech.context(for_nlp, self.term)):
                    context.write(idx, 0, ' '.join(con))
                wbk.save(tech.validate(video['title']) + '.xls')
                print('Videos, trigrams, bigrams, and contexts saved to XLS files.')
            break
        if case('txt'):
            if len(video['comments']) > 0:
                with open(path.join(dir_path, tech.validate(video['title'])), 'a') as f:
                    f.write(str(video['comments']))
                print('Saved %s as text' % video['title'])
            break
def test_task(uuid, box):
    ret = False
    try:
        folder = path.join(box["temp"], uuid)
        virtual_machine = VirtualBox().find_machine(box["vm"])
        with virtual_machine.create_session() as session:
            session.unlock_machine()
            proc = virtual_machine.launch_vm_process(session, "gui", "")
            proc.wait_for_completion(timeout=-1)
            with session.console.guest.create_session(box["user"], box["pass"]) as gs:
                if box["os"] == "Linux":
                    gs.environment_schedule_set("DISPLAY", ":0")
                    process, stdout, stderr = gs.execute("bin/ls")
                    if len(stdout) > 0:
                        ret = True
                elif box["os"] == "Windows":
                    process, stdout, stderr = gs.execute(
                        "%WINDIR%\\system32\\cmd.exe", ["/C", "dir"])
                    if len(stdout) > 0:
                        ret = True
            session.console.power_down()
    except Exception as e:
        log_string(uuid, "test_task Failed {}".format(e), "Red")
    return ret
def testing():
    """
    A test function for running the experiment manager. Calls all functions.

    :return: Nothing.
    """
    # Quick script to run multiple consecutive experiments.
    for x in range(10):
        path_1 = path.join(getcwd(), "..", "parameters/progsys.txt")
        new_string = ""
        with open(path_1, 'r') as readf:
            for line in readf:
                if "NOVELTY_FACTOR" in line:
                    line = "NOVELTY_FACTOR: " + str(x) + "\n"
                elif "EXPERIMENT_NAME" in line:
                    line = "EXPERIMENT_NAME: NoveltySearchLevi" + str(x) + "\n"
                new_string += line
        with open(path_1, "w") as writef:
            writef.write(new_string)

        # Setup run parameters.
        set_params(sys.argv[1:], create_files=False)

        # Check the correct parameters are set for this set of runs.
        check_params()

        # Execute multiple runs.
        execute_runs()

        # Save spreadsheets and all plots for all runs in the
        # 'EXPERIMENT_NAME' folder.
        parse_stats_from_runs(params['EXPERIMENT_NAME'])
def setup_clothing(size_factor, root_output, root_input):
    build_gen = tuple(init_build(path.join(root_input, "data/body_parts/humanoid.fa")))
    build_act = tuple(parse_actions(path.join(root_input, "data/actions/humanoid.txt")))
    output_images = path.join(root_output, "clothing")
    if not path.exists(output_images):
        makedirs(output_images)
    output_data = path.join(root_output, "clothing/data")
    if not path.exists(output_data):
        makedirs(output_data)
    resource_image_path = path.join(root_input, "images/characters/humanoid.unnamed/")
    appendage_files = {app: AppendageFile(path.join(output_data, app.bodypart + ".txt"))
                       for app in build_gen}
    complete_file = path.join(output_images, "COMPLETE")
    if not path.exists(complete_file):
        p = ProgressBar(len(build_act) * len(build_gen), "Making pictures...")
        for app in build_gen:
            # Order is preserved when called with no change in between.
            joints = app.joints.keys()
            coords = app.joints.values()
            if app.bodypart[-2:] == "_l":
                # Left parts reuse the right-side image, tinted to distinguish them.
                image_filename = "".join((resource_image_path, app.bodypart[:-2] + "_r", ".png"))
                boxed_pixels, box, boxed_points = boxit(image_filename, points=coords)  # box also fits now
                boxed_pixels = tint(boxed_pixels, 20)
            else:
                image_filename = "".join((resource_image_path, app.bodypart, ".png"))
                boxed_pixels, box, boxed_points = boxit(image_filename, points=coords)  # box also fits now
            small_pixels, small_size, small_points = smallerize(boxed_pixels, size_factor,
                                                                box, points=boxed_points)
            for act_struct in build_act:
                p.update()
                angle_list = act_struct.movements[app.bodypart]
                for angle in angle_list:
                    output_name = path.join(output_images,
                                            "".join((app.bodypart, "_", str(angle), ".png")))
                    rotate_pixels, rotate_size, rotate_points = rotate(
                        small_pixels, app.origin, angle, small_size, points=small_points)
                    # save(output_name, small_pixels, small_size)  # rotate_pixels, rotate_size
                    appendage_files[app].add(angle, dict(parallel(joints, rotate_points)))
        for appfile in appendage_files.values():
            appfile.push()
        t = localtime()
        write(complete_file, "%d/%d/%d@%d:%d:%d" % (t.tm_year, t.tm_mon, t.tm_mday,
                                                    t.tm_hour, t.tm_min, t.tm_sec))
        p.close()
def process_vocab(self, vera_path):
    # Access all the *-vera.ai files under the given path and generate the superset vocab.
    vocab_path = path.join(vera_path, 'vocab.vera')
    vocab = set([])
    for f in glob(path.join(vera_path, '*-vera.ai')):
        # For each vera.ai file, create a set of words.
        lines = open(f).readlines()
        if len(lines) > 0:
            vocab = vocab | set(lines)
    if path.exists(vocab_path):
        # Back up the existing vocab file as vocab-backup.vera before replacing it.
        head, tail = path.split(vocab_path)
        idx = tail.rfind('.')
        tail = tail[:idx] + '-backup' + tail[idx:]
        if path.exists(path.join(head, tail)):
            os.unlink(path.join(head, tail))
        os.rename(vocab_path, path.join(head, tail))
    hf = open(vocab_path, 'w')
    for item in vocab:
        hf.write(item)
    hf.close()
def restoreGame(self):
    if self.appSettings.contains('pickleFilename'):
        with open(path.join(path.dirname(path.realpath(__file__)),
                            self.appSettings.value('pickleFilename', type=str)),
                  'rb') as pickleFile:
            return load(pickleFile)
    else:
        self.logger.critical('No pickle Filename')
def create_wordcloud(self):
    file = open("snippet_twitter.txt", "r", encoding='utf8')
    text = file.read()
    currdir = path.dirname(__file__)
    # Create a numpy array for the wordcloud mask image.
    mask = np.array(Image.open(path.join(currdir, "cloud.png")))
    # Create the set of stopwords.
    stopwords = set(STOPWORDS)
    # Create the wordcloud object.
    wc = WordCloud(background_color="white", max_words=200, mask=mask,
                   stopwords=stopwords)
    # Generate the wordcloud and save it.
    wc.generate(text)
    wc.to_file(path.join(currdir, "static/img/h.jpg"))
def saveGame(self):
    if self.createLogFile:
        self.logger.debug("Saving Game")
    saveItem = ()
    if self.appSettings.contains('pickleFilename'):
        with open(path.join(path.dirname(path.realpath(__file__)),
                            self.appSettings.value('pickleFilename', type=str)),
                  'wb') as pickleFile:
            dump(saveItem, pickleFile)
    else:
        self.logger.critical("No pickle Filename")
def stidy(structure, ang, d1, d2, d3):
    PLATON = find_executable('platon')
    if not PLATON:
        PLATON = '../bin/platon'
    with NamedTemporaryFile(suffix='.cif') as temp_file:
        # Write a temporary cif file.
        CifWriter(structure).write_file(temp_file.name)
        temp_file.flush()
        # Run ADDSYM_SHX to make PLATON recognize symmetries.
        addsym_shx_process = Popen(['platon', '-o', temp_file.name],
                                   stdout=PIPE, stderr=STDOUT, stdin=PIPE)
        try:
            addsym_shx_process.communicate(
                input='ADDSYM_SHX {} {} {} {}'.format(ang, d1, d2, d3).encode())
        except TimeoutExpired as t:
            return ExitCode(408, 'ADDSYM_SHX timed out: {}'.format(t))
        except Exception as e:
            return ExitCode(500, 'ADDSYM_SHX crashed: {}'.format(e))
        # Call STIDY on the ADDSYM_SHX output.
        temp_file_dirname, temp_file_basename = path.split(temp_file.name)
        temp_file_basename_extless, _ = path.splitext(temp_file_basename)
        temp_file_basename_spf = temp_file_basename_extless + '_pl.spf'
        temp_file_spf = path.join(temp_file_dirname, temp_file_basename_spf)
        if not path.isfile(temp_file_spf):
            return ExitCode(500, 'ADDSYM_SHX failed to write *_pl.spf file')
        stidy_process = Popen(['platon', '-o', temp_file_spf],
                              stdout=PIPE, stderr=STDOUT, stdin=PIPE)
        try:
            stidy_data = stidy_process.communicate(input=b'STIDY')
        except TimeoutExpired as t:
            return ExitCode(408, 'STIDY timed out: {}'.format(t))
        except Exception as e:
            return ExitCode(500, 'STIDY crashed: {}'.format(e))
        stidy_output = stidy_data[0].decode('utf-8')
    # Clean up files.
    if path.isfile('check.def'):
        remove('check.def')
    return stidy_output
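# Hedged usage sketch (not from the original source): stidy() returns either
# the STIDY text report or an ExitCode on failure, so a caller branches on
# the result type. The numeric arguments below are illustrative.
result = stidy(structure, 90, 1, 1, 1)  # structure: a pymatgen Structure
if isinstance(result, str):
    print(result)                       # raw STIDY output
else:
    print('PLATON failed:', result)     # ExitCode with status and message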
def process_inputs():
    dir = 'C:/Workspace/Bills/input'
    ext = '*-raw.txt'
    inputs = list()
    dirs = ['train', 'test']
    for d1 in dirs:
        files = glob(join(dir, d1, ext))
        for f in files:
            d = set([])
            if path.isfile(f):
                txtfile = open(f).readlines()
                raw = list()
                for line in txtfile:
                    emails = regex_email.findall(line)
                    if len(emails):
                        for email in emails:
                            raw.append(email)
                    else:
                        flag, txt = process_txt(line)
                        if flag and len(txt) > 2:
                            raw.append(txt)
                if len(raw) > 0:
                    for sentence in raw:
                        emails = regex_email.findall(sentence)
                        if len(emails):
                            words = emails
                        else:
                            words = nltk.word_tokenize(sentence)
                        d = d | set(words)
                sd = set(sorted(d))
                vocab = set([])
                porter = nltk.PorterStemmer()
                # Stem everything except recognized keywords.
                for word in sd:
                    if not is_key_word(word):
                        stemmed_word = porter.stem(word)
                    else:
                        stemmed_word = word
                    vocab.add(stemmed_word)
                head, tail = path.split(f)
                if tail.rfind('.txt') != -1:
                    s = tail.replace('-raw.txt', '-input.txt')
                    txtfilepath = path.join(head, s)
                    txtf = open(txtfilepath, 'w+')
                    for item in vocab:
                        txtf.write(item)
                        txtf.write('\n')
                    txtf.close()
def run():
    ext = '*.jpg'
    dir = 'C:/Workspace/Bills'
    files = glob(join(dir, 'image', ext))
    txtdir = path.join(dir, 'input')
    for f in files:
        if path.isfile(f):
            # OCR the scanned bill image.
            img = Image.open(f)
            txt = pytesseract.image_to_string(img)
            head, tail = path.split(f)
            new_tail = tail
            if tail.rfind('.jpg') != -1:
                new_tail = tail.replace('.jpg', '-raw.txt')
            txtfilepath = path.join(txtdir, new_tail)
            flag, raw = process_txt(txt)
            if flag:
                txtf = open(txtfilepath, 'w+')
                for i in raw:
                    txtf.write(i)
                    txtf.write('\n')
                txtf.close()
def check_classifications(main_directories, model_file_path, n_stars):
    from os import path
    from IPython.display import display
    for i in range(n_stars):
        star_sub_directories = []
        for directory in main_directories:
            star_sub_directories.append(path.join(directory, str(i)))
        for star_directory in star_sub_directories:
            player_star_directories = glob(star_directory + '/*/')
            for player_dir in player_star_directories:
                image_paths, predictions = classify_images(player_dir, model_file_path)
                if image_paths != []:
                    image_paths = np.array(image_paths)
                    # Show the images whose predicted star rating disagrees with i.
                    indices = np.where(predictions != i)
                    pil_images = pil_images_from_paths(image_paths[indices], True)
                    print(image_paths[indices])
                    for img in pil_images:
                        display(img)
def parse_actions(uuid, box, folder, session, gs, _list):
    encoded_list = literal_eval(dumps(_list))
    for action in encoded_list:
        # Mark the action as started in the worker log.
        find_set_update_item(mongo_settings_docker["worker_db"],
                             mongo_settings_docker["worker_col_logs"],
                             {"uuid": uuid, "actionslist.uuid": action["uuid"]},
                             {"$set": {"actionslist.$.status": "started"}})
        ret = False
        process, stdout, stderr = None, None, None
        try:
            if action["type"] == "run":
                if action["input"]["application"] != "":
                    if action["input"]["arguments"] != "":
                        if box["os"] == "Windows":
                            gs.process_create(
                                action["input"]["application"],
                                [action["input"]["application"]] +
                                literal_eval(action["input"]["arguments"]),
                                [], [ProcessCreateFlag(1)], 0)
                            ret = True
                        if box["os"] == "Linux":
                            gs.process_create(
                                "bin/sh",
                                ["-c"] + [action["input"]["application"]] +
                                literal_eval(action["input"]["arguments"]),
                                [], [ProcessCreateFlag(1)], 0)
                            ret = True
                    else:
                        if box["os"] == "Windows":
                            gs.process_create(action["input"]["application"],
                                              [], [], [ProcessCreateFlag(1)], 0)
                            ret = True
                        if box["os"] == "Linux":
                            gs.process_create("bin/sh",
                                              ["-c"] + [action["input"]["application"]],
                                              [], [ProcessCreateFlag(1)], 0)
                            ret = True
            elif action["type"] == "runwithtimeout":
                if action["input"]["application"] != "" and action["input"]["timeout"] != "":
                    if action["input"]["arguments"] != "":
                        process, stdout, stderr = gs.execute(
                            action["input"]["application"],
                            literal_eval(action["input"]["arguments"]),
                            timeout_ms=int(action["input"]["timeout"]) * 1000)
                        ret = True
                    else:
                        process, stdout, stderr = gs.execute(
                            action["input"]["application"],
                            timeout_ms=int(action["input"]["timeout"]) * 1000)
                        ret = True
            elif action["type"] == "createfile":
                if action["input"]["filepath"] != "":
                    if box["os"] == "Windows":
                        process, stdout, stderr = gs.execute(
                            "%WINDIR%\\system32\\cmd.exe",
                            ["/C", "call>{}".format(action["input"]["filepath"])])
                        ret = True
                    if box["os"] == "Linux":
                        process, stdout, stderr = gs.execute(
                            "bin/sh",
                            ["-c", "touch {}".format(action["input"]["filepath"])])
                        ret = True
            elif action["type"] == "deletefile":
                if action["input"]["filepath"] != "":
                    if box["os"] == "Windows":
                        process, stdout, stderr = gs.execute(
                            "%WINDIR%\\system32\\cmd.exe",
                            ["/C", "del", action["input"]["filepath"]])
                        ret = True
                    if box["os"] == "Linux":
                        process, stdout, stderr = gs.execute(
                            "bin/sh",
                            ["-c", "rm {}".format(action["input"]["filepath"])])
                        ret = True
            elif action["type"] == "downloadfromvm":
                if action["input"]["filepath"] != "" and \
                        action["input"]["filename"] != "" and \
                        action["input"]["filename"] not in box["reserved"]:
                    progress = gs.file_copy_from_guest(
                        action["input"]["filepath"],
                        path.join(folder, action["input"]["filename"]),
                        [FileCopyFlag(0)])
                    progress.wait_for_completion(timeout=-1)
                    ret = True
            elif action["type"] == "wait":
                if action["input"]["timeout"] != "":
                    sleep(int(action["input"]["timeout"]))
                    ret = True
            elif action["type"] == "screenshot":
                if action["input"]["filename"] != "":
                    take_screenshot(uuid, box["vm"],
                                    path.join(folder, action["input"]["filename"]))
                    ret = True
            elif action["type"] == "uploadtovm":
                if action["input"]["filepath"] != "" and \
                        action["input"]["filename"] != "" and \
                        action["input"]["filename"] not in box["reserved"]:
                    file = get_item_fs(mongo_settings_docker["malware"],
                                       {"uuid": action["uuid"]})
                    with open(path.join(folder, action["input"]["filename"]), "wb") as f:
                        f.write(file)
                    progress = gs.file_copy_to_guest(
                        path.join(folder, action["input"]["filename"]),
                        action["input"]["filepath"], [FileCopyFlag(0)])
                    progress.wait_for_completion(timeout=-1)
                    ret = True
            elif action["type"] == "disablenetwork":
                if action["input"]["interface"] != "":
                    session.machine.get_network_adapter(
                        int(action["input"]["interface"])).cable_connected = False
                    ret = True
            elif action["type"] == "enablenetwork":
                if action["input"]["interface"] != "":
                    session.machine.get_network_adapter(
                        int(action["input"]["interface"])).cable_connected = True
                    ret = True
            if "saveoutput" in action["input"]:
                if action["input"]["saveoutput"] == "true":
                    find_set_update_item(
                        mongo_settings_docker["worker_db"],
                        mongo_settings_docker["worker_col_logs"],
                        {"uuid": uuid, "actionslist.uuid": action["uuid"]},
                        {"$set": {"actionslist.$.output": stdout}})
            log_string(uuid, "Action uuid {} type {} returned {}".format(
                action["uuid"], action["type"], ret), "Green")
        except Exception as e:
            log_string(uuid,
                       "parse_actions failed on action uuid {} type {} returned {} exception {}"
                       .format(action["uuid"], action["type"], ret, e), "Red")
        sleep(1)
        find_set_update_item(mongo_settings_docker["worker_db"],
                             mongo_settings_docker["worker_col_logs"],
                             {"uuid": uuid, "actionslist.uuid": action["uuid"]},
                             {"$set": {"actionslist.$.status": ret}})
#!/usr/bin/python
import sys
sys.path.append("/Users/burtnolej/Dev/pythonapps/util")
from filesystem_util import read_dir, trim_justnotes_int_suffix, BadExtension
from misc_util import Logger
from tag_util import TaggedJSONFile
from shutil import copyfile, copystat, copy2
from os import path

#SOURCE_DIR = '/Users/burtnolej/Documents/Justnotes.Mar1'
SOURCE_DIR = '/Users/burtnolej/Documents/Justnotes.test'
TARGET_DIR = '/Users/burtnolej/Documents/Justnotes.new'

#l = Logger('/tmp/log.txt')

for abs_fn in read_dir(SOURCE_DIR):
    rel_fn = abs_fn.split("/")[-1]
    print('cp', rel_fn, end=' ')
    try:
        # Only copy files whose names carry no Justnotes integer suffix.
        if abs_fn == trim_justnotes_int_suffix(abs_fn):
            new_abs_fn = path.join(TARGET_DIR, rel_fn)
            print('ok')
            copy2(abs_fn, new_abs_fn)
        else:
            print('skip')
    except BadExtension as e:
        print('bad file extension', e)
            'stroke_opacity': 1.0,
            'stroke_weight': 3,
            'fill_color': map_colors[district.id],
            'fill_opacity': .5,
            'path': [],
            'name': district.name
        }
        coord = ast.literal_eval(district.coordinates)
        for c in coord:
            pol['path'].append([c[1], c[0]])
        polygons.append(pol)
    return polygons, legend


if __name__ == '__main__':
    from os import path
    import os
    extra_dirs = ['static', 'templates']
    extra_files = extra_dirs[:]
    # Watch every file under the static and template folders for reload.
    for extra_dir in extra_dirs:
        for dirname, dirs, files in os.walk(extra_dir):
            for filename in files:
                filename = path.join(dirname, filename)
                if path.isfile(filename):
                    extra_files.append(filename)
    app.run(host='0.0.0.0', extra_files=extra_files, debug=True,
            port=int(os.environ.get('PORT', 5000)))
def parse_stats_from_runs(experiment_name):
    """
    Analyses a list of given stats from a group of runs saved under an
    "experiment_name" folder. Creates a summary .csv file which can be used
    by plotting functions in utilities.save_plot. Saves a file of the format:

        run0_gen0       run1_gen0       .   .   .   run(n-1)_gen0
        run0_gen1       run1_gen1       .   .   .   run(n-1)_gen1
        run0_gen2       run1_gen2       .   .   .   run(n-1)_gen2
        .               .               .   .   .   .
        .               .               .   .   .   .
        .               .               .   .   .   .
        run0_gen(n-1)   run1_gen(n-1)   .   .   .   run(n-1)_gen(n-1)
        run0_gen(n)     run1_gen(n)     .   .   .   run(n-1)_gen(n)

    Generated file is compatible with
    utilities.save_plot.save_average_plot_across_runs().

    :param experiment_name: The name of a collecting folder within the
           ./results folder which holds multiple runs.
    :return: Nothing.
    """
    # Since results files are not kept in source directory, need to escape
    # one folder.
    file_path = path.join(getcwd(), "..", "results")

    # Check for use of experiment manager.
    if experiment_name:
        file_path = path.join(file_path, experiment_name)
    else:
        s = "scripts.parse_stats.parse_stats_from_runs\n" \
            "Error: experiment name not specified."
        raise Exception(s)

    # Find list of all runs contained in the specified folder.
    runs = [run for run in listdir(file_path)
            if path.isdir(path.join(file_path, run))]

    # Place to store the header for full stats file.
    header = ""

    # Array to store all stats.
    full_stats = []

    # Get list of all stats to parse. Check stats file of first run from
    # runs folder.
    ping_file = path.join(file_path, str(runs[0]), "stats.tsv")

    # Load in data and get the names of all stats.
    stats = list(pd.read_csv(ping_file, sep="\t"))

    # Make list of stats we do not wish to parse.
    no_parse_list = ["gen", "total_inds", "time_adjust"]

    for stat in [stat for stat in stats if stat not in no_parse_list and
                 not stat.startswith("Unnamed")]:
        # Iterate over all stats.
        print("Parsing", stat)
        summary_stats = []

        # Iterate over all runs.
        for run in runs:
            # Get file name.
            file_name = path.join(file_path, str(run), "stats.tsv")

            # Load in data.
            data = pd.read_csv(file_name, sep="\t")

            try:
                # Try to extract specific stat from the data.
                if list(data[stat]):
                    summary_stats.append(list(data[stat]))
                else:
                    s = "scripts.parse_stats.parse_stats_from_runs\n" \
                        "Error: stat %s is empty for run %s." % (stat, run)
                    raise Exception(s)

            except KeyError:
                # The requested stat doesn't exist.
                s = "scripts.parse_stats.parse_stats_from_runs\nError: " \
                    "stat %s does not exist in run %s." % (stat, run)
                raise Exception(s)

        try:
            # Generate numpy array of all stats.
            summary_stats = np.array(summary_stats)

            # Append stat mean to header.
            header = header + stat + "_mean,"
            summary_stats_mean = np.nanmean(summary_stats, axis=0)
            full_stats.append(summary_stats_mean)

            # Append stat standard deviation to header.
            header = header + stat + "_std,"
            summary_stats_std = np.nanstd(summary_stats, axis=0)
            full_stats.append(summary_stats_std)
            summary_stats = np.transpose(summary_stats)

            # Save stats as a .csv file.
            np.savetxt(path.join(file_path, (stat + ".csv")), summary_stats,
                       delimiter=",")

            # Graph stat by calling graphing function.
            save_average_plot_across_runs(path.join(file_path, (stat + ".csv")))

        except FloatingPointError:
            print("scripts.stats_parser.parse_stats_from_runs\n"
                  "Warning: FloatingPointError encountered while parsing %s "
                  "stats." % (stat))

    # Convert and rotate full stats.
    full_stats = np.array(full_stats)
    full_stats = np.transpose(full_stats)

    # Save full stats to csv file.
    np.savetxt(path.join(file_path, "full_stats.csv"), full_stats,
               delimiter=",", header=header[:-1])
plt.rcParams.update({
    "text.usetex": True,
    "font.family": "serif",
    "font.serif": ["Palatino"],
    "font.size": 10
})

# Constants
hz2rps = 2 * np.pi
rps2hz = 1 / hz2rps


#%% File Lists
import os.path as path

pathBase = path.join('/home', 'rega0051', 'FlightArchive', 'Thor')
#pathBase = path.join('G:', 'Shared drives', 'UAVLab', 'Flight Data', 'Thor')

fileList = {}
flt = 'FLT126'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Thor' + flt, 'Thor' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Thor' + flt, 'thor.json')
fileList[flt]['def'] = path.join(pathBase, 'Thor' + flt, 'thor_def.json')

flt = 'FLT127'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Thor' + flt, 'Thor' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Thor' + flt, 'thor.json')
fileList[flt]['def'] = path.join(pathBase, 'Thor' + flt, 'thor_def.json')
def lineBreak():
    # Open the config file.
    config = load(open(CONFIG_FILE, encoding='utf_8'))
    try:
        fileDirectory = config['fileDirectory']
        # Make sure the directory is valid.
        if not path.exists(fileDirectory):
            if fileDirectory == '':
                fileDirectory = '.'
            else:
                print('Could not find the directory "{}", please check to make sure it is correct.'
                      .format(fileDirectory))
                sleep(2)
                exit('Exiting.')
    except:
        print('Could not find the file directory defined in config.json')
        sleep(2)
        exit('Exiting.')

    # Load values from config.json.
    try:
        charMax = int(config['maxCharacters'])
        breakCharacter = config['breakCharacter']
        fileTypes = config['fileTypes'].split(', ')
        # If no file types are defined, exit the program.
        if fileTypes == ['']:
            print('No file types have been defined in config.json.')
            sleep(2)
            exit('Exiting.')
        selectedType = config['chosenType']
        skipCountCharacters = config['skipCountCharacters'].split(', ')
        if skipCountCharacters == ['']:
            skipCountCharacters = ''
        setEncoding = config['encoding']
        cleanupEnabled = config['enableCleanup']
        lineTypeIgnoreJson = config[selectedType]
        lineTypeIgnoreList = []
        for key, value in lineTypeIgnoreJson.items():
            lineTypeIgnoreList.append(value)
    except:
        print('Some values in config.json can not be found or are entered incorrectly.')
        sleep(2)
        exit('Exiting.')

    for file in listdir(fileDirectory):
        if file.endswith(tuple(fileTypes)):
            # Check and handle different encodings.
            try:
                openFile = open(path.join(fileDirectory, file), 'r', encoding=setEncoding)
            except:
                try:
                    openFile = open(path.join(fileDirectory, file), 'r', encoding='utf_8')
                    print('Opened the file in utf-8 instead of {}.'.format(setEncoding))
                except UnicodeDecodeError:
                    print('Could not decode file "{}", set a different encoding in config.json'
                          .format(file))
                    continue
            readFile = openFile.readlines()
            outputFile = open(OUTPUT_DIR + file, 'w', encoding=setEncoding)
            for line in readFile:
                if any(x in line for x in lineTypeIgnoreList):
                    outputFile.write(line)
                else:
                    # If cleanup is enabled in the config:
                    if cleanupEnabled == 'true':
                        # Remove existing break characters.
                        line = line.replace(breakCharacter, '')
                        # Replace double spaces with single spaces.
                        line = line.replace('  ', ' ')
                        # Replace those weird ellipses.
                        line = line.replace('…', '...')
                    # Split the string into a list of words.
                    wordList = line.split(' ')
                    characterCount = 0
                    finishedLine = ''
                    for word in wordList:
                        # Count the characters in the word, plus one for the
                        # space that was taken out.
                        characterCount += len(word) + 1
                        # If the word has characters that are set to be
                        # skipped, subtract them from the count.
                        if any(x in word for x in skipCountCharacters):
                            for value in skipCountCharacters:
                                if value in word:
                                    characterCount -= len(value)
                        # If the character count exceeds the maximum allowed,
                        # insert a break and restart the count at this word.
                        if characterCount > charMax:
                            finishedLine += breakCharacter
                            characterCount = len(word) + 1
                            finishedLine += word
                            continue
                        finishedLine += ' ' + word
                    # Remove the space at the beginning of the string.
                    outputFile.write(finishedLine[1:])
            openFile.close()
            outputFile.close()
            print('Successfully added line breaks to "{}".'.format(file))
        else:
            print('File "{}" was skipped over.'.format(file))
            continue
def __enter__(self):
    # Load the following assets.
    with open(path.join('..', 'assets', 'pingpong.assets')) as assets:
        self.VBOs = {}
        self.preferencesGroup = (('logFile', self.logFileName), )
        # Write settings values.
        for setting, variableName in self.preferencesGroup:
            # if self.appSettings.contains(setting):
            self.appSettings.setValue(setting, variableName)
        self.close()

    def cancelClickedHandler(self):
        self.close()


if __name__ == "__main__":
    QCoreApplication.setOrganizationName("Jazmine's Organization")
    QCoreApplication.setOrganizationDomain("jazminezation.com")
    QCoreApplication.setApplicationName("TicTacToe")
    appSettings = QSettings()
    startingFolderName = path.dirname(path.realpath(__file__))
    if appSettings.contains('logFile'):
        logFileName = appSettings.value('logFile', type=str)
    else:
        logFileName = 'ticTactoe.log'
        appSettings.setValue('logFile', logFileName)
    basicConfig(filename=path.join(startingFolderName, logFileName),
                level=INFO,
                format='%(asctime)s %(name)-8s %(levelname)-8s %(message)s')

    app = QApplication(sys.argv)
    testApp = Board()
    testApp.show()
    sys.exit(app.exec_())
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS
########## END APP CONFIGURATION

AUTH_USER_MODEL = 'usermaster.UserMaster'

########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOG_DIR = path.join(DJANGO_ROOT, 'log')
LOGFILE_SIZE = 5 * 1024 * 1024
LOGFILE_COUNT = 5
environ['LOG_LEVEL'] = 'DEBUG'
LOG_FILE = path.join(LOG_DIR, 'rsc.log')
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'standard': {
            'format': "[%(asctime)s] - %(levelname)s - [%(name)s.%(funcName)s:%(lineno)d] %(message)s",
        },
    bi_prob = bigram_probs(string1, words, clean_text)
    string2 = np.random.choice(words, p=bi_prob)
    sol = string1 + ' ' + string2
    for i in tqdm(range(n - 2)):
        tri_prob = trigram_probs(string1, string2, words, clean_text)
        new = np.random.choice(words, p=tri_prob)
        sol = sol + ' ' + new
        string1 = string2
        string2 = new
    return sol


np.seterr(divide='ignore', invalid='ignore')
ruta = path.join(_.DIRNAME, 'files/listword.txt')
rutaf = ruta.replace('\\', '/')
with open(rutaf) as f:
    text = f.read()

tokenizer = RegexpTokenizer(r'\w+')
clean_text = np.array(tokenizer.tokenize(text.lower()))

# Join the tokens with spaces so whole-word counts can match ' ' + word + ' '.
long_string = ' '
for i in clean_text:
    long_string = long_string + i + ' '

frequency = []
words = []
for word in set(clean_text):
    frequency.append(long_string.count(' ' + word + ' '))
path.insert(0, abspath(join(dirname(argv[0]), "..")))
path.insert(0, abspath(join(dirname(argv[0]), "..", 'Core')))
del path, argv, dirname, abspath, join

from Core import Loader
from Core import OpenData

# Constants
hz2rps = 2 * np.pi
rps2hz = 1 / hz2rps


#%% File Lists
import os.path as path

pathBase = path.join('/home', 'rega0051', 'FlightArchive', 'Huginn')
#pathBase = path.join('G:', 'Shared drives', 'UAVLab', 'Flight Data', 'Huginn')

fileList = {}
flt = 'FLT03'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Huginn' + flt, 'Huginn' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Huginn' + flt, 'huginn.json')
fileList[flt]['def'] = path.join(pathBase, 'Huginn' + flt, 'huginn_def.json')

flt = 'FLT04'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Huginn' + flt, 'Huginn' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Huginn' + flt, 'huginn.json')
# Test reading the logging configuration from a file.
import logging
import logging.config
from os import path

folder = r"C:\project\python\config"
#log_path = "logging.conf"
log_path = f"{folder}/logging.conf"
log_file_path = path.join(path.dirname(path.abspath(__file__)), log_path)
#logging.config.fileConfig(log_file_path)
logging.config.fileConfig(log_path)
#logging.config.fileConfig(fname='logtest.conf', disable_existing_loggers=False)

logs = logging.getLogger('error')
logs.error('errorsssss')
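# A hedged sketch of the logging.conf layout the script above assumes (the
# section contents are illustrative, not from the original project);
# fileConfig() needs a root logger plus the 'error' logger that
# getLogger('error') asks for:
#
#   [loggers]
#   keys=root,error
#
#   [handlers]
#   keys=consoleHandler
#
#   [formatters]
#   keys=simple
#
#   [logger_root]
#   level=WARNING
#   handlers=consoleHandler
#
#   [logger_error]
#   level=ERROR
#   handlers=consoleHandler
#   qualname=error
#   propagate=0
#
#   [handler_consoleHandler]
#   class=StreamHandler
#   level=ERROR
#   formatter=simple
#   args=(sys.stderr,)
#
#   [formatter_simple]
#   format=%(asctime)s %(levelname)s %(message)s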
def move(self, dirPath):
    gamma.move(meta=self.meta, out=dirPath)
    self.meta = path.join(dirPath, self.meta)
# plt.rcParams.update({
#     "text.usetex": True,
#     "font.family": "serif",
#     "font.serif": ["Palatino"],
#     "font.size": 10
# })

# Constants
hz2rps = 2 * np.pi
rps2hz = 1 / hz2rps


#%% File Lists
import os.path as path

# pathBase = path.join('/home', 'rega0051', 'FlightArchive', 'Thor')
pathBase = path.join('G:', 'Shared drives', 'UAVLab', 'Flight Data', 'Thor')

fileList = {}
flt = 'FLT126'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Thor' + flt, 'Thor' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Thor' + flt, 'thor.json')
fileList[flt]['def'] = path.join(pathBase, 'Thor' + flt, 'thor_def.json')

flt = 'FLT127'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Thor' + flt, 'Thor' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Thor' + flt, 'thor.json')
fileList[flt]['def'] = path.join(pathBase, 'Thor' + flt, 'thor_def.json')

flt = 'FLT128'