def get_commandline_arguments(): description = "Script to get microhaplotype allele counts" epilog = "EXAMPLE: python main.py --bam in.bam --bed in.bed --info info.txt --out out.txt --mincov 0.03" parser = argparse.ArgumentParser(description=description, epilog=epilog) required_args_group = parser.add_argument_group('required arguments') required_args_group.add_argument( '-b', '--bam', dest='bam_file_path', required=True, type=lambda x: utils.is_valid_file(parser, x)) required_args_group.add_argument( '-e', '--bed', dest='bed_file_path', required=True, type=lambda x: utils.is_valid_file(parser, x)) parser.add_argument('-i', '--info', dest='info_file_path', type=lambda x: utils.is_valid_file(parser, x)) parser.add_argument('-o', '--out', dest='out_file_path') parser.add_argument( '-m', '--mincov', help='Allowed values 0-1', dest='min_coverage', default=0.02, type=lambda x: utils.is_valid_min_cov_value(parser, x)) return parser.parse_args()
def get_commandline_arguments(): description = "Script to compare different microhaplotype outputs" epilog = "EXAMPLE: python main.py --file1 file1.txt --file2 file2.json" parser = argparse.ArgumentParser(description=description, epilog=epilog) required_args_group = parser.add_argument_group('required arguments') required_args_group.add_argument('-f', '--file1', dest='file1_path', required=True, type=lambda x: utils.is_valid_file(parser, x)) required_args_group.add_argument('-i', '--file2', dest='file2_path', required=True, type=lambda x: utils.is_valid_file(parser, x)) return parser.parse_args()
def read_source(self, filepath=None):
    """
    Reads source code and stores instructions in a list
    :param filepath: path to the .asm source file
    """
    if filepath:
        self.filename = filepath
    is_valid, file_ext = is_valid_file(self.filename)
    if not is_valid and file_ext != 'asm':
        raise AssertionError(
            f'Unsupported file type [{self.filename}]. Only accepting files ending in .asm')
    source = open(self.filename, 'r')
    lines = source.readlines()
    # Remove comments from instructions
    for i in range(len(lines)):
        lines[i] = re.sub(r'//\s*(\w.+)*', '', lines[i])
    verify_indentation(lines[0], 0, source)
    self.micro_instr.append(lines[0].strip())
    for i in range(1, len(lines)):
        if lines[i] != '\n':
            verify_indentation(lines[i], i, source)
            compare_indentation_between_lines(lines[i - 1], lines[i], i, source)
            self.micro_instr.append(lines[i].strip())
    lines.clear()
    source.close()
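# read_source above clearly assumes a different is_valid_file: one that takes a
# single path and returns an (is_valid, extension) pair, which the caller then
# combines into its own check. A hypothetical sketch consistent with that call
# site:
import os

def is_valid_file(filename):
    # Report existence together with the lowercased extension so the caller
    # can decide how to fail.
    file_ext = os.path.splitext(filename)[1].lstrip('.').lower()
    return os.path.isfile(filename), file_ext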
def test_correct_pattern(self):
    data = {}
    data['sixteenth_per_beat'] = 4
    data['beats_per_bar'] = 4
    data['num_bars'] = 1
    data['sixteenth_duration'] = 150
    data['swing'] = False
    empty = [
        [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
    ]
    data['beat_pattern'] = [empty, empty, empty, empty]
    data['swing_amount'] = 0.3
    data['is_active'] = [1, 1, 1, 1, 1, 1]
    with open('test.drum', 'w') as outfile:
        json.dump(data, outfile)
    self.assertTrue(utils.is_valid_file('test.drum'))
    os.remove('test.drum')
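# The drum-machine tests in this collection imply a boolean utils.is_valid_file
# that checks the .drum extension, file existence, parseability, and required
# JSON keys. A sketch under those assumptions (the exact checks are inferred
# from the tests, not taken from the real utils module):
import json
import os

REQUIRED_KEYS = ('sixteenth_per_beat', 'beats_per_bar', 'num_bars',
                 'sixteenth_duration', 'swing', 'beat_pattern',
                 'is_active', 'swing_amount')

def is_valid_file(path):
    # Reject anything that is not an existing .drum file.
    if not path.endswith('.drum') or not os.path.isfile(path):
        return False
    # Reject files that fail to parse or lack a required attribute.
    try:
        with open(path) as f:
            data = json.load(f)
    except (OSError, json.JSONDecodeError):
        return False
    return all(key in data for key in REQUIRED_KEYS)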
def run():
    """Parse command line arguments and start workflow."""
    parser = argparse.ArgumentParser(description='Run training of Siamese CBOW')
    parser.add_argument('corpus_name', type=str, choices=['simple'],
                        help='Name of the corpus to use for training. simple: one tokenized sentence per line')
    parser.add_argument('corpus_file', type=lambda x: is_valid_file(parser, x),
                        help='Path to the data file of the chosen corpus.')
    parser.add_argument('output_file', type=str,
                        help='Path to the file where the trained embeddings should be written. '
                             'All command line arguments can be used in the name. '
                             'Example: siamese_kisti_dim{dim}_ep{epochs}_neg{neg_sampling}.w2v')
    parser.add_argument('-dim', nargs='?', type=int, default=100,
                        help='Dimension of the trained embeddings.')
    parser.add_argument('-min_count', nargs='?', type=int, default=1,
                        help='Learn embeddings only for words with count >= min_count.')
    parser.add_argument('-neg_sampling', nargs='?', type=int, default=2,
                        help='Number of negative sentences sampled.')
    parser.add_argument('-epochs', nargs='?', type=int, default=500,
                        help='Number of training epochs.')
    parser.add_argument('-batch_size', nargs='?', type=int, default=128,
                        help='Size of training batches.')
    parser.add_argument('-verbose', nargs='?', type=int, choices=[0, 1, 2], default=2,
                        help='Verbosity of output.')
    parser.add_argument('-init_weights', type=lambda x: is_valid_file(parser, x),
                        help='Read the weights from this word2vec-formatted file to initialize embeddings. '
                             'Use this to resume previous training or to optimize embeddings from other sources.')
    args = parser.parse_args()
    args = vars(args)
    if args['verbose'] > 0:
        print('got command line arguments:')
        print(args)
    workflow(**args)
def main():
    # Create peers
    # Parse arguments
    parser = argparse.ArgumentParser(description='Read in number of seeders.')
    parser.add_argument('-seed', action='store_true',
                        help='should these peers seed or download? (default: True)')
    parser.add_argument('-num', type=int, default=3, required=True,
                        help='how many peers should we spin up? (default: 3)')
    parser.add_argument('-tor', type=lambda x: is_valid_file(parser, x), required=True,
                        help='What torrent file should we use?')
    parser.add_argument('-dest', required=True,
                        help='What is the name of the file you want to seed/download?')
    parser.add_argument('-log', required=True, help='What should we name the logFile?')
    parser.add_argument('-db', action='store_true', default=False,
                        help='print out commands for debugging? (default: False)')
    parser.add_argument("-u_rate", type=int, help="Upload rate in kb/s")
    parser.add_argument("-d_rate", type=int, help="Download rate in kb/s")
    args = parser.parse_args()

    seed, num_seeders, file = args.seed, args.num, args.tor
    file_dest, log, debug = args.dest, args.log, args.db
    u_rate, d_rate = args.u_rate, args.d_rate
    # if seed:
    #     assert is_valid_file(parser, file_dest), "Seeders require the file: " + file_dest
    # else:
    #     os.system("rm " + file_dest)
    peer = "torrents/"
    if seed:
        peer = ""
    for i in range(num_seeders):
        dockerRun = "sudo docker run -d --name {} --network host kraken".format(log)
        murderClient = '"python murder_client.py'
        clientArgs = ('--ip localhost --responsefile {} --saveas {}{} '
                      '--max_upload_rate {} --max_download_rate {}"').format(
                          file, peer, file_dest, u_rate, d_rate)
        CMD = "{} {} {}".format(dockerRun, murderClient, clientArgs)
        if debug:
            print(CMD)
        else:
            os.system(CMD)
def parse_args():
    logging_levels = ["notset", "debug", "info", "warning", "error", "critical"]
    parser = argparse.ArgumentParser(
        description="Analyze dependencies for input Python file.")
    parser.add_argument("dirpath", type=str, help="directory path to analyze")
    parser.add_argument("filepath", type=str, help="python file path to analyze")
    parser.add_argument("-l", "--logging_level", type=str, default="error",
                        choices=set(logging_levels), help="logging level")
    parser.add_argument("-s", "--search_imports", action='store_true',
                        help="flag to search local machine and check if all dependencies are installed")
    parser.add_argument("-g", "--render_graph", action='store_true',
                        help="flag to render dependency graph")
    parser.add_argument("-u", "--mark_unused", action='store_true',
                        help="flag to mark unused dependencies")
    args = parser.parse_args()

    if not utils.is_valid_dir(args.dirpath):
        print("\n[Command Line Error] Invalid directory \"{dirpath}\".".format(dirpath=args.dirpath),
              file=sys.stderr)
    elif not utils.is_valid_file(args.filepath):
        print("\n[Command Line Error] Invalid file \"{filepath}\".".format(filepath=args.filepath),
              file=sys.stderr)
    else:
        logging_level = logging_levels.index(args.logging_level) * 10
        config = Config(logging_level=logging_level,
                        resolve_all_imports=not args.search_imports,
                        render_graph=args.render_graph,
                        mark_unused=args.mark_unused)
        dependency_analyzer = DependencyAnalyzer(config)
        dependency_analyzer.run(args.dirpath, args.filepath)
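# This project's utils checks take a single path and return a bool, unlike the
# argparse-style helpers elsewhere in this collection. The obvious minimal
# implementation, offered as an assumption:
import os

def is_valid_dir(path):
    return os.path.isdir(path)

def is_valid_file(path):
    return os.path.isfile(path)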
def main():
    parser = argparse.ArgumentParser(description='Reads in the json of log locations.')
    parser.add_argument('-i', type=lambda x: is_valid_file(parser, x), default="default.json",
                        help='Identity file for the host you would like to get logs from.')
    parser.add_argument('-f', type=lambda x: is_valid_file(parser, x), default="default.json",
                        help='Pass in a JSON detailing where to fetch the logs from')
    parser.add_argument('-db', action='store_true',
                        help='prints out commands instead of executing, to debug')
    args = parser.parse_args()
    file, debug = args.f, args.db

    # Aggregate logs
    with open(file) as f:
        logDir = json.load(f)
    id = ''
    if "identity_file" in logDir:
        id = logDir.pop("identity_file")
    for host in logDir:
        for name in logDir[host]:
            logAgg = "mkdir -p logs && sudo docker logs {} >> logs/{}.txt".format(name, name)
            logCMD = 'ssh {} {} "{}"'.format(id, host, logAgg)
            if debug:
                print(logCMD)
            else:
                os.system(logCMD)
            logCPY = ('mkdir -p logs/logs_{} && scp {} {}:logs/{}.txt '
                      'logs/logs_{}/{}.txt').format(host, id, host, name, host, name)
            if debug:
                print(logCPY)
            else:
                os.system(logCPY)
def open_existing_file(self):
    # Display a QFileDialog to select the data file
    data_path = QtWidgets.QFileDialog.getOpenFileName(
        caption="Select your data file", filter="*.json")[0]
    if data_path and is_valid_file(data_path):
        self.data_path = data_path
        set_data_path(self.data_path)
        self.update_data_path()
        self.setup_tabs()
    else:
        # Launch the invalid-file-format dialog
        QtWidgets.QMessageBox.critical(
            self,
            "Couldn't read the file",
            "Please select a file with the valid format or create a new one.")
def test_missing_attribute(self):
    data = {}
    data['sixteenth_per_beat'] = 4
    data['beats_per_bar'] = 4
    data['num_bars'] = 1
    data['sixteenth_duration'] = 150
    data['swing'] = False
    data['beat_pattern'] = [
        [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
    ]
    # 'is_active' and 'swing_amount' are deliberately left out
    with open('test.drum', 'w') as outfile:
        json.dump(data, outfile)
    self.assertFalse(utils.is_valid_file('test.drum'))
    os.remove('test.drum')
def main():
    parser = argparse.ArgumentParser(description='Extract data from Daedalus project')
    parser.add_argument('src_path',
                        type=lambda src_path: is_valid_file(parser, src_path),
                        help='path to .src file')
    parser.add_argument('--reset-voices', action='store_true', help='regenerate voices.json')
    parser.add_argument('-v', '--verbose', action='store_true', help='display parsing progress')
    args = parser.parse_args()

    src_helper = SrcHelper(args.src_path)
    files_paths = src_helper.get_daedalus_files()
    data_sniffer = DataSniffer()
    for i, file_path in enumerate(files_paths, start=1):
        if args.verbose:
            print(f'\r{i}/{len(files_paths)} {file_path}')
        try:
            data_sniffer.sniff(file_path)
        except UnicodeDecodeError:
            print('UnicodeDecodeError')
    save_to_file(data_sniffer.get_dialogues_data(), settings.DIALOGUES_JSON_PATH)
    generator = VoiceConfigGenerator(args.verbose)
    generator.generate(settings.VOICES_JSON_PATH, data_sniffer.get_npc_data())
def import_file(file_name):
    """Take the json in the given file and import all global constants."""
    global sixteenth_per_beat
    global beats_per_bar
    global num_bars
    global sixteenth_duration
    global swing
    global beat_pattern
    global is_active
    global swing_amount
    if not utils.is_valid_file('saved_beats/' + file_name + '.drum'):
        message.set_message("Error")
        return False
    reset_global_timer()
    (sixteenth_per_beat, beats_per_bar, num_bars, sixteenth_duration,
     swing, beat_pattern, is_active, swing_amount) = utils.import_file(
        'saved_beats/' + file_name + '.drum')
    set_graphics()
    message.set_message("Imported")
    resize_window()
    return True
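# utils.import_file is not shown; judging from the unpacking above and the keys
# written by the tests, it plausibly loads the .drum JSON and returns the
# fields in this fixed order (a sketch, not the real implementation):
import json

def import_file(path):
    with open(path) as f:
        data = json.load(f)
    return (data['sixteenth_per_beat'], data['beats_per_bar'], data['num_bars'],
            data['sixteenth_duration'], data['swing'], data['beat_pattern'],
            data['is_active'], data['swing_amount'])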
def get_commandline_arguments(self):
    description = "Script to execute nightly tests for microhaplotype validation"
    epilog = "EXAMPLE: python main.py --scenariosdir /path/to/scenarios/dir " \
             "--mhjarlocation /path/to/microhaplotyper.jar --outdir /path/to/output/dir"
    parser = argparse.ArgumentParser(description=description, epilog=epilog)
    parser.add_argument('-i', '--scenariosdir', dest='scenariosdir', default=self.DEFAULT_SCENARIOS_DIR,
                        type=lambda x: utils.is_valid_dir(parser, x))
    parser.add_argument('-j', '--mhjarlocation', dest='mhjarlocation', default=self.DEFAULT_JAR_LOCATION,
                        type=lambda x: utils.is_valid_file(parser, x))
    parser.add_argument('-o', '--outdir', dest='outdir', default=self.DEFAULT_OUTPUT_DIR,
                        type=lambda x: utils.is_valid_dir(parser, x))
    return parser.parse_args()
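# utils.is_valid_dir here presumably mirrors the argparse-style is_valid_file
# sketched earlier, but for directories (again an assumption):
import os

def is_valid_dir(parser, arg):
    # Reject paths that do not point at an existing directory.
    if not os.path.isdir(arg):
        parser.error("The directory {} does not exist!".format(arg))
    return arg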
        new_fasta_file.write(seq)
        currentLine = nextLine
        if currentLine == '':
            break
    _fasta_file.close()
    new_fasta_file.close()


if __name__ == '__main__':
    import subprocess, commands, shutil, sys, os
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

    # Checking arguments
    parser = ArgumentParser(description=__doc__,
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("-f", "--file", dest="file",
                        type=lambda arg: is_valid_file(parser, arg),
                        help="The input fasta file name")
    parser.add_argument("-p", "--path", dest="path",
                        type=lambda arg: is_valid_file(parser, arg),
                        help="The path to the e2p2 program")
    parser.add_argument("-o", "--out", dest="output", type=str,
                        default="[Input fasta filename without its extension].pf",
                        help="The output file name")
    args = parser.parse_args()
    if args.file is None or args.path is None:
        parser.print_help()
        exit(0)
    if args.is_training:
        print("Beginning training of the {} model!".format(args.model))
        classifier.train(input_fn=lambda: mnist_input_fn(train_data, train_labels,
                                                         num_epochs=config["num_epochs"]))
        print("Training finished!")
    if args.prune_weights:
        print("Pruning weights with {} percentile.".format(config["pruning_percentile"]))
        pruned_model_dir = prune_weights(args.model_dir, config["pruning_percentile"], plot_hist=False)
        classifier = tf.estimator.Estimator(model_fn=model_fn, model_dir=pruned_model_dir, params=params)
    eval_results = classifier.evaluate(input_fn=lambda: mnist_input_fn(eval_data, eval_labels))
    print(eval_results)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Bayes By Backprop models')
    parser.add_argument('--model', choices=list(models.keys()), default='baseline',
                        help='The model to train.')
    parser.add_argument('--no_training', action="store_false", dest="is_training", default=True,
                        help='Should we just evaluate?')
    parser.add_argument('--model_dir', type=lambda x: is_valid_file(parser, x),
                        default='/tmp/bayes_by_backprop',
                        help='The model directory.')
    parser.add_argument('--prune_weights', action="store_true", dest="prune_weights", default=False,
                        help='Should we do weight pruning during evaluation?')
    args = parser.parse_args()
    run(args)
                        type=open,
                        help='Path to the config JSON file.')
    parser.add_argument('--model', choices=list(models.keys()), default='cnn',
                        help='The model to train.')
    parser.add_argument('--no_training', action="store_false", dest="is_training", default=True,
                        help='Should we just evaluate?')
    parser.add_argument('--build_ac_dict', action="store_true", dest="build_ac_dict", default=False,
                        help='Should we build the Arithmetic Coding dictionary?')
    parser.add_argument('--model_dir', type=lambda x: is_valid_file(parser, x),
                        default='/tmp/miracle_compress_clic',
                        help='The model directory.')
    parser.add_argument('--train_stage', type=int, default=0)
    args = parser.parse_args()
    run(config_path=args.config,
        model_key=args.model,
        is_training=args.is_training,
        build_ac_dict=args.build_ac_dict,
        model_dir=args.model_dir,
        train_stage=args.train_stage)
                        ' in seconds, located in root-> input folder')
    parser.add_argument('Output_file', nargs='?', metavar='OUTPUT_FILE',
                        default='sessionization.txt',
                        help='(Optional) Output file name created in root->output folder.'
                             ' Default name is sessionization.txt')
    args = parser.parse_args()

    # Check validity of input and output files
    # Input log file path
    logfile_path = os.path.join(thispath, os.pardir, 'input', args.Input_log)
    # Sanity check on input log file (existence and extension)
    utils.is_valid_file(logfile_path, '.csv')
    # Output file path
    sessionfile_path = os.path.join(thispath, os.pardir, 'output', args.Output_file)
    # Sanity check on output file (extension only)
    utils.check_extension(sessionfile_path, '.txt')
    # Get inactivity period
    timeout_delta = utils.get_timeout(thispath, args.Input_inactivity)

    # Each session has a start datetime, a stop datetime, and a document count.
    # We keep a dictionary of IP addresses containing this information,
    # ordered by stop datetime.
    open_sessions = OrderedDict()
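# Yet another is_valid_file signature: a path plus a required extension, used
# as a bare "sanity check" statement, which suggests it raises or exits on
# failure instead of returning a bool. A hypothetical sketch:
import os
import sys

def is_valid_file(path, extension):
    # Abort with a message if the file is missing or has the wrong extension.
    if not path.endswith(extension):
        sys.exit('Input file {} does not end in {}'.format(path, extension))
    if not os.path.isfile(path):
        sys.exit('Input file {} does not exist'.format(path))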
def test_wrong_extension(self):
    self.assertFalse(utils.is_valid_file('drum.dum'))
def test_nonexistent_file(self):
    self.assertFalse(utils.is_valid_file('awdawd.drum'))
def main():
    # Parse arguments
    parser = argparse.ArgumentParser(description='Read in JSON configuration.')
    parser.add_argument('--config', type=lambda x: is_valid_file(parser, x), default="default.json",
                        help='Pass in a JSON detailing how to run the experiment')
    parser.add_argument('-db', action='store_true',
                        help='prints out commands instead of executing, to debug')
    parser.add_argument('-shutdown', action='store_true',
                        help='shuts down all docker containers on hosts in the config. Only flag after '
                             'the experiment is finished; this shuts down all docker containers on each '
                             'host, not just the ones run in experiments.')
    args = parser.parse_args()

    global debug
    global workers
    configFile, debug = args.config, args.db
    workers = []

    # Parse the JSON configuration
    with open(configFile) as f:
        config = json.load(f)
    if debug:
        print(config)

    global id
    id = ''
    if config["identity_file"]:
        id = "-i {}".format(config["identity_file"])

    # List of all hosts
    hosts = [config["tracker_host"]] + config["seeder_hosts"] + config["leecher_hosts"]

    if args.shutdown:
        runAllHosts("shutdown.sh", hosts, supress=True)
        return

    # Generate torrents
    if debug:
        print("Generating torrents")
    genTorrents(config)
    wait(workers)

    # Copy torrents to all the hosts
    copy('~/torrents', hosts[1:], dir=True)
    wait(workers)

    # Copy setup.sh and run it on all hosts
    runAllHosts("setup.sh", hosts, supress=True)
    wait(workers)

    # Start the tracker
    if debug:
        print("Generating tracker")
    tracker(config)

    if debug:
        print("Generating peers")
    logDir = {host: [] for host in hosts}
    gen_peers(config, logDir)

    if debug:
        print("Saving logs")
    if id:
        logDir["identity_file"] = id
    saveLogs(logDir)

    if debug:
        print("{} workers".format(len(workers)))
    # Wait on all threads to finish before cleaning
    wait(workers)
    runAllHosts("clean.sh", hosts)
import numpy as np
import os
from pprint import pprint

from support import Support
from encoding import WsiEncoding
from utils import write_xml
from argconfigparser import ArgumentConfigParser
from utils import is_valid_file

# Parse arguments
parser = ArgumentConfigParser('./fsorparameters.yml', description='FSOR')
parser.add_argument("-i", '--query_encoding_path',
                    dest="query_encoding_path",
                    required=True,
                    help="query_encoding_path",
                    metavar="FILE_PATH",
                    type=lambda x: is_valid_file(parser, x))
config = parser.parse_args()
pprint(f'CONFIG: \n{config}')

# Create support
support = Support(datasource=config['datasource'],
                  model_path=config['model_path'],
                  labels=config['labels'])

# Get prototype, threshold and anchors
prototype, threshold, anchors = support.prototype()

# Load WSI encoding
wsienc = WsiEncoding(None)
wsienc.load(config['query_encoding_path'])

# Get encoding coordinates
enckeys = wsienc._encoding['vectors'].keys()