def main(argv):
    """Job-manager entry point: set up logging, parse the command line,
    load the requested job module and run it.

    argv -- full process argument vector; argv[0] is the program name,
            argv[1] the module to load, the rest module arguments.
    """
    # Setup logging
    setup_log()
    logging.debug('Hello!')

    # Print usage
    if len(argv) <= 1:
        abort('USAGE: jm module [module args]')

    # Parse the arguments
    args = Args.Args(argv[1:])
    parse_global_config(args.args)

    # Load the module
    module = args.margs[0]
    job = JobBinary(module)

    # Remove JM arguments when passing to the module
    margv = args.margs

    # Wrapper to include job module
    def run_wrapper(argv):
        return run(argv, job)

    # Run the module
    logging.info('Running module')
    # NOTE(review): the result of spits_main is captured but never used —
    # confirm whether it should be checked or returned to the caller.
    r = job.spits_main(margv, run_wrapper)

    # Finalize
    logging.debug('Bye!')
def main(argv):
    """Job-manager entry point: parse the command line, start the
    heartbeat thread, run the job module, then stop the heartbeat and
    optionally kill the task managers.

    argv -- full process argument vector; argv[0] is the program name,
            argv[1] the module to load, the rest module arguments.
    """
    # Print usage
    if len(argv) <= 1:
        abort('USAGE: jm module [module args]')

    # Parse the arguments
    args = Args.Args(argv[1:])
    parse_global_config(args.args)

    # Setup logging
    setup_log()
    logging.debug('Hello!')

    # Enable memory debugging
    if jm_memstat == 1:
        memstat.enable()
    memstat.stats()

    # Load the module
    module = args.margs[0]
    job = JobBinary(module)

    # Remove JM arguments when passing to the module
    margv = args.margs

    # Keep a run identifier; a one-element list so the nested
    # run_wrapper closure can mutate it (no `nonlocal` needed).
    runid = [0]

    # Wrapper to include job module
    def run_wrapper(argv, jobinfo):
        runid[0] = runid[0] + 1
        return run(argv, jobinfo, job, runid[0])

    # Wrapper for the heartbeat; `finished` is a mutable flag shared
    # with the heartbeat thread so it can be told to stop.
    finished = [False]
    def heartbeat_wrapper():
        heartbeat(finished)

    # Start the heartbeat
    # NOTE(review): the thread is not marked daemon, so if spits_main
    # raises before finished[0] is set the process may hang — confirm.
    threading.Thread(target=heartbeat_wrapper).start()

    # Run the module
    logging.info('Running module')
    memstat.stats()
    r = job.spits_main(margv, run_wrapper)
    memstat.stats()

    # Stop the heartbeat thread
    finished[0] = True

    # Kill the workers
    if jm_killtms:
        killtms()

    # Print final memory report
    memstat.stats()

    # Finalize
    logging.debug('Bye!')
def main(argv):
    """Task-manager entry point.

    Validates usage, parses the command line, initializes logging and
    hands control to the App object.

    argv -- full process argument vector; argv[0] is the program name.
    """
    # Bail out early with a usage message when no module was given.
    if len(argv) <= 1:
        abort('USAGE: tm [args] module [module args]')

    # Parse the command line and apply the global configuration.
    parsed = Args.Args(argv[1:])
    parse_global_config(parsed.args)

    # Logging becomes available only after the config is applied.
    setup_log()
    logging.debug('Hello!')

    # Run the application proper.
    App(parsed).run()
    logging.debug('Bye!')
def main():
    """Build an OutcomeStats workbook from the default input files.

    Creates the xlsx output named by Args and instantiates OutcomeStats
    with two fixed worksheet origins.
    """
    from Args import Args
    print("OutcomeStats.main()")
    # print(type(self.outcomeFormats()))

    # Default input JSON, output workbook and stats configuration.
    cli = Args('occurrence_qc.json', 'outcomeStats.xlsx', 'stats.ini')

    wb = xlsxwriter.Workbook(cli.getOutfile())
    ws = wb.add_worksheet()

    # Two anchor cells (row, col) for the two stats tables.
    top_origin = [0, 0]
    bottom_origin = [5, 0]

    stats = OutcomeStats(wb, ws, cli, top_origin, bottom_origin)
def _cli_callable_from_params(params: "List[str] | None" = None) -> callable:
    """Map parsed CLI parameters to the API callable that handles them.

    params -- raw argument list to feed to Args; None lets Args use its
              own default source (typically sys.argv).

    Returns the matching zero-argument callable from the api module, or
    a no-op lambda when no recognized option is present.
    """
    # Fix: the default was annotated `List[str] = None`, which is a type
    # error (None is not a List[str]); the string annotation documents
    # the optional list without requiring a typing.Optional import.
    args: Args = Args(params)
    if args.version:
        return api.version_method
    if args.clear_log:
        return api.clear_log_method
    if args.dump_log:
        # Bind the dump directory now; the caller invokes with no args.
        return partial(api.dump_log_method, args.dump_log)
    return lambda: None
def test_non_existing_dir_raises_argument_type_error(self, tmp_path_as_cwd):
    """--dump-log pointing at a missing directory must be rejected."""
    with pytest.raises(SystemExit), pytest.raises(argparse.ArgumentTypeError), mock.patch('sys.stderr'):
        Args(['--dump-log', 'non_existing_dir'])
def test_args_dump_log_defaults_to_cwd(self):
    """--dump-log with no directory argument falls back to the cwd."""
    expected_dir: Path = Path()
    assert Args(['--dump-log']).dump_log == expected_dir
def test_args_dump_log_is_not_none_when_dump_log_dir_in_args_list(self):
    """An explicit --dump-log directory yields a non-None dump_log."""
    parsed = Args(['--dump-log', '.'])
    assert parsed.dump_log is not None
def test_unrecognized_arguments_raises_system_exit(self):
    """Unknown options make argparse exit; stderr is silenced."""
    unknown_option = ['--foo']
    with pytest.raises(SystemExit), mock.patch('sys.stderr'):
        Args(unknown_option)
def test_args_param_property_is_true_when_param_in_args_list(self, params, param_property):
    """Each parametrized flag sets its matching Args property truthy."""
    parsed: Any = Args(params)
    assert param_property.getter(parsed)
def test_expected_params(self, params):
    """Every accepted parameter combination produces a truthy Args."""
    parsed = Args(params)
    assert parsed
for f in os.listdir(kron_dir): filename = os.fsdecode(f) if filename.endswith(".txt"): txt_files.append(filename) elif filename.endswith(".dat"): return utils.load_graph_list(os.path.join(kron_dir, filename)) G_list = [] for filename in txt_files: G_list.append( utils.snap_txt_output_to_nx(os.path.join(kron_dir, filename))) return G_list if __name__ == "__main__": args = Args() args_evaluate = Args_evaluate() parser = argparse.ArgumentParser(description="Evaluation arguments.") feature_parser = parser.add_mutually_exclusive_group(required=False) feature_parser.add_argument("--export-real", dest="export", action="store_true") feature_parser.add_argument("--no-export-real", dest="export", action="store_false") feature_parser.add_argument( "--kron-dir", dest="kron_dir", help="Directory where graphs generated by kronecker method is stored.", )
tf = item[1] dl = file[item[0]][1] TF = ((k1 + 1) * tf) / (k1 * (0.25 + 0.75 * dl / avdl) + tf) ### Add into Rank List if item[0] not in rank: rank[item[0]] = [0] * len(term) rank[item[0]][indx] = float(DF * TF * QF) ### Add into Vect List vect.append(time) indx += 1 return vect, rank args = Args.Args() vocb = Parse.ParseVocb(args.m) file, avdl = Parse.ParseFile(args.m, args.d) invt = Parse.ParseInvt(args.m) qury = Parse.ParseQury(args.i) print("") N = len(file) text = "query_id,retrieved_docs\n" for temp in qury: accm = {} ### Consider label k for k in range(1, 5):
def test_parse_global_config(self):
    """parse_global_config must pick up --timeout into tm.tm_timeout."""
    parsed = Args.Args(['--timeout=10'])
    tm.parse_global_config(parsed.args)
    self.assertEqual(tm.tm_timeout, 10)
def test_args(self):
    """Named options land in .args; positionals are kept in .margs."""
    parsed = Args.Args(['--timeout=10', 'module', 'arg1', 'arg2'])
    self.assertIn('timeout', parsed.args)
    self.assertEqual(parsed.args['timeout'], '10')
    self.assertSequenceEqual(['module', 'arg1', 'arg2'], parsed.margs)
def test_args_version_is_false_when_version_is_not_in_args_list(self):
    """Without --version on the command line, .version stays falsy."""
    empty_args = Args([])
    assert not empty_args.version
import xlsxwriter #import OutcomeFormats from OutcomeStats import * from OutcomeFormats import * from Args import * import argparse #import unittest #parser = argparse.ArgumentParser() #parser.add_argument('--i',default='occurrence_qc.json', help="Defaults to occurrence_qc.json if '--i' absent") #parser.add_argument('--o',default='outcomeStats.xlsx', help="Defaults to outcomeStats.xlsx if '--o' absent") #parser.add_argument('--c',default='stats.ini', help="Defaults to stats.ini if --c absent") #args = parser.parse_args() #outfile = args.o #args = parser.parse_args() args=Args('occurrence_qc.json', 'outcomeStats.xlsx', 'stats.ini') #Supply your favorite JSON output of FP-Akka as input. Do python3 statstest.py --help for help #tested against FP-Akka 1.5.2 JSON output with python3 if __name__=="__main__": ################################################### ############# First initialize resources ########## ################################################### # set input jason file from FPAkka or elsewhere # set output xlsx file # set stats.ini configurations args=Args('occurrence_qc.json', 'outcomeStats.xlsx', 'stats.ini') #load entire jason file. (Note: syntactically it is a Dictionary !!! ) with open(args.getInfile()) as data_file: fpAkkaOutput=json.load(data_file)
-xyz (if at least x,y have defaults)
abbreviations allowed (arguments = 'name', 'territorial') (acceptable: -na -ter) etc... unless ambiguous

group = parser.add_mutually_exclusive_group()
group.add_argument...  of any arguments added to the group, only 1 can be used or it is an error

.set_defaults(**kwargs) --> will become default arguments even if the arguments were not created with .add_argument
.get_default('key') --> return the default of the key

--- usage ---
(above class saved in Args.py module)
import Args
arg_parser = Args.Args()
inputArgs = arg_parser.parse()
inputArgs.ARGNAME  #ie inputArgs.sample

--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
Looping
for x,y in zip(l1,l2): --> loop through two items at once
for k,v in dictionary.iteritems(): --> loop through keys and values in a dictionary
xrange(0,10,2) --> range of 0 to 10, by 2s