import argparse
import os
import sys

import pandas as pd


def main(argv):
    parser = argparse.ArgumentParser()
    # Required arguments:
    parser.add_argument(
        "type",
        help="what type of mission are you going to do.\n"
             "supported: compare loss_curve acc_curve data_range")
    parser.add_argument(
        "output_dir",
        help="the name of the output dir to store the results.")
    parser.add_argument(
        "--results_name",
        help="what results are you going to plot or compare.\n"
             "supported: best_acc test_acc train_acc test_loss train_loss")
    parser.add_argument(
        "--config_name",
        help="what configs are you going to show.\n"
             "example: all bw group hard")
    parser.add_argument(
        "--file_range", nargs='+',
        help="the date range of input files to read the results from.")
    args = parser.parse_args()
    print(args.file_range)

    dirlist = os.listdir('./')
    print(dirlist)
    configs = pd.DataFrame()
    results = pd.DataFrame()
    count = 0
    epoch_num = 0

    # Collect every .log file from log_quantize_* directories whose date
    # falls inside the requested range.
    for dirname in dirlist:
        if dirname.startswith('log_quantize'):
            date = dirname[13:21]
            if date >= args.file_range[0] and date <= args.file_range[1]:
                filelist = os.listdir(dirname)
                for filename in filelist:
                    if filename.endswith('.log'):
                        filepath = os.path.join(dirname, filename)
                        print(filepath)
                        if not os.path.exists(filepath):
                            sys.exit("wrong filename: " + filepath)
                        # process_file is assumed to be defined elsewhere
                        # in this module.
                        epoch_num = process_file(filepath, configs, results, count)
                        count += 1

    configs.to_csv(os.path.join(args.output_dir, 'all_configs.csv'))
    results.to_csv(os.path.join(args.output_dir, 'all_results.csv'))

    # Check configs and generate values: keep only the config columns that
    # actually differ between runs.
    configs = configs.loc[:, (configs != configs.iloc[0]).any()]
    trainloss = results[['%dtrainloss' % i for i in range(epoch_num + 1)]]
    trainacc = results[['%dtrainacc' % i for i in range(epoch_num + 1)]]
    testloss = results[['%dtestloss' % i for i in range(epoch_num + 1)]]
    testacc = results[['%dtestacc' % i for i in range(epoch_num + 1)]]
    values = {'train_loss': trainloss, 'train_acc': trainacc,
              'test_loss': testloss, 'test_acc': testacc}

    # Output data according to the arguments.
    if args.type == 'compare':
        # Compare best accuracies in a table.
        if args.results_name == 'best_acc':
            output = pd.concat([configs, results[['bestacc', 'bestepoch']]], axis=1)
            print(output)
            output.to_csv(os.path.join(args.output_dir, 'compare_best.csv'))
        # Compare one curve per config in a figure.
        elif args.results_name.endswith('acc') or args.results_name.endswith('loss'):
            values[args.results_name].plot(kind='line', marker='o')
        # Comparing all results of different configs at once is unsupported.
        else:
            sys.exit("can't compare all results of different configs")
    # Plot one curve for each config onto one figure.
    elif args.type == 'loss_curve':
        pass
    elif args.type == 'acc_curve':
        pass
    elif args.type == 'data_range':
        pass
    return
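# Usage sketch (script name, directory layout, and dates are assumptions,
# not from the source):
#   python plot_results.py compare ./out --results_name best_acc \
#       --file_range 20180101 20180131
# would aggregate every .log file under log_quantize_* directories dated
# within the range, write all_configs.csv / all_results.csv into ./out, and
# then emit compare_best.csv with the per-config best accuracies.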
import argparse
from sys import exit

parser = argparse.ArgumentParser()
# store_const with const=True / default=None behaves like store_true,
# except the unset value is None instead of False.
parser.add_argument('--reset', action='store_const', const=True, default=None)
parser.add_argument('--cores', '-c')
parser.add_argument('--mem', '-m')
parser.add_argument('--phoronix', action='store_const', const=True, default=None)
parser.add_argument('--entry', '-i')
parser.add_argument('--exit', '-o')
args = parser.parse_args()
print(args, "\n\n")

# Require either --reset, --phoronix, or both --entry and --exit.
if not (args.reset or (args.entry and args.exit) or args.phoronix):
    print("usage..")
    exit(1)
print("a okay!\n\n")

if args.reset:
    print("resetting..\n")
if args.phoronix:
    pass  # the snippet is truncated here in the source
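# Usage sketch (the script name is an assumption): any of
#   python switch.py --reset
#   python switch.py --phoronix
#   python switch.py -i entry.sh -o exit.sh -c 4 -m 2048
# passes the check above; invoking the script with none of these flags
# prints "usage.." and exits with status 1.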
# Imports implied by the body below (the systemd journal binding is assumed).
from argparse import ArgumentParser
from os.path import expanduser
from sys import exit
from systemd import journal


# Note: the snippet begins mid-signature; the function name and the leading
# parameters are reconstructed from the body and are assumptions. Only
# exit_code=1 appears in the source.
def report(message, print_message=True, log_message=True, quit=False,
           exit_code=1):
    if print_message:
        print(message)
    if log_message:
        journal.send("[switcher.py] " + message)
    if quit:
        exit(exit_code)
    # All done, return.
    return None


# Main.
# Create a parser for the command line arguments.
argument_parser = ArgumentParser(
    description="Switches between X11 virtual desktops and starts designated "
                "software on demand.",
    allow_abbrev=False)
argument_parser.add_argument(
    "-d", "--desktop",
    type=str,
    help="Switch to desktop DESKTOP, as defined in the config.",
    required=True)
argument_parser.add_argument(
    "--config",
    type=str,
    default=expanduser("~") + "/.config/switcher.conf",
    help="Set the config file location.")

# Parse the arguments.
arguments = argument_parser.parse_args()
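# Usage sketch (the desktop names are assumptions):
#   python switcher.py -d work
# switches to the desktop named "work" as defined in ~/.config/switcher.conf;
#   python switcher.py -d web --config /tmp/test.conf
# reads the desktop definitions from an alternate config file instead.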
def arg_parse():
    import argparse
    parser = argparse.ArgumentParser(
        description='Display the tweets that get filtered')
    parser.add_argument('globpath',
                        help='glob path of the gzip files to read')
    return parser.parse_args()
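# A minimal consumer sketch for the globpath argument (the glob/gzip handling
# is an assumption; only the argument itself comes from the source):
import glob
import gzip

def read_lines(globpath):
    # Expand the glob pattern and stream each gzip file line by line.
    for path in glob.glob(globpath):  # e.g. 'tweets/*.json.gz'
        with gzip.open(path, 'rt', encoding='utf-8') as f:
            for line in f:
                yield line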
import argparse
import os

from ripple.ledger import LedgerNumber
from ripple.util import File
from ripple.util import Log
from ripple.util import PrettyPrint
from ripple.util import Range
from ripple.util.Function import Function

NAME = 'LedgerTool'
VERSION = '0.1'
NONE = '(none)'

_parser = argparse.ArgumentParser(
    prog=NAME,
    description='Retrieve and process Ripple ledgers.',
    epilog=LedgerNumber.HELP,
)

# Positional arguments.
_parser.add_argument(
    'command',
    nargs='*',
    help='Command to execute.'
)

# Flag arguments.
_parser.add_argument(
    '--binary',
    action='store_true',
    help='If true, searches are binary - by default linear search is used.',
)
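# Usage sketch (the command string is an assumption; only the 'command'
# positional and the --binary flag appear above):
#   arguments = _parser.parse_args(['--binary', 'search', '2000-3000'])
#   arguments.binary   # True
#   arguments.command  # ['search', '2000-3000']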
from argparse import ArgumentParser


# Note: this snippet begins mid-function; the signature below is
# reconstructed from the body and is an assumption. http_post is presumably
# an HTTP helper (e.g. an alias for requests.post) defined elsewhere.
def query_api(url, keyword, values):
    # Build a GraphQL-style query string; the doubled braces become literal
    # braces in the output.
    query = "{{{keyword} {{{values}}}}}".format(keyword=keyword, values=values)

    # Query the API.
    reply = http_post(url, json={"query": query})

    # Get the JSON-formatted version of the reply and extract the station
    # information.
    reply_json = reply.json()
    reply_data = reply_json["data"][keyword]

    # All done, return.
    return reply_data


# Main.
# Create a parser for the command line arguments.
argument_parser = ArgumentParser(
    description="Returns the status of selected HSL city bike stations.",
    allow_abbrev=False)
argument_parser.add_argument(
    "-s", "--stations",
    type=str,
    help="Return the status of one or more space-separated STATIONs. "
         "Only station number is accepted.",
    nargs="*",
    required=True)
argument_parser.add_argument("--hide-empty",
                             action="store_true",
                             help="Hide stations that are empty.")
argument_parser.add_argument("--hide-unavailable",
                             action="store_true",
                             help="Hide stations that are unavailable.")
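# A worked example of the brace escaping in the query template (the keyword
# and field names are illustrative, not from the source): with
#   keyword = "bikeRentalStations"
#   values = "stationId bikesAvailable"
# the format call produces
#   {bikeRentalStations {stationId bikesAvailable}}
# since each doubled "{{" / "}}" becomes one literal brace and the
# single-brace fields are substituted.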
import argparse
import json
import math
import os
from functools import partial

# Project-local helpers are assumed to be imported elsewhere in this file:
# setup_model, convert_seq, convert_snli_seq, DkNN, leave_one_out,
# vanilla_grad, colorize.


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=0,
                        help='GPU id (negative value indicates CPU)')
    parser.add_argument('--model-setup', required=True,
                        help='Model setup dictionary.')
    parser.add_argument('--lsh', action='store_true', default=False,
                        help='If true, uses locality sensitive hashing '
                             '(with k=10 NN) for NN search.')
    parser.add_argument('--interp_method', type=str, default='dknn',
                        help='choose dknn, softmax, or grad')
    args = parser.parse_args()

    model, train, test, vocab, setup = setup_model(args)
    reverse_vocab = {v: k for k, v in vocab.items()}

    use_snli = False
    if setup['dataset'] == 'snli':
        # If SNLI, change the colors and set the flags.
        converter = convert_snli_seq
        colors = 'PiYG'
        use_snli = True
    else:
        converter = convert_seq
        colors = 'RdBu'

    with open(os.path.join(setup['save_path'], 'calib.json')) as f:
        calibration_idx = json.load(f)
    calibration = [train[i] for i in calibration_idx]
    train = [x for i, x in enumerate(train) if i not in calibration_idx]

    # Get the DkNN layers of the training data.
    dknn = DkNN(model, lsh=args.lsh)
    dknn.build(train, batch_size=setup['batchsize'],
               converter=converter, device=args.gpu)

    # Calibrate the DkNN credibility values.
    dknn.calibrate(calibration, batch_size=setup['batchsize'],
                   converter=converter, device=args.gpu)

    # Open an HTML file for printing results; write a table header to make
    # it pretty.
    with open(setup['dataset'] + '_' + setup['model'] + '_colorize.html',
              'a') as f:
        f.write('<table style="width:100%"> <tr> <th>method</th> '
                '<th>label</th> <th>prediction</th> <th>text</th> </tr>')

    # Set up the word importance ranking function depending on the mode.
    if args.interp_method == 'dknn' or args.interp_method == 'softmax':
        ranker = partial(leave_one_out, dknn, converter)
    elif args.interp_method == 'grad':
        ranker = partial(vanilla_grad, model, converter)
    use_cred = (args.interp_method == 'dknn')

    # Generate interpretations for the whole test set.
    for i in range(len(test)):
        if use_snli:
            prem, hypo, label = test[i]
            x = (prem, hypo)
        else:
            text, label = test[i]
            x = text
        label = label[0]

        # Get the original score, and the scores for all individual words.
        prediction, original_score, scores = ranker(
            x, snli=use_snli, use_credibility=use_cred)
        # Sort the scores for each word.
        sorted_scores = sorted(list(enumerate(scores)), key=lambda x: x[1])
        print('label: {}'.format(label))
        print('prediction: {} ({})'.format(prediction, original_score))

        # Print out the inputs.
        if use_snli:
            print('premise: ' + ' '.join(reverse_vocab[w] for w in prem))
            print('hypothesis: ' + ' '.join(reverse_vocab[w] for w in hypo))
        else:
            print(' '.join(reverse_vocab[w] for w in text))

        # Print the word importances.
        for idx, score in sorted_scores:
            if use_snli:
                print(score, reverse_vocab[hypo[idx]])
            else:
                print(score, reverse_vocab[text[idx]])
        print()
        print()

        # If using leave-one-out, take the drop in score.
        normalized_scores = []
        words = []
        for idx, score in enumerate(scores):
            if args.interp_method == 'dknn' or args.interp_method == 'softmax':
                # For leave-one-out, importance is the drop in score.
                normalized_scores.append(score - original_score)
            else:
                # For gradients it is not a drop.
                normalized_scores.append(score)
            if use_snli:
                words.append(reverse_vocab[hypo[idx]])
            else:
                words.append(reverse_vocab[text[idx]])

        # Flip the sign if positive sentiment, i.e., for the positive class,
        # a drop in score = red highlight.
        # For the negative class, a drop in score = blue highlight.
        if not use_snli and prediction == 1:
            normalized_scores = [-1 * n for n in normalized_scores]
        if use_snli:
            # Flip the sign so green is a drop.
            normalized_scores = [-1 * n for n in normalized_scores]

        # Normalize the scores across the words, handling positives and
        # negatives separately. The final scores should be in the range
        # [0, 1]: 0 is dark red, 1 is dark blue, and 0.5 is no highlight.
        total_score_pos = 1e-6  # 1e-6 for the case where all positive/negative scores are 0
        total_score_neg = 1e-6
        for idx, s in enumerate(normalized_scores):
            if s < 0:
                total_score_neg = total_score_neg + math.fabs(s)
            else:
                total_score_pos = total_score_pos + s
        for idx, s in enumerate(normalized_scores):
            if s < 0:
                # Divide by 2 to get a maximum magnitude of 0.5.
                normalized_scores[idx] = (s / total_score_neg) / 2
            else:
                normalized_scores[idx] = (s / total_score_pos) / 2
        # Center the scores around 0.5.
        normalized_scores = [0.5 + n for n in normalized_scores]

        # Generate the saliency map colors.
        visual = colorize(words, normalized_scores, colors=colors)

        # Set up an HTML table row with the SNLI results.
        if use_snli:
            with open(setup['dataset'] + '_' + setup['model'] +
                      '_colorize.html', 'a') as f:
                if label == 0:
                    f.write('ground truth label: entailment')
                elif label == 1:
                    f.write('ground truth label: neutral')
                elif label == 2:
                    f.write('ground truth label: contradiction')
                if prediction == 0:
                    f.write("prediction: entailment ({}) ".format(original_score))
                elif prediction == 1:
                    f.write("prediction: neutral ({}) ".format(original_score))
                elif prediction == 2:
                    f.write("prediction: contradiction ({}) ".format(original_score))
                f.write("<br>")
                f.write(' '.join(reverse_vocab[w] for w in prem) + '<br>')
                f.write(visual + "<br>")
                f.write("<br>")
        # Set up an HTML table row with the sentiment results.
        else:
            with open(setup['dataset'] + '_' + setup['model'] +
                      '_colorize.html', 'a') as f:
                f.write('<tr>')
                f.write('<td>')
                if args.interp_method == 'dknn':
                    f.write('conformity leave-one-out')
                elif args.interp_method == 'softmax':
                    f.write('confidence leave-one-out')
                else:
                    f.write('vanilla gradient')
                f.write('</td>')
                f.write('<td>')
                if label == 1:
                    f.write('label: positive')
                else:
                    f.write('label: negative')
                f.write('</td>')
                f.write('<td>')
                if prediction == 1:
                    f.write("prediction: positive ({0:.2f}) ".format(original_score))
                else:
                    f.write("prediction: negative ({0:.2f}) ".format(original_score))
                f.write('</td>')
                f.write('<td>')
                f.write(visual)
                f.write('</td>')
                f.write('</tr>')

        # Print nearest neighbor training data points for interpretation by
        # analogy.
        # neighbors = dknn.get_neighbors(x)
        # print('neighbors:')
        # for neighbor in neighbors[:5]:
        #     curr_nearest_neighbor_input_sentence = ' '
        #     for word in train[neighbor][0]:
        #         curr_nearest_neighbor_input_sentence += reverse_vocab[word] + ' '
        #     print(curr_nearest_neighbor_input_sentence)

    # End the HTML table.
    with open(setup['dataset'] + '_' + setup['model'] + '_colorize.html',
              'a') as f:
        f.write('</table>')
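# A worked example of the normalization above (the input values are
# illustrative): given per-word drops [-0.2, 0.1, 0.3], we get
# total_score_neg ~= 0.2 and total_score_pos ~= 0.4, so dividing and halving
# yields [-0.5, 0.125, 0.375], and centering on 0.5 yields
# [0.0, 0.625, 0.875]: the word with the large negative drop maps to dark
# red (0.0) and the strongest positive word to 0.875, near dark blue.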