# MI experiment sweep: iterate over every configuration combination, run
# exp_mi once per combination, and checkpoint the accumulated scores to CSV
# after each run so a crash loses no completed work.
# NOTE(review): loop nesting reconstructed from a whitespace-mangled source —
# the checkpoint writes are assumed to sit in the innermost loop; confirm.
l_tau = ['tau_dr', 'tau_ols', 'tau_ols_ps', 'tau_resid']
output = 'results/2019-10-30_' + exp_name + '.csv'

l_scores = []
for args['citcio'] in [False, True]:
    for args['model'] in ["dlvm", "lrmf"]:
        for args['seed'] in range_seed:
            for args['prop_miss'] in range_prop_miss:
                for args['n'] in range_n:
                    for args['p'] in range_p:
                        t0 = time.time()
                        score = exp_mi(**args)
                        args['time'] = int(time.time() - t0)
                        l_scores.append(
                            np.concatenate((list(args.values()), score)))
                        print('exp with ', args)
                        print('........... DONE')
                        print('in ', int(args["time"]), ' s \n\n')
                        # Checkpoint: one row per finished run.
                        score_data = pd.DataFrame(
                            l_scores, columns=list(args.keys()) + l_tau)
                        score_data.to_csv(output + '_temp')

# Final save once every configuration has run.
print('saving ' + exp_name + ' at: ' + output)  # fix: missing space before 'at:'
score_data.to_csv(output)
print('*' * 20)
print('Exp: ' + exp_name + ' successfully ended.')  # fix: 'succesfully' typo
print('*' * 20)
# MI experiment sweep with the number of imputations fixed at m=10: iterate
# over every configuration combination, run exp_mi once per combination, and
# checkpoint the accumulated scores to CSV after each run.
# NOTE(review): loop nesting reconstructed from a whitespace-mangled source —
# the checkpoint writes are assumed to sit in the innermost loop; confirm.
args['m'] = 10
print('starting exp: ' + exp_name)
l_tau = ['tau_dr', 'tau_ols', 'tau_ols_ps']
output = 'results/2019-10-24_' + exp_name + '.csv'

l_scores = []
for args['citcio'] in [False, True]:
    for args['model'] in ["dlvm", "lrmf"]:
        for args['seed'] in range_seed:
            for args['prop_miss'] in range_prop_miss:
                for args['n'] in range_n:
                    for args['p'] in range_p:
                        t0 = time.time()
                        score = exp_mi(**args)
                        args['time'] = int(time.time() - t0)
                        l_scores.append(
                            np.concatenate((list(args.values()), score)))
                        print('exp with ', args)
                        print('........... DONE')
                        print('in ', int(args["time"]), ' s \n\n')
                        # Checkpoint: one row per finished run.
                        score_data = pd.DataFrame(
                            l_scores, columns=list(args.keys()) + l_tau)
                        score_data.to_csv(output + '_temp')

# Final save once every configuration has run.
print('saving ' + exp_name + ' at: ' + output)  # fix: missing space before 'at:'
score_data.to_csv(output)
print('*' * 20)
print('Exp: ' + exp_name + ' successfully ended.')  # fix: 'succesfully' typo
print('*' * 20)
# MIWAE experiment sweep: iterate over every configuration combination
# (including the MIWAE-specific sig_prior and n_epochs grids), run exp_miwae
# once per combination, and checkpoint the accumulated scores to CSV.
# NOTE(review): `l_tau` and `output` are defined earlier, outside this chunk.
# Loop nesting reconstructed from a whitespace-mangled source — the
# checkpoint writes are assumed to sit in the innermost loop; confirm.
l_scores = []
for args['seed'] in range_seed:
    for args['citcio'] in [False, True]:
        for args['model'] in ["dlvm", "lrmf"]:
            for args['n'] in range_n:
                for args['sig_prior'] in range_sig_prior:
                    for args['n_epochs'] in range_n_epochs:
                        for args['prop_miss'] in range_prop_miss:
                            for args['p'] in range_p:
                                t0 = time.time()
                                score = exp_miwae(**args)
                                args['time'] = int(time.time() - t0)
                                l_scores.append(
                                    np.concatenate(
                                        (list(args.values()), score)))
                                print('exp with ', args)
                                print('........... DONE')
                                print('in ', int(args["time"]), ' s \n\n')
                                # Checkpoint: one row per finished run.
                                score_data = pd.DataFrame(
                                    l_scores,
                                    columns=list(args.keys()) + l_tau)
                                score_data.to_csv(output + '_temp')

# Final save once every configuration has run.
print('saving ' + exp_name + ' at: ' + output)  # fix: missing space before 'at:'
score_data.to_csv(output)
print('*' * 20)
print('Exp: ' + exp_name + ' successfully ended.')  # fix: 'succesfully' typo
print('*' * 20)
# NOTE(review): this chunk begins mid-way through a parser.add_argument(...)
# call — presumably for '--subtask-id', cf. the complete build_parse variant
# in this file — whose opening is outside the visible source. The fragment is
# kept verbatim; only comments were added/translated.
type=str, default=default_args['subtask_id'], required=False,
choices=['A', 'B'],
help='subtask A or B (default: {})'.format(
    default_args['subtask_id']))
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
# parser.add_argument('--dry-run', action='store_true', default=False,
#                     help='quickly check a single pass')
# parser.add_argument('--seed', type=int, default=1, metavar='S',
#                     help='random seed (default: 1)')
# parser.add_argument('--log-interval', type=int, default=10, metavar='N',
#                     help='how many batches to wait before logging training status')
# parser.add_argument('--save-model', action='store_true', default=False,
#                     help='For Saving the current Model')
args = parser.parse_args()  # fetch the user-supplied command-line arguments
# parser.print_help()
for key in default_args.keys():  # fold the parsed flags into default_args
    if hasattr(args, key):
        default_args[key] = getattr(args, key)
default_args['use_cuda'] = not args.no_cuda and torch.cuda.is_available()
# NOTE(review): device selection tests torch.cuda.is_available() directly and
# so ignores the --no-cuda flag captured in 'use_cuda'; the complete
# build_parse variant in this file uses default_args['use_cuda'] here —
# likely this should too. Confirm before changing.
default_args['device'] = torch.device(
    'cuda:0' if torch.cuda.is_available() else 'cpu')
pprint.pprint(default_args)
# Benchmark block: run the MI, MF and MDC estimators on one (X_miss, w, y)
# draw and append one score row per method/configuration to l_scores.

# MI (multiple imputation): one run per number of imputations m in range_m.
tau = []
t0 = time.time()
for m in range_m:
    tau.append(exp_mi(X_miss, w, y, m=m))
args['time'] = int(time.time() - t0)
for i in range(len(tau)):
    # NOTE(review): [m] here is the stale loop variable from the loop above,
    # so every row records the *last* m — presumably range_m[i] was intended;
    # confirm before relying on the 'm' column of the results.
    l_scores.append(np.concatenate((['MI'], list(args.values()),
                                    [m], [None]*6, tau[i])))

# MF (matrix factorisation): single run; tau[-1] is stored as a method
# parameter column and tau[:-1] as the estimates.
t0 = time.time()
tau = exp_mf(X_miss, w, y)
args['time'] = int(time.time() - t0)
l_scores.append(np.concatenate((['MF'], list(args.values()),
                                [None], [tau[-1]], [None]*5, tau[:-1])))
score_data = pd.DataFrame(l_scores,
                          columns=['Method'] + list(args.keys())
                          + l_method_params + l_tau)
print(score_data)

# MDC: grid over latent dimension (offset from args['d']), prior sigma,
# z-sample counts, learning rate and epochs.
range_d_miwae = [args['d']+x for x in range_d_offset]
t0 = time.time()
tau, params = exp_mdc(X_miss, w, y,
                      range_d_miwae = range_d_miwae,
                      range_sig_prior = range_sig_prior,
                      range_num_samples_zmul = range_num_samples_zmul,
                      range_learning_rate = range_learning_rate,
                      range_n_epochs = range_n_epochs)
args['time'] = int(time.time() - t0)
for i in range(len(tau['MDC.process'])):
    l_scores.append(np.concatenate((['MDC.process'], list(args.values()),
                                    [None]*2, params[i],
                                    tau['MDC.process'][i])))
# NOTE(review): the chunk ends mid-statement here — the body of this loop is
# outside the visible source.
for i in range(len(tau['MDC.mi'])):
def build_parse():
    """Build the CLI parser, parse sys.argv, and sync results into default_args.

    Every recognised flag overrides the matching key of the module-level
    ``default_args`` dict; the derived keys (``use_cuda``, ``device`` and the
    three ``with_*`` switches) are filled in afterwards.  Also seeds torch's
    RNG from ``--seed``.

    Returns:
        argparse.Namespace: the parsed command-line arguments.
    """
    parser = argparse.ArgumentParser(
        description='ECNU-SenseMaker single model')

    # --- training hyper-parameters -------------------------------------
    parser.add_argument(
        '--batch-size', type=int, metavar='N',
        default=default_args['batch_size'],
        help='input batch size for training (default: {})'.format(
            default_args['batch_size']))
    parser.add_argument(
        '--test-batch-size', type=int, metavar='N',
        default=default_args['test_batch_size'],
        help='input batch size for testing (default: {})'.format(
            default_args['test_batch_size']))
    parser.add_argument(
        '--epochs', type=int, metavar='N',
        default=default_args['epochs'],
        help='number of epochs to train (default: {})'.format(
            default_args['epochs']))
    parser.add_argument(
        '--fine-tune-epochs', type=int, metavar='N',
        default=default_args['fine_tune_epochs'],
        help='number of fine-tune epochs to train (default: {})'.format(
            default_args['fine_tune_epochs']))
    parser.add_argument(
        '--lr', type=float, metavar='LR',
        default=default_args['lr'],
        help='learning rate (default: {})'.format(default_args['lr']))
    parser.add_argument(
        '--fine-tune-lr', type=float, metavar='LR',
        default=default_args['fine_tune_lr'],
        help='fine-tune learning rate (default: {})'.format(
            default_args['fine_tune_lr']))
    parser.add_argument(
        '--adam-epsilon', type=float, metavar='M',
        default=default_args['adam_epsilon'],
        help='Adam epsilon (default: {})'.format(
            default_args['adam_epsilon']))
    parser.add_argument(
        '--max-seq-length', type=int, metavar='N',
        default=default_args['max_seq_length'],
        help='max length of sentences (default: {})'.format(
            default_args['max_seq_length']))

    # --- task / model options ------------------------------------------
    parser.add_argument(
        '--subtask-id', type=str, required=False, choices=['A', 'B'],
        default=default_args['subtask_id'],
        help='subtask A or B (default: {})'.format(
            default_args['subtask_id']))
    parser.add_argument(
        '--with-lm', action='store_true', default=False,
        help='Add Internal Sharing Mechanism (LM)')
    parser.add_argument(
        '--with-kegat', action='store_true', default=False,
        help='Add Knowledge-enhanced Graph Attention Network (KEGAT)')
    parser.add_argument(
        '--with-kemb', action='store_true', default=False,
        help='Add Knowledge-enhanced Embedding (KEmb)')

    # --- runtime options -----------------------------------------------
    parser.add_argument(
        '--no-cuda', action='store_true', default=False,
        help='disables CUDA training')
    parser.add_argument(
        '--seed', type=int, default=1, metavar='S',
        help='random seed (default: 1)')

    args = parser.parse_args()  # user-supplied command-line values
    torch.manual_seed(args.seed)

    # Fold every recognised flag back into the module-level defaults.
    for key in default_args.keys():
        if hasattr(args, key):
            default_args[key] = getattr(args, key)

    # Derived settings: CUDA only when available AND not disabled by flag.
    default_args['use_cuda'] = not args.no_cuda and torch.cuda.is_available()
    default_args['device'] = torch.device(
        'cuda:0' if default_args['use_cuda'] else 'cpu')
    default_args['with_lm'] = args.with_lm
    default_args['with_kegat'] = args.with_kegat
    default_args['with_kemb'] = args.with_kemb
    return args