def execute(device, train_loader, val_loader, epochs, learning_rate, momentum,
            dropout, step_size, gamma, l1, l2):
    """Train the four regularization variants of the model and collect results.

    Runs ``run_model`` four times: without regularization, with L1 only,
    with L2 only, and with both L1 and L2.

    Args:
        device: torch device to train on.
        train_loader / val_loader: data loaders passed through to run_model.
        epochs, learning_rate, momentum, dropout, step_size, gamma:
            training hyperparameters passed through unchanged.
        l1, l2: regularization factors used by the respective variants.

    Returns:
        dict keyed by variant name ('plain', 'l1', 'l2', 'l1_l2'); each value
        is a dict with 'loss', 'accuracy' and 'incorrect' entries as returned
        by run_model.
    """
    # One entry per variant: (description used in the progress message,
    # extra keyword arguments forwarded to run_model). This replaces four
    # near-identical copy-pasted call sites.
    variants = {
        'plain': ('without L1 and L2 regularization', {}),
        'l1': ('with L1 regularization', {'l1': l1}),
        'l2': ('with L2 regularization', {'l2': l2}),
        'l1_l2': ('with L1 and L2 regularization', {'l1': l1, 'l2': l2}),
    }
    results = {}
    for key, (description, reg_kwargs) in variants.items():
        print('\nTraining model {}...'.format(description))
        loss, accuracy, incorrect = run_model(
            device, train_loader, val_loader, epochs, learning_rate, momentum,
            dropout, step_size, gamma, **reg_kwargs)
        results[key] = {'loss': loss, 'accuracy': accuracy,
                        'incorrect': incorrect}
    return results
def run_model_and_mlm(input_file, output_file, n_processors, model_params, test_mode):
    """Run the model with the given parameters, then a multilevel analysis.

    The supplied model parameters are extended with STATIC_PARAMS before
    being wrapped in a ModelParamType. Prints and returns the mean squared
    error reported by the multilevel analysis.
    """
    # Append the fixed parameters shared by every run.
    combined = model_params + STATIC_PARAMS
    typed_params = ModelParamType(*combined)
    run_model(
        input_file,
        output_file,
        n_processors,
        model_params=typed_params,
        test_mode=test_mode,
    )
    mean_squared_error = run_multilevel_analysis(input_file, output_file)
    print(f"Mean squared error: {mean_squared_error}")
    return mean_squared_error
def main():
    """Entry point: load config, train (or load) a model, write predictions to CSV.

    NOTE(review): this is Python 2 code — the bare ``print`` statement and
    ``os.fdopen(..., 'w', 0)`` (unbuffered text mode) are both invalid in
    Python 3.
    """
    # Make stdout unbuffered so progress output appears immediately.
    unbuffered = os.fdopen(sys.stdout.fileno(), 'w', 0)
    sys.stdout = unbuffered
    # Ignore SettingWithCopy warnings
    pd.options.mode.chained_assignment = None
    # Config file path from argv[1], falling back to a default toy config.
    try:
        configFile = sys.argv[1]
    except IndexError:
        configFile = 'ToyNewConfig.txt'
    dataConfig, modelConfig = loadConfig(configFile)
    # Check if a model is being read from file
    if modelConfig['InputDirectory'] == "":
        # Fresh model: train on train_data, predict on both splits.
        (train_data, test_data), dataConfig = load_data(dataConfig)
        modelConfig['NumFeatures'] = train_data.X.shape[1]
        ytrain_pred, ytrain_true, ytest_pred, ytest_true = run_model(
            modelConfig, dataConfig, train_data, test_data)
        # Write predictions to csv
        # (index levels are swapped so the output CSV matches the expected
        # row ordering of the downstream consumers — TODO confirm)
        pd.DataFrame(
            {
                '{}_prediction'.format(train_data.config['Target']):
                ytrain_pred.values
            },
            index=train_data.X.index).sort_index(level=0).swaplevel(
                -2, -1).to_csv(train_data.config['OutputInSample'])
        pd.DataFrame(
            {
                '{}_prediction'.format(train_data.config['Target']):
                ytest_pred.values
            },
            index=test_data.X.index).sort_index(level=0).swaplevel(
                -2, -1).to_csv(test_data.config['OutputOutSample'])
    else:
        # Existing model: load it and write alpha predictions only.
        (data, ), dataConfig = load_data(dataConfig, new_model=False)
        modelConfig['NumFeatures'] = data.X.shape[1]
        ypred = load_model(modelConfig, dataConfig, data)
        pd.DataFrame({
            'alpha': ypred.values
        }, index=data.X.index).sort_index(level=0).swaplevel(-2, -1).to_csv(
            data.config['AlphaDirectory'])
    print 'Wrote predictions to csv'
def run_webserver_with_params(parameter_csv, row_number):
    """Run the model in webserver mode using one row of a parameter CSV.

    Reads row ``row_number`` from ``parameter_csv`` and overrides the
    matching fields of DEFAULT_MODEL_PARAMS with its values, then launches
    run_model on the short test input with a timestamped output file.
    """
    # Guard clause: nothing to do without the parameter file.
    if not os.path.isfile(parameter_csv):
        print(f"No such file {parameter_csv}")
        return
    frame = pd.read_csv(parameter_csv, sep=",")
    selected = frame.iloc[row_number]
    params = dataclasses.asdict(DEFAULT_MODEL_PARAMS)
    # Only columns that correspond to known model parameters are applied.
    overrides = {key: selected[key] for key in selected.keys() if key in params}
    params.update(overrides)
    run_model(
        "../../classes_input/test_input_short.csv",
        f"../../classes_output/output{datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')}.csv",
        1,
        all_classes=False,
        webserver=True,
        model_params=ModelParamType(**params),
    )
def run_best_model(x, DEBUG=False):
    """Train the tuned hyperparameter vector *x* and write predictions to CSV.

    NOTE(review): Python 2 code (bare ``print`` statement at the end).
    """
    NUM_CORES = 48
    NUM_EPOCHS = 50
    train_data, test_data, id1_train, id1_test = load_data(debug=DEBUG)
    # x holds the tuned hyperparameters in a fixed positional order.
    args = {
        'num_layers': int(x[0]),
        'hidden_size': int(x[1]),
        'learning_rate': x[2],
        'keep_prob': x[3],
        # Use batch size 1 when debugging so runs finish quickly.
        'batch_size': int(x[4]) if not DEBUG else 1,
        'lr_decay': x[5],
        'init_scale': x[6],
        'max_grad_norm': int(x[7]),
        'decay_epoch': 200,
        'num_features': train_data.num_features
    }
    ytrain_pred, ytrain_true, ytest_pred, ytest_true = run_model(
        args, train_data, test_data, id1_train, id1_test, NUM_EPOCHS,
        NUM_CORES, DEBUG, final_run=True)
    # Write predictions to csv
    pd.DataFrame(
        {
            '{}_prediction'.format(train_data.config['Target']):
            ytrain_pred.values
        },
        index=train_data.X.index).sort_index(level=0).swaplevel(-2, -1).to_csv(
            train_data.config['OutputInSample'])
    pd.DataFrame(
        {
            '{}_prediction'.format(train_data.config['Target']):
            ytest_pred.values
        },
        index=test_data.X.index).sort_index(level=0).swaplevel(-2, -1).to_csv(
            test_data.config['OutputOutSample'])
    print 'Wrote predictions to csv'
def run_best_model(x, DEBUG):
    """Train the tuned hyperparameter vector *x* for 200 epochs; write CSVs.

    Variant of run_best_model that also records the id1 column next to each
    prediction. NOTE(review): Python 2 code (bare ``print`` at the end).
    """
    train_data, test_data, id1_train, id1_test = load_data(debug=DEBUG)
    NUM_CORES = 48
    # x holds the tuned hyperparameters in a fixed positional order.
    args = {
        'num_layers': int(x[0]),
        'hidden_size': int(x[1]),
        'learning_rate': x[2],
        'keep_prob': x[3],
        'num_steps': int(x[4]),
        'init_scale': x[5],
        'max_grad_norm': int(x[6]),
        'decay_epoch': int(x[7]),
        'lr_decay': x[8],
        'num_features': train_data.num_features
    }
    # 200 = number of training epochs for the final run.
    ytrain_pred, ytrain_true, ytest_pred, ytest_true = run_model(
        args, train_data, test_data, id1_train, id1_test, 200, NUM_CORES,
        DEBUG, final_run=True)
    # Write predictions to csv
    pd.DataFrame(
        {
            'id1': id1_train.id1.values,
            '{}_prediction'.format(train_data.config['Target']): ytrain_pred
        },
        index=train_data.X.index).to_csv(train_data.config['OutputInSample'])
    pd.DataFrame(
        {
            'id1': id1_test.id1.values,
            '{}_prediction'.format(train_data.config['Target']): ytest_pred
        },
        index=test_data.X.index).to_csv(test_data.config['OutputOutSample'])
    print 'Wrote predictions to csv'
def objective(x):
    """Hyperparameter-search objective: train with vector *x*, return test MSE.

    Reads the data splits and run settings from module-level globals set up
    by the surrounding script.
    """
    global train_data, test_data, id1_train, id1_test, DEBUG, NUM_CORES, NUM_EPOCHS
    # Unpack the candidate hyperparameter vector into the model config.
    hyperparams = dict(
        num_layers=int(x[0]),
        hidden_size=int(x[1]),
        learning_rate=x[2],
        keep_prob=x[3],
        num_steps=int(x[4]),
        init_scale=x[5],
        max_grad_norm=int(x[6]),
        decay_epoch=NUM_EPOCHS,
        lr_decay=1.0,
        num_features=train_data.num_features,
    )
    _, _, predictions, targets = run_model(hyperparams, train_data, test_data,
                                           id1_train, id1_test, NUM_EPOCHS,
                                           NUM_CORES, DEBUG)
    return compute_mse(targets, predictions)
# Header row of the tab-separated results file.
results_file.write(
    "Model\tF1-score\tAUROC\tWeighted F1\tPrecision\tRecall\tAccuracy\tAUPRC\n"
)
# Destination for the per-example predictions of this configuration.
preds_path = '/path/Augmentation-for-Literary-Data/results/predictions/' + args.model + '_ELMo' + str(
    args.elmo) + '_EDA' + str(
        args.eda) + '_preds_for_Case_' + args.scenario + '.tsv'
# Echo the full run configuration before training starts.
print(
    "Model = {} | Scenario = {} | EDA = {} | CDA = {} | ELMo = {} | SaveModel = {} | OutputResults = {} | SavePredictions = {}\n"
    .format(args.model, args.scenario, args.eda, args.cda, args.elmo,
            args.save_model, results_path, preds_path))
if args.eda:  # run with EDA
    f1, auroc, w_f1, precision, recall, accuracy, auprc, preds = run_model(
        name=args.model, case=args.scenario, augmentation='EDA',
        use_elmo=args.elmo, save_model=args.save_model)
elif args.cda:  # run with CDA
    # NOTE(review): the CDA branch is not implemented — run_model is never
    # called here, so the metric variables stay undefined on this path and
    # any later use of them would raise NameError.
    pass
else:  # run without any Data Augmentation
    f1, auroc, w_f1, precision, recall, accuracy, auprc, preds = run_model(
        name=args.model, case=args.scenario, augmentation=None,
        use_elmo=args.elmo, save_model=args.save_model)
# NOTE(review): fragment — the dict literal closed below begins before this
# chunk; these are the final entries of that literal.
    'rmtpp_mse_var': None,
    'inference_models': None,
}
per_model_count['true'] = event_count_preds_true
# Train/evaluate each requested model in sequence; each run can reuse the
# models saved by earlier iterations via prev_models.
for model_name in model_names:
    print(
        "--------------------------------------------------------------------"
    )
    args.current_model = model_name
    print("Running", model_name, "Model\n")
    model, count_dist_params, rmtpp_var_model, results \
        = run.run_model(dataset_name, model_name, dataset, args, results,
                        prev_models=per_model_save,
                        run_model_flags=run_model_flags)
    #if model_name == 'count_model':
    #    count_all_means_pred = count_dist_params['count_all_means_pred']
    #    count_all_sigms_pred = count_dist_params['count_all_sigms_pred']
    #per_model_count[model_name] = count_all_means_pred
    # Keep the trained model so subsequent models can build on it.
    per_model_save[model_name] = model
    #if model_name == 'rmtpp_mse' and args.extra_var_model:
    #    per_model_save['rmtpp_var_model'] = rmtpp_var_model
    #print("Finished Running", model_name, "Model\n")
    #if model_name != 'inference_models' and per_model_count[model_name] is not None:
def generateSong(args):
    """Generate a song by sampling from a trained model.

    Builds a synthetic argument object for run_model ('sample' mode),
    selects the checkpoint directory and hyperparameters for the requested
    model type, then post-processes the generated text.
    NOTE(review): Python 2 code (bare ``print`` statements at the end).
    """
    args_fake = ArgumentParserWannabe()
    args_fake.train = 'sample'
    args_fake.data_dir = ''
    args_fake.num_epochs = 1
    args_fake.ckpt_dir = ''
    args_fake.set_config = 'song_generator.p'
    args_fake.override = False
    args_fake.ran_from_script = True
    args_fake.warm_len = args.warm_len
    # Temperature 0 means "use the model default" for seq2seq/duet models.
    if args.temperature==0 and (args.model=='seq2seq' or args.model=='duet'):
        args_fake.temperature = None
    else:
        args_fake.temperature = args.temperature
    # Silence run_model's console output while sampling; restored below.
    sys.stdout = open(os.devnull, "w")
    # Optionally warm up generation from a real song file.
    if len(args.real_song) != 0:
        args_fake.warmupData = '/data/full_dataset/handmade/' + args.real_song
    # ckpt_num == -1 selects the latest checkpoint in the directory.
    ckpt_modifier = '' if args.ckpt_num==-1 else ('model.ckpt-'+str(args.ckpt_num))
    if args.model=='seq2seq':
        args_fake.model = 'seq2seq'
        args_fake.ckpt_dir = '/data/another/seq2seq_25_2/'+ckpt_modifier
        # Hyperparameters are communicated to run_model via a pickled config.
        paramDict = {'meta_embed':160, 'embedding_dims':100, 'keep_prob':0.8,
                     'attention_option':'bahnadau', 'bidirectional':False}
        with open(args_fake.set_config,'wb') as f:
            pickle.dump(paramDict, f)
        generated = run_model(args_fake)
    elif args.model=='char':
        args_fake.model = 'char'
        args_fake.ckpt_dir = '/data/another/char_50_2/'+ckpt_modifier
        paramDict = {'meta_embed':160, 'embedding_dims':20, 'keep_prob':0.8}
        with open(args_fake.set_config,'wb') as f:
            pickle.dump(paramDict, f)
        generated = run_model(args_fake)
    elif args.model=='cbow':
        args_fake.model = 'cbow'
        args_fake.ckpt_dir = '/data/another/cbow_ckpt/model.ckpt-8'
        paramDict = {'meta_embed':100, 'embedding_dims':60, 'keep_prob':0.8}
        with open(args_fake.set_config,'wb') as f:
            pickle.dump(paramDict, f)
        generated = run_model(args_fake)
    elif args.model=='duet':
        # Duet reuses the seq2seq architecture with duet-specific vocab maps.
        args_fake.model = 'seq2seq'
        args_fake.ckpt_dir = '/data/another/seq2seq_duet/'+ckpt_modifier
        args_fake.meta_map = 'full_dataset/duet_processed/vocab_map_meta.p'
        args_fake.music_map = 'full_dataset/duet_processed/vocab_map_music.p'
        args_fake.warmupData = '/data/full_dataset/duet_processed/checked'
        paramDict = {'meta_embed':160, 'embedding_dims':100, 'keep_prob':0.8,
                     'attention_option':'bahnadau',
                     'bidirectional':False}
        with open(args_fake.set_config,'wb') as f:
            pickle.dump(paramDict, f)
        # '%' is the model's line-separator token.
        generated = run_model(args_fake).replace('%','\n')
    # Strip sequence markers from the generated text.
    generated = generated.replace('<start>','').replace('<end>','')
    # Collapse multi-digit numbers to their first digit.
    long_num = re.findall('[0-9][0-9]+', generated)
    for longint in long_num:
        generated = generated.replace(longint, longint[0])
    # Restore console output and show the result.
    sys.stdout = sys.__stdout__
    print '-'*50
    print generated
# NOTE(review): fragment — the data dict closed below begins before this
# chunk; these are its final entries. Python 2 code (bare ``print``).
    'pred_dmat': np.array(pred_dmat),
    'pixel_idx': pixel_idx,
    'site_idx': site_idx,
    'ls_beta_inits': ls_beta_inits,
    'beta_inits': beta_inits
}
# Banner announcing which model description is about to run.
print
print 'MODEL: {m}'.format(m=data['desc'])
print
# Fit the model with sampler settings taken from the config dict c.
M = run_model(c['mtype'], c['msubtype'], model_params, data['desc'],
              niter=c['niter'], nburnin=c['nburnin'], nthin=c['nthin'],
              nchains=c['nchains'], burn_till_tuned=c['burn_till_tuned'])
# Inputs and raw counts saved alongside the fit for reproducibility.
data_to_pickle = {
    'ls_dmat': ls_dmat,
    'dmat': dmat,
    'pred_dmat': pred_dmat,
    'coords_grid': data['coords_grid'],
    'counts': counts
}
# Output directory is keyed by model type, subtype and description.
output_dir = os.path.join('./run/', c['mtype'], c['msubtype'], data['desc'])
# NOTE(review): fragment — this continues a pa.add_argument(...) call opened
# before this chunk.
    default=None)  # the number of components in the 1st dimension
pa.add_argument('--h_num', type=int,
                default=None)  # the number of components in the 2nd dimension
pa.add_argument('--d_num', type=int,
                default=None)  # the number of components in the 3rd dimension
pa.add_argument('--tr_bat', type=int,
                default=None)  # the batch size in training phase
pa.add_argument('--GPU_str', type=str,
                default=None)  # 'yes'/'no', whether use GPU
pa.add_argument('--da_str', type=str,
                default=None)  # the name of the dataset
pa.add_argument(
    '--aug_str', type=str, default=None
)  # 'yes'/'no', whether use classical augmentation technologies
pa.add_argument('--test_once', type=int,
                default=None)  # how many times do you test after iterating
pa.add_argument('--tim_num', type=int, default=None)  # running number
ar = pa.parse_args()
# Forward the CLI options to the training entry point; the test batch size
# and iteration count are fixed here rather than exposed as flags.
run.run_model(w_num=ar.w_num, h_num=ar.h_num, d_num=ar.d_num,
              sam_size=ar.sam_size, tim_num=ar.tim_num, tr_bat=ar.tr_bat,
              te_bat=1000, GPU_str=ar.GPU_str, iter_num=20000,
              test_once=ar.test_once, da_str=ar.da_str, aug_str=ar.aug_str)
"""Run one model configuration and append its metrics to a TSV results file."""
import os  # bug fix: os.path.exists was used below without importing os
import sys
import argparse
import pickle

from run import run_model

parser = argparse.ArgumentParser()
parser.add_argument('--model', help='Name of the model to be run', required=True)
parser.add_argument('--elmo', help='Use ELMo embeddings', action="store_true")  # uses GloVe embeddings otherwise
parser.add_argument('--save_preds', help='Save predictions for Error Analysis', action="store_true")
parser.add_argument('--save_model', help='Save model weights & vocabulary', action="store_true")
args = parser.parse_args()

results_path = '/home/ndg/users/sbagga1/generalization/results/' + args.model + '_ELMo' + str(args.elmo) + '.tsv'
# Refuse to overwrite an existing results file.
if os.path.exists(results_path):
    sys.exit("Results file already exists: " + results_path)

print("Model = {} | ELMo = {} | SavePreds = {} | SaveModel = {} | Output Filename = {}".format(args.model, args.elmo, args.save_preds, args.save_model, results_path))

f1, precision, recall, accuracy, auprc, preds, n_epochs = run_model(name=args.model, use_elmo=args.elmo, save_predictions=args.save_preds, save_model=args.save_model)

# Write to TSV (with-block also fixes the file never being closed/flushed):
with open(results_path, "w") as results_file:
    results_file.write("Model\tF1-score\tPrecision\tRecall\tAccuracy\tAUPRC\n")
    results_file.write(args.model + '_ELMo' + str(args.elmo) + '_' + str(n_epochs) + '\t' + str(f1) + '\t' + str(precision) + '\t' + str(recall) + '\t' + str(accuracy) + '\t' + str(auprc) + '\n')

if args.save_preds:
    # Persist raw predictions for later error analysis.
    preds_path = '/home/ndg/users/sbagga1/generalization/predictions/'
    with open(preds_path + args.model + '_ELMo' + str(args.elmo) + '.pickle', 'wb') as f:
        pickle.dump(preds, f)
from run import run_model
import logging

logging.basicConfig(level=logging.DEBUG)

if __name__ == "__main__":
    # Candidate simulation servers; only `localhost` is used below.
    localhost = 'tcp://127.0.0.1:5678'
    cui_aw = 'tcp://10.129.132.192:9999'
    cui = 'tcp://160.36.59.189:5000'
    kirsten = 'tcp://160.36.56.211:9898'
    ehsan = 'tcp://10.129.132.192:8801'
    # Launch the WECC real-time model against the chosen server.
    run_config = dict(
        project='WECC_DBW_RealTime',
        model='Real_Time',
        raw='WECC_10%Wind_PSSE_RAWCoords.raw',
        dyr='WECC_10%Wind_PSSE_DYR.dyr',
        path='C:/RT-LABv11_Workspace_New/',
        server=localhost,
    )
    run_model(**run_config)
# Echo the run configuration before the sweep starts.
print(
    "Model = {} | ELMo = {} | SavePreds = {} | SaveModel = {} | OutFname = {}".
    format(args.model, args.elmo, args.save_preds, args.save_model,
           results_path))
# second value of tuple is for double_input | double_input = True means that
# the preceding comment should be considered
CONTEXTS = [('question', False), ('reply_text', False), ('reply_text', True)]
# Annotation-agreement thresholds to evaluate against.
AGREEMENTS = ['confidence-60', 'confidence-100']
# One training run per (context, agreement) combination.
for (context, double_input) in CONTEXTS:
    for conf in AGREEMENTS:
        f1, auroc, w_f1, precision, recall, accuracy, auprc, preds, n_epochs = run_model(
            args.model, context, conf, double_input=double_input,
            use_elmo=args.elmo, save_predictions=args.save_preds,
            save_model=args.save_model)
        # Append one TSV row; first column is the agreement as a fraction.
        results_file.write(
            str(float(conf.split('-')[1]) / 100) + '\t' + context + '_' +
            str(double_input) + '\t' + args.model + '_' + str(args.elmo) +
            '_' + str(n_epochs) + '\t' + str(f1) + '\t' + str(auroc) + '\t' +
            str(w_f1) + '\t' + str(precision) + '\t' + str(recall) + '\t' +
            str(accuracy) + '\t' + str(auprc) + '\n')
        if args.save_preds:
            # Persist raw predictions for later error analysis.
            with open(
                    error_path + args.model + str(args.elmo) + '_' + context +
                    '_' + str(double_input) + '_conf_' + conf.split('-')[1] +
                    '.pickle', 'wb') as f:
                pickle.dump(preds, f)
from run import run_model
import logging

logging.basicConfig(level=logging.DEBUG)

if __name__ == "__main__":
    # Candidate simulation servers; only `cui_ltb7` is used below.
    localhost = 'tcp://127.0.0.1:5678'
    cui_aw = 'tcp://10.129.132.192:9999'
    cui = 'tcp://160.36.59.189:5000'
    kirsten = 'tcp://160.36.56.211:9898'
    ehsan = 'tcp://10.129.132.192:8801'
    cui_ltb7 = 'tcp://160.36.56.211:9900'
    prod = 'tcp://160.36.58.82:'
    # Launch the WECC area 2/4 real-time model against the chosen server.
    run_config = dict(
        project='WECC_DBW_RealTime_Area_2_4',
        model='Real_Time_Area_2_4',
        raw='WECC_10%Wind_PSSE_RAWCoords.raw',
        dyr='WECC_10%Wind_PSSE_DYR.dyr',
        path='C:/RT-LABv11_Workspace_New/',
        server=cui_ltb7,
    )
    run_model(**run_config)
# --- Data & transforms -------------------------------------------------
from data import get_data
from device import get_device

device = get_device(force_cpu=False)
train_loader, test_loader = get_data(device, batch_size=64, data='cifar10')

# --- Model -------------------------------------------------------------
from model import NetCifar2
from torchsummary import summary

model = NetCifar2().to(device)
summary(model, input_size=(3, 32, 32))

# --- Training ----------------------------------------------------------
from run import run_model

epochs = 20
# Both regularization factors disabled for this run.
regularization = dict(l1_factor=0, l2_factor=0)
model, train_trackers, test_trackers, incorrect_samples = run_model(
    model, train_loader, test_loader, epochs, device, **regularization)
# test_trackers['test_losses']
from run import run_model
import logging

logging.basicConfig(level=logging.DEBUG)

# Local simulation server address.
localhost = 'tcp://127.0.0.1:5678'

if __name__ == "__main__":
    # Launch the IEEE 39-bus phasor model on the local server.
    run_config = dict(
        project='IEEE39Acq',
        model='phasor01_IEEE39',
        raw='39b_R1.raw',
        dyr='39b.dyr',
        path='C:/RT-LABv11_Workspace_New/',
        server=localhost,
    )
    run_model(**run_config)
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 29 08:26:19 2020

@author: pc
"""
import run

# pass parameters to the script
run_params = dict(
    w_num=3,
    h_num=1,
    sam_size=5,
    tim_num=0,
    tr_bat=2,
    te_bat=1000,
    GPU_str='no',
    iter_num=100000,
    test_once=3,
    da_str='chest',
    aug_str='yes',
)
run.run_model(**run_params)
from run import run_model
import logging

logging.basicConfig(level=logging.DEBUG)

if __name__ == "__main__":
    # Candidate simulation servers; only `localhost` is used below.
    localhost = 'tcp://127.0.0.1:5678'
    cui_aw = 'tcp://10.129.132.192:9999'
    cui = 'tcp://160.36.59.189:5000'
    kirsten = 'tcp://160.36.56.211:9898'
    ehsan = 'tcp://10.129.132.192:8801'
    # Launch the WECC wind-inertia phasor model on the local server.
    run_config = dict(
        project='WECC_Wind_Inertia_Siqi',
        model='phasor03_PSSE',
        raw='Curent02_final_ConstZCoords.raw',
        dyr='Curent02_final_Wind.dyr',
        path='C:/RT-LABv11_Workspace_New/',
        server=localhost,
    )
    run_model(**run_config)
"""Load the BCI dataset, build the configured model, and run training/testing."""
import dlc_bci as bci
import argparse
from util.configuration import get_args, get_model, setup_log
from util.data_util import *
from run import run_model, test_model, run_k_fold, train_model
import math

opt = get_args(argparse.ArgumentParser())
log = setup_log(opt)

# Bug fix: these identifiers previously contained an invisible combining
# character (U+0307, "train_̇input") — replaced with plain ASCII names.
train_input, train_target = bci.load(root='./data', train=True,
                                     store_local=True, one_khz=opt['one_khz'])
test_input, test_target = bci.load(root='./data', train=False,
                                   store_local=True, one_khz=opt['one_khz'])

# Fold size for k-fold cross-validation (kept even though unused here —
# presumably consumed further down the original script).
split = math.floor(train_input.size()[0] / opt['k_fold'])
train_dataset = Dataset(opt, train_input, train_target, log, 'train')
test_dataset = Dataset(opt, test_input, test_target, log, 'test')
log.info('[Data loaded.]')

model = get_model(opt, train_dataset.input_size())
testing_accuracy = run_model(model, train_dataset, test_dataset, opt, log)
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 29 08:26:19 2020

@author: pc
"""
import run

# pass parameters to the script
run_params = dict(
    w_num=1,
    h_num=1,
    d_num=1,
    sam_size=5,
    tim_num=0,
    tr_bat=1,
    te_bat=1000,
    GPU_str='no',
    iter_num=100000,
    test_once=3,
    da_str='ibsr',
    aug_str='no',
)
run.run_model(**run_params)