def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False):
    """Evaluating interface for the Backprop model.

    1. Retrieve the flags  2. get data  3. initialize network  4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: The switch to turn on if you want to generate all different inference trial results
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :return: None
    """
    # Recover the training-time flag object saved alongside the model
    print("Retrieving flag object for parameters")
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print(model_dir)
    flags = load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.backprop_step = eval_flags.backprop_step

    # Per-dataset test split used at evaluation time; unlisted datasets keep the saved ratio
    test_ratio_by_set = {
        'ballistics': 0.001,
        'sine_wave': 0.005,
        'robotic_arm': 0.2,
        'sine_test_1d': 0.05,
    }
    if flags.data_set in test_ratio_by_set:
        flags.test_ratio = test_ratio_by_set[flags.data_set]

    flags.batch_size = 1  # For backprop eval mode, batchsize is always 1
    flags.lr = 0.05
    flags.eval_batch_size = eval_flags.eval_batch_size
    flags.train_step = eval_flags.train_step

    # Load the data
    loader_train, loader_test = data_reader.read_data(flags, eval_data_all=eval_data_all)

    print("Making network now")
    ntwk = Network(Backprop, flags, loader_train, loader_test,
                   inference_mode=True, saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    n_trainable = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
    print(n_trainable)

    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        pred_file, truth_file = ntwk.evaluate(
            save_dir='/work/sr365/multi_eval/Backprop/' + flags.data_set, save_all=True)
    else:
        pred_file, truth_file = ntwk.evaluate()

    # Plot the MSE distribution
    plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
def evaluate_from_model(model_dir):
    """Evaluating interface for the Forward model.

    1. Retrieve the flags  2. get data  3. initialize network  4. eval
    :param model_dir: The folder to retrieve the model
    :return: None
    """
    # Strip an optional leading "models/" so the path can be re-joined uniformly below
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print("Retrieving flag object for parameters")
    flags = flag_reader.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode

    # Load the data
    loader_train, loader_test = data_reader.read_data(flags)

    print("Making network now")
    ntwk = Network(Forward, flags, loader_train, loader_test,
                   inference_mode=True, saved_model=flags.eval_model)

    # Evaluation process
    print("Start eval now:")
    pred_file, truth_file = ntwk.evaluate()

    # Plot the MSE distribution
    plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False, test_ratio=None):
    """Evaluating interface for the hybrid cINN + NA model.

    1. Retrieve the flags  2. get data  3. initialize network  4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: The switch to turn on if you want to generate all different inference trial results
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :param test_ratio: optional override of the test split (used for inference-time sweeps);
                       None falls back to the per-dataset helper
    :return: None
    """
    # Recover the training-time flag object saved alongside the model
    print("Retrieving flag object for parameters")
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    flags = helper_functions.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.batch_size = 1
    flags.backprop_step = 300
    flags.eval_batch_size = 2048
    # test_ratio sweep support: explicit value wins, otherwise use the dataset default
    flags.test_ratio = get_test_ratio_helper(flags) if test_ratio is None else test_ratio

    # Load the data
    loader_train, loader_test = data_reader.read_data(flags, eval_data_all=eval_data_all)

    print("Making network now")
    ntwk = Network(make_cINN_and_NA, flags, loader_train, loader_test,
                   inference_mode=True, saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    # The hybrid holds two sub-networks; report each parameter count in turn
    for sub_model in (ntwk.model_cINN, ntwk.model_NA):
        print(sum(p.numel() for p in sub_model.parameters() if p.requires_grad))

    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        pred_file, truth_file = ntwk.evaluate(
            save_dir='/work/sr365/NIPS_multi_eval_backup/multi_eval/hybrid_cINN_NA_0bp/' + flags.data_set,
            save_all=True)
    else:
        pred_file, truth_file = ntwk.evaluate()

    # Plot the MSE distribution (meta_material has no simulator-backed comparison)
    if flags.data_set != 'meta_material' and not multi_flag:
        plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False):
    """Evaluating interface for the INN model.

    1. Retrieve the flags  2. get data  3. initialize network  4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: run the repeated multi-evaluation path instead of a single eval
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :return: None
    """
    # Recover the training-time flag object saved alongside the model
    print("Retrieving flag object for parameters")
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    flags = helper_functions.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode

    # Per-dataset evaluation split
    if flags.data_set == 'ballistics':
        flags.test_ratio = 0.1
    elif flags.data_set == 'sine_wave':
        flags.test_ratio = 0.1
    elif flags.data_set == 'robotic_arm':
        flags.test_ratio = 0.1

    # Load the data
    loader_train, loader_test = data_reader.read_data(flags, eval_data_all=eval_data_all)

    print("Making network now")
    ntwk = Network(INN, flags, loader_train, loader_test,
                   inference_mode=True, saved_model=flags.eval_model)
    print(ntwk.ckpt_dir)
    print("number of trainable parameters is :")
    n_trainable = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
    print(n_trainable)

    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        ntwk.evaluate_multiple_time()
    else:
        pred_file, truth_file = ntwk.evaluate()

    # Plot the MSE distribution (meta_material has no simulator-backed comparison)
    if flags.data_set != 'meta_material' and not multi_flag:
        plotMSELossDistrib(pred_file, truth_file, flags)
        print("Evaluation finished")

    # If gaussian, plot the scatter plot of the generated points
    if flags.data_set == 'gaussian_mixture':
        Xpred = helper_functions.get_Xpred(path='data/', name=flags.eval_model)
        Ypred = helper_functions.get_Ypred(path='data/', name=flags.eval_model)
        generate_Gaussian.plotData(
            Xpred, Ypred,
            save_dir='data/' + flags.eval_model.replace('/', '_') + 'generation plot.png',
            eval_mode=True)
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False):
    """Evaluating interface for the MDN model.

    1. Retrieve the flags  2. get data  3. initialize network  4. eval
    :param model_dir: The folder to retrieve the model. Absolute paths are used as-is;
                      relative ones are resolved under "models/".
    :param multi_flag: run the repeated multi-evaluation path instead of a single eval
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :return: None
    """
    # Recover the training-time flag object saved alongside the model
    print("Retrieving flag object for parameters")
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    if model_dir.startswith('/'):
        # It is an absolute path
        flags = helper_functions.load_flags(model_dir)
    else:
        flags = helper_functions.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.test_ratio = get_test_ratio_helper(flags)
    # BUGFIX: removed the stale "flags.test_ratio *= 2" that was explicitly marked
    # "2020.10.10 only, delete afterward" -- it silently doubled the evaluation split
    # long past its intended lifetime.

    # Load the data
    train_loader, test_loader = data_reader.read_data(flags, eval_data_all=eval_data_all)

    print("Making network now")
    ntwk = Network(MDN, flags, train_loader, test_loader,
                   inference_mode=True, saved_model=flags.eval_model)
    print(model_dir)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
    print(pytorch_total_params)

    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        ntwk.evaluate_multiple_time()
    else:
        pred_file, truth_file = ntwk.evaluate()

    # Plot the MSE distribution (meta_material has no simulator-backed comparison)
    if flags.data_set != 'meta_material' and not multi_flag:
        plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
def evaluate_from_model(model_dir):
    """Evaluating interface for the Tandem (Forward + Backward) model.

    1. Retrieve the flags  2. get data  3. initialize network  4. eval
    :param model_dir: The folder to retrieve the model
    :return: None
    """
    # Recover the training-time flag object saved alongside the model
    print("Retrieving flag object for parameters")
    flags = flag_reader.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.batch_size = 1  # For backprop eval mode, batchsize is always 1

    # Load the data; this variant passes the data settings explicitly
    loader_train, loader_test = data_reader.read_data(
        x_range=flags.x_range,
        y_range=flags.y_range,
        geoboundary=flags.geoboundary,
        batch_size=flags.batch_size,
        normalize_input=flags.normalize_input,
        data_dir=flags.data_dir)

    print("Making network now")
    ntwk = Network(Forward, Backward, flags, loader_train, loader_test,
                   inference_mode=True, saved_model=flags.eval_model)

    # Evaluation process
    print("Start eval now:")
    pred_file, truth_file = ntwk.evaluate()

    # Plot the MSE distribution
    plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False, save_misc=False,
                        MSE_Simulator=False, save_Simulator_Ypred=True, init_lr=0.5,
                        BDY_strength=1):
    """Evaluating interface for the Forward (NA) model.

    1. Retrieve the flags  2. get data  3. initialize network  4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: The switch to turn on if you want to generate all different inference trial results
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :param save_misc: passed through to Network.evaluate
    :param MSE_Simulator: passed through to Network.evaluate
    :param save_Simulator_Ypred: passed through to Network.evaluate
    :param init_lr: learning rate used for the evaluation-time optimization
    :param BDY_strength: boundary-loss strength used for the evaluation-time optimization
    :return: None
    """
    # Recover the training-time flag object saved alongside the model
    print("Retrieving flag object for parameters")
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print(model_dir)
    flags = load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.test_ratio = get_test_ratio_helper(flags)

    # Dataset-specific evaluation batch sizes
    if flags.data_set == 'Peurifoy':
        flags.eval_batch_size = 10000
    elif flags.data_set == 'Chen':
        flags.eval_batch_size = 10000
    elif flags.data_set == 'Yang' or flags.data_set == 'Yang_sim':
        flags.eval_batch_size = 2000
    flags.batch_size = flags.eval_batch_size
    flags.lr = init_lr
    flags.BDY_strength = BDY_strength
    # NOTE(review): the two assignments below clobber the dataset-specific
    # eval_batch_size above and (after the print) the batch_size derived from it --
    # they look like debug leftovers; confirm they are intentional.
    flags.eval_batch_size = eval_flags.eval_batch_size
    flags.train_step = eval_flags.train_step
    print(flags)
    flags.batch_size = 500

    # BUGFIX: a hard-coded "eval_data_all = True" used to shadow the function
    # parameter right here, making it impossible for callers to evaluate on the
    # test split only; the parameter is now honored.
    train_loader, test_loader = data_reader.read_data(flags, eval_data_all=eval_data_all)
    print("LENGTH: ", len(test_loader))

    print("Making network now")
    ntwk = Network(Forward, flags, train_loader, test_loader,
                   inference_mode=True, saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
    print(pytorch_total_params)

    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        dest_dir = '/home/sr365/MM_bench_multi_eval/NA/'
        if not os.path.isdir(dest_dir):
            os.mkdir(dest_dir)
        dest_dir += flags.data_set
        if not os.path.isdir(dest_dir):
            os.mkdir(dest_dir)
        pred_file, truth_file = ntwk.evaluate(save_dir=dest_dir, save_all=True,
                                              save_misc=save_misc,
                                              MSE_Simulator=MSE_Simulator,
                                              save_Simulator_Ypred=save_Simulator_Ypred)
    else:
        pred_file, truth_file = ntwk.evaluate(save_dir='data/' + flags.data_set,
                                              save_misc=save_misc,
                                              MSE_Simulator=MSE_Simulator,
                                              save_Simulator_Ypred=save_Simulator_Ypred)

    # Plot the MSE distribution
    plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False, modulized_flag=False):
    """Evaluating interface for the INN model (with modulized multi-eval support).

    1. Retrieve the flags  2. get data  3. initialize network  4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: run the repeated multi-evaluation path instead of a single eval
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :param modulized_flag: run the modulized multi-time evaluation path (takes precedence)
    :return: None
    """
    # Recover the training-time flag object saved alongside the model
    print("Retrieving flag object for parameters")
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    flags = helper_functions.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.test_ratio = get_test_ratio_helper(flags)

    # Load the data
    loader_train, loader_test = data_reader.read_data(flags, eval_data_all=eval_data_all)

    print("Making network now")
    ntwk = Network(INN, flags, loader_train, loader_test,
                   inference_mode=True, saved_model=flags.eval_model)
    print(ntwk.ckpt_dir)
    print("number of trainable parameters is :")
    n_trainable = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
    print(n_trainable)

    # Evaluation process
    print("Start eval now:")
    if modulized_flag:
        ntwk.evaluate_modulized_multi_time()
    elif multi_flag:
        ntwk.evaluate_multiple_time()
    else:
        pred_file, truth_file = ntwk.evaluate()

    # Post-processing only applies to the single-shot evaluation path
    single_run = not multi_flag and not modulized_flag
    if single_run and flags.data_set != 'Yang_sim':
        # meta-material does not have simulator, hence no Ypred given
        MSE = plotMSELossDistrib(pred_file, truth_file, flags)
        # Record the achieved MSE back into the saved flags
        flags.best_validation_loss = MSE
        helper_functions.save_flags(flags, os.path.join("models", model_dir))
    elif single_run and flags.data_set == 'Yang_sim':
        # Remember where we are so we can come back after running the NA predictor
        cwd = os.getcwd()
        abs_path_Xpred = os.path.abspath(pred_file.replace('Ypred', 'Xpred'))
        # Change to the NA directory to run the ensemble prediction
        os.chdir('../NA/')
        MSE = predict.ensemble_predict_master('../Data/Yang_sim/state_dicts/',
                                              abs_path_Xpred, no_plot=False)
        # Record the achieved MSE back into the saved flags
        flags.best_validation_loss = MSE
        os.chdir(cwd)
        helper_functions.save_flags(flags, os.path.join("models", model_dir))
    print("Evaluation finished")
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False, save_misc=False,
                        MSE_Simulator=False, save_Simulator_Ypred=True, init_lr=0.01,
                        lr_decay=0.9, BDY_strength=1, save_dir='data/', noise_level=0,
                        md_coeff=0, md_start=None, md_end=None, md_radius=None,
                        eval_batch_size=None):
    """Evaluating interface for the NA model with MD-loss options.

    1. Retrieve the flags  2. get data  3. initialize network  4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: The switch to turn on if you want to generate all different inference trial results
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :param save_misc: passed through to Network.evaluate
    :param MSE_Simulator: passed through to Network.evaluate
    :param save_Simulator_Ypred: passed through to Network.evaluate (forced off for Yang_sim)
    :param init_lr: evaluation-time learning rate
    :param lr_decay: evaluation-time learning-rate decay
    :param BDY_strength: boundary-loss strength
    :param save_dir: output directory for the single-run path / suffix for the multi-run path
    :param noise_level: passed through to Network.evaluate
    :param md_coeff, md_start, md_end, md_radius: MD-loss overrides; None keeps the saved value
    :param eval_batch_size: evaluation batch size; None defaults to 2048
    :return: the MSE from plotMSELossDistrib, or None for Yang datasets
    """
    # Recover the training-time flag object saved alongside the model
    print("Retrieving flag object for parameters")
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print(model_dir)
    flags = load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.test_ratio = get_test_ratio_helper(flags)

    # BUGFIX: this previously read "if flags.data_set != None:" which matched EVERY
    # dataset (the stranded "#== 'Yang_sim'" comment and the print below show the
    # intent), so save_Simulator_Ypred was force-disabled everywhere.
    if flags.data_set == 'Yang_sim':
        save_Simulator_Ypred = False
        print("this is Yang sim dataset, setting the save_Simulator_Ypred to False")

    flags.batch_size = 1  # For backprop eval mode, batchsize is always 1
    flags.BDY_strength = BDY_strength
    flags.train_step = eval_flags.train_step
    # Fixed eval-time backprop step count (a dead assignment from eval_flags that
    # this line immediately clobbered has been removed).
    flags.backprop_step = 300

    # MD Loss: only override the saved flags when a value is explicitly given
    if md_coeff is not None:
        flags.md_coeff = md_coeff
    if md_start is not None:
        flags.md_start = md_start
    if md_end is not None:
        flags.md_end = md_end
    if md_radius is not None:
        flags.md_radius = md_radius

    ############################# Things that are changing #########################
    flags.lr = init_lr
    flags.lr_decay_rate = lr_decay
    flags.eval_batch_size = 2048 if eval_batch_size is None else eval_batch_size
    flags.optim = 'Adam'
    ###############################################################################
    print(flags)

    # Load the data
    train_loader, test_loader = data_reader.read_data(flags, eval_data_all=eval_data_all)

    print("Making network now")
    ntwk = Network(NA, flags, train_loader, test_loader,
                   inference_mode=True, saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
    print(pytorch_total_params)

    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        dest_dir = os.path.join('/home/sr365/MDNA_temp/', save_dir)
        dest_dir = os.path.join(dest_dir, flags.data_set)
        if not os.path.isdir(dest_dir):
            os.makedirs(dest_dir)
        pred_file, truth_file = ntwk.evaluate(save_dir=dest_dir, save_all=True,
                                              save_misc=save_misc,
                                              MSE_Simulator=MSE_Simulator,
                                              save_Simulator_Ypred=save_Simulator_Ypred,
                                              noise_level=noise_level)
    else:
        # Create the directory if it does not exist
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
        pred_file, truth_file = ntwk.evaluate(save_dir=save_dir,
                                              save_misc=save_misc,
                                              MSE_Simulator=MSE_Simulator,
                                              save_Simulator_Ypred=save_Simulator_Ypred,
                                              noise_level=noise_level)

    # Yang datasets skip the MSE plot (no simulator-backed Ypred here)
    if 'Yang' in flags.data_set:
        return
    # Plot the MSE distribution
    MSE = plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
    return MSE
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False, save_misc=False,
                        MSE_Simulator=False, save_Simulator_Ypred=True):
    """Evaluating interface for the NA model (quantile-plot variant).

    1. Retrieve the flags  2. get data  3. initialize network  4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: The switch to turn on if you want to generate all different inference trial results
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :param save_misc: passed through to Network.evaluate
    :param MSE_Simulator: passed through to Network.evaluate
    :param save_Simulator_Ypred: passed through to Network.evaluate (forced off for meta_material)
    :return: None
    """
    # Recover the training-time flag object saved alongside the model
    print("Retrieving flag object for parameters")
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print(model_dir)
    flags = load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.backprop_step = eval_flags.backprop_step
    flags.test_ratio = get_test_ratio_helper(flags)

    if flags.data_set == 'meta_material':
        save_Simulator_Ypred = False
        print("this is MM dataset, setting the save_Simulator_Ypred to False")

    flags.batch_size = 1  # For backprop eval mode, batchsize is always 1
    # NOTE(review): 'chen' is lowercase here while other variants compare against
    # 'Chen' -- confirm which spelling the dataset tag actually uses.
    flags.lr = 0.01 if flags.data_set == 'chen' else 0.5
    flags.train_step = eval_flags.train_step
    print(flags)

    # Load the data
    loader_train, loader_test = data_reader.read_data(flags, eval_data_all=eval_data_all)

    print("Making network now")
    ntwk = Network(NA, flags, loader_train, loader_test,
                   inference_mode=True, saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    n_trainable = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
    print(n_trainable)

    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        pred_file, truth_file = ntwk.evaluate(save_dir='../multi_eval/NA/' + flags.data_set,
                                              save_all=True, save_misc=save_misc,
                                              MSE_Simulator=MSE_Simulator,
                                              save_Simulator_Ypred=save_Simulator_Ypred)
    else:
        pred_file, truth_file = ntwk.evaluate(save_misc=save_misc,
                                              MSE_Simulator=MSE_Simulator,
                                              save_Simulator_Ypred=save_Simulator_Ypred)

    # Plot the MSE distribution with the standard quantile set
    makePlots(pred_file, truth_file, flags, quantiles=[0.05, 0.25, 0.5, 0.75, 0.95])
    print("Evaluation finished")
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False, save_misc=False,
                        MSE_Simulator=False, save_Simulator_Ypred=False):
    """Evaluating interface for the Backprop model, swept over eval batch sizes and trials.

    1. Retrieve the flags  2. get data  3. initialize network  4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: The switch to turn on if you want to generate all different inference trial results
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :param save_misc: passed through to Network.evaluate
    :param MSE_Simulator: passed through to Network.evaluate
    :param save_Simulator_Ypred: passed through to Network.evaluate
    :return: None
    """
    # Recover the training-time flag object saved alongside the model
    print("Retrieving flag object for parameters")
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print(model_dir)
    flags = load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.backprop_step = eval_flags.backprop_step

    # Per-dataset evaluation split (totals noted from the original)
    if flags.data_set == 'ballistics':
        flags.test_ratio = 0.0078      # 12800 in total
    elif flags.data_set == 'sine_wave':
        flags.test_ratio = 0.001       # 8000 in total
    elif flags.data_set == 'robotic_arm':
        flags.test_ratio = 0.1         # 10000 in total
    else:
        flags.test_ratio = 0.0051062 / 2

    flags.batch_size = 1  # For backprop eval mode, batchsize is always 1
    flags.lr = 1 if flags.data_set == 'ballistics' else 1e-2
    flags.train_step = eval_flags.train_step

    # Sweep: eval batch sizes x repeated trials.
    # NOTE(review): range(4000, 5000, 2000) yields only 4000 -- widen the range to
    # actually sweep more batch sizes.
    for bsize in range(4000, 5000, 2000):
        for trial in range(3):
            flags.eval_batch_size = bsize

            # Load the data fresh for each run
            loader_train, loader_test = data_reader.read_data(flags, eval_data_all=eval_data_all)

            print("Making network now")
            ntwk = Network(Backprop, flags, loader_train, loader_test,
                           inference_mode=True, saved_model=flags.eval_model)
            print("number of trainable parameters is :")
            n_trainable = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
            print(n_trainable)

            # Evaluation process
            print("Start eval now:")
            run_dir = 'D:/Yang_MM_Absorber_ML/Backprop/data/' + str(bsize) + '/' + str(trial + 1)
            if multi_flag:
                pred_file, truth_file = ntwk.evaluate(
                    save_dir='D:/Yang_MM_Absorber_ML/NA/' + flags.data_set,
                    save_all=True, save_misc=save_misc,
                    MSE_Simulator=MSE_Simulator,
                    save_Simulator_Ypred=save_Simulator_Ypred)
            else:
                pred_file, truth_file = ntwk.evaluate(
                    save_dir=run_dir, save_misc=save_misc,
                    MSE_Simulator=MSE_Simulator,
                    save_Simulator_Ypred=save_Simulator_Ypred)

            # Plot the MSE distribution for this run
            plotMSELossDistrib(pred_file, truth_file, flags, save_dir=run_dir)
            print("Evaluation finished")