def predict_from_model(pre_trained_model, Xpred_file, shrink_factor=1, save_name=''):
    """
    Predicting interface. 1. Retrieve the flags 2. get data 3. initialize network 4. predict
    :param pre_trained_model: The folder to retrieve the model from
    :param Xpred_file: The file holding the X values to predict from
    :param shrink_factor: Factor forwarded to Network.predict (also embedded in the save prefix)
    :param save_name: Prefix prepended to the saved prediction file names
    :return: None
    """
    # Retrieve the flag object
    print("This is doing the prediction for file", Xpred_file)
    print("Retrieving flag object for parameters")
    # Strip a leading "models/" so eval_model is the bare model name.
    # BUGFIX: eval_model was previously only bound inside this branch, raising a
    # NameError whenever pre_trained_model did not start with "models".
    if pre_trained_model.startswith("models"):
        eval_model = pre_trained_model[7:]
        print("after removing prefix models/, now model_dir is:", eval_model)
    else:
        eval_model = pre_trained_model
    flags = load_flags(pre_trained_model)       # Get the pre-trained model
    flags.eval_model = eval_model               # Reset the eval mode
    # Get the data; this part is useless in prediction but kept for simplicity
    train_loader, test_loader = data_reader.read_data(flags)
    print("Making network now")
    # Make Network
    ntwk = Network(Backprop, flags, train_loader, test_loader,
                   inference_mode=True, saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
    print(pytorch_total_params)
    # Evaluation process
    print("Start eval now:")
    pred_file, truth_file = ntwk.predict(
        Xpred_file,
        save_prefix=save_name + 'shrink_factor' + str(shrink_factor),
        shrink_factor=shrink_factor)
def training_from_flag(flags):
    """
    Training interface. 1. Read data 2. initialize network 3. train network 4. record flags
    :param flags: The training flags read from command line or parameter.py
    :return: None
    """
    if flags.use_cpu_only:
        # Hide every CUDA device so computation falls back to the CPU
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    # Fetch the training / testing data loaders
    train_loader, test_loader = data_reader.read_data(flags)
    if flags.normalize_input:
        # Normalized geometry lives in the [-1, 1] box on both axes.
        # NOTE(review): geoboundary_norm is set here but geoboundary is what gets
        # printed below — presumably intentional, confirm against the project.
        flags.geoboundary_norm = [-1, 1, -1, 1]
    print("Boundary is set at:", flags.geoboundary)
    print("Making network now")
    ntwk = Network(Forward, flags, train_loader, test_loader)
    print("Start training now...")
    ntwk.train()
    # House keeping: pickle the flags object and record the best validation error
    write_flags_and_BVE(flags, ntwk.best_validation_loss, ntwk.ckpt_dir)
def predict(model_dir, Ytruth_file, multi_flag=False):
    """
    Predict the output from given spectra.
    :param model_dir: Folder holding the trained model (with or without the "models/" prefix)
    :param Ytruth_file: Spectra file to run the prediction on
    :param multi_flag: Unused here; kept for interface compatibility
    """
    print("Retrieving flag object for parameters")
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    # Absolute paths load as-is; relative ones are resolved under "models/"
    if model_dir.startswith('/'):
        flags = helper_functions.load_flags(model_dir)
    else:
        flags = helper_functions.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    # No loaders are needed for pure inference
    ntwk = Network(INN, flags, train_loader=None, test_loader=None,
                   inference_mode=True, saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    trainable_count = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
    print(trainable_count)
    # Run the prediction itself
    pred_file, truth_file = ntwk.predict(Ytruth_file)
    # Skip the MSE plot for Yang datasets
    if 'Yang' not in flags.data_set:
        plotMSELossDistrib(pred_file, truth_file, flags)
def evaluate_from_model(model_dir):
    """
    Evaluating interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval
    :param model_dir: The folder to retrieve the model
    :return: None
    """
    # Normalize the model directory: drop a leading "models/" if present
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print("Retrieving flag object for parameters")
    flags = flag_reader.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    # Data loaders used for the evaluation pass
    train_loader, test_loader = data_reader.read_data(flags)
    print("Making network now")
    ntwk = Network(Forward, flags, train_loader, test_loader,
                   inference_mode=True, saved_model=flags.eval_model)
    print("Start eval now:")
    pred_file, truth_file = ntwk.evaluate()
    # Visualize how the MSE is distributed over the test set
    plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False):
    """
    Evaluating interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: Turn on to generate all different inference trial results
    :param eval_data_all: Turn on to put all data in evaluation data
    :return: None
    """
    print("Retrieving flag object for parameters")
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print(model_dir)
    flags = load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.backprop_step = eval_flags.backprop_step
    # Per-dataset test ratio; datasets not listed keep their stored ratio
    ratio_by_dataset = {
        'ballistics': 0.001,
        'sine_wave': 0.005,
        'robotic_arm': 0.2,
        'sine_test_1d': 0.05,
    }
    if flags.data_set in ratio_by_dataset:
        flags.test_ratio = ratio_by_dataset[flags.data_set]
    flags.batch_size = 1  # For backprop eval mode, batchsize is always 1
    flags.lr = 0.05
    flags.eval_batch_size = eval_flags.eval_batch_size
    flags.train_step = eval_flags.train_step
    # Fetch the evaluation data
    train_loader, test_loader = data_reader.read_data(flags, eval_data_all=eval_data_all)
    print("Making network now")
    ntwk = Network(Backprop, flags, train_loader, test_loader,
                   inference_mode=True, saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    print(sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad))
    print("Start eval now:")
    if multi_flag:
        pred_file, truth_file = ntwk.evaluate(
            save_dir='/work/sr365/multi_eval/Backprop/' + flags.data_set, save_all=True)
    else:
        pred_file, truth_file = ntwk.evaluate()
    # Visualize how the MSE is distributed over the test set
    plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False):
    """
    Evaluating interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: Turn on to run the repeated-inference evaluation instead
    :param eval_data_all: Turn on to put all data in evaluation data
    :return: None
    """
    print("Retrieving flag object for parameters")
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    flags = helper_functions.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    # These three datasets all evaluate on a 10% split; others keep the stored ratio
    if flags.data_set in ('ballistics', 'sine_wave', 'robotic_arm'):
        flags.test_ratio = 0.1
    train_loader, test_loader = data_reader.read_data(flags, eval_data_all=eval_data_all)
    print("Making network now")
    ntwk = Network(INN, flags, train_loader, test_loader,
                   inference_mode=True, saved_model=flags.eval_model)
    print(ntwk.ckpt_dir)
    print("number of trainable parameters is :")
    print(sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad))
    print("Start eval now:")
    if multi_flag:
        ntwk.evaluate_multiple_time()
    else:
        pred_file, truth_file = ntwk.evaluate()
        # Only the single-shot path produces files suitable for the MSE plot
        if flags.data_set != 'meta_material':
            plotMSELossDistrib(pred_file, truth_file, flags)
            print("Evaluation finished")
    # If gaussian, plot the scatter plot of the generated points
    if flags.data_set == 'gaussian_mixture':
        Xpred = helper_functions.get_Xpred(path='data/', name=flags.eval_model)
        Ypred = helper_functions.get_Ypred(path='data/', name=flags.eval_model)
        generate_Gaussian.plotData(
            Xpred, Ypred,
            save_dir='data/' + flags.eval_model.replace('/', '_') + 'generation plot.png',
            eval_mode=True)
def modulized_evaluate_from_model(model_dir, operate_dir, FF=False, BP=False):
    """
    Evaluating interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval
    :param model_dir: The folder to retrieve the model
    :param operate_dir: The directory to operate in (with all the Xpred, Ypred files)
    :param FF: Forwarded to Network.modulized_bp_ff
    :param BP: When on, run 300 backprop steps instead of 1
    :return: None
    """
    print("Retrieving flag object for parameters")
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print(model_dir)
    flags = load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.backprop_step = 300 if BP else 1
    flags.test_ratio = get_test_ratio_helper(flags)
    if flags.data_set == 'meta_material':
        # NOTE(review): this local is never read afterwards in this function
        save_Simulator_Ypred = False
        print(
            "this is MM dataset, there is no simple numerical simulator therefore setting the save_Simulator_Ypred to False"
        )
    flags.batch_size = 1  # For backprop eval mode, batchsize is always 1
    flags.lr = 0.5
    flags.eval_batch_size = 2048
    flags.train_step = 500
    print(flags)
    # No loaders are needed: the inputs come from the files in operate_dir
    ntwk = Network(NA, flags, train_loader=None, test_loader=None,
                   inference_mode=True, saved_model=flags.eval_model)
    # Collect the Xpred seeds and the ground truth from the operating folder
    Xpred_list, Xt, Yt = get_xpred_ytruth_xtruth_from_folder(operate_dir)
    X_init_mat = reshape_xpred_list_to_mat(Xpred_list)
    print("Start eval now:")
    ntwk.modulized_bp_ff(X_init_mat=X_init_mat, Ytruth=Yt,
                         save_dir=operate_dir, save_all=True, FF=FF)
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False):
    """
    Evaluating interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: Turn on to run the repeated-inference evaluation instead
    :param eval_data_all: Turn on to put all data in evaluation data
    :return: None
    """
    print("Retrieving flag object for parameters")
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    # Absolute paths load as-is; relative ones are resolved under "models/"
    if model_dir.startswith('/'):
        flags = helper_functions.load_flags(model_dir)
    else:
        flags = helper_functions.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.test_ratio = get_test_ratio_helper(flags)
    # 2020.10.10 only, delete afterward
    flags.test_ratio *= 2
    train_loader, test_loader = data_reader.read_data(flags, eval_data_all=eval_data_all)
    print("Making network now")
    ntwk = Network(MDN, flags, train_loader, test_loader,
                   inference_mode=True, saved_model=flags.eval_model)
    print(model_dir)
    print("number of trainable parameters is :")
    print(sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad))
    print("Start eval now:")
    if multi_flag:
        ntwk.evaluate_multiple_time()
    else:
        pred_file, truth_file = ntwk.evaluate()
        # Only the single-shot path produces files suitable for the MSE plot
        if flags.data_set != 'meta_material':
            plotMSELossDistrib(pred_file, truth_file, flags)
            print("Evaluation finished")
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False, test_ratio=None):
    """
    Evaluating interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: Turn on to save every inference trial result
    :param eval_data_all: Turn on to put all data in evaluation data
    :param test_ratio: Optional override of the test split ratio (for inference-time sweeps)
    :return: None
    """
    print("Retrieving flag object for parameters")
    if model_dir.startswith("models"):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    flags = helper_functions.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.batch_size = 1
    flags.backprop_step = 300
    flags.eval_batch_size = 2048
    if test_ratio is None:
        flags.test_ratio = get_test_ratio_helper(flags)
    else:
        # To sweep the test ratio with respect to inference time,
        # also making the batch size large enough
        flags.test_ratio = test_ratio
    train_loader, test_loader = data_reader.read_data(flags, eval_data_all=eval_data_all)
    print("Making network now")
    ntwk = Network(make_cINN_and_NA, flags, train_loader, test_loader,
                   inference_mode=True, saved_model=flags.eval_model)
    # Report the sizes of the two sub-models (cINN first, then NA)
    print("number of trainable parameters is :")
    print(sum(p.numel() for p in ntwk.model_cINN.parameters() if p.requires_grad))
    print(sum(p.numel() for p in ntwk.model_NA.parameters() if p.requires_grad))
    print("Start eval now:")
    if multi_flag:
        pred_file, truth_file = ntwk.evaluate(
            save_dir='/work/sr365/NIPS_multi_eval_backup/multi_eval/hybrid_cINN_NA_0bp/' + flags.data_set,
            save_all=True)
    else:
        pred_file, truth_file = ntwk.evaluate()
    # Visualize how the MSE is distributed over the test set
    if flags.data_set != 'meta_material' and not multi_flag:
        plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
def training_from_flag(flags):
    """
    Training interface. 1. Read data 2. initialize network 3. train network 4. record flags
    :param flags: The training flags read from command line or parameter.py
    :return: None
    """
    # Fetch the training / testing data loaders
    train_loader, test_loader = data_reader.read_data(flags)
    print("Making network now")
    ntwk = Network(VAE, flags, train_loader, test_loader)
    print("Start training now...")
    ntwk.train()
    # House keeping: pickle the flags object and record the best validation error
    write_flags_and_BVE(flags, ntwk.best_validation_loss, ntwk.ckpt_dir)
def infer(pre_trained_model, Xpred_file, no_plot=True):
    """
    Run inference on Xpred_file using a pre-trained model.
    :param pre_trained_model: The folder to retrieve the model from
    :param Xpred_file: The file holding the X values to predict from
    :param no_plot: When False, also save predictions and plot the MSE distribution
    :return: (pred_file, truth_file, flags)
    """
    print("This is doing the prediction for file", Xpred_file)
    print("Retrieving flag object for parameters")
    if pre_trained_model.startswith("models"):
        eval_model = pre_trained_model[7:]
        print("after removing prefix models/, now model_dir is:", eval_model)
    flags = load_flags(pre_trained_model)       # Get the pre-trained model
    # NOTE(review): unlike sibling functions, this keeps the unstripped path in
    # flags.eval_model — presumably intentional, confirm against Network's loader.
    flags.eval_model = pre_trained_model        # Reset the eval mode
    flags.test_ratio = 0.1                      # useless number
    # Get the data; this part is useless in prediction but kept for simplicity
    train_loader, test_loader = data_reader.read_data(flags)
    print("Making network now")
    ntwk = Network(Backprop, flags, train_loader, test_loader,
                   inference_mode=True, saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    print(sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad))
    print("Start eval now:")
    if no_plot:
        pred_file, truth_file = ntwk.predict(Xpred_file, no_save=True)
    else:
        # Save the prediction and plot the MSE distribution
        pred_file, truth_file = ntwk.predict(Xpred_file, no_save=False)
        # Dots are swapped out to make the plot name different
        flags.eval_model = pred_file.replace('.', '_')
        plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
    return pred_file, truth_file, flags
def evaluate_from_model(model_dir):
    """
    Evaluating interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval
    :param model_dir: The folder to retrieve the model
    :return: None
    """
    print("Retrieving flag object for parameters")
    flags = flag_reader.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.batch_size = 1          # For backprop eval mode, batchsize is always 1
    # Data loaders built from the individual flag fields (legacy read_data signature)
    train_loader, test_loader = data_reader.read_data(
        x_range=flags.x_range,
        y_range=flags.y_range,
        geoboundary=flags.geoboundary,
        batch_size=flags.batch_size,
        normalize_input=flags.normalize_input,
        data_dir=flags.data_dir)
    print("Making network now")
    ntwk = Network(Forward, Backward, flags, train_loader, test_loader,
                   inference_mode=True, saved_model=flags.eval_model)
    print("Start eval now:")
    pred_file, truth_file = ntwk.evaluate()
    # Visualize how the MSE is distributed over the test set
    plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False, save_misc=False,
                        MSE_Simulator=False, save_Simulator_Ypred=True, init_lr=0.5, BDY_strength=1):
    """
    Evaluating interface. 1. Retreive the flags 2. get data 3. initialize network 4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: The switch to turn on if you want to generate all different inference trial results
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
        (NOTE(review): this parameter is overridden to True below, so it currently has no effect)
    :param save_misc: Forwarded to Network.evaluate
    :param MSE_Simulator: Forwarded to Network.evaluate
    :param save_Simulator_Ypred: Forwarded to Network.evaluate
    :param init_lr: Learning rate written into flags.lr
    :param BDY_strength: Boundary-loss strength written into flags.BDY_strength
    :return: None
    """
    # Retrieve the flag object
    print("Retrieving flag object for parameters")
    if (model_dir.startswith("models")):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print(model_dir)
    flags = load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir                        # Reset the eval mode
    flags.test_ratio = get_test_ratio_helper(flags)
    # Per-dataset evaluation batch size
    if flags.data_set == 'Peurifoy':
        flags.eval_batch_size = 10000
    elif flags.data_set == 'Chen':
        flags.eval_batch_size = 10000
    elif flags.data_set == 'Yang' or flags.data_set == 'Yang_sim':
        flags.eval_batch_size = 2000
    flags.batch_size = flags.eval_batch_size
    flags.lr = init_lr
    flags.BDY_strength = BDY_strength
    # NOTE(review): this clobbers the per-dataset eval_batch_size chosen just above —
    # looks like a leftover override, confirm which value is intended.
    flags.eval_batch_size = eval_flags.eval_batch_size
    flags.train_step = eval_flags.train_step
    # delete after usage: 02.07 for vilidating that ball and sine is convex problem
    # Use a very small eval batch size and expected to see that meta and robo getting much worse performance
    # and the ball and sine getting nearly identical one
    # flags.eval_batch_size = 2
    print(flags)
    # NOTE(review): batch_size is reassigned a second time here, overriding the
    # eval_batch_size-derived value set above.
    flags.batch_size = 500
    # Get the data
    # NOTE(review): hard-coded True overrides the eval_data_all parameter.
    eval_data_all = True
    train_loader, test_loader = data_reader.read_data(
        flags, eval_data_all=eval_data_all)
    print("LENGTH: ", len(test_loader))
    print("Making network now")
    # Make Network
    ntwk = Network(Forward, flags, train_loader, test_loader,
                   inference_mode=True, saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for
                               p in ntwk.model.parameters() if p.requires_grad)
    print(pytorch_total_params)
    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        # Multi-trial evaluation writes into a hard-coded destination per dataset
        dest_dir = '/home/sr365/MM_bench_multi_eval/NA/'
        #dest_dir = '/data/users/ben/multi_eval/NA_lr' + str(init_lr) + 'bdy_' + str(BDY_strength)+'/'
        if not os.path.isdir(dest_dir):
            os.mkdir(dest_dir)
        dest_dir += flags.data_set
        if not os.path.isdir(dest_dir):
            os.mkdir(dest_dir)
        #pred_file, truth_file = ntwk.evaluate(save_dir='/work/sr365/multi_eval/NA/' + flags.data_set, save_all=True,
        pred_file, truth_file = ntwk.evaluate(
            save_dir=dest_dir, save_all=True, save_misc=save_misc,
            MSE_Simulator=MSE_Simulator, save_Simulator_Ypred=save_Simulator_Ypred)
    else:
        pred_file, truth_file = ntwk.evaluate(
            save_dir='data/' + flags.data_set, save_misc=save_misc,
            MSE_Simulator=MSE_Simulator, save_Simulator_Ypred=save_Simulator_Ypred)
    # Plot the MSE distribution
    plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False, modulized_flag=False): """ Evaluating interface. 1. Retreive the flags 2. get data 3. initialize network 4. eval :param model_dir: The folder to retrieve the model :param eval_data_all: The switch to turn on if you want to put all data in evaluation data :return: None """ # Retrieve the flag object print("Retrieving flag object for parameters") if (model_dir.startswith("models")): model_dir = model_dir[7:] print("after removing prefix models/, now model_dir is:", model_dir) flags = helper_functions.load_flags(os.path.join("models", model_dir)) flags.eval_model = model_dir # Reset the eval mode flags.test_ratio = get_test_ratio_helper(flags) # Get the data train_loader, test_loader = data_reader.read_data( flags, eval_data_all=eval_data_all) print("Making network now") # Make Network ntwk = Network(INN, flags, train_loader, test_loader, inference_mode=True, saved_model=flags.eval_model) print(ntwk.ckpt_dir) print("number of trainable parameters is :") pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad) print(pytorch_total_params) # Evaluation process print("Start eval now:") if modulized_flag: ntwk.evaluate_modulized_multi_time() elif multi_flag: ntwk.evaluate_multiple_time() else: pred_file, truth_file = ntwk.evaluate() # Plot the MSE distribution if flags.data_set != 'Yang_sim' and not multi_flag and not modulized_flag: # meta-material does not have simulator, hence no Ypred given MSE = plotMSELossDistrib(pred_file, truth_file, flags) # Add this MSE back to the folder flags.best_validation_loss = MSE helper_functions.save_flags(flags, os.path.join("models", model_dir)) elif flags.data_set == 'Yang_sim' and not multi_flag and not modulized_flag: # Save the current path for getting back in the future cwd = os.getcwd() abs_path_Xpred = os.path.abspath(pred_file.replace('Ypred', 'Xpred')) # Change to NA dictory to do prediction os.chdir('../NA/') MSE = 
predict.ensemble_predict_master('../Data/Yang_sim/state_dicts/', abs_path_Xpred, no_plot=False) # Add this MSE back to the folder flags.best_validation_loss = MSE os.chdir(cwd) helper_functions.save_flags(flags, os.path.join("models", model_dir)) print("Evaluation finished")
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False, save_misc=False,
                        MSE_Simulator=False, save_Simulator_Ypred=True):
    """
    Evaluating interface. 1. Retreive the flags 2. get data 3. initialize network 4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: The switch to turn on if you want to generate all different inference trial results
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :param save_misc: Forwarded to Network.evaluate
    :param MSE_Simulator: Forwarded to Network.evaluate
    :param save_Simulator_Ypred: Forwarded to Network.evaluate (forced False for meta_material)
    :return: None
    """
    # Retrieve the flag object
    print("Retrieving flag object for parameters")
    if (model_dir.startswith("models")):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print(model_dir)
    flags = load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir                        # Reset the eval mode
    flags.backprop_step = eval_flags.backprop_step
    flags.test_ratio = get_test_ratio_helper(flags)
    if flags.data_set == 'meta_material':
        save_Simulator_Ypred = False
        print("this is MM dataset, setting the save_Simulator_Ypred to False")
    flags.batch_size = 1  # For backprop eval mode, batchsize is always 1
    # NOTE(review): lowercase 'chen' — other functions in this project compare
    # against 'Chen' (capitalized), so this branch may never fire; confirm.
    if flags.data_set == 'chen':
        flags.lr = 0.01
    else:
        flags.lr = 0.5
    flags.train_step = eval_flags.train_step
    print(flags)
    # Get the data
    train_loader, test_loader = data_reader.read_data(
        flags, eval_data_all=eval_data_all)
    print("Making network now")
    # Make Network
    ntwk = Network(NA, flags, train_loader, test_loader,
                   inference_mode=True, saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
    print(pytorch_total_params)
    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        pred_file, truth_file = ntwk.evaluate(
            save_dir='../multi_eval/NA/' + flags.data_set, save_all=True,
            save_misc=save_misc, MSE_Simulator=MSE_Simulator,
            save_Simulator_Ypred=save_Simulator_Ypred)
    else:
        pred_file, truth_file = ntwk.evaluate(
            save_misc=save_misc,
            MSE_Simulator=MSE_Simulator, save_Simulator_Ypred=save_Simulator_Ypred)
    # Plot the MSE distribution with quantile markers
    makePlots(pred_file, truth_file, flags, quantiles=[0.05, 0.25, 0.5, 0.75, 0.95])
    print("Evaluation finished")
# Top-level training script: read flags, load data, train, then record results.
# Read the parameters to be set
flags = flag_reader.read_flag()
# Build the data loaders from the individual flag fields (legacy read_data signature)
train_loader, test_loader = data_reader.read_data(
    x_range=flags.x_range,
    y_range=flags.y_range,
    geoboundary=flags.geoboundary,
    batch_size=flags.batch_size,
    normalize_input=flags.normalize_input,
    data_dir=flags.data_dir)
if flags.normalize_input:
    # Normalized geometry lives in the [-1, 1] box on both axes
    flags.geoboundary = [-1, 1, -1, 1]
print("Boundary is set at:", flags.geoboundary)
print("Making network now")
ntwk = Network(Forward, flags, train_loader, test_loader)
print("Start training now...")
ntwk.train()
# House keeping: write the parameters, then move everything into its folder
flag_reader.write_flags_and_BVE(flags, ntwk.best_validation_loss)
put_param_into_folder()
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False, save_misc=False,
                        MSE_Simulator=False, save_Simulator_Ypred=True, init_lr=0.01,
                        lr_decay=0.9, BDY_strength=1, save_dir='data/', noise_level=0,
                        md_coeff=0, md_start=None, md_end=None, md_radius=None,
                        eval_batch_size=None):
    """
    Evaluating interface. 1. Retreive the flags 2. get data 3. initialize network 4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: The switch to turn on if you want to generate all different inference trial results
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :param save_misc: Forwarded to Network.evaluate
    :param MSE_Simulator: Forwarded to Network.evaluate
    :param save_Simulator_Ypred: Forwarded to Network.evaluate (forced False below)
    :param init_lr: Learning rate written into flags.lr
    :param lr_decay: Decay rate written into flags.lr_decay_rate
    :param BDY_strength: Boundary-loss strength written into flags.BDY_strength
    :param save_dir: Output directory for the single-shot path (and suffix of the multi path)
    :param noise_level: Forwarded to Network.evaluate
    :param md_coeff, md_start, md_end, md_radius: MD-loss settings copied into flags when given
    :param eval_batch_size: Optional override of flags.eval_batch_size (default 2048)
    :return: MSE from plotMSELossDistrib, or None for Yang datasets
    """
    # Retrieve the flag object
    print("Retrieving flag object for parameters")
    if (model_dir.startswith("models")):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print(model_dir)
    flags = load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir                        # Reset the eval mode
    flags.test_ratio = get_test_ratio_helper(flags)
    flags.backprop_step = eval_flags.backprop_step
    #flags.test_ratio = 0.02
    # NOTE(review): the condition was loosened from '== Yang_sim' (see trailing
    # comment) to 'is not None', so this fires — and prints the Yang-sim message —
    # for every dataset; confirm this is still intended.
    if flags.data_set != None:  #== 'Yang_sim':
        save_Simulator_Ypred = False
        print("this is Yang sim dataset, setting the save_Simulator_Ypred to False")
    flags.batch_size = 1  # For backprop eval mode, batchsize is always 1
    flags.BDY_strength = BDY_strength
    flags.train_step = eval_flags.train_step
    # NOTE(review): overrides the eval_flags.backprop_step assignment made above.
    flags.backprop_step = 300
    # MD Loss: new version
    # NOTE(review): md_coeff defaults to 0 (not None), so flags.md_coeff is always
    # overwritten — unlike the other md_* parameters which default to None.
    if md_coeff is not None:
        flags.md_coeff = md_coeff
    if md_start is not None:
        flags.md_start = md_start
    if md_end is not None:
        flags.md_end = md_end
    if md_radius is not None:
        flags.md_radius = md_radius
    ############################# Thing that are changing #########################
    flags.lr = init_lr
    flags.lr_decay_rate = lr_decay
    flags.eval_batch_size = 2048 if eval_batch_size is None else eval_batch_size
    flags.optim = 'Adam'
    ###############################################################################
    print(flags)
    # if flags.data_set == 'Peurifoy':
    #     flags.eval_batch_size = 10000
    # elif flags.data_set == 'Chen':
    #     flags.eval_batch_size = 10000
    # elif flags.data_set == 'Yang' or flags.data_set == 'Yang_sim':
    #     flags.eval_batch_size = 2000
    #
    # flags.batch_size = flags.eval_batch_size
    # Get the data
    train_loader, test_loader = data_reader.read_data(flags, eval_data_all=eval_data_all)
    print("Making network now")
    # Make Network
    ntwk = Network(NA, flags, train_loader, test_loader,
                   inference_mode=True, saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
    print(pytorch_total_params)
    # pred_file, truth_file = ntwk.validate_model(save_dir='data/' + flags.data_set+'_best_model', save_misc=save_misc,
    #                                             MSE_Simulator=MSE_Simulator, save_Simulator_Ypred=save_Simulator_Ypred)
    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        #dest_dir = '/home/sr365/mm_bench_multi_eval_Chen_sweep/NA_init_lr_{}_decay_{}_batch_{}_bp_{}_noise_lvl_{}/'.format(init_lr, lr_decay, flags.eval_batch_size, flags.backprop_step, noise_level)
        #dest_dir = '/home/sr365/mm_bench_compare_MDNA_loss/NA_init_lr_{}_decay_{}_MD_loss_{}'.format(flags.lr, flags.lr_decay_rate, flags.md_coeff)
        #dest_dir = '/home/sr365/MM_bench_multi_eval/NA_RMSprop/'
        #dest_dir = '/data/users/ben/multi_eval/NA_lr' + str(init_lr) + 'bdy_' + str(BDY_strength)+'/'
        # Multi-trial results go under a hard-coded machine-specific root
        dest_dir = os.path.join('/home/sr365/MDNA_temp/', save_dir)
        dest_dir = os.path.join(dest_dir, flags.data_set)
        if not os.path.isdir(dest_dir):
            os.makedirs(dest_dir)
        #pred_file, truth_file = ntwk.evaluate(save_dir='/work/sr365/multi_eval/NA/' + flags.data_set, save_all=True,
        pred_file, truth_file = ntwk.evaluate(save_dir=dest_dir, save_all=True,
                                              save_misc=save_misc, MSE_Simulator=MSE_Simulator,
                                              save_Simulator_Ypred=save_Simulator_Ypred,
                                              noise_level=noise_level)
    else:
        # Creat the directory is not exist
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
        pred_file, truth_file = ntwk.evaluate(save_dir=save_dir,
                                              save_misc=save_misc, MSE_Simulator=MSE_Simulator,
                                              save_Simulator_Ypred=save_Simulator_Ypred,
                                              noise_level=noise_level)
    #pred_file, truth_file = ntwk.evaluate(save_dir='data/'+flags.data_set,save_misc=save_misc, MSE_Simulator=MSE_Simulator, save_Simulator_Ypred=save_Simulator_Ypred)
    # Yang datasets have no plottable truth here, so bail out without an MSE
    if 'Yang' in flags.data_set:
        return
    # Plot the MSE distribution
    MSE = plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
    return MSE
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False, save_misc=False,
                        MSE_Simulator=False, save_Simulator_Ypred=False):
    """
    Evaluating interface. 1. Retreive the flags 2. get data 3. initialize network 4. eval
    :param model_dir: The folder to retrieve the model
    :param multi_flag: The switch to turn on if you want to generate all different inference trial results
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :param save_misc: Forwarded to Network.evaluate
    :param MSE_Simulator: Forwarded to Network.evaluate
    :param save_Simulator_Ypred: Forwarded to Network.evaluate
    :return: None
    """
    # Retrieve the flag object
    print("Retrieving flag object for parameters")
    if (model_dir.startswith("models")):
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print(model_dir)
    flags = load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir                        # Reset the eval mode
    flags.backprop_step = eval_flags.backprop_step
    # Per-dataset test ratio (trailing comments give the resulting sample counts)
    if flags.data_set == 'ballistics':
        flags.test_ratio = 0.0078  # 12800 in total
    elif flags.data_set == 'sine_wave':
        flags.test_ratio = 0.001  # 8000 in total
    elif flags.data_set == 'robotic_arm':
        flags.test_ratio = 0.1  # 10000 in total
    else:
        flags.test_ratio = 0.0051062 / 2
    #flags.test_ratio = 0
    #flags.test_ratio = 0.00025 # 20000 in total for Meta material
    flags.batch_size = 1  # For backprop eval mode, batchsize is always 1
    flags.lr = 1e-2
    if flags.data_set == 'ballistics':
        flags.lr = 1
    flags.train_step = eval_flags.train_step
    # Sweep over eval batch sizes (i) with several repeats (j).
    # NOTE(review): range(4000, 5000, 2000) yields only i=4000 — the step is larger
    # than the span, so this "sweep" is a single batch size; confirm intent.
    for i in range(4000, 5000, 2000):
        for j in range(3):
            flags.eval_batch_size = i
            # Get the data
            train_loader, test_loader = data_reader.read_data(
                flags, eval_data_all=eval_data_all)
            print("Making network now")
            # Make Network
            ntwk = Network(Backprop, flags, train_loader, test_loader,
                           inference_mode=True, saved_model=flags.eval_model)
            print("number of trainable parameters is :")
            pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
            print(pytorch_total_params)
            # Evaluation process — results are written to hard-coded Windows paths
            print("Start eval now:")
            if multi_flag:
                pred_file, truth_file = ntwk.evaluate(
                    save_dir='D:/Yang_MM_Absorber_ML/NA/' + flags.data_set, save_all=True,
                    save_misc=save_misc, MSE_Simulator=MSE_Simulator,
                    save_Simulator_Ypred=save_Simulator_Ypred)
            else:
                pred_file, truth_file = ntwk.evaluate(
                    save_dir='D:/Yang_MM_Absorber_ML/Backprop/data/' + str(i) + '/' + str(j + 1),
                    save_misc=save_misc, MSE_Simulator=MSE_Simulator,
                    save_Simulator_Ypred=save_Simulator_Ypred)
            # Plot the MSE distribution
            plotMSELossDistrib(
                pred_file, truth_file, flags,
                save_dir='D:/Yang_MM_Absorber_ML/Backprop/data/' + str(i) + '/' + str(j + 1))
            print("Evaluation finished")