def ensemble_predict(model_list, Xpred_file, model_dir=None, no_plot=True, remove_extra_files=True):
    """
    Predict the output from an ensemble of models and average their predictions.

    :param model_list: The list of model names (folders) to aggregate
    :param Xpred_file: The Xpred file that you want to predict
    :param model_dir: The directory to save the plot to (None -> default location)
    :param no_plot: If True, do not plot (for multi_eval)
    :param remove_extra_files: Kept for backward compatibility; the per-model
        predictions are returned in memory here, so no extra files are
        written and nothing needs removing.
    :return: The file name of the saved ensemble Ypred file
    """
    print("this is doing ensemble prediction for models :", model_list)
    pred_list = []
    truth_file, flags = None, None
    # Collect each model's prediction; predict_from_model returns the
    # prediction as an in-memory array (first return value) here
    for pre_trained_model in model_list:
        pred_file, truth_file, flags = predict_from_model(pre_trained_model, Xpred_file)
        pred_list.append(np.copy(np.expand_dims(pred_file, axis=2)))
    # Average the stacked predictions over the ensemble axis
    pred_all = np.concatenate(pred_list, axis=2)
    pred_mean = np.mean(pred_all, axis=2)
    save_name = Xpred_file.replace('Xpred', 'Ypred')
    np.savetxt(save_name, pred_mean)

    # If no_plot, return early (still report the saved file name, which the
    # docstring promises but the previous version never returned)
    if no_plot:
        return save_name

    # saving the plot down
    flags.eval_model = 'ensemble_plot' + Xpred_file.replace('/', '')
    if model_dir is None:
        plotMSELossDistrib(save_name, truth_file, flags)
    else:
        plotMSELossDistrib(save_name, truth_file, flags, save_dir=model_dir)
    return save_name
# Example #2 (scrape-artifact separator between unrelated snippets)
def ensemble_predict(model_list, Xpred_file, plot_mode=True):
    """
    Predict the output from an ensemble of models and average their predictions.

    :param model_list: The list of model names to aggregate
    :param Xpred_file: The Xpred file that you want to predict
    :param plot_mode: If True, plot the MSE distribution of the ensemble
    :return: The file name of the saved ensemble Ypred file
    """
    print("this is doing ensemble prediction for models :", model_list)
    pred_list = []
    truth_file, flags = None, None
    # Collect each model's prediction; save_mode=False keeps the prediction
    # in memory instead of writing a per-model file
    for pre_trained_model in model_list:
        pred, truth_file, flags = predict_from_model(pre_trained_model,
                                                     Xpred_file,
                                                     save_mode=False)
        pred_list.append(np.copy(np.expand_dims(pred, axis=2)))
    # Take the mean of the predictions over the ensemble axis
    pred_all = np.concatenate(pred_list, axis=2)
    pred_mean = np.mean(pred_all, axis=2)
    save_name = Xpred_file.replace('Xpred', 'Ypred_ensemble')
    np.savetxt(save_name, pred_mean)

    # saving the plot down; the plot name is derived from the file stem
    save_name_ensemble = Xpred_file.split('/')[-1][:-4]
    print('save name of ensemble = ', save_name_ensemble)  # typo "emsemble" fixed
    flags.eval_model = 'ensemble_model' + save_name_ensemble
    if plot_mode:
        plotMSELossDistrib(save_name, truth_file, flags)
    return save_name
# Example #3 (scrape-artifact separator between unrelated snippets)
def predict(model_dir, Ytruth_file, multi_flag=False):
    """
    Predict the output from given spectra using a trained INN model.

    :param model_dir: Folder of the trained model. A leading "models/"
        prefix is stripped; absolute paths are used as-is.
    :param Ytruth_file: The spectra (truth) file to run the prediction on
    :param multi_flag: Unused here; kept for signature compatibility with
        the other predict interfaces
    :return: None
    """
    print("Retrieving flag object for parameters")
    # Only strip when the full "models/" prefix is present; the old check
    # (startswith("models") then [7:]) mangled names like "models_v2"
    if model_dir.startswith("models/"):
        model_dir = model_dir[len("models/"):]
        print("after removing prefix models/, now model_dir is:", model_dir)
    if model_dir.startswith('/'):  # It is an absolute path
        flags = helper_functions.load_flags(model_dir)
    else:
        flags = helper_functions.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode

    # Build the network in inference mode; no data loaders are needed
    ntwk = Network(INN,
                   flags,
                   train_loader=None,
                   test_loader=None,
                   inference_mode=True,
                   saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters()
                               if p.requires_grad)
    print(pytorch_total_params)
    # Evaluation process
    pred_file, truth_file = ntwk.predict(Ytruth_file)
    # Plotting is skipped for the Yang data sets
    if 'Yang' not in flags.data_set:
        plotMSELossDistrib(pred_file, truth_file, flags)
# Example #4 (scrape-artifact separator between unrelated snippets)
def evaluate_from_model(model_dir):
    """
    Evaluating interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval

    :param model_dir: The folder to retrieve the model from (a leading
        "models/" prefix is stripped if present)
    :return: None
    """
    # Retrieve the flag object
    # Only strip when the full "models/" prefix is present; the old check
    # (startswith("models") then [7:]) mangled names like "models_v2"
    if model_dir.startswith("models/"):
        model_dir = model_dir[len("models/"):]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print("Retrieving flag object for parameters")
    flags = flag_reader.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir                    # Reset the eval mode

    # Get the data
    train_loader, test_loader = data_reader.read_data(flags)
    print("Making network now")

    # Make Network
    ntwk = Network(Forward, flags, train_loader, test_loader, inference_mode=True, saved_model=flags.eval_model)

    # Evaluation process
    print("Start eval now:")
    pred_file, truth_file = ntwk.evaluate()

    # Plot the MSE distribution
    plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
# Example #5 (scrape-artifact separator between unrelated snippets)
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False):
    """
    Evaluating interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval

    :param model_dir: The folder to retrieve the model from
    :param multi_flag: The switch to turn on if you want to generate all different inference trial results
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :return: None
    """
    # Retrieve the flag object
    print("Retrieving flag object for parameters")
    # Only strip when the full "models/" prefix is present; the old check
    # (startswith("models") then [7:]) mangled names like "models_v2"
    if model_dir.startswith("models/"):
        model_dir = model_dir[len("models/"):]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print(model_dir)
    flags = load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.backprop_step = eval_flags.backprop_step
    # Data-set specific test ratios; unlisted data sets keep the stored value
    test_ratios = {'ballistics': 0.001, 'sine_wave': 0.005,
                   'robotic_arm': 0.2, 'sine_test_1d': 0.05}
    if flags.data_set in test_ratios:
        flags.test_ratio = test_ratios[flags.data_set]
    flags.batch_size = 1  # For backprop eval mode, batchsize is always 1
    flags.lr = 0.05
    flags.eval_batch_size = eval_flags.eval_batch_size
    flags.train_step = eval_flags.train_step

    # Get the data
    train_loader, test_loader = data_reader.read_data(
        flags, eval_data_all=eval_data_all)
    print("Making network now")

    # Make Network
    ntwk = Network(Backprop,
                   flags,
                   train_loader,
                   test_loader,
                   inference_mode=True,
                   saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters()
                               if p.requires_grad)
    print(pytorch_total_params)
    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        # Save every inference trial into the shared multi-eval folder
        pred_file, truth_file = ntwk.evaluate(
            save_dir='/work/sr365/multi_eval/Backprop/' + flags.data_set,
            save_all=True)
    else:
        pred_file, truth_file = ntwk.evaluate()

    # Plot the MSE distribution
    plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
# Example #6 (scrape-artifact separator between unrelated snippets)
def predict_from_model(pre_trained_model,
                       Xpred_file,
                       no_plot=True,
                       load_state_dict=None):
    """
    Predicting interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval

    :param pre_trained_model: The folder to retrieve the model from
    :param Xpred_file: The prediction file position
    :param no_plot: If True, do not plot (for multi_eval)
    :param load_state_dict: The new way to load the model for ensemble MM
    :return: (pred_file, truth_file, flags) from the network's predict call
    """
    # Retrieve the flag object
    print("This is doing the prediction for file", Xpred_file)
    print("Retrieving flag object for parameters")
    # NOTE: the stripped name is only printed; flags are loaded from the
    # original (unstripped) path below, preserving the original behavior.
    # The check requires the full "models/" prefix so that names merely
    # starting with "models" are not mangled.
    if pre_trained_model.startswith("models/"):
        eval_model = pre_trained_model[len("models/"):]
        print("after removing prefix models/, now model_dir is:", eval_model)

    flags = load_flags(pre_trained_model)  # Get the pre-trained model
    flags.eval_model = pre_trained_model  # Reset the eval mode
    flags.test_ratio = 0.1  # useless number

    # Data loaders are not needed for pure prediction
    print("Making network now")

    # Make Network
    ntwk = Network(NA,
                   flags,
                   train_loader=None,
                   test_loader=None,
                   inference_mode=True,
                   saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters()
                               if p.requires_grad)
    print(pytorch_total_params)
    # Evaluation process
    print("Start eval now:")

    if not no_plot:
        # Save the prediction and plot the MSE distribution
        pred_file, truth_file = ntwk.predict(Xpred_file,
                                             no_save=False,
                                             load_state_dict=load_state_dict)
        flags.eval_model = pred_file.replace(
            '.', '_')  # To make the plot name different
        plotMSELossDistrib(pred_file, truth_file, flags)
    else:
        pred_file, truth_file = ntwk.predict(Xpred_file,
                                             no_save=True,
                                             load_state_dict=load_state_dict)

    print("Evaluation finished")

    return pred_file, truth_file, flags
# Example #7 (scrape-artifact separator between unrelated snippets)
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False):
    """
    Evaluating interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval

    :param model_dir: The folder to retrieve the model from
    :param multi_flag: If True, run the multiple-time evaluation instead of a single pass
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :return: None
    """
    # Retrieve the flag object
    print("Retrieving flag object for parameters")
    # Only strip when the full "models/" prefix is present; the old check
    # (startswith("models") then [7:]) mangled names like "models_v2"
    if model_dir.startswith("models/"):
        model_dir = model_dir[len("models/"):]
        print("after removing prefix models/, now model_dir is:", model_dir)
    flags = helper_functions.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir                    # Reset the eval mode

    # Set up the test_ratio (all three data sets use the same ratio)
    if flags.data_set in ('ballistics', 'sine_wave', 'robotic_arm'):
        flags.test_ratio = 0.1

    # Get the data
    train_loader, test_loader = data_reader.read_data(flags, eval_data_all=eval_data_all)
    print("Making network now")

    # Make Network
    ntwk = Network(INN, flags, train_loader, test_loader, inference_mode=True, saved_model=flags.eval_model)
    print(ntwk.ckpt_dir)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
    print(pytorch_total_params)

    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        ntwk.evaluate_multiple_time()
    else:
        pred_file, truth_file = ntwk.evaluate()

    # Plot the MSE distribution (only defined for the single-eval path)
    if flags.data_set != 'meta_material' and not multi_flag:
        plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")

    # If gaussian, plot the scatter plot
    if flags.data_set == 'gaussian_mixture':
        Xpred = helper_functions.get_Xpred(path='data/', name=flags.eval_model)
        Ypred = helper_functions.get_Ypred(path='data/', name=flags.eval_model)

        # Plot the points scatter
        generate_Gaussian.plotData(Xpred, Ypred, save_dir='data/' + flags.eval_model.replace('/','_') + 'generation plot.png', eval_mode=True)
# Example #8 (scrape-artifact separator between unrelated snippets)
def ensemble_predict(model_list,
                     Xpred_file,
                     model_dir=None,
                     no_plot=True,
                     remove_extra_files=True,
                     state_dict=False):
    """
    This predicts the output from an ensemble of models

    :param model_list: The list of model names to aggregate
    :param Xpred_file: The Xpred_file that you want to predict
    :param model_dir: The directory to save the plot to (None -> default location)
    :param no_plot: If True, do not plot (For multi_eval)
    :param remove_extra_files: Kept for backward compatibility; predictions
        are returned in memory here, so no extra files are written.
    :param state_dict: New way to load model using state_dict instead of load module
    :return: The return value of plotMSELossDistrib (None when no_plot)
    """
    print("this is doing ensemble prediction for models :", model_list)
    pred_list = []
    truth_file, flags = None, None
    # Get the predictions into a list of np array
    for pre_trained_model in model_list:
        if state_dict is False:
            pred_file, truth_file, flags = predict_from_model(
                pre_trained_model, Xpred_file)
            # To plot all histograms instead, call predict_from_model with
            # no_plot=False here.
        else:
            # State-dict loading: all models share one architecture folder
            model_folder = os.path.join('..', 'Data', 'Yang_sim',
                                        'model_param')
            pred_file, truth_file, flags = predict_from_model(
                model_folder, Xpred_file, load_state_dict=pre_trained_model)
        pred_list.append(np.copy(np.expand_dims(pred_file, axis=2)))
    # Take the mean of the predictions
    pred_all = np.concatenate(pred_list, axis=2)
    pred_mean = np.mean(pred_all, axis=2)
    save_name = Xpred_file.replace('Xpred', 'Ypred')
    np.savetxt(save_name, pred_mean)

    # If no_plot, then return
    if no_plot:
        return

    # saving the plot down
    flags.eval_model = ''
    # BUG FIX: the previous version set model_dir = None here, which
    # clobbered the caller's argument and made the save_dir branch dead.
    if model_dir is None:
        return plotMSELossDistrib(save_name, truth_file, flags)
    else:
        return plotMSELossDistrib(save_name,
                                  truth_file,
                                  flags,
                                  save_dir=model_dir)
# Example #9 (scrape-artifact separator between unrelated snippets)
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False):
    """
    Evaluating interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval

    :param model_dir: The folder to retrieve the model from
    :param multi_flag: If True, run the multiple-time evaluation instead of a single pass
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :return: None
    """
    # Retrieve the flag object
    print("Retrieving flag object for parameters")
    # Only strip when the full "models/" prefix is present; the old check
    # (startswith("models") then [7:]) mangled names like "models_v2"
    if model_dir.startswith("models/"):
        model_dir = model_dir[len("models/"):]
        print("after removing prefix models/, now model_dir is:", model_dir)
    if model_dir.startswith('/'):  # It is an absolute path
        flags = helper_functions.load_flags(model_dir)
    else:
        flags = helper_functions.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.test_ratio = get_test_ratio_helper(flags)

    # HACK: temporary doubling dated 2020.10.10 — left in place to preserve
    # behavior, but it should be deleted once no longer needed.
    flags.test_ratio *= 2

    # Get the data
    train_loader, test_loader = data_reader.read_data(
        flags, eval_data_all=eval_data_all)
    print("Making network now")

    # Make Network
    ntwk = Network(MDN,
                   flags,
                   train_loader,
                   test_loader,
                   inference_mode=True,
                   saved_model=flags.eval_model)
    print(model_dir)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters()
                               if p.requires_grad)
    print(pytorch_total_params)
    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        ntwk.evaluate_multiple_time()
    else:
        pred_file, truth_file = ntwk.evaluate()

    # Plot the MSE distribution (only defined for the single-eval path)
    if flags.data_set != 'meta_material' and not multi_flag:
        plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
# Example #10 (scrape-artifact separator between unrelated snippets)
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False, test_ratio=None):
    """
    Evaluating interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval

    :param model_dir: The folder to retrieve the model from
    :param multi_flag: If True, save all inference trials to the multi-eval folder
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :param test_ratio: Optional explicit test ratio override (None -> use helper)
    :return: None
    """
    # Retrieve the flag object
    print("Retrieving flag object for parameters")
    # Only strip when the full "models/" prefix is present; the old check
    # (startswith("models") then [7:]) mangled names like "models_v2"
    if model_dir.startswith("models/"):
        model_dir = model_dir[len("models/"):]
        print("after removing prefix models/, now model_dir is:", model_dir)
    flags = helper_functions.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir                    # Reset the eval mode
    flags.batch_size = 1
    flags.backprop_step = 300
    flags.eval_batch_size = 2048

    if test_ratio is None:
        flags.test_ratio = get_test_ratio_helper(flags)
    else:
        # To make the test ratio swipe with respect to inference time
        # also making the batch size large enough
        flags.test_ratio = test_ratio
    # Get the data
    train_loader, test_loader = data_reader.read_data(flags, eval_data_all=eval_data_all)
    print("Making network now")

    # Make Network (hybrid cINN + NA model)
    ntwk = Network(make_cINN_and_NA, flags, train_loader, test_loader, inference_mode=True, saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model_cINN.parameters() if p.requires_grad)
    print(pytorch_total_params)
    pytorch_total_params = sum(p.numel() for p in ntwk.model_NA.parameters() if p.requires_grad)
    print(pytorch_total_params)
    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        pred_file, truth_file = ntwk.evaluate(save_dir='/work/sr365/NIPS_multi_eval_backup/multi_eval/hybrid_cINN_NA_0bp/'+flags.data_set, save_all=True)
    else:
        pred_file, truth_file = ntwk.evaluate()

    # Plot the MSE distribution
    if flags.data_set != 'meta_material' and not multi_flag:
        plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
def infer(pre_trained_model, Xpred_file, no_plot=True):
    """
    Run inference on Xpred_file with a pre-trained Backprop model.

    :param pre_trained_model: Folder of the pre-trained model
    :param Xpred_file: The file to run the prediction on
    :param no_plot: If True, skip plotting and do not save the prediction
    :return: (pred_file, truth_file, flags) from the network's predict call
    """
    # Retrieve the flag object
    print("This is doing the prediction for file", Xpred_file)
    print("Retrieving flag object for parameters")
    # NOTE: the stripped name is only printed; flags are loaded from the
    # original (unstripped) path below. The check requires the full
    # "models/" prefix so names merely starting with "models" are intact.
    if pre_trained_model.startswith("models/"):
        eval_model = pre_trained_model[len("models/"):]
        print("after removing prefix models/, now model_dir is:", eval_model)

    flags = load_flags(pre_trained_model)  # Get the pre-trained model
    flags.eval_model = pre_trained_model  # Reset the eval mode
    flags.test_ratio = 0.1  # useless number

    # Get the data, this part is useless in prediction but just for simplicity
    train_loader, test_loader = data_reader.read_data(flags)
    print("Making network now")

    # Make Network
    ntwk = Network(Backprop,
                   flags,
                   train_loader,
                   test_loader,
                   inference_mode=True,
                   saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters()
                               if p.requires_grad)
    print(pytorch_total_params)
    # Evaluation process
    print("Start eval now:")

    if not no_plot:
        # Save the prediction and plot the MSE distribution
        pred_file, truth_file = ntwk.predict(Xpred_file, no_save=False)
        flags.eval_model = pred_file.replace(
            '.', '_')  # To make the plot name different
        plotMSELossDistrib(pred_file, truth_file, flags)
    else:
        pred_file, truth_file = ntwk.predict(Xpred_file, no_save=True)

    print("Evaluation finished")

    return pred_file, truth_file, flags
# Example #12 (scrape-artifact separator between unrelated snippets)
def evaluate_from_model(model_dir,
                        multi_flag=False,
                        eval_data_all=False,
                        save_misc=False,
                        MSE_Simulator=False,
                        save_Simulator_Ypred=True,
                        init_lr=0.5,
                        BDY_strength=1):
    """
    Evaluating interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval

    :param model_dir: The folder to retrieve the model from
    :param multi_flag: The switch to turn on if you want to generate all different inference trial results
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :param save_misc: Forwarded to ntwk.evaluate
    :param MSE_Simulator: Forwarded to ntwk.evaluate
    :param save_Simulator_Ypred: Forwarded to ntwk.evaluate
    :param init_lr: Initial learning rate written into the flags
    :param BDY_strength: Boundary-loss strength written into the flags
    :return: None
    """
    # Retrieve the flag object
    print("Retrieving flag object for parameters")
    # Only strip when the full "models/" prefix is present; the old check
    # (startswith("models") then [7:]) mangled names like "models_v2"
    if model_dir.startswith("models/"):
        model_dir = model_dir[len("models/"):]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print(model_dir)
    flags = load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    flags.test_ratio = get_test_ratio_helper(flags)

    # NOTE(review): these per-dataset eval_batch_size values are dead — they
    # are overwritten by eval_flags.eval_batch_size a few lines below, and
    # flags.batch_size is overwritten to 500 further down. Kept to preserve
    # the intermediate state that print(flags) shows.
    if flags.data_set == 'Peurifoy':
        flags.eval_batch_size = 10000
    elif flags.data_set == 'Chen':
        flags.eval_batch_size = 10000
    elif flags.data_set == 'Yang' or flags.data_set == 'Yang_sim':
        flags.eval_batch_size = 2000

    flags.batch_size = flags.eval_batch_size
    flags.lr = init_lr
    flags.BDY_strength = BDY_strength
    flags.eval_batch_size = eval_flags.eval_batch_size
    flags.train_step = eval_flags.train_step

    print(flags)
    flags.batch_size = 500

    # Get the data. BUG FIX: the previous version forced eval_data_all = True
    # here, silently ignoring the caller's documented parameter.
    train_loader, test_loader = data_reader.read_data(
        flags, eval_data_all=eval_data_all)

    print("LENGTH: ", len(test_loader))

    print("Making network now")

    # Make Network
    ntwk = Network(Forward,
                   flags,
                   train_loader,
                   test_loader,
                   inference_mode=True,
                   saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters()
                               if p.requires_grad)
    print(pytorch_total_params)

    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        dest_dir = '/home/sr365/MM_bench_multi_eval/NA/' + flags.data_set
        # makedirs with exist_ok avoids the check-then-create race of the
        # previous isdir()/mkdir() pair and creates parents as needed
        os.makedirs(dest_dir, exist_ok=True)
        pred_file, truth_file = ntwk.evaluate(
            save_dir=dest_dir,
            save_all=True,
            save_misc=save_misc,
            MSE_Simulator=MSE_Simulator,
            save_Simulator_Ypred=save_Simulator_Ypred)
    else:
        pred_file, truth_file = ntwk.evaluate(
            save_dir='data/' + flags.data_set,
            save_misc=save_misc,
            MSE_Simulator=MSE_Simulator,
            save_Simulator_Ypred=save_Simulator_Ypred)

    # Plot the MSE distribution
    plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
# Example #13 (scrape-artifact separator between unrelated snippets)
def evaluate_from_model(model_dir,
                        multi_flag=False,
                        eval_data_all=False,
                        modulized_flag=False):
    """
    Evaluating interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval

    :param model_dir: The folder to retrieve the model from
    :param multi_flag: If True, run the multiple-time evaluation
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :param modulized_flag: If True, run the modulized multi-time evaluation
    :return: None
    """
    # Retrieve the flag object
    print("Retrieving flag object for parameters")
    # Only strip when the full "models/" prefix is present; the old check
    # (startswith("models") then [7:]) mangled names like "models_v2"
    if model_dir.startswith("models/"):
        model_dir = model_dir[len("models/"):]
        print("after removing prefix models/, now model_dir is:", model_dir)
    flags = helper_functions.load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode

    flags.test_ratio = get_test_ratio_helper(flags)

    # Get the data
    train_loader, test_loader = data_reader.read_data(
        flags, eval_data_all=eval_data_all)
    print("Making network now")

    # Make Network
    ntwk = Network(INN,
                   flags,
                   train_loader,
                   test_loader,
                   inference_mode=True,
                   saved_model=flags.eval_model)
    print(ntwk.ckpt_dir)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters()
                               if p.requires_grad)
    print(pytorch_total_params)

    # Evaluation process
    print("Start eval now:")
    if modulized_flag:
        ntwk.evaluate_modulized_multi_time()
    elif multi_flag:
        ntwk.evaluate_multiple_time()
    else:
        pred_file, truth_file = ntwk.evaluate()

    # Plot the MSE distribution
    if flags.data_set != 'Yang_sim' and not multi_flag and not modulized_flag:  # meta-material does not have simulator, hence no Ypred given
        MSE = plotMSELossDistrib(pred_file, truth_file, flags)
        # Add this MSE back to the folder
        flags.best_validation_loss = MSE
        helper_functions.save_flags(flags, os.path.join("models", model_dir))
    elif flags.data_set == 'Yang_sim' and not multi_flag and not modulized_flag:
        # Save the current path for getting back in the future
        cwd = os.getcwd()
        abs_path_Xpred = os.path.abspath(pred_file.replace('Ypred', 'Xpred'))
        # Change to NA directory to do prediction; restore the cwd even if
        # the prediction raises (previously an exception left us stranded)
        os.chdir('../NA/')
        try:
            MSE = predict.ensemble_predict_master('../Data/Yang_sim/state_dicts/',
                                                  abs_path_Xpred,
                                                  no_plot=False)
        finally:
            os.chdir(cwd)
        # Add this MSE back to the folder
        flags.best_validation_loss = MSE
        helper_functions.save_flags(flags, os.path.join("models", model_dir))
    print("Evaluation finished")
# Example #14 (scrape-artifact separator between unrelated snippets)
from utils.evaluation_helper import plotMSELossDistrib

big_folder = '/home/sr365/MM_Bench/NA/'

# Walk every Chen data folder under big_folder and, for each test
# Ytruth/Ypred file pair found inside, plot its MSE distribution.
for folder in os.listdir(big_folder):
    folder_path = os.path.join(big_folder, folder)
    # Skip entries that are not Chen data folders
    if 'data' not in folder or not os.path.isdir(folder_path) or 'Chen' not in folder:
        continue

    print('going into folder ', folder)
    # Loop over the files of this data folder
    for file in os.listdir(folder_path):
        if 'test_Ytruth' not in file:
            continue
        print('going in file', file)
        truth_file = os.path.join(folder_path, file)
        pred_file = os.path.join(folder_path, file.replace('Ytruth', 'Ypred'))
        # The matching Ypred must exist, otherwise there is nothing to compare
        if not os.path.isfile(pred_file):
            print('no Ypred file, abort!')
            continue

        print('doing MSE plot for file ', file, 'in folder ', folder_path)
        plotMSELossDistrib(pred_file=pred_file,
                           truth_file=truth_file,
                           save_dir=folder_path)
def retrain_different_dataset(index):
    """
    Retrain every meta-material model ('mm1'..'mm5') with one function call.

    :param index: Run index; prepended to each retrained model's name
    """
    from utils.helper_functions import load_flags
    name_prefix = "retrain_" + str(index)
    for eval_model in ('mm1', 'mm2', 'mm3', 'mm4', 'mm5'):
        # Load the stored flags of the previous model and adjust them
        flags = load_flags(os.path.join("prev_models", eval_model))
        flags.model_name = name_prefix + flags.model_name
        flags.data_dir = '/work/sr365/Christian_data_augmented'
        flags.ckpt_dir = '/work/sr365/MM_ensemble'
        flags.train_step = 500
        flags.test_ratio = 0.2
        training_from_flag(flags)


if __name__ == '__main__':
    # Read the parameters to be set
    from utils import evaluation_helper
    flags = flag_reader_ensemble.read_flag()
    # Plot the MSE distribution of the pre-computed ensemble prediction
    # against the stored truth file
    evaluation_helper.plotMSELossDistrib('datapool/Ypred_ensemble.csv',
                                         'datapool/Ytruth.csv', flags)
    print(type(flags))
# Example #16 (scrape-artifact separator between unrelated snippets)
def evaluate_from_model(model_dir, multi_flag=False, eval_data_all=False, save_misc=False,
                        MSE_Simulator=False, save_Simulator_Ypred=True,
                        init_lr=0.01, lr_decay=0.9, BDY_strength=1, save_dir='data/',
                        noise_level=0,
                        md_coeff=0, md_start=None, md_end=None, md_radius=None,
                        eval_batch_size=None):

    """
    Evaluating interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval

    :param model_dir: The folder to retrieve the model from
    :param multi_flag: The switch to turn on if you want to generate all different inference trial results
    :param eval_data_all: The switch to turn on if you want to put all data in evaluation data
    :param save_misc: Forwarded to ntwk.evaluate
    :param MSE_Simulator: Forwarded to ntwk.evaluate
    :param save_Simulator_Ypred: Forwarded to ntwk.evaluate (forced False below)
    :param init_lr: Initial learning rate written into the flags
    :param lr_decay: Learning-rate decay written into the flags
    :param BDY_strength: Boundary-loss strength written into the flags
    :param save_dir: Output directory (single eval) or sub-folder name (multi eval)
    :param noise_level: Forwarded to ntwk.evaluate
    :param md_coeff, md_start, md_end, md_radius: MD-loss overrides (None -> keep stored value)
    :param eval_batch_size: Evaluation batch size override (None -> 2048)
    :return: The MSE from plotMSELossDistrib, or None for Yang data sets
    """
    # Retrieve the flag object
    print("Retrieving flag object for parameters")
    # Only strip when the full "models/" prefix is present; the old check
    # (startswith("models") then [7:]) mangled names like "models_v2"
    if model_dir.startswith("models/"):
        model_dir = model_dir[len("models/"):]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print(model_dir)
    flags = load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir                    # Reset the eval mode
    flags.test_ratio = get_test_ratio_helper(flags)

    # NOTE(review): this condition used to be "== 'Yang_sim'" (see the inline
    # remnant); as written it fires for every data set with a non-None name
    # and always disables save_Simulator_Ypred — confirm this is intended.
    if flags.data_set is not None:  # == 'Yang_sim':
        save_Simulator_Ypred = False
        print("this is Yang sim dataset, setting the save_Simulator_Ypred to False")
    flags.batch_size = 1                            # For backprop eval mode, batchsize is always 1
    flags.BDY_strength = BDY_strength
    flags.train_step = eval_flags.train_step
    # backprop_step is pinned to 300 (the earlier eval_flags.backprop_step
    # assignment was dead code and has been removed)
    flags.backprop_step = 300

    # MD Loss: override only the values the caller actually supplied
    if md_coeff is not None:
        flags.md_coeff = md_coeff
    if md_start is not None:
        flags.md_start = md_start
    if md_end is not None:
        flags.md_end = md_end
    if md_radius is not None:
        flags.md_radius = md_radius

    ############################# Thing that are changing #########################
    flags.lr = init_lr
    flags.lr_decay_rate = lr_decay
    flags.eval_batch_size = 2048 if eval_batch_size is None else eval_batch_size
    flags.optim = 'Adam'
    ###############################################################################

    print(flags)

    # Get the data
    train_loader, test_loader = data_reader.read_data(flags, eval_data_all=eval_data_all)
    print("Making network now")

    # Make Network
    ntwk = Network(NA, flags, train_loader, test_loader, inference_mode=True, saved_model=flags.eval_model)
    print("number of trainable parameters is :")
    pytorch_total_params = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
    print(pytorch_total_params)

    # Evaluation process
    print("Start eval now:")
    if multi_flag:
        # All multi-eval trials go under this fixed experiment root
        dest_dir = os.path.join('/home/sr365/MDNA_temp/', save_dir)
        dest_dir = os.path.join(dest_dir, flags.data_set)
        os.makedirs(dest_dir, exist_ok=True)
        pred_file, truth_file = ntwk.evaluate(save_dir=dest_dir, save_all=True,
                                              save_misc=save_misc, MSE_Simulator=MSE_Simulator,
                                              save_Simulator_Ypred=save_Simulator_Ypred,
                                              noise_level=noise_level)
    else:
        # Create the directory if it does not exist
        os.makedirs(save_dir, exist_ok=True)
        pred_file, truth_file = ntwk.evaluate(save_dir=save_dir, save_misc=save_misc,
                                              MSE_Simulator=MSE_Simulator,
                                              save_Simulator_Ypred=save_Simulator_Ypred,
                                              noise_level=noise_level)

    # Yang data sets have no MSE plot; nothing more to do
    if 'Yang' in flags.data_set:
        return
    # Plot the MSE distribution
    MSE = plotMSELossDistrib(pred_file, truth_file, flags)
    print("Evaluation finished")
    return MSE
def evaluate_from_model(model_dir: str,
                        multi_flag: bool = False,
                        eval_data_all: bool = False,
                        save_misc: bool = False,
                        MSE_Simulator: bool = False,
                        save_Simulator_Ypred: bool = False):
    """
    Evaluating interface: 1. retrieve the flags, 2. get the data, 3. initialize the
    network, 4. evaluate and plot the MSE distribution for each run.

    :param model_dir: The folder to retrieve the model from; a leading "models/"
                      prefix is stripped before the flags are loaded
    :param multi_flag: The switch to turn on if you want to generate all different
                       inference trial results (runs evaluate with save_all=True)
    :param eval_data_all: The switch to turn on if you want to put all data in the
                          evaluation data
    :param save_misc: Forwarded to Network.evaluate (presumably toggles saving of
                      auxiliary outputs — TODO confirm against Network.evaluate)
    :param MSE_Simulator: Forwarded to Network.evaluate (presumably computes MSE
                          through the simulator — TODO confirm)
    :param save_Simulator_Ypred: Forwarded to Network.evaluate (presumably saves the
                                 simulator's Ypred — TODO confirm)
    :return: None
    """
    # Retrieve the flag object saved at training time
    print("Retrieving flag object for parameters")
    if (model_dir.startswith("models")):
        # Strips a fixed 7 characters — assumes the prefix is exactly "models/"
        # (a path like "models\..." or bare "models" would be mangled)
        model_dir = model_dir[7:]
        print("after removing prefix models/, now model_dir is:", model_dir)
    print(model_dir)
    flags = load_flags(os.path.join("models", model_dir))
    flags.eval_model = model_dir  # Reset the eval mode
    # Override the saved value with the module-level eval_flags setting
    flags.backprop_step = eval_flags.backprop_step
    # Per-dataset test split ratios (comments give the resulting absolute test-set size)
    if flags.data_set == 'ballistics':
        flags.test_ratio = 0.0078  # 12800 in total
    elif flags.data_set == 'sine_wave':
        flags.test_ratio = 0.001  # 8000 in total
    elif flags.data_set == 'robotic_arm':
        flags.test_ratio = 0.1  # 10000 in total
    else:
        flags.test_ratio = 0.0051062 / 2
        #flags.test_ratio = 0
        #flags.test_ratio = 0.00025                        # 20000 in total for Meta material
    flags.batch_size = 1  # For backprop eval mode, batchsize is always 1
    # Learning rate for the inference-time backprop optimization
    flags.lr = 1e-2
    if flags.data_set == 'ballistics':
        flags.lr = 1  # ballistics uses a much larger step size — TODO confirm rationale

    flags.train_step = eval_flags.train_step

    # NOTE(review): range(4000, 5000, 2000) yields only i=4000, so this outer loop
    # runs exactly once; widen the range if a real eval-batch-size sweep is intended.
    for i in range(4000, 5000, 2000):
        for j in range(3):  # three repeated trials per eval batch size
            flags.eval_batch_size = i
            # Get the data
            train_loader, test_loader = data_reader.read_data(
                flags, eval_data_all=eval_data_all)
            print("Making network now")

            # Make Network (inference mode, restoring the saved model weights)
            ntwk = Network(Backprop,
                           flags,
                           train_loader,
                           test_loader,
                           inference_mode=True,
                           saved_model=flags.eval_model)
            print("number of trainable parameters is :")
            pytorch_total_params = sum(p.numel()
                                       for p in ntwk.model.parameters()
                                       if p.requires_grad)
            print(pytorch_total_params)

            # Evaluation process
            print("Start eval now:")
            # NOTE(review): output directories below are hard-coded Windows paths;
            # consider making them configurable.
            if multi_flag:
                # Save all inference trial results under the NA directory
                pred_file, truth_file = ntwk.evaluate(
                    save_dir='D:/Yang_MM_Absorber_ML/NA/' + flags.data_set,
                    save_all=True,
                    save_misc=save_misc,
                    MSE_Simulator=MSE_Simulator,
                    save_Simulator_Ypred=save_Simulator_Ypred)
            else:
                # One result directory per (batch size i, trial j+1) combination
                pred_file, truth_file = ntwk.evaluate(
                    save_dir='D:/Yang_MM_Absorber_ML/Backprop/data/' + str(i) +
                    '/' + str(j + 1),
                    save_misc=save_misc,
                    MSE_Simulator=MSE_Simulator,
                    save_Simulator_Ypred=save_Simulator_Ypred)

            # Plot the MSE distribution into the same per-trial directory
            plotMSELossDistrib(
                pred_file,
                truth_file,
                flags,
                save_dir='D:/Yang_MM_Absorber_ML/Backprop/data/' + str(i) +
                '/' + str(j + 1))
            print("Evaluation finished")