Example #1
        return f1_score(y_true_array, y_pred_array,
                        average="macro"), f1_score(y_true_array,
                                                   y_pred_array,
                                                   average="micro"), eval_stat
        #return running_loss / len(loader)
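        # Note (editor's illustration, not part of the original code; assumes
        # "from sklearn.metrics import f1_score" at the top of the file):
        # average="macro" is the unweighted mean of per-class F1 scores, so
        # rare classes count as much as common ones, while average="micro"
        # pools TP/FP/FN over all classes before computing a single F1.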

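# Editor's sketch (assumption): the LinearRegression class and the torch imports
# below live elsewhere in the full script; a minimal version matching the
# constructor used in __main__ is a single fully connected layer.
import torch
import torch.nn as nn


class LinearRegression(nn.Module):
    """Map feature_dim input features to output_dim class logits with one linear layer."""

    def __init__(self, feature_dim, output_dim):
        super().__init__()
        self.fc = nn.Linear(feature_dim, output_dim)

    def forward(self, x):
        # x: (batch_size, feature_dim) -> logits: (batch_size, output_dim)
        return self.fc(x)
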

if __name__ == "__main__":

    # initialize a model
    params = load_config('config.yaml')
    #model = make_model(h=params["head_number"], d_model=params["d_model"], d_ff=params["d_ff"], dropout=params["dropout"], max_len=params["max_len"], record_dim=params["record_dim"], d_ff_hidden=params["d_ff_hidden"], N=params["encoder_num"], de_factor=params["de_factor"])
    model = LinearRegression(params["feature_dim"], params["output_dim"])
    # load in the pre-trained model
    PATH_pretrained = "./models/session_2019-09-08[17_52_06]/model_24.pth"
    # load the checkpoint onto the CPU first; the model is moved to the right device below
    model.load_state_dict(torch.load(PATH_pretrained, map_location="cpu"))

    # move the model into the corresponding device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)

    # define the criterion
    # How to get weights:
    # e.g., weight of NORM = (# all training samples) / (# normal samples)
    training_loss_weights = [2.488, 43.346, 3.295, 3.683]
    weights = torch.FloatTensor(training_loss_weights).to(device)
    criterion = nn.CrossEntropyLoss(weight=weights)  # reduction = "mean"
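    # Illustrative sketch of how the weights above can be derived (hypothetical
    # per-class counts, not the real training-set statistics):
    #     class_counts = torch.tensor([4019., 231., 3035., 2715.])
    #     weights = class_counts.sum() / class_counts  # ~ [2.49, 43.3, 3.29, 3.68]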

    # define data loader
    eval_dir = params["val_dir"]
Example #2
# instantiate one evaluation model per selected checkpoint (best macro / best micro)
eval_model_macro = LinearRegression(params["feature_dim"],
                                    params["output_dim"])
eval_model_micro = LinearRegression(params["feature_dim"],
                                    params["output_dim"])

# define data loader
eval_dir = params["val_dir"]
eval_loader = torch.utils.data.DataLoader(NACCDataset(eval_dir, "test"),
                                          batch_size=16,
                                          shuffle=False)
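# Assumption: NACCDataset is the project's torch.utils.data.Dataset; a stand-in
# with the same constructor signature would follow the usual Dataset protocol:
#     class NACCDataset(torch.utils.data.Dataset):
#         def __init__(self, data_dir, split): ...   # e.g. split in {"train", "test"}
#         def __len__(self): ...                     # number of records
#         def __getitem__(self, idx): ...            # -> (features, label) tensors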

#### macro score ####

# load in the pre-trained model for max f1 macro score
PATH_pretrained_macro = f"{modelPath}/model_{max_valid_f1_macro_epoch}.pth"
print("read in", PATH_pretrained_macro)
eval_model_macro.load_state_dict(
    torch.load(PATH_pretrained_macro, map_location="cpu"))

# move the model into the corresponding device
eval_model_macro.to(device)

# call the evaluation
print("*" * 20 + " Test Max F1 Macro Information " + "*" * 40 + '\n')
evaluation(eval_model_macro, params["output_dim"], criterion, params["alpha"],
           eval_loader, device)
print("\n")

#### micro score ####

# load in the pre-trained model for max f1 micro score
PATH_pretrained_micro = f"{modelPath}/model_{max_valid_f1_micro_epoch}.pth"