Example #1
def evaluate_from_workspace(workspace_dir):
    """
        Evaluate the model on the test set.
    """
    global args, data_loader
    data_dir = workspace_dir
    model_dir = os.path.join(data_dir, "model")

    # Load the parameters
    args = parser.parse_args()
    json_path = os.path.join(model_dir, 'params.json')
    assert os.path.isfile(
        json_path), "No json configuration file found at {}".format(json_path)
    params = utils.Params(json_path)
    params.data_dir = data_dir if data_dir else args.data_dir
    params.model_dir = model_dir if model_dir else args.model_dir

    # use GPU if available
    params.cuda = torch.cuda.is_available()

    # Set the random seed for reproducible experiments
    torch.manual_seed(230)
    if params.cuda: torch.cuda.manual_seed(230)

    # Get the logger
    utils.set_logger(os.path.join(params.model_dir, 'evaluate.log'))

    # Create the input data pipeline
    logging.info("Creating the dataset...")

    # load data
    data_loader = DataLoader(params.data_dir, params)
    data = data_loader.load_data_from_dir(['test'], params.data_dir)
    test_data = data['test']

    # specify the test set size
    params.test_size = test_data['size']
    test_data_iterator = data_loader.data_iterator(test_data, params)

    logging.info("- done.")

    # Define the model
    model = net.Net(params).cuda() if params.cuda else net.Net(params)

    loss_fn = net.loss_fn
    metrics = net.metrics

    logging.info("Starting evaluation")

    # Reload weights from the saved file
    utils.load_checkpoint(
        os.path.join(params.model_dir, args.restore_file + '.pth.tar'), model)

    # Evaluate
    num_steps = (params.test_size + 1) // params.batch_size
    test_metrics = evaluate(model, loss_fn, test_data_iterator, metrics,
                            params, num_steps)
    save_path = os.path.join(params.model_dir,
                             "metrics_test_{}.json".format(args.restore_file))
    utils.save_dict_to_json(test_metrics, save_path)
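
A minimal invocation sketch (the workspace path is hypothetical, and --restore_file is assumed to be an argument defined on the script's parser, e.g. "best"):

# Hypothetical usage: the model/ subfolder is expected to contain params.json
# and <restore_file>.pth.tar (e.g. best.pth.tar).
if __name__ == '__main__':
    evaluate_from_workspace('/path/to/workspace')  # path is illustrative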
Example #2
    if params.cuda: torch.cuda.manual_seed(230)
        
    # Get the logger
    utils.set_logger(os.path.join(args.model_dir, 'evaluate.log'))

    # Create the input data pipeline
    logging.info("Creating the dataset...")

    # load data
    data_loader = DataLoader(args.data_dir, params)
    data = data_loader.load_data(['test'], args.data_dir)
    test_data = data['test']

    # specify the test set size
    params.test_size = test_data['size']
    test_data_iterator = data_loader.data_iterator(test_data, params)

    logging.info("- done.")

    # Load embeddings
    gen_emb = np.load(os.path.join(args.emb_dir, 'gen.npy'))
    domain_emb = np.load(os.path.join(args.emb_dir, 'domain.npy'))

    # Define the model
    model = net.Net(params, gen_emb, domain_emb).cuda() if params.cuda else net.Net(params, gen_emb, domain_emb)
    
    loss_fn = net.loss_fn
    metrics = net.metrics
    
    logging.info("Starting evaluation")
Example #3
MODEL_DIR = 'experiments/base_model/'
DATA_DIR = 'data/'
params = utils.Params(MODEL_DIR + 'params.json')
params.vocab_size = 25
params.number_of_classes = 10
params.cuda = torch.cuda.is_available()

weights = MODEL_DIR + 'best.pth'

model = net.Net(params).cuda() if params.cuda else net.Net(params)
checkpoint = torch.load(weights, map_location=torch.device('cpu'))
model.load_state_dict(checkpoint['state_dict'])

data_loader = DataLoader(DATA_DIR, params)
data = data_loader.load_data(['train', 'val'], DATA_DIR)
train_data = data['train']
train_data_iterator = data_loader.data_iterator(train_data,
                                                params,
                                                shuffle=True)
train_batch, _ = next(train_data_iterator)

val_data = data['val']
val_data_iterator = data_loader.data_iterator(val_data, params, shuffle=False)
val_batch, _ = next(val_data_iterator)
explainer = shap.KernelExplainer(model.forward, train_batch[:1])
vals = train_batch[:10]

shap_values = explainer.shap_values(train_batch[:10])
shap.force_plot(explainer.expected_value[0], shap_values[0][0],
                train_batch[:10])
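
A small assumed addition for notebook use: shap force plots are rendered with a JavaScript backend, which normally has to be initialised before shap.force_plot is called.

# Assumed setup when running in a Jupyter notebook (not part of the original snippet):
shap.initjs()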
Example #4
        elif 'sp' in params.emb:
            model = models.CNN_Text(data_loader.weights_sp, params)
    elif params.model == "cnn_text_attn":
        if 'w2v' in params.emb:
            model = models.CNN_Text_Attn(data_loader.weights_w2v, params)
        elif 'sp' in params.emb:
            model = models.CNN_Text_Attn(data_loader.weights_sp, params)

    print(model)
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    model.to(device)

    # fetch loss function and metrics
    loss_fn = torch.nn.BCELoss()
    metrics = models.metrics

    # Evaluate the model
    logging.info("Starting evaluation")
    utils.load_checkpoint(os.path.join(model_dir, 'best.pth.tar'),
                          model,
                          parallel=False)
    num_steps = test_data['size'] // params.batch_size
    test_data_iterator = data_loader.data_iterator(test_data,
                                                   params,
                                                   shuffle=False)
    if 'attn' in params.model:
        test_metrics = evaluate(model, loss_fn, test_data_iterator, metrics,
                                params, num_steps)
Example #5
    if params.cuda: torch.cuda.manual_seed(230)

    # Get the logger
    utils.set_logger(os.path.join(args.model_dir, 'evaluate.log'))

    # Create the input data pipeline
    logging.info("Creating the dataset...")

    # load data
    print(params)
    data_loader = DataLoader(params)
    data = data_loader.load_data(args.test_data_path)

    # specify the test set size
    params.test_size = data_loader.get_dataset_size('all')
    test_data_iterator = data_loader.data_iterator(split='all', batch_size=params.batch_size)

    logging.info("- done.")

    # Define the model
    model = net.Model(params).cuda() if params.cuda else net.Model(params)

    loss_fn = model.loss_fn
    metrics = {
        'EM': model.exact_match_score,
        'f1': model.f1_score
    }

    logging.info("Starting evaluation")

    # Reload weights from the saved file
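
The snippet is truncated after this comment; a plausible continuation, mirroring Example #1 and assuming the same utils.load_checkpoint helper plus a --restore_file argument, would be:

    # Assumed continuation (names mirror Example #1; restore_file is hypothetical)
    utils.load_checkpoint(
        os.path.join(args.model_dir, args.restore_file + '.pth.tar'), model)

    # Evaluate
    num_steps = (params.test_size + 1) // params.batch_size
    test_metrics = evaluate(model, loss_fn, test_data_iterator, metrics,
                            params, num_steps)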