def get_output_distributions(model, input_data_loader):
    """Collect `model.predict` outputs over the whole loader.

    Returns a numpy array of the concatenated per-sample predictions,
    squeezed of any singleton dimensions.
    """
    # device/dtype come from the project-wide settings helper
    device, dtype, _ = get_settings()

    predictions = []
    for batch in input_data_loader:
        batch_out = model.predict(batch.to(device, dtype))
        predictions.extend(batch_out.detach().cpu().numpy())

    return np.array(predictions).squeeze()


# def test_model(model, data_loader):
#     # Check model's train accuracy first...
#     check_accuracy(model)

#     # test sensitivity
#     sens = test_sensitivity(model, data_loader)

#     # get predictions from the model
#     dists = get_output_distributions(model, data_loader)

#     plot_test_results(sens, dists, data_loader.dataset.targets)

# def get_avg_sensitivity(model, test_loader):
#     device, dtype, dtype_y = get_settings()
#     model.eval()
#     local_sensitivity = 0
#     for data, target in test_loader:
#         data, _ = data.to(device, dtype), target.to(device, dtype_y)
#         local_sensitivity += sensitivity.local_sensitivity(data, model).data
#     local_sensitivity /= len(test_loader.dataset)
#     return local_sensitivity
def write_activations_to_file(model, save_path, data_loader, layers):
    """Stream each layer's activations to one HDF5 file per layer.

    Runs every batch of `data_loader` through `model` with forward hooks
    attached to `layers`; hook output for layer ``i`` is written to
    ``<save_path>/layer_<i>.h5``. Files are closed and hooks removed even
    if the forward pass raises.
    """
    device, dtype, dtype_y = get_settings()
    # create the output directory once, not once per layer
    os.makedirs(save_path, exist_ok=True)

    h5_files = []
    hook_handles = []
    try:
        for i, layer in enumerate(layers):
            h5_file = tables.open_file('{}/layer_{}.h5'.format(save_path, i),
                                       mode='w')
            h5_files.append(h5_file)
            hook_handles.append(
                layer.register_forward_hook(
                    activation_hook(h5_file, to_file_hook)))

        with torch.no_grad():
            for data, target in data_loader:
                # original had a redundant chained assignment here
                # (`data, target = data, target = ...`); single move suffices
                data, target = data.to(device, dtype), target.to(
                    device, dtype_y)
                model(data)
    finally:
        # always release file handles and detach hooks, even on error
        for h5_file in h5_files:
            h5_file.close()
        for handle in hook_handles:
            handle.remove()
def get_activations(model, path, loader, layers="all"):
    """Load weights from `path` and capture activations of selected layers.

    Args:
        model: network exposing a `.layers` iterable of hookable modules.
        path: path to a saved state dict.
        loader: iterable of (data, target) batches.
        layers: 'all' for every layer, or an iterable of layer indices.

    Returns:
        list of numpy arrays, one per selected layer, each transposed
        (units x samples).
    """
    device, dtype, dtype_y = get_settings()
    model.load_state_dict(torch.load(path, map_location=device))
    model.eval()

    all_layers = list(model.layers)
    if layers == 'all':
        selected_layers = all_layers
    else:
        selected_layers = [all_layers[idx] for idx in layers]

    activations = [[] for _ in selected_layers]
    handles = []
    for store, layer in zip(activations, selected_layers):
        handles.append(
            layer.register_forward_hook(activation_hook(store,
                                                        to_memory_hook)))

    try:
        with torch.no_grad():
            for data, target in loader:
                data, target = data.to(device, dtype), target.to(
                    device, dtype_y)
                model(data)
    finally:
        # remove hooks so repeated calls don't accumulate them on the model
        # (the original leaked one hook per layer per call)
        for handle in handles:
            handle.remove()

    return [
        torch.cat(acts, dim=0).cpu().numpy().T for acts in activations
    ]
def test_sensitivity(model, input_data_loader):
    """Compute the local sensitivity of `model` on every batch.

    Returns a numpy array with one sensitivity value per batch.
    """
    # device/dtype come from the project-wide settings helper
    device, dtype, _ = get_settings()

    per_batch = [
        local_sensitivity(batch.to(device, dtype),
                          model).detach().cpu().numpy()
        for batch in input_data_loader
    ]
    return np.array(per_batch)
def dump_model_state_activations(model_state_path,
                                 data_loader,
                                 save_path,
                                 final=False):
    """Recreate a model from a saved state and dump its activations to disk.

    Activations are written under ``<save_path>/model_final_act`` when
    `final` is True, otherwise under ``<save_path>/model_act``.
    """
    print('-------------------------------')
    print('GETTING MODEL STATE ACTIVATIONS')
    print('-------------------------------')
    # settings fetched as in the original; values are not used here
    device, dtype, dtype_y = get_settings()

    subdir = '/model_final_act' if final else '/model_act'
    save_path = save_path + subdir

    net = recreate_model(model_state_path)
    net.eval()
    write_activations_to_file(net, save_path, data_loader, list(net.layers))
# ---- Example #6 ----
import os, sys, torch
import numpy as np

# custom imports
base_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(base_path, '..'))

from src.utils import get_hyperparameters, get_settings, get_experiment, get_number_of_experiments, save_model, get_noise
from src.utils import train, recreate_model
from src.data_loader import load_data, get_data_dimensions
from src.optimisers import get_optimiser
from src.net import Net
from src.utils import model_name, get_train_and_start_epoch

if __name__ == "__main__":
    device, dtype, _ = get_settings()
    hyperparam_indices = [int(i) for i in sys.argv[1].split("_")]
    experiment_indices = [int(i) for i in sys.argv[2].split("_")]
    act = "relu"
    dataset = sys.argv[3]
    epochs = int(sys.argv[4])
    model_states_dir = sys.argv[5]
    plotting_dir = sys.argv[6]
    experiment_name = model_name(hyperparam_indices, experiment_indices)

    # # hyperparameters
    noise_type, noise_level = get_noise(experiment_indices)
    hyperparams = get_hyperparameters(hyperparam_indices, noise_type,
                                      noise_level)
    batch_size = hyperparams[1]
    n_hidden = hyperparams[3]