def train_with_loader(network,
                      training_data,
                      config_dict,
                      validation_data=None,
                      loss_fn=torch.nn.MSELoss()):
    """Train `network` on batches from `training_data` for the configured
    number of epochs, optionally evaluating on `validation_data` each epoch.

    Args:
        network: model exposing `params_groups(config_dict)` for the optimizer.
        training_data: loader consumed by `batch_training`.
        config_dict: must contain 'betas', 'save_dir', 'name', 'cv_penalty'
            and 'nr_epochs'; 'save_interval' (epochs between checkpoints) is
            optional. Its 'save_dir' entry is replaced with a fresh
            time-stamped directory.
        validation_data: optional loader evaluated once per epoch; when absent
            the validation columns are filled with NaN.
        loss_fn: loss used for both training and validation (default MSE).

    Returns:
        (costs, accuracy_over_epochs): two (nr_epochs, 2) arrays with the
        training values in column 0 and validation values in column 1.
    """
    print('------- TRAINING WITH LOADER ---------')
    # set configurations
    optimizer = torch.optim.Adam(network.params_groups(config_dict),
                                 betas=config_dict['betas'])
    # A fresh time-stamped directory keeps repeated runs from overwriting
    # each other; the configs are snapshotted there for reproducibility.
    config_dict["save_dir"] = create_directory_timestamp(
        config_dict["save_dir"], config_dict["name"])
    save_configs(
        config_dict,
        os.path.join(config_dict["save_dir"], "training_configs.json"))
    # Define variables
    regularization = torch.tensor(config_dict['cv_penalty'],
                                  device=TorchUtils.get_accelerator_type())
    costs = np.zeros((config_dict["nr_epochs"],
                      2))  # training and validation costs per epoch
    accuracy_over_epochs = np.zeros((config_dict["nr_epochs"], 2))
    looper = trange(config_dict["nr_epochs"], desc='Initialising')
    for epoch in looper:

        costs[epoch, 0], accuracy_over_epochs[epoch, 0] = batch_training(
            training_data, network, optimizer, loss_fn, regularization)
        # Evaluate validation error (NaN placeholders when no validation set).
        if validation_data is not None:
            costs[epoch, 1], accuracy_over_epochs[epoch, 1] = evaluate(
                network, loss_fn, validation_data)
        else:
            costs[epoch, 1], accuracy_over_epochs[epoch, 1] = np.nan, np.nan

        # BUG FIX: the original tested `epoch % save_interval == 10`, which
        # never fires when save_interval <= 10 and otherwise checkpoints at
        # arbitrary offsets; checkpoint every `save_interval` epochs instead.
        if 'save_interval' in config_dict and epoch % config_dict[
                "save_interval"] == 0:
            save_model(network, config_dict["save_dir"], f'checkpoint_{epoch}')
        if np.isnan(costs[epoch, 0]):
            # Training diverged: invalidate the remaining epochs and stop.
            costs[epoch:, 0] = np.nan
            costs[epoch:, 1] = np.nan
            print('--------- Training interrupted value was Nan!! ---------')
            break
        looper.set_description(
            f' Epoch: {epoch} | Training Error:{costs[epoch, 0]:7.3f} | Val. Error:{costs[epoch, 1]:7.3f}'
        )

    save_model(network, config_dict["save_dir"], 'final_model')
    save_parameters_as_numpy(network, config_dict["save_dir"])
    # BUG FIX: the original passed `accuracy=accuracy`, a NameError — the
    # array in scope is `accuracy_over_epochs`.
    np.savez(os.path.join(config_dict["save_dir"], 'training_history'),
             costs=costs,
             accuracy=accuracy_over_epochs)
    print('------------DONE-------------')
    return costs, accuracy_over_epochs
Beispiel #2
0
 def load_file(self, data_dir, file_type):
     """Load a stored model description from `data_dir`.

     Args:
         data_dir: path to the stored model file.
         file_type: 'pt' for a torch checkpoint; 'json' is declared but not
             implemented yet.

     Returns:
         (info, state_dict): the metadata dict split out of the checkpoint
         and the remaining torch state dict.

     Raises:
         NotImplementedError: for 'json' and for any unrecognised file_type.
     """
     if file_type == 'pt':
         state_dict = torch.load(data_dir, map_location=TorchUtils.get_accelerator_type())
         # The 'info' entry is metadata, not weights: split it off so the
         # remaining dict can be fed to load_state_dict-style consumers.
         info = state_dict['info']
         del state_dict['info']
         info['smg_configs'] = self._info_consistency_check(info['smg_configs'])
         # Older checkpoints may lack the amplification factor; 1 is a no-op.
         if 'amplification' not in info['data_info']['processor'].keys():
             info['data_info']['processor']['amplification'] = 1
     elif file_type == 'json':
         state_dict = None
         # TODO: Implement loading from a json file
         raise NotImplementedError(f"Loading file from a json file in TorchModel has not been implemented yet. ")
         # info = model_info loaded from a json file
     else:
         # BUG FIX: the original fell through to the return and crashed with
         # UnboundLocalError on `info`; fail with a clear message instead.
         raise NotImplementedError(
             f"File type {file_type} is not recognised. Only 'pt' is currently supported.")
     return info, state_dict
def get_simulation_architecture(configs):
    """Build the simulation processor selected by configs['processor_type'].

    'surrogate' and 'dnpu' are supported; 'nn' is declared but not yet
    implemented, and any other value is rejected.
    """
    processor_type = configs['processor_type']
    if processor_type == 'surrogate':
        return get_processor_architecture(configs)
    if processor_type == 'dnpu':
        architecture = get_dnpu_architecture(configs)
        return architecture.to(device=TorchUtils.get_accelerator_type())
    if processor_type == 'nn':
        raise NotImplementedError(
            f"{configs['processor_type']} 'processor_type' nn configuration is not implemented yet. "
        )
    raise NotImplementedError(
        f"{configs['processor_type']} 'processor_type' configuration is not recognised. The simulation type has to be defined as 'nn', 'surrogate' or 'dpnu'. "
    )
Beispiel #4
0
def load_data(base_dir):
    """Load the model, results and configs saved under base_dir/reproducibility.

    Args:
        base_dir: experiment directory containing a 'reproducibility' folder
            with 'model.pt', 'results.pickle' and 'configs.json'.

    Returns:
        (model, results, configs). `configs['results_base_dir']` is set to
        `base_dir` so downstream code can resolve output paths.
    """
    import os
    import torch
    import pickle
    from bspyalgo.utils.io import load_configs

    model_dir = os.path.join(base_dir, 'reproducibility', 'model.pt')
    results_dir = os.path.join(base_dir, 'reproducibility', 'results.pickle')
    configs_dir = os.path.join(base_dir, 'reproducibility', 'configs.json')
    model = torch.load(model_dir,
                       map_location=TorchUtils.get_accelerator_type())
    # BUG FIX: the original used `pickle.load(open(...))`, leaking the file
    # handle; a `with` block guarantees it is closed.
    # NOTE(review): unpickling is only safe for files produced by this project
    # itself — never point this at untrusted data.
    with open(results_dir, "rb") as results_file:
        results = pickle.load(results_file)
    configs = load_configs(configs_dir)
    configs['results_base_dir'] = base_dir
    return model, results, configs
Beispiel #5
0
def choose_loss_function(loss_fn_name):
    '''Gets the fitness function used in GD from the module losses.
    The loss functions must take two arguments, the outputs of the black-box and the target
    and must return a torch array of scores of size len(outputs).
    '''
    # 'bce' is the only loss that must be instantiated and moved to the
    # accelerator; every other recognised name maps straight to a
    # module-level function.
    if loss_fn_name == 'bce':
        bce = BCELossWithSigmoid()
        bce.cuda(TorchUtils.get_accelerator_type()).to(TorchUtils.data_type)
        return bce
    if loss_fn_name == 'corrsig':
        return corrsig
    if loss_fn_name == 'sqrt_corrsig':
        return sqrt_corrsig
    if loss_fn_name == 'fisher':
        return fisher
    if loss_fn_name == 'fisher_added_corr':
        return fisher_added_corr
    if loss_fn_name == 'fisher_multipled_corr':
        return fisher_multipled_corr
    raise NotImplementedError(
        f"Loss function {loss_fn_name} is not recognized!")
    # NOTE(review): everything below is UNREACHABLE — every path through the
    # if/elif/else chain above either returns or raises before this point.
    # It reads like a demo script (imports, model construction, synthetic
    # data) that was pasted into the function body; consider deleting it or
    # moving it under an `if __name__ == '__main__':` guard.
    import torch.nn as nn
    import matplotlib.pyplot as plt
    from bspyalgo.utils.io import load_configs
    from bspyproc.architectures.dnpu.modules import DNPU_Layer
    from bspyproc.utils.pytorch import TorchUtils

    # Generate model
    NODE_CONFIGS = load_configs(
        '/home/hruiz/Documents/PROJECTS/DARWIN/Code/packages/brainspy/brainspy-processors/configs/configs_nn_model.json'
    )
    nr_nodes = 5
    input_list = [[0, 3, 4]] * nr_nodes
    data_dim = 20
    # Linear layer mapping data_dim features to 3 control inputs per node.
    linear_layer = nn.Linear(data_dim,
                             len(input_list[0]) * nr_nodes).to(
                                 device=TorchUtils.get_accelerator_type())
    dnpu_layer = DNPU_Layer(input_list, NODE_CONFIGS)
    model = nn.Sequential(linear_layer, dnpu_layer)

    # Generate data
    nr_train_samples = 50
    nr_val_samples = 10
    x = TorchUtils.format_tensor(
        torch.rand(nr_train_samples + nr_val_samples, data_dim))
    # Constant target of 5.0 for every node/sample.
    y = TorchUtils.format_tensor(
        5. * torch.ones(nr_train_samples + nr_val_samples, nr_nodes))

    # Split into train/validation partitions.
    inp_train = x[:nr_train_samples]
    t_train = y[:nr_train_samples]
    inp_val = x[nr_train_samples:]
    t_val = y[nr_train_samples:]
Beispiel #7
0
        return x.view(-1, self.out_size, self.out_size)


if __name__ == '__main__':
    # Smoke test: push a random batch through a linear layer feeding a
    # DNPU layer and report the output shape, timing and value range.
    from bspyalgo.utils.io import load_configs
    import matplotlib.pyplot as plt
    import time

    NODE_CONFIGS = load_configs(
        '/home/hruiz/Documents/PROJECTS/DARWIN/Code/packages/brainspy/brainspy-processors/configs/configs_nn_model.json'
    )
    node = TorchModel(NODE_CONFIGS)
    # 100 DNPU nodes, each reading 3 of the linear layer's 300 outputs.
    linear_layer = nn.Linear(20,
                             300).to(device=TorchUtils.get_accelerator_type())
    dnpu_layer = DNPU_Layer([[0, 3, 4]] * 100, node)

    model = nn.Sequential(linear_layer, dnpu_layer)

    # Time a single forward pass over a batch of 200 random samples.
    data = torch.rand((200, 20)).to(device=TorchUtils.get_accelerator_type())
    tic = time.time()
    output = model(data)
    toc = time.time()

    print(
        f'(inputs,outputs) = {output.shape} of layer evaluated in {toc-tic} seconds'
    )
    print(f'Output range : [{output.min()},{output.max()}]')