Example #1
 def init_scale(self, scale_min, scale_max):
     if scale_min == 1.0 and scale_max == 1.0:
         scale = TorchUtils.get_tensor_from_numpy(np.array([1.0]))
         return scale
     else:
         # Sample the initial scale uniformly from [scale_min, scale_max]
         scale = TorchUtils.get_tensor_from_numpy(scale_min + (scale_max - scale_min) * np.random.rand(1))
         return nn.Parameter(scale)
Example #2
    def crossover_blxab(self, parent1, parent2):
        '''
        Crossover method: blend-alpha-beta crossover. Returns a new genome
        (voltage combination) derived from two parents, where parent1 is
        assumed to have a higher fitness than parent2.
        '''

        alpha = 0.6
        beta = 0.4
        # check this in pytorch
        maximum = torch.max(parent1, parent2)
        minimum = torch.min(parent1, parent2)
        diff_maxmin = (maximum - minimum)
        offspring = TorchUtils.format_tensor(torch.zeros((parent1.shape)))
        for i in range(len(parent1)):
            if parent1[i] > parent2[i]:
                offspring[i] = TorchUtils.format_tensor(
                    uniform(minimum[i] - diff_maxmin[i] * beta,
                            maximum[i] + diff_maxmin[i] * alpha).sample())
            else:
                offspring[i] = TorchUtils.format_tensor(
                    uniform(minimum[i] - diff_maxmin[i] * alpha,
                            maximum[i] + diff_maxmin[i] * beta).sample())
        for i in range(0, len(self.gene_range)):
            if offspring[i] < self.gene_range[i][0]:
                offspring[i] = self.gene_range[i][0]
            if offspring[i] > self.gene_range[i][1]:
                offspring[i] = self.gene_range[i][1]
        return offspring
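For reference, the same blend-alpha-beta crossover can be written without the per-gene loop. The following is a minimal vectorised sketch in plain torch (not part of the original code), assuming parent1 and parent2 are 1-D float tensors and gene_range is an (n_genes, 2) tensor; clamping uses torch.max/torch.min to stay compatible with older PyTorch versions.

import torch

def crossover_blxab_vectorised(parent1, parent2, gene_range, alpha=0.6, beta=0.4):
    # Sampling interval is widened towards the fitter parent (parent1 by convention).
    maximum = torch.max(parent1, parent2)
    minimum = torch.min(parent1, parent2)
    diff = maximum - minimum
    low = torch.where(parent1 > parent2, minimum - diff * beta, minimum - diff * alpha)
    high = torch.where(parent1 > parent2, maximum + diff * alpha, maximum + diff * beta)
    offspring = low + (high - low) * torch.rand_like(parent1)
    # Clip every gene back into its allowed range.
    return torch.max(torch.min(offspring, gene_range[:, 1]), gene_range[:, 0])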
Example #3
    def mutation(self, pool):
        '''
        Mutate all genomes except the first partition[0] ones, drawing each
        mutated gene from a triangular distribution over its gene range with
        mode equal to the gene's current value.
        '''
        # Check if the mask requires to
        mask = TorchUtils.get_tensor_from_numpy(
            np.random.choice([0, 1],
                             size=pool[self.partition[0]:].shape,
                             p=[1 - self.mutation_rate, self.mutation_rate]))
        mutated_pool = np.zeros(
            (self.genome_no - self.partition[0], len(self.gene_range)))
        gene_range = TorchUtils.get_numpy_from_tensor(self.gene_range)
        for i in range(0, len(gene_range)):
            if gene_range[i][0] == gene_range[i][1]:
                mutated_pool[:, i] = gene_range[i][0] * np.ones(
                    mutated_pool[:, i].shape)
            else:
                mutated_pool[:, i] = np.random.triangular(
                    gene_range[i][0],
                    TorchUtils.get_numpy_from_tensor(pool[self.partition[0]:,
                                                          i]),
                    gene_range[i][1])

        mutated_pool = TorchUtils.get_tensor_from_numpy(mutated_pool)
        pool[self.partition[0]:] = ((TorchUtils.format_tensor(
            torch.ones(pool[self.partition[0]:].shape)) - mask) *
                                    pool[self.partition[0]:] +
                                    mask * mutated_pool)
        return pool
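A minimal numpy sketch of the same masked triangular mutation (the helper name is illustrative, not from the source), assuming pool is an (n_genomes, n_genes) numpy array and gene_range an (n_genes, 2) array:

import numpy as np

def mutate(pool, gene_range, mutation_rate=0.1):
    # 1 marks genes that will be mutated, 0 marks genes that are kept.
    mask = np.random.choice([0, 1], size=pool.shape,
                            p=[1 - mutation_rate, mutation_rate])
    mutated = np.empty_like(pool)
    for i, (low, high) in enumerate(gene_range):
        if low == high:
            mutated[:, i] = low
        else:
            # Mode equals the current gene value, so mutations stay close to the parent.
            mutated[:, i] = np.random.triangular(low, pool[:, i], high)
    return (1 - mask) * pool + mask * mutated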
Example #4
 def _init_pool(self):
     pool = TorchUtils.format_tensor(
         torch.zeros((self.genome_no, len(
             self.gene_range))))  # Dimensions (Genome number, gene number)
     for i in range(0, len(self.gene_range)):
         pool[:, i] = TorchUtils.format_tensor(
             uniform(self.gene_range[i][0], self.gene_range[i][1]).sample(
                 (self.genome_no, )))
     return pool
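The per-gene loop above can also be expressed as a single vectorised draw; a short sketch (illustrative name, not from the source), assuming gene_range is an (n_genes, 2) tensor:

import torch

def init_pool_vectorised(genome_no, gene_range):
    # Scale a uniform [0, 1) sample into each gene's [min, max] interval.
    low, high = gene_range[:, 0], gene_range[:, 1]
    return low + (high - low) * torch.rand(genome_no, gene_range.shape[0])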
Example #5
 def init_noise_configs(self):
     if 'noise' in self.configs:
         print(f"The model has a gaussian noise based on a MSE of {torch.tensor([self.configs['noise']])}")
         self.error = torch.sqrt(TorchUtils.format_tensor(torch.tensor([self.configs['noise']])))
         self.forward_processed = self.forward_amplification_and_noise
     else:
         print(f"The model has been initialised without noise.")
         self.error = TorchUtils.format_tensor(torch.tensor([0]))
         self.forward_processed = self.forward_amplification
Example #6
 def reset(self):
     # This function needs to be checked
     self.input_node1.reset()
     self.input_node2.reset()
     self.hidden_node1.reset()
     self.hidden_node2.reset()
     self.output_node.reset()
     self.bn1 = TorchUtils.format_tensor(nn.BatchNorm1d(2, affine=False))
     self.bn2 = TorchUtils.format_tensor(nn.BatchNorm1d(2, affine=False))
Example #7
    def improve_solution(self, results, model):
        self.processor.load_state_dict(model.copy())
        inputs = TorchUtils.get_tensor_from_numpy(results['inputs'])
        targets = TorchUtils.get_tensor_from_numpy(results['targets'])
        TorchUtils.init_seed(results['seed'], deterministic=True)
        new_results = self.task.run_task(inputs, targets, results['mask'])

        plt.figure()
        plt.plot(results['best_output'])
        plt.plot(new_results['best_output'])
        plt.show()
Example #8
 def __init__(self, configs):
     super().__init__(configs)
     self.init_model(configs)
     self.init_clipping_values(configs['waveform']['output_clipping_value'])
     self.bn1 = TorchUtils.format_tensor(nn.BatchNorm1d(2, affine=False))
     self.bn2 = TorchUtils.format_tensor(nn.BatchNorm1d(2, affine=False))
     self.init_current_to_voltage_conversion_variables()
     self.init_control_voltage_no()
     if self.configs['debug']:
         self.forward = self.forward_with_debug
     else:
         self.forward = self.forward_
Example #9
 def find_gate_with_torch(self, encoded_inputs, encoded_gate, mask):
     encoded_gate = TorchUtils.format_tensor(encoded_gate)
     excel_results = self.optimize(encoded_inputs, encoded_gate, mask)
     excel_results['accuracy'], _, _ = perceptron(
         excel_results['best_output'][excel_results['mask']],
         TorchUtils.get_numpy_from_tensor(
             encoded_gate[excel_results['mask']]))
     excel_results['encoded_gate'] = encoded_gate.cpu()
     # excel_results['targets'] = excel_results
     excel_results['correlation'] = corr_coeff(
         excel_results['best_output'][excel_results['mask']].T,
         excel_results['targets'].cpu()[excel_results['mask']].T)
     return excel_results
Example #10
def test_gd_nn(validation=True, plot=False):
    INPUTS = torchutils.get_tensor_from_numpy(XNOR['inputs'].T)
    TARGETS = torchutils.get_tensor_from_numpy(XNOR['targets'])
    INPUTS_VAL = torchutils.get_tensor_from_numpy(XNOR['inputs_val'].T)
    TARGETS_VAL = torchutils.get_tensor_from_numpy(XNOR['targets_val'])
    gd_nn = get_algorithm('./configs/gd/configs_template_nn_model.json')
    task_to_solve(gd_nn,
                  INPUTS,
                  TARGETS,
                  INPUTS_VAL,
                  TARGETS_VAL,
                  validation=validation,
                  plot=plot)
Example #11
 def set_control_voltages(self, state_dict):
     control_voltages = np.zeros([len(self.control_voltage_indices)])
     control_voltages[0:5] = TorchUtils.get_numpy_from_tensor(
         state_dict['input_node1.bias'])
     control_voltages[5:10] = TorchUtils.get_numpy_from_tensor(
         state_dict['input_node2.bias'])
     control_voltages[10:15] = TorchUtils.get_numpy_from_tensor(
         state_dict['hidden_node1.bias'])
     control_voltages[15:20] = TorchUtils.get_numpy_from_tensor(
         state_dict['hidden_node2.bias'])
     control_voltages[20:25] = TorchUtils.get_numpy_from_tensor(
         state_dict['output_node.bias'])
     self.control_voltages = control_voltages
Example #12
 def evaluate_node(self, x, x_indices, controls, c_indices):
     expand_controls = controls.expand(x.size()[0], -1)
     data = torch.empty((x.size()[0], x.size()[1] + controls.size()[1]))
     data = TorchUtils.format_tensor(data)
     data[:, x_indices] = x
     data[:, c_indices] = expand_controls
     return self.node(data) * self.node.amplification
Example #13
 def process_targets(self, targets, processor_configs):
     mask = (targets == 1)
     targets[targets == 0] = 1
     targets[mask] = 0
     # targets = np.asarray(generate_waveform(targets, processor_configs['waveform']['amplitude_lengths'], processor_configs['waveform']['slope_lengths'])).T
     if processor_configs["processor_type"] == 'dnpu':
         return TorchUtils.get_tensor_from_numpy(targets)
     return targets
Example #14
def load_data(path, steps):
    print('Data loading from: \n' + path)
    with np.load(
            path, allow_pickle=True
    ) as data:  # why was allow_pickle not required before? Do we need this?
        # TODO: change in data generation the key meta to info_dictionary
        info_dictionary = data['info'].tolist()
        print(f'Metadata :\n {info_dictionary.keys()}')
        # Create from numpy arrays torch.tensors and send them to device
        inputs = TorchUtils.get_tensor_from_numpy(
            data['inputs'][::steps])  # shape: Nx#electrodes
        outputs = TorchUtils.get_tensor_from_numpy(
            data['outputs'][::steps])  # Outputs need dim Nx1
        print(
            f'Shape of outputs: {outputs.shape}; shape of inputs: {inputs.shape}'
        )

    return inputs, outputs, info_dictionary
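A hypothetical usage sketch; the file name and step size below are placeholders rather than values from the source:

# Load every 3rd sample of an assumed measurement file.
inputs, outputs, info_dictionary = load_data('postprocessed_data.npz', steps=3)
print(inputs.shape)            # torch tensor of shape (N, #electrodes)
print(outputs.shape)           # torch tensor of shape (N, 1)
print(info_dictionary.keys())  # metadata stored alongside the measurements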
Example #15
    def __init__(self, inputs_list, model):
        super().__init__()
        self.node = model
        ######### Set up node #########
        # Freeze parameters of node
        for params in self.node.parameters():
            params.requires_grad = False

        self.indices_node = np.arange(len(self.node.amplitude))
        ######### set learnable parameters #########
        control_list = self.set_controls(inputs_list)

        ###### Set everything as torch Tensors and send to DEVICE ######
        self.inputs_list = TorchUtils.get_tensor_from_list(
            inputs_list, torch.int64)
        self.control_list = TorchUtils.get_tensor_from_list(
            control_list, torch.int64
        )  # IndexError: tensors used as indices must be long, byte or bool tensors
Example #16
def train_with_loader(network,
                      training_data,
                      config_dict,
                      validation_data=None,
                      loss_fn=torch.nn.MSELoss()):
    print('------- TRAINING WITH LOADER ---------')
    # set configurations
    # optimizer = set_optimizer(network, config_dict)
    optimizer = torch.optim.Adam(network.params_groups(config_dict),
                                 betas=config_dict['betas'])
    config_dict["save_dir"] = create_directory_timestamp(
        config_dict["save_dir"], config_dict["name"])
    save_configs(
        config_dict,
        os.path.join(config_dict["save_dir"], "training_configs.json"))
    # Define variables
    regularization = torch.tensor(config_dict['cv_penalty'],
                                  device=TorchUtils.get_accelerator_type())
    costs = np.zeros((config_dict["nr_epochs"],
                      2))  # training and validation costs per epoch
    accuracy_over_epochs = np.zeros((config_dict["nr_epochs"], 2))
    # grads_epochs = np.zeros((config_dict["nr_epochs"], config_dict["nr_nodes"]))
    looper = trange(config_dict["nr_epochs"], desc='Initialising')
    for epoch in looper:

        costs[epoch, 0], accuracy_over_epochs[epoch, 0] = batch_training(
            training_data, network, optimizer, loss_fn, regularization)
        # grads_epochs[epoch] = np.asarray([np.abs(p.grad.detach().cpu().numpy()).max()
        #                                   for p in network.dnpu_layer.parameters() if p.requires_grad])
        # Evaluate Validation error
        if validation_data is not None:
            costs[epoch, 1], accuracy_over_epochs[epoch, 1] = evaluate(
                network, loss_fn, validation_data)
        else:
            costs[epoch, 1], accuracy_over_epochs[epoch, 1] = np.nan, np.nan

        if 'save_interval' in config_dict.keys(
        ) and epoch % config_dict["save_interval"] == 10:
            save_model(network, config_dict["save_dir"], f'checkpoint_{epoch}')
        if np.isnan(costs[epoch, 0]):
            costs[epoch:, 0] = np.nan
            costs[epoch:, 1] = np.nan
            print('--------- Training interrupted value was Nan!! ---------')
            break
        looper.set_description(
            f' Epoch: {epoch} | Training Error:{costs[epoch, 0]:7.3f} | Val. Error:{costs[epoch, 1]:7.3f}'
        )

    save_model(network, config_dict["save_dir"], 'final_model')
    save_parameters_as_numpy(network, config_dict["save_dir"])
    np.savez(os.path.join(config_dict["save_dir"], 'training_history'),
             costs=costs,
             accuracy=accuracy_over_epochs)
    print('------------DONE-------------')
    return costs, accuracy_over_epochs
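A hypothetical minimal config_dict for train_with_loader; the keys mirror the ones read in the function above, the values are placeholders, and network.params_groups may require additional keys (for example learning rates) that are not shown here.

config_dict = {
    'betas': (0.9, 0.999),   # Adam betas
    'cv_penalty': 0.5,       # weight of the control-voltage regularisation term
    'nr_epochs': 100,
    'save_interval': 20,     # checkpoint frequency in epochs
    'save_dir': 'tmp/output',
    'name': 'xor_experiment',
}
costs, accuracy = train_with_loader(network, training_data, config_dict,
                                    validation_data=validation_data)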
Example #17
def evaluate_population(inputs, pool, model):
    '''Optimisation function of the platform '''
    output_popul = TorchUtils.format_tensor(torch.zeros((len(pool),) + (len(inputs), 1)))
    for j in range(len(pool)):

        # control_voltage_genes = self.get_control_voltages(gene_pool[j], len(inputs_wfm))  # , gene_pool[j, self.gene_trafo_index]
        # inputs_without_offset_and_scale = self._input_trafo(inputs_wfm, gene_pool[j, self.gene_trafo_index])
        # assert False, 'Check the case for inputing voltages with plateaus to check if it works when merging control voltages and inputs'
        model.set_control_voltages(pool[j])
        output_popul[j] = model(inputs)
        # output_popul[j] = self.processor.get_output(merge_inputs_and_control_voltages_in_numpy(inputs_without_offset_and_scale, control_voltage_genes, self.input_indices, self.control_voltage_indices))
    return output_popul
Example #18
    def get_inputs(self, vc_dimension, validation=False):
        readable_inputs = self.generate_test_inputs(vc_dimension)
        if self.use_waveform:
            transformed_inputs = self.generate_inputs_waveform(
                readable_inputs, validation)
        else:
            transformed_inputs = readable_inputs
        if self.use_torch:
            transformed_inputs = TorchUtils.get_tensor_from_numpy(
                transformed_inputs)

        return readable_inputs, transformed_inputs
Example #19
 def process_inputs(self, inputs, processor_configs):
     assert inputs.shape[1] == len(processor_configs['input_indices'])
     # for i in range(inputs.shape[1]):
     #     inputs[:, i] = normalise(inputs[:, i])
     #     inputs[:, i] = map_to_voltage(inputs[:, i],
     #                                   MIN_INPUT_VOLT[processor_configs['input_indices'][i]],
     #                                   MAX_INPUT_VOLT[processor_configs['input_indices'][i]])
     # inputs[:, i] = generate_waveform(inputs[:, i], processor_configs['waveform']['amplitude_lengths'], slope_lengths=processor_configs['waveform']['slope_lengths'])
     # inputs = self.generate_data_waveform(inputs, processor_configs['waveform']['amplitude_lengths'], processor_configs['waveform']['slope_lengths'])
     if processor_configs["processor_type"] == 'dnpu':
         return TorchUtils.get_tensor_from_numpy(inputs)
     return inputs
Example #20
def evaluate_criterion(outputs_pool, target, criterion, clipvalue=[-np.inf, np.inf]):
    genome_no = len(outputs_pool)
    criterion_pool = TorchUtils.format_tensor(torch.zeros(genome_no))
    for j in range(genome_no):
        output = outputs_pool[j]
        if torch.any(output < clipvalue[0]) or torch.any(output > clipvalue[1]):
            # print(f'Clipped at {clipvalue} nA')
            result = criterion(None, None, default_value=True)
        else:
            result = criterion(output, target)
        criterion_pool[j] = result
    return criterion_pool
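The helpers evaluate_population (Example #17) and evaluate_criterion compose naturally into one generation of the genetic search. A sketch, assuming model, inputs, target, pool and a criterion are already available; whether the best genome is the argmax or the argmin depends on whether the criterion is a fitness or a loss.

outputs_pool = evaluate_population(inputs, pool, model)
criterion_pool = evaluate_criterion(outputs_pool, target, criterion)
best_genome = pool[torch.argmax(criterion_pool)]  # use argmin for a loss-style criterion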
Example #21
 def load_model(self, configs):
     """Loads a pytorch model from a directory string."""
     self.info, state_dict = self.load_file(configs['torch_model_dict'], 'pt')
     if 'smg_configs' in self.info.keys():
         model_dict = self.info['smg_configs']['processor']
     else:
         model_dict = self.info
     super().__init__(model_dict)
     self.configs = configs
     self.load_state_dict(state_dict)
     self.init_max_and_min_values()
     self.amplification = TorchUtils.get_tensor_from_list(self.info['data_info']['processor']['amplification'])
     self.init_noise_configs()
Example #22
 def get_model_output(self, model, results):
     self.processor.load_state_dict(model.copy())
     self.processor.eval()
     if self.configs['algorithm_configs']['processor'][
             'platform'] == 'simulation':
         inputs = TorchUtils.get_tensor_from_numpy(results['inputs'])
     else:
         inputs = results['inputs']
     model_output = self.processor.forward(inputs).detach().cpu().numpy()
     return generate_waveform(
         model_output[:, 0], self.configs['validation']['processor']
         ['waveform']['amplitude_lengths'], self.configs['validation']
         ['processor']['waveform']['slope_lengths'])
Example #23
 def load_file(self, data_dir, file_type):
     if file_type == 'pt':
         state_dict = torch.load(data_dir, map_location=TorchUtils.get_accelerator_type())
         info = state_dict['info']
         del state_dict['info']
         info['smg_configs'] = self._info_consistency_check(info['smg_configs'])
         if 'amplification' not in info['data_info']['processor'].keys():
             info['data_info']['processor']['amplification'] = 1
     elif file_type == 'json':
         state_dict = None
         # TODO: Implement loading from a json file
         raise NotImplementedError(f"Loading file from a json file in TorchModel has not been implemented yet. ")
         # info = model_info loaded from a json file
     return info, state_dict
Example #24
def get_simulation_architecture(configs):
    if configs['processor_type'] == 'nn':
        raise NotImplementedError(
            f"{configs['processor_type']} 'processor_type' nn configuration is not implemented yet. "
        )
    if configs['processor_type'] == 'surrogate':
        return get_processor_architecture(configs)
    elif configs['processor_type'] == 'dnpu':
        return get_dnpu_architecture(configs).to(
            device=TorchUtils.get_accelerator_type())
    else:
        raise NotImplementedError(
            f"{configs['processor_type']} 'processor_type' configuration is not recognised. The simulation type has to be defined as 'nn', 'surrogate' or 'dpnu'. "
        )
Example #25
def train_surrogate_model(configs, main_folder='training_data'):

    if 'seed' in configs:
        seed = configs['seed']
    else:
        seed = None

    seed = TorchUtils.init_seed(seed, deterministic=True)
    configs['seed'] = seed
    # Get training and validation data
    INPUTS, TARGETS, INPUTS_VAL, TARGETS_VAL, INFO = get_training_data(configs)

    # Train the model
    model_generator = get_algorithm(configs, is_main=True)
    data = model_generator.optimize(INPUTS, TARGETS, validation_data=(INPUTS_VAL, TARGETS_VAL), data_info=INFO)

    results_dir = os.path.join(model_generator.base_dir, main_folder)
    create_directory(results_dir)

    train_targets = INFO['processor']['amplification'] * TorchUtils.get_numpy_from_tensor(TARGETS[data.results['target_indices']][:len(INPUTS_VAL)])
    train_output = INFO['processor']['amplification'] * data.results['best_output_training']
    plot_all(train_targets, train_output, results_dir, name='TRAINING')

    val_targets = INFO['processor']['amplification'] * TorchUtils.get_numpy_from_tensor(TARGETS_VAL)
    val_output = INFO['processor']['amplification'] * data.results['best_output']
    plot_all(val_targets, val_output, results_dir, name='VALIDATION')

    training_profile = data.results['performance_history'] * (INFO['processor']['amplification']**2)

    plt.figure()
    plt.plot(training_profile)
    plt.title('Training profile')
    plt.legend(['training', 'validation'])
    plt.savefig(os.path.join(results_dir, 'training_profile'))

    model_generator.path_to_model = os.path.join(model_generator.base_dir, 'reproducibility', 'model.pt')
    return model_generator
Example #26
def plot_perceptron(results, save_dir=None, show_plot=False, name='train'):
    fig = plt.figure()
    plt.title(f"Accuracy: {results['accuracy_value']:.2f} %")
    plt.plot(TorchUtils.get_numpy_from_tensor(results['norm_inputs']),
             label='Norm. Waveform')
    plt.plot(TorchUtils.get_numpy_from_tensor(results['predictions']),
             '.',
             label='Predicted labels')
    plt.plot(TorchUtils.get_numpy_from_tensor(results['targets']),
             'g',
             label='Targets')
    plt.plot(np.arange(len(results['predictions'])),
             TorchUtils.get_numpy_from_tensor(
                 torch.ones_like(results['predictions']) *
                 results['threshold']),
             'k:',
             label='Threshold')
    plt.legend()
    if show_plot:
        plt.show()
    if save_dir is not None:
        plt.savefig(os.path.join(save_dir, name + '_accuracy.jpg'))
    plt.close()
    return fig
Example #27
def get_error(model_data_path, test_data_path, steps=1, batch_size=2048):

    inputs, targets, info = load_data(test_data_path, steps)
    error = np.zeros_like(targets)
    prediction = np.zeros_like(targets)
    model = SurrogateModel({'torch_model_dict': model_data_path})
    with torch.no_grad():
        i_start = 0
        i_end = batch_size
        threshold = (inputs.shape[0] - batch_size)
        while i_end <= inputs.shape[0]:
            prediction[i_start:i_end] = TorchUtils.get_numpy_from_tensor(
                model(TorchUtils.get_tensor_from_numpy(inputs[i_start:i_end])))
            error[i_start:
                  i_end] = prediction[i_start:i_end] - targets[i_start:i_end]
            i_start += batch_size
            i_end += batch_size
            if i_end > threshold and i_end < inputs.shape[0]:
                i_end = inputs.shape[0]
        main_path = os.path.dirname(os.path.dirname(model_data_path))
        path = create_directory(os.path.join(main_path, 'test_model'))
        mse = plot_all(targets, prediction, path, name='TEST')

    return mse
Example #28
def load_data(base_dir):
    import os
    import torch
    import pickle
    from bspyalgo.utils.io import load_configs

    model_dir = os.path.join(base_dir, 'reproducibility', 'model.pt')
    results_dir = os.path.join(base_dir, 'reproducibility', 'results.pickle')
    configs_dir = os.path.join(base_dir, 'reproducibility', 'configs.json')
    model = torch.load(model_dir,
                       map_location=TorchUtils.get_accelerator_type())
    results = pickle.load(open(results_dir, "rb"))
    configs = load_configs(configs_dir)
    configs['results_base_dir'] = base_dir
    return model, results, configs
Example #29
 def remove_duplicates(self, pool):
     '''
     Check the entire pool for duplicate genomes and replace each duplicate
     with genes re-drawn from a triangular distribution around the
     duplicated values.
     '''
     if torch.unique(pool, dim=0).shape != pool.shape:
         for i in range(self.genome_no):
             for j in range(self.genome_no):
                 if (j != i and torch.eq(pool[i], pool[j]).all()):
                     for k in range(0, len(self.gene_range)):
                         if self.gene_range[k][0] != self.gene_range[k][1]:
                             pool[j][k] = TorchUtils.get_tensor_from_numpy(
                                 np.random.triangular(
                                     self.gene_range[k][0], pool[j][k],
                                     self.gene_range[k][1]))
                         else:
                             pool[j][k] = self.gene_range[k][0]
     return pool
Example #30
    def search_solution(self, gap):
        self.init_dirs(gap)

        self.task.configs['ring_data']['gap'] = gap
        inputs, targets, mask = self.data_loader.generate_new_data(
            self.configs['algorithm_configs']['processor'], gap=gap)
        self.reset(inputs.shape[0])
        for run in range(self.configs['runs']):
            print(f'########### RUN {run} ################')
            seed = TorchUtils.init_seed(None, deterministic=True)
            results = self.task.run_task(inputs, targets, mask)
            results['seed'] = seed
            self.update_search_stats(results, run)
            if self.best_run is None or results[
                    'best_performance'] < self.best_run['best_performance']:
                self.update_best_run(results, run)
                self.task.plot_results(results)

        self.close_search()