Example #1
0
 def reset(self):
     """Reset every constituent node and re-create the batch-norm layers.

     Re-creating the (non-affine) BatchNorm1d modules also discards
     their running statistics, so the network restarts from a clean
     normalisation state.
     """
     # TODO(review): original carried a "needs to be checked" note.
     for node in (self.input_node1, self.input_node2,
                  self.hidden_node1, self.hidden_node2,
                  self.output_node):
         node.reset()
     self.bn1 = TorchUtils.format_tensor(nn.BatchNorm1d(2, affine=False))
     self.bn2 = TorchUtils.format_tensor(nn.BatchNorm1d(2, affine=False))
Example #2
0
 def __init__(self, configs):
     """Initialise the processor from a configuration dictionary.

     Sets up the underlying model, output clipping, two non-affine
     batch-norm layers, current/voltage conversion variables and the
     number of control voltages, then binds the forward implementation
     (debug or plain) once at construction time.
     """
     super().__init__(configs)
     self.init_model(configs)
     self.init_clipping_values(configs['waveform']['output_clipping_value'])
     # Non-affine batch normalisation between DNPU layers.
     self.bn1 = TorchUtils.format_tensor(nn.BatchNorm1d(2, affine=False))
     self.bn2 = TorchUtils.format_tensor(nn.BatchNorm1d(2, affine=False))
     self.init_current_to_voltage_conversion_variables()
     self.init_control_voltage_no()
     # Choose the forward pass once instead of branching per call.
     self.forward = (self.forward_with_debug
                     if self.configs['debug'] else self.forward_)
Example #3
0
 def evaluate_node(self, x, x_indices, controls, c_indices):
     """Evaluate the node on data `x` merged with the control voltages.

     The controls are broadcast over the batch and scattered into the
     columns given by `c_indices`; `x` fills the columns in `x_indices`.
     Returns the node output scaled by its amplification factor.
     """
     batch_size = x.size()[0]
     broadcast_controls = controls.expand(batch_size, -1)
     # Combined input buffer: data columns plus control columns.
     merged = TorchUtils.format_tensor(
         torch.empty((batch_size, x.size()[1] + controls.size()[1])))
     merged[:, x_indices] = x
     merged[:, c_indices] = broadcast_controls
     return self.node(merged) * self.node.amplification
Example #4
0
    def mutation(self, pool):
        '''
        Mutate all genes but the first partition[0] with a triangular
        distribution in gene range with mode=gene to be mutated.

        The first partition[0] genomes are left unchanged; in the rest
        of the pool each gene is replaced with probability
        self.mutation_rate by a value drawn from a triangular
        distribution over its gene range whose mode is the current
        gene value. The pool tensor is mutated in place and returned.
        '''
        # Bernoulli selection mask over the non-elite slice of the pool:
        # 1 -> gene is replaced by its mutated candidate, 0 -> kept.
        mask = TorchUtils.get_tensor_from_numpy(
            np.random.choice([0, 1],
                             size=pool[self.partition[0]:].shape,
                             p=[1 - self.mutation_rate, self.mutation_rate]))
        # Candidate replacement values, built gene-by-gene in numpy.
        mutated_pool = np.zeros(
            (self.genome_no - self.partition[0], len(self.gene_range)))
        gene_range = TorchUtils.get_numpy_from_tensor(self.gene_range)
        for i in range(0, len(gene_range)):
            if gene_range[i][0] == gene_range[i][1]:
                # Degenerate range: the gene can only take one value.
                mutated_pool[:, i] = gene_range[i][0] * np.ones(
                    mutated_pool[:, i].shape)
            else:
                # Triangular distribution over [low, high] with the
                # current gene values as the per-genome mode.
                mutated_pool[:, i] = np.random.triangular(
                    gene_range[i][0],
                    TorchUtils.get_numpy_from_tensor(pool[self.partition[0]:,
                                                          i]),
                    gene_range[i][1])

        mutated_pool = TorchUtils.get_tensor_from_numpy(mutated_pool)
        # Blend in place: keep original genes where mask==0, take the
        # mutated candidates where mask==1.
        pool[self.partition[0]:] = ((TorchUtils.format_tensor(
            torch.ones(pool[self.partition[0]:].shape)) - mask) *
                                    pool[self.partition[0]:] +
                                    mask * mutated_pool)
        return pool
Example #5
0
def evaluate_criterion(outputs_pool, target, criterion, clipvalue=(-np.inf, np.inf)):
    """Score every genome output in the pool with `criterion`.

    Parameters
    ----------
    outputs_pool : sequence of tensors, one output per genome.
    target : target tensor passed to the criterion.
    criterion : callable ``(output, target) -> scalar``; must also
        accept ``criterion(None, None, default_value=True)`` to produce
        a penalty value for clipped outputs.
    clipvalue : (low, high) pair; any output value outside this range
        marks the whole output as clipped. Default changed from a
        mutable list to an immutable tuple (mutable-default-argument
        hazard); indexing behaviour is identical for callers.

    Returns
    -------
    torch.Tensor of shape (len(outputs_pool),), one score per genome.
    """
    genome_no = len(outputs_pool)
    criterion_pool = TorchUtils.format_tensor(torch.zeros(genome_no))
    for j in range(genome_no):
        output = outputs_pool[j]
        if torch.any(output < clipvalue[0]) or torch.any(output > clipvalue[1]):
            # Clipped output: assign the criterion's penalty value
            # instead of comparing against the target.
            result = criterion(None, None, default_value=True)
        else:
            result = criterion(output, target)
        criterion_pool[j] = result
    return criterion_pool
Example #6
0
def evaluate_population(inputs, pool, model):
    '''Optimisation function of the platform.

    Programs the model with each genome's control voltages in turn and
    records the model output for `inputs`, returning a tensor of shape
    (len(pool), len(inputs), 1) with one output trace per genome.
    (Removed a block of long-dead commented-out code from an earlier
    numpy-based implementation.)
    '''
    output_popul = TorchUtils.format_tensor(
        torch.zeros((len(pool),) + (len(inputs), 1)))
    for j in range(len(pool)):
        # Program the processor with this genome, then evaluate it.
        model.set_control_voltages(pool[j])
        output_popul[j] = model(inputs)
    return output_popul
Example #7
0
 def find_gate_with_torch(self, encoded_inputs, encoded_gate, mask):
     """Optimise towards `encoded_gate` and collect result metrics.

     Runs self.optimize, then augments the returned results dict with:
     'accuracy' (from the perceptron helper on the masked best output
     vs. masked gate targets), 'encoded_gate' (moved to CPU) and
     'correlation' (corr_coeff of masked best output vs. masked
     targets). Returns the augmented dict.
     """
     encoded_gate = TorchUtils.format_tensor(encoded_gate)
     excel_results = self.optimize(encoded_inputs, encoded_gate, mask)
     # Accuracy is computed only over the masked (valid) samples.
     excel_results['accuracy'], _, _ = perceptron(
         excel_results['best_output'][excel_results['mask']],
         TorchUtils.get_numpy_from_tensor(
             encoded_gate[excel_results['mask']]))
     excel_results['encoded_gate'] = encoded_gate.cpu()
     excel_results['correlation'] = corr_coeff(
         excel_results['best_output'][excel_results['mask']].T,
         excel_results['targets'].cpu()[excel_results['mask']].T)
     return excel_results
Example #8
0
def train_perceptron(dataloaders,
                     node=None,
                     lrn_rate=0.0007,
                     mini_batch=8,
                     epochs=100,
                     validation=False,
                     verbose=True):
    """Train a perceptron `node` with BCE loss and the Adam optimizer.

    dataloaders: pair where [0] yields (inputs, targets) training
    batches and [1].dataset[:] provides the full validation split.
    Returns (best_accuracy, predicted_class, decision_boundary, node).

    NOTE(review): `mini_batch` and `validation` are not used in this
    body — confirm whether callers rely on them elsewhere.
    NOTE(review): with epochs=0 the loop never runs and
    `predicted_class`/`decision_boundary` would be unbound — confirm
    epochs is always >= 1.
    """
    # Initialise key elements of the trainer
    node = TorchUtils.format_tensor(node)
    loss = torch.nn.BCELoss()
    # NOTE(review): betas[0]=0.999 differs from Adam's conventional
    # 0.9 — confirm this is intentional and not a typo.
    optimizer = torch.optim.Adam(node.parameters(),
                                 lr=lrn_rate,
                                 betas=(0.999, 0.999))
    best_accuracy = -1  # guarantees the first evaluation is kept
    looper = trange(epochs, desc='Calculating accuracy')

    for epoch in looper:
        for inputs, targets in dataloaders[0]:
            optimizer.zero_grad()
            predictions = node(inputs)
            cost = loss(predictions, targets)
            cost.backward()
            optimizer.step()
        # Validation pass on the full held-out split (no gradients).
        with torch.no_grad():
            inputs, targets = dataloaders[1].dataset[:]
            accuracy, predicted_class = evaluate_accuracy(
                inputs, targets, node)
            if accuracy > best_accuracy:
                best_accuracy = accuracy
                decision_boundary = get_decision_boundary(node)
                # Early stop once perfect accuracy is reached.
                if best_accuracy >= 100.:
                    looper.set_description(
                        f'Reached 100/% accuracy. Stopping at Epoch: {epoch+1}  Accuracy {best_accuracy}, loss: {cost.item()}'
                    )
                    break
        if verbose:
            looper.set_description(
                f'Epoch: {epoch+1}  Accuracy {best_accuracy}, loss: {cost.item()}'
            )

    return best_accuracy, predicted_class, decision_boundary, node
Example #9
0
 def forward_amplification_and_noise(self, x):
     """Amplified forward pass with additive Gaussian noise.

     The noise is standard-normal, shaped like the output, and scaled
     by self.error.
     """
     clean_output = self.forward_amplification(x)
     return clean_output + self.error * TorchUtils.format_tensor(
         torch.randn(clean_output.shape))
Example #10
0
    # NOTE(review): hard-coded absolute path into a user's home
    # directory — this only works on the original author's machine.
    NODE_CONFIGS = load_configs(
        '/home/hruiz/Documents/PROJECTS/DARWIN/Code/packages/brainspy/brainspy-processors/configs/configs_nn_model.json'
    )
    nr_nodes = 5
    # Every node reads the same three input indices.
    input_list = [[0, 3, 4]] * nr_nodes
    data_dim = 20
    # Linear layer maps the raw data vector onto one 3-value input set
    # per node (len(input_list[0]) * nr_nodes outputs in total).
    linear_layer = nn.Linear(data_dim,
                             len(input_list[0]) * nr_nodes).to(
                                 device=TorchUtils.get_accelerator_type())
    dnpu_layer = DNPU_Layer(input_list, NODE_CONFIGS)
    model = nn.Sequential(linear_layer, dnpu_layer)

    # Generate data
    nr_train_samples = 50
    nr_val_samples = 10
    x = TorchUtils.format_tensor(
        torch.rand(nr_train_samples + nr_val_samples, data_dim))
    # Constant target of 5.0 for every node output.
    y = TorchUtils.format_tensor(
        5. * torch.ones(nr_train_samples + nr_val_samples, nr_nodes))

    # Train/validation split.
    inp_train = x[:nr_train_samples]
    t_train = y[:nr_train_samples]
    inp_val = x[nr_train_samples:]
    t_val = y[nr_train_samples:]

    # Snapshot parameters before training: frozen (requires_grad=False)
    # node parameters vs. learnable ones, for later comparison.
    node_params_start = [
        p.clone().cpu().detach() for p in model.parameters()
        if not p.requires_grad
    ]
    learnable_params_start = [
        p.clone().cpu().detach() for p in model.parameters() if p.requires_grad
    ]
Example #11
0
 def sample_controls(self, low, high):
     """Draw one uniform random control sample within [low, high).

     Produces a (1, len(low)) tensor by affinely mapping uniform
     samples from the unit interval onto each control's range.
     """
     unit_samples = TorchUtils.format_tensor(torch.rand(1, len(low)))
     return low + (high - low) * unit_samples