Example #1
    def set_parameters(self, ensembles):
        # set the network parameters to the ensemble mean
        conv_params = ensembles.mean(0)
        t = time.time()
        ds = self._shape_parameter_to_conv_net(conv_params)
        self.timings['shape_parameters_ens'].append(time.time() - t)
        t = time.time()
        self.conv_net.set_parameter(ds)
        self.timings['set_parameters_cnn'].append(time.time() - t)
        print('---- Train -----')
        print('Generation ', self.generation)
        generation_change = 1
        with torch.no_grad():
            inputs = self.inputs.to(device)
            labels = self.labels.to(device)

            if self.generation % generation_change == 0:
                self.inputs, self.labels = self.dataiter_mnist()
                self.inputs = self.inputs.to(device)
                self.labels = self.labels.to(device)
                print('New MNIST set used at generation {}'.format(
                    self.generation))

            # forward pass on the current training batch; act1 and act2 are
            # the network's intermediate layer activations
            outputs, act1, act2 = self.conv_net(inputs)
            conv_loss = self.criterion(outputs, labels).item()
            train_cost = _calculate_cost(_encode_targets(labels, 10),
                                         F.softmax(outputs, dim=1), 'MSE')
            train_acc = score(labels, torch.argmax(F.softmax(outputs, dim=1),
                                                   1))
            print('Cost: ', train_cost)
            print('Accuracy: ', train_acc)
            print('Loss:', conv_loss)

            print('---- Test -----')
            test_output, act1, act2 = self.conv_net(self.test_input)
            test_loss = self.criterion(test_output, self.test_label).item()
            test_acc = score(self.test_label, torch.argmax(test_output, 1))
            test_cost = _calculate_cost(_encode_targets(self.test_label, 10),
                                        test_output, 'MSE')
            print('Test accuracy', test_acc)
            print('Test loss: {}'.format(test_loss))
            print('-----------------')
            conv_params = []
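            # evaluate each ensemble member on the current training batch
            # and collect its (transposed) outputs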
            for idx, c in enumerate(ensembles):
                t = time.time()
                ds = self._shape_parameter_to_conv_net(c)
                self.timings['shape_parameters'].append(time.time() - t)
                t = time.time()
                self.conv_net.set_parameter(ds)
                self.timings['set_parameters'].append(time.time() - t)
                params, _, _ = self.conv_net(inputs)
                conv_params.append(params.t())
            conv_params = torch.stack(conv_params)
            outs = {
                'conv_params': conv_params,
                'conv_loss': float(conv_loss),
                'input': self.inputs.squeeze(),
                'targets': self.labels
            }
        return outs
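All three examples call the helpers `_encode_targets`, `_calculate_cost`, and `score`, which are defined elsewhere in the project. As a minimal sketch, assuming `_encode_targets` one-hot encodes integer labels, `_calculate_cost` with 'MSE' is a mean squared error, and `score` is classification accuracy, they might look roughly like this (the project's actual implementations may differ, for instance in how they handle torch tensors versus numpy arrays):

import numpy as np
import torch


def _to_numpy(x):
    # The examples above pass in a mix of torch tensors and numpy arrays.
    if isinstance(x, torch.Tensor):
        return x.detach().cpu().numpy()
    return np.asarray(x)


def _encode_targets(targets, n_classes):
    # One-hot encode a vector of integer class labels.
    targets = _to_numpy(targets).astype(int)
    encoded = np.zeros((len(targets), n_classes))
    encoded[np.arange(len(targets)), targets] = 1.0
    return encoded


def _calculate_cost(targets, outputs, cost_type='MSE'):
    # Mean squared error between one-hot targets and the network outputs.
    if cost_type != 'MSE':
        raise NotImplementedError('only the MSE variant is sketched here')
    return float(np.mean((_to_numpy(outputs) - _to_numpy(targets)) ** 2))


def score(labels, predictions):
    # Fraction of correctly predicted class indices (accuracy).
    return float(np.mean(_to_numpy(labels) == _to_numpy(predictions)))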
Example #2
    def set_parameters(self, ensembles):
        # set the network parameters to the ensemble mean
        conv_params = ensembles.mean(0)
        ds = self._shape_parameter_to_conv_net(conv_params)
        self.conv_net.set_parameter(ds)
        print('---- Train -----')
        print('Iteration ', self.generation)
        generation_change = config['repetitions']
        with torch.no_grad():
            inputs = self.inputs.to(device)
            labels = self.labels.to(device)

            if self.generation % generation_change == 0:
                self.inputs, self.labels = self.dataiter_mnist()
                self.inputs = self.inputs.to(device)
                self.labels = self.labels.to(device)
                print('New MNIST set used at generation {}'.format(
                    self.generation))
                # append the outputs
                self.targets.append(self.labels.cpu().numpy())
            # get network predictions
            outputs, act1, act2 = self.conv_net(inputs)
            act3 = outputs
            # save all important calculations
            self.act_func['act1'] = act1.cpu().numpy()
            self.act_func['act2'] = act2.cpu().numpy()
            self.act_func['act3'] = act3.cpu().numpy()
            self.act_func['act1_mean'].append(act1.mean().item())
            self.act_func['act2_mean'].append(act2.mean().item())
            self.act_func['act3_mean'].append(act3.mean().item())
            self.act_func['act1_std'].append(act1.std().item())
            self.act_func['act2_std'].append(act2.std().item())
            self.act_func['act3_std'].append(act3.std().item())
            self.output_activity_train.append(
                F.softmax(outputs, dim=1).cpu().numpy())
            conv_loss = self.criterion(outputs, labels).item()
            self.train_loss.append(conv_loss)
            train_cost = _calculate_cost(
                _encode_targets(labels, 10),
                F.softmax(outputs, dim=1).cpu().numpy(), 'MSE')
            train_acc = score(
                labels.cpu().numpy(),
                np.argmax(F.softmax(outputs, dim=1).cpu().numpy(), 1))
            print('Cost: ', train_cost)
            print('Accuracy: ', train_acc)
            print('Loss:', conv_loss)
            self.train_cost.append(train_cost)
            self.train_acc.append(train_acc)
            self.train_pred.append(
                np.argmax(F.softmax(outputs, dim=1).cpu().numpy(), 1))

            print('---- Test -----')
            test_output, act1, act2 = self.conv_net(self.test_input)
            test_loss = self.criterion(test_output, self.test_label).item()
            self.test_act_func['act1'] = act1.cpu().numpy()
            self.test_act_func['act2'] = act2.cpu().numpy()
            self.test_act_func['act1_mean'].append(act1.mean().item())
            self.test_act_func['act2_mean'].append(act2.mean().item())
            self.test_act_func['act3_mean'].append(test_output.mean().item())
            self.test_act_func['act1_std'].append(act1.std().item())
            self.test_act_func['act2_std'].append(act2.std().item())
            self.test_act_func['act3_std'].append(test_output.std().item())
            test_output = test_output.cpu().numpy()
            self.test_act_func['act3'] = test_output
            test_acc = score(self.test_label.cpu().numpy(),
                             np.argmax(test_output, 1))
            test_cost = _calculate_cost(
                _encode_targets(self.test_label.cpu().numpy(), 10),
                test_output, 'MSE')
            print('Test accuracy', test_acc)
            print('Test loss: {}'.format(test_loss))
            self.test_acc.append(test_acc)
            self.test_pred.append(np.argmax(test_output, 1))
            self.test_cost.append(test_cost)
            self.output_activity_test.append(test_output)
            self.test_loss.append(test_loss)
            print('-----------------')
            conv_params = []
            for idx, c in enumerate(ensembles):
                ds = self._shape_parameter_to_conv_net(c)
                self.conv_net.set_parameter(ds)
                params, _, _ = self.conv_net(inputs)
                conv_params.append(params.t().cpu().numpy())

            outs = {
                'conv_params': torch.tensor(conv_params).to(device),
                'conv_loss': float(conv_loss),
                'input': self.inputs.squeeze(),
                'targets': self.labels
            }
        return outs
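The examples also rely on `self._shape_parameter_to_conv_net` and `self.conv_net.set_parameter` to load a flat ensemble parameter vector into the convolutional network; neither method is shown on this page. Below is a hypothetical sketch of that mechanism, written as standalone functions over an arbitrary `torch.nn.Module` rather than as the class methods used above:

import torch
import torch.nn as nn


def shape_parameter_to_conv_net(model: nn.Module, flat_params: torch.Tensor):
    # Split a flat parameter vector into tensors shaped like the model's
    # parameters, keyed by parameter name.
    shaped = {}
    offset = 0
    for name, param in model.named_parameters():
        numel = param.numel()
        shaped[name] = flat_params[offset:offset + numel].view_as(param)
        offset += numel
    return shaped


def set_parameter(model: nn.Module, shaped_params: dict):
    # Copy the reshaped tensors into the model's parameters in place.
    with torch.no_grad():
        for name, param in model.named_parameters():
            param.copy_(shaped_params[name])


# Example usage, mirroring the pattern in set_parameters above:
#   mean_params = torch.as_tensor(ensembles.mean(0), dtype=torch.float32)
#   set_parameter(conv_net, shape_parameter_to_conv_net(conv_net, mean_params))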
Example #3
    def set_parameters(self, ensembles):
        # set the network parameters to the ensemble mean
        conv_params = ensembles.mean(0)
        ds = self._shape_parameter_to_conv_net(conv_params)
        self.conv_net.set_parameter(ds)
        print('---- Train -----')
        print('Generation ', self.generation)
        generation_change = 10
        with torch.no_grad():
            inputs = self.inputs.to(device)
            labels = self.labels.to(device)

            if self.generation % generation_change == 0:
                self.inputs, self.labels = self.dataiter_mnist()
                self.inputs = self.inputs.to(device)
                self.labels = self.labels.to(device)
                print('New MNIST set used at generation {}'.format(
                    self.generation))
                # append the outputs
                self.targets.append(self.labels.cpu().numpy())

            outputs = self.conv_net(inputs)
            self.output_activity_train.append(outputs.cpu().numpy())
            conv_loss = self.criterion(outputs, labels).item()
            train_cost = _calculate_cost(_encode_targets(labels, 10),
                                         outputs.cpu().numpy(), 'MSE')
            train_acc = score(labels.cpu().numpy(),
                              np.argmax(outputs.cpu().numpy(), 1))
            print('Cost: ', train_cost)
            print('Accuracy: ', train_acc)
            print('Loss:', conv_loss)
            self.train_cost.append(train_cost)
            self.train_acc.append(train_acc)
            self.train_pred.append(np.argmax(outputs.cpu().numpy(), 1))

            print('---- Test -----')
            test_output = self.conv_net(self.test_input)
            test_output = test_output.cpu().numpy()
            test_acc = score(self.test_label.cpu().numpy(),
                             np.argmax(test_output, 1))
            test_cost = _calculate_cost(
                _encode_targets(self.test_label.cpu().numpy(), 10),
                test_output, 'MSE')
            print('Test accuracy', test_acc)
            self.test_acc.append(test_acc)
            self.test_pred.append(np.argmax(test_output, 1))
            self.test_cost.append(test_cost)
            self.output_activity_test.append(test_output)
            print('-----------------')
            conv_params = []
            for c in ensembles:
                ds = self._shape_parameter_to_conv_net(c)
                self.conv_net.set_parameter(ds)
                conv_params.append(self.conv_net(inputs).t().cpu().numpy())

            outs = {
                'conv_params': torch.tensor(conv_params).to(device),
                'conv_loss': float(conv_loss),
                'input': self.inputs.squeeze(),
                'targets': self.labels
            }
            # if self.generation % generation_change == 0:
            # min_ens = ensembles.min()
            # max_ens = ensembles.max()
            # len_ens = len(ensembles.mean(0))
            # ens_mean = ensembles.mean(0)
            # ens_std = ensembles.std(0)
            # for _ in range(100):
            #   conv_params.append(
            # ensembles.mean(0) + np.random.uniform(min_ens, max_ens,
            #                                       len_ens))
            #     ens_mean + np.random.normal(ens_mean, ens_std,
            #                               size=ensembles.shape))
        return outs
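The commented-out block at the end of Example #3 hints at augmenting the returned ensemble outputs with parameter vectors sampled around the ensemble mean. A standalone toy illustration of that sampling idea, with made-up ensemble dimensions (the disabled code sketches both a uniform and a normal variant; only the normal one is shown here):

import numpy as np

rng = np.random.default_rng(0)
# Toy stand-in for the parameter ensemble: 32 members, 1000 parameters each.
ensembles = rng.normal(0.0, 0.1, size=(32, 1000))

ens_mean = ensembles.mean(0)
ens_std = ensembles.std(0)

# Draw 100 extra parameter vectors around the ensemble mean, perturbed
# per dimension by the ensemble standard deviation.
extra_members = np.stack([ens_mean + rng.normal(0.0, ens_std)
                          for _ in range(100)])
print(extra_members.shape)  # (100, 1000)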