Code Example #1
File: test_multilayer.py Project: ywu40/dnner
    def test_Normal_Linear_Linear(self):
        layers = [Normal(0, 1), Linear(0), Linear(0.01)]
        entropy_expected = 0.244437
        entropy = dnner.compute_entropy(layers=layers,
                                        weights=self.weights,
                                        verbose=0)
        self.assertAlmostEqual(entropy, entropy_expected, places=6)
Code Example #2
File: test_multilayer.py Project: ywu40/dnner
    def test_save_fixed_points(self):
        layers = [Normal(0, 1), Linear(0), Linear(0.01)]

        # Run 10 iterations without saving fixed points
        entropy1 = dnner.compute_entropy(layers=layers,
                                         weights=self.weights,
                                         v0=[(1, 1)],
                                         max_iter=10)

        # Run 5+5 iterations, saving the fixed points after the first 5
        _, extra = dnner.compute_entropy(layers=layers,
                                         weights=self.weights,
                                         return_extra=True,
                                         v0=[(1, 1)],
                                         max_iter=5)

        entropy2, _ = dnner.compute_entropy(layers=layers,
                                            weights=self.weights,
                                            return_extra=True,
                                            start_at=extra,
                                            max_iter=5)

        self.assertEqual(entropy1, entropy2)
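
Both tests above reference a self.weights fixture defined elsewhere in test_multilayer.py. A minimal sketch of what such a fixture could look like, assuming the (aspect ratio, eigenvalues) convention of Code Example #3 below; the class name, matrix sizes, and seed are illustrative, not the project's actual values (the single-layer tests in Code Examples #4 to #6 would use a single pair):

import unittest

import numpy as np


class TestMultiLayer(unittest.TestCase):  # hypothetical class name
    def setUp(self):
        # One (alpha, eigvals) pair per Linear layer: alpha is the aspect
        # ratio n_rows / n_cols, eigvals the spectrum of W^T W
        np.random.seed(42)
        w1 = np.random.randn(100, 100) / np.sqrt(100)
        w2 = np.random.randn(100, 100) / np.sqrt(100)
        self.weights = [(1.0, np.linalg.eigvalsh(w1.T.dot(w1))),
                        (1.0, np.linalg.eigvalsh(w2.T.dot(w2)))]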
Code Example #3
File: normal_hardtanh_relu.py Project: ywu40/dnner
import numpy as np
from time import time

import dnner
from dnner.priors import Normal                # module paths assumed
from dnner.activations import HardTanh, ReLU  # module paths assumed

# Settings (the originals were cut from this snippet; values are placeholders)
alpha1 = alpha2 = 1.0  # layer aspect ratios
n_cols1 = 1000         # input dimension
var_noise = 0.01       # noise variance of the output channel

n_cols2 = n_rows1 = int(np.ceil(alpha1 * n_cols1))
n_rows2 = int(np.ceil(alpha2 * n_cols2))

# Generate random weights
np.random.seed(42)  # set random seed
w1 = np.random.randn(n_rows1, n_cols1) / np.sqrt(n_cols1)
w2 = np.random.randn(n_rows2, n_cols2) / np.sqrt(n_cols2)

# Pre-compute spectra
start_time = time()
eigvals1 = np.linalg.eigvalsh(w1.T.dot(w1))
eigvals2 = np.linalg.eigvalsh(w2.T.dot(w2))
print("Computed spectra in %gs" % (time() - start_time))

weights = [(alpha1, eigvals1), (alpha2, eigvals2)]  # from x to y
layers = [Normal(0, 1), HardTanh(0), ReLU(var_noise)]

# Compute entropy using both our scheme and ML-VAMP
print("Computing entropy with fixed-point iteration...")
start_time = time()
e1, x1 = dnner.compute_entropy(layers=layers, weights=weights,
        return_extra=True, max_iter=100, tol=1e-7, verbose=0,
        v0=1e-10, use_vamp=False)
elapsed1 = time() - start_time

fp1 = np.hstack(x1["fixed_points"])
print("Entropy = %g (ran in %gs)" % (e1, elapsed1))

print("Computing entropy with the ML-VAMP SE...")
start_time = time()
# (snippet truncated at the source; completed to mirror the first call,
# assuming use_vamp=True selects the ML-VAMP state evolution)
e2, x2 = dnner.compute_entropy(layers=layers, weights=weights,
        return_extra=True, max_iter=100, tol=1e-7, verbose=0,
        v0=1e-10, use_vamp=True)
elapsed2 = time() - start_time

fp2 = np.hstack(x2["fixed_points"])
print("Entropy = %g (ran in %gs)" % (e2, elapsed2))
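
Since both runs share the same tolerance, the fixed-point iteration and the ML-VAMP state evolution are presumably expected to agree on the entropy and on the fixed points themselves, which is why the script keeps the stacked iterates (fp1, fp2) alongside the entropy values.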
Code Example #4
    def test_Normal_ReLU(self):
        layers = [Normal(0, 1), ReLU(0.01)]
        mi_expected = 2.021801
        mi = dnner.compute_mi(layers=layers, weights=self.weights, verbose=0)
        self.assertAlmostEqual(mi, mi_expected, places=6)
Code Example #5
    def test_Normal_Probit(self):
        layers = [Normal(0, 1), Probit(0.01)]
        mi_expected = 0.897281
        mi = dnner.compute_mi(layers=layers, weights=self.weights, verbose=0)
        self.assertAlmostEqual(mi, mi_expected, places=6)
Code Example #6
    def test_Normal_Linear(self):
        layers = [Normal(0, 1), Linear(0.01)]
        mi_expected = 2.501166  # could also compare to the exact value
        mi = dnner.compute_mi(layers=layers, weights=self.weights, verbose=0)
        self.assertAlmostEqual(mi, mi_expected, places=6)
Code Example #7
    def match_mi_layers(self, datamodel, trainee):
        '''
        Returns the list [prior, activation1, activation2, ...] built from
        the prior, the generative model (if present), and the trainee, for
        layers up to and including the "up_to"-th activation layer of the
        trainee (i.e. counting layers the usual way, not necessarily as
        they are implemented in Keras).
        '''

        self.mi_layers = []

        # taking care of the prior
        if datamodel.prior[0] == Prior.NORMAL:
            self.mi_layers.append(Normal(datamodel.prior[1], datamodel.prior[2]))
        elif datamodel.prior[0] == Prior.BERNOULLI:
            self.mi_layers.append(Bimodal(datamodel.prior[1]))
        elif datamodel.prior[0] == Prior.GB:
            self.mi_layers.append(SpikeSlab(datamodel.prior[1],
                                        datamodel.prior[2],
                                        datamodel.prior[3]))
        else:
            raise ValueError("Prior not available in dnner")


        # taking care of generative/datamodel
        if isinstance(datamodel, Decoder):
            for layer in range(len(datamodel.activations)):
                if datamodel.activations[layer] == 'relu':
                    self.mi_layers.append(ReLU(datamodel.layer_noises[layer]))
                    # self.mi_layers.append(LeakyReLU(datamodel.layer_noises[layer], 0))
                elif datamodel.activations[layer] == 'linear':
                    self.mi_layers.append(Linear(datamodel.layer_noises[layer]))
                elif datamodel.activations[layer] == 'probit':
                    self.mi_layers.append(Probit(datamodel.layer_noises[layer]))
                else:
                    raise ValueError("interface not available in dnner")

            # Keras stores dense kernels as (in, out), i.e. the transpose of
            # the math convention, so shape[1]/shape[0] is the aspect ratio
            # rows/cols and weight.dot(weight.T) gives the spectrum of W^T W
            self.dnner_weights = [(weight.shape[1]/weight.shape[0],
                                np.linalg.eigvalsh(weight.dot(weight.T))) for
                                weight in datamodel.weights]

        # taking care of student/encoder (inter_mi_noise)
        for layer in range(self.up_to - 1):
            if trainee.activations[layer] == 'relu':
                self.mi_layers.append(ReLU(self.inter_mi_noise))
                # self.mi_layers.append(LeakyReLU(datamodel.layer_noises[layer], 0))
            elif trainee.activations[layer] == 'linear':
                self.mi_layers.append(Linear(self.inter_mi_noise))
            elif trainee.activations[layer] == 'probit':
                self.mi_layers.append(Probit(self.inter_mi_noise))
            elif trainee.activations[layer] == 'hardtanh':
                self.mi_layers.append(HardTanh(self.inter_mi_noise))
            else:
                raise ValueError("Activation {:d} not available in dnner")

        # passing mi_noise to last layer
        if trainee.activations[self.up_to - 1] == 'relu':
            self.mi_layers.append(ReLU(self.mi_noise))
            # self.mi_layers.append(LeakyReLU(self.mi_noise, 0))
        elif trainee.activations[self.up_to - 1] == 'linear':
            self.mi_layers.append(Linear(self.mi_noise))
        elif trainee.activations[self.up_to - 1] == 'probit':
            self.mi_layers.append(Probit(self.mi_noise))
        elif trainee.activations[self.up_to - 1] == 'hardtanh':
            self.mi_layers.append(HardTanh(self.mi_noise))
        else:
            raise ValueError("Activation not available in dnner")

        if len(self.mi_layers) < 2:
            raise ValueError("Issue matching MI layers")