示例#1
0
    def test_allpenelties_gradients(self):
        """Verify the gradients of every combined contractive+sparse+slow
        auto encoder configuration against finite differences.

        Delegates the exhaustive sweep over activation functions and
        penalty combinations to ``self.check_all``.
        """
        sys.stdout.write('Auto encoder -> Performing finite differences check of all possible contractive+sparse+slow auto encoder models ...')
        sys.stdout.flush()
        # Two small batches of bars-and-stripes patterns: one for the
        # current step and one acting as the "next" step for the slowness term.
        current_batch = generate_bars_and_stripes_complete(5)[0:2]
        following_batch = generate_bars_and_stripes_complete(5)[2:4]
        penalty_weight = 0.1
        self.check_all(data=current_batch,
                       epsilon=0.0001,
                       contractive=penalty_weight,
                       sparseness=penalty_weight,
                       desired_sparseness=penalty_weight,
                       data_next=following_batch,
                       slowness_penalty=penalty_weight)
        print(' successfully passed!')
        sys.stdout.flush()
class TestSampler(unittest.TestCase):
    """Sanity checks for the RBM samplers on a small, hand-tuned 4x4
    BinaryBinaryRBM whose stationary distribution is known.

    The model concentrates its mass on six patterns: the two uniform
    patterns (probability 1/4 each) and the four bar/stripe patterns
    (probability 1/8 each). Every sampler under test must reproduce
    these marginals within ``epsilon``.
    """
    # Bars & stripes data with the two uniform patterns duplicated so the
    # empirical data distribution matches the model's target distribution.
    bbrbmData = generate_bars_and_stripes_complete(2)
    bbrbmData = numx.vstack((bbrbmData[0], bbrbmData, bbrbmData[5]))
    # Hand-tuned, near-deterministic parameters of the known RBM.
    bbrbmw = numx.array([[0.12179488, 2.95950177, 0.33513356, 35.05380642],
                         [0.20318085, -28.62372894, 26.52611278, 28.41793445],
                         [-0.19105386, -28.58530584, -26.52747507, 28.78447320],
                         [0.08953740, -59.82556859, -0.06665933, -27.71723459]])
    bbrbmbv = numx.array([[-19.24399659, -13.26258696, 13.25909850, 43.74408543]])
    bbrbmbh = numx.array([[-0.11155958, 57.02097584, -0.13331758, -32.25991501]])
    bbrbm = Model.BinaryBinaryRBM(4, 4, bbrbmData, bbrbmw, bbrbmbv, bbrbmbh, 0.0, 0.0)
    epsilon = 0.05          # tolerance on the estimated marginals
    num_samples = 2000.0    # kept as float so the probability divisions are exact

    @classmethod
    def execute_sampler(cls, sampler, num_samples):
        """Draw ``num_samples`` single visible states from ``sampler`` and
        return the empirical probabilities of the six dominant patterns.

        :param sampler: Any pydeep RBM sampler instance.
        :param num_samples: Number of samples to draw.
        :return: List ``[pAllZero, pAllOne, pStripe1, pStripe2, pBar1,
                 pBar2, sumOfTheSix]``.
        """
        # Histogram over all 16 visible states, keyed by numpy's (legacy)
        # string rendering of a 4-vector. Expected counts in comments.
        dictC = {'[ 0.  0.  0.  0.]': 0,  # 2
                 '[ 1.  1.  1.  1.]': 0,  # 2
                 '[ 0.  0.  1.  1.]': 0,  # 1
                 '[ 1.  1.  0.  0.]': 0,  # 1
                 '[ 1.  0.  1.  0.]': 0,  # 1
                 '[ 0.  1.  0.  1.]': 0,  # 1
                 '[ 0.  1.  1.  0.]': 0,
                 '[ 1.  0.  0.  1.]': 0,
                 '[ 0.  0.  0.  1.]': 0,
                 '[ 0.  0.  1.  0.]': 0,
                 '[ 0.  1.  0.  0.]': 0,
                 '[ 1.  0.  0.  0.]': 0,
                 '[ 0.  1.  1.  1.]': 0,
                 '[ 1.  1.  1.  0.]': 0,
                 '[ 1.  0.  1.  1.]': 0,
                 '[ 1.  1.  0.  1.]': 0}
        for _ in range(numx.int32(num_samples)):
            if isinstance(sampler, Sampler.GibbsSampler):
                # Start from random since the model is rather deterministic
                samples = sampler.sample(numx.random.rand(1, 4), 1, ret_states=True)
            elif isinstance(sampler, Sampler.PersistentGibbsSampler):
                # Start from random since the model is rather deterministic
                sampler.chains = numx.random.rand(1, 4)
                samples = sampler.sample(1, 1, ret_states=True)
            else:
                samples = sampler.sample(1, 1, ret_states=True)
            dictC[str(samples[0])] += 1
        probCD1 = dictC['[ 0.  0.  0.  0.]'] / num_samples
        probCD2 = dictC['[ 1.  1.  1.  1.]'] / num_samples
        probCS1 = dictC['[ 0.  0.  1.  1.]'] / num_samples
        probCS2 = dictC['[ 1.  1.  0.  0.]'] / num_samples
        probCS3 = dictC['[ 1.  0.  1.  0.]'] / num_samples
        probCS4 = dictC['[ 0.  1.  0.  1.]'] / num_samples
        sumProbs = probCD1 + probCD2 + probCS1 + probCS2 + probCS3 + probCS4
        return [probCD1, probCD2, probCS1, probCS2, probCS3, probCS4, sumProbs]

    def _check_sampler(self, sampler):
        """Run ``sampler`` and assert its empirical marginals match the
        known model distribution (1/4 uniform, 1/8 bars/stripes, sum 1).

        Shared by all sampler tests to avoid the previously duplicated
        assertion battery.
        """
        probCD1, probCD2, probCS1, probCS2, probCS3, probCS4, sumProbs = \
            self.execute_sampler(sampler, self.num_samples)
        assert numx.all(numx.abs(1.0 / 4.0 - probCD1) < self.epsilon)
        assert numx.all(numx.abs(1.0 / 4.0 - probCD2) < self.epsilon)
        assert numx.all(numx.abs(1.0 / 8.0 - probCS1) < self.epsilon)
        assert numx.all(numx.abs(1.0 / 8.0 - probCS2) < self.epsilon)
        assert numx.all(numx.abs(1.0 / 8.0 - probCS3) < self.epsilon)
        assert numx.all(numx.abs(1.0 / 8.0 - probCS4) < self.epsilon)
        assert numx.all(numx.abs(1.0 - sumProbs) < self.epsilon)

    def test_Gibbs_sampler(self):
        """GibbsSampler reproduces the model marginals."""
        sys.stdout.write('RBM Sampler -> Performing GibbsSampler test ... ')
        sys.stdout.flush()
        numx.random.seed(42)
        self._check_sampler(Sampler.GibbsSampler(self.bbrbm))
        print('successfully passed!')
        sys.stdout.flush()

    def test_Persistent_Gibbs_sampler(self):
        """PersistentGibbsSampler reproduces the model marginals."""
        sys.stdout.write('RBM Sampler -> Performing PersistentGibbsSampler test ... ')
        sys.stdout.flush()
        numx.random.seed(42)
        self._check_sampler(Sampler.PersistentGibbsSampler(self.bbrbm, 1))
        print('successfully passed!')
        sys.stdout.flush()

    def test_Parallel_Tempering_sampler(self):
        """ParallelTemperingSampler reproduces the model marginals."""
        sys.stdout.write('RBM Sampler -> Performing ParallelTemperingSampler test ... ')
        sys.stdout.flush()
        numx.random.seed(42)
        self._check_sampler(Sampler.ParallelTemperingSampler(self.bbrbm, 10))
        print('successfully passed!')
        sys.stdout.flush()

    def test_Independent_Parallel_Tempering_sampler(self):
        """IndependentParallelTemperingSampler reproduces the model marginals."""
        sys.stdout.write('RBM Sampler -> Performing IndependentParallelTemperingSampler test ... ')
        sys.stdout.flush()
        numx.random.seed(42)
        self._check_sampler(Sampler.IndependentParallelTemperingSampler(self.bbrbm, 10, 10))
        print('successfully passed!')
        sys.stdout.flush()
示例#3
0
class TestBinaryBinaryRBM(unittest.TestCase):
    """Unit tests for Model.BinaryBinaryRBM against a hand-tuned 4x4 RBM
    whose exact log partition function and log-likelihood are known.

    The numeric target arrays below were precomputed for exactly these
    parameters; they ARE the expected behavior and must not be altered.
    """
    # Known RBM: bars & stripes data with the two uniform patterns duplicated.
    bbrbmData = generate_bars_and_stripes_complete(2)
    bbrbmData = numx.vstack((bbrbmData[0], bbrbmData, bbrbmData[5]))
    bbrbmw = numx.array(
        [[0.12179488, 2.95950177, 0.33513356, 35.05380642],
         [0.20318085, -28.62372894, 26.52611278, 28.41793445],
         [-0.19105386, -28.58530584, -26.52747507, 28.78447320],
         [0.08953740, -59.82556859, -0.06665933, -27.71723459]])
    bbrbmbv = numx.array(
        [[-19.24399659, -13.26258696, 13.25909850, 43.74408543]])
    bbrbmbh = numx.array(
        [[-0.11155958, 57.02097584, -0.13331758, -32.25991501]])
    bbrbm = Model.BinaryBinaryRBM(4, 4, bbrbmData, bbrbmw, bbrbmbv, bbrbmbh,
                                  0.0, 0.0)

    # Ground-truth quantities for this exact parameterization.
    bbrbmTruelogZ = 59.6749019726
    bbrbmTrueLL = -1.7328699078
    bbrbmBestLLPossible = -1.732867951

    # Numeric tolerance for all comparisons against the targets.
    epsilon = 0.00001

    def test___init__(self):
        """Construction yields a zero base visible bias for this data."""
        sys.stdout.write('BinaryBinaryRBM -> Performing init test ...')
        sys.stdout.flush()
        assert numx.all(self.bbrbm.bv_base == numx.array([[0, 0, 0, 0]]))
        print(' successfully passed!')
        sys.stdout.flush()

    def test__add_visible_units(self):
        """Adding 2 visible units extends bv_base to 6 zero entries."""
        sys.stdout.write(
            'BinaryBinaryRBM -> Performing add_visible_units test ...')
        sys.stdout.flush()
        # Deep copy so the shared class-level model stays untouched.
        localmodel = copy.deepcopy(self.bbrbm)
        localmodel._add_visible_units(2, 3)
        assert numx.all(localmodel.bv_base == numx.array([[0, 0, 0, 0, 0, 0]]))
        print(' successfully passed!')
        sys.stdout.flush()

    def test__remove_visible_units(self):
        """Removing visible units 0 and 2 shrinks bv_base to 2 entries."""
        sys.stdout.write(
            'BinaryBinaryRBM -> Performing remove_visible_units test ...')
        sys.stdout.flush()
        localmodel = copy.deepcopy(self.bbrbm)
        localmodel._remove_visible_units([0, 2])
        assert numx.all(localmodel.bv_base == numx.array([[0, 0]]))
        print(' successfully passed!')
        sys.stdout.flush()

    def test__calculate_weight_gradient(self):
        """Weight gradient equals v^T h summed over the two sample pairs."""
        sys.stdout.write(
            'BinaryBinaryRBM -> Performing calculate_weight_gradient test ...')
        sys.stdout.flush()
        deltaW = self.bbrbm._calculate_weight_gradient(
            numx.array([[1, 1, 1, 0], [0, 1, 0, 1]]),
            numx.array([[0, 1, 0, 1], [0, 1, 1, 0]]))
        target = numx.array([[0., 1., 0., 1.], [0., 2., 1., 1.],
                             [0., 1., 0., 1.], [0., 1., 1., 0.]])
        assert numx.all(target == deltaW)
        print(' successfully passed!')
        sys.stdout.flush()

    def test__calculate_visible_bias_gradient(self):
        """Visible bias gradient equals the column sum of the visible batch."""
        sys.stdout.write(
            'BinaryBinaryRBM -> Performing calculate_visible_bias_gradient test ...'
        )
        sys.stdout.flush()
        deltaBv = self.bbrbm._calculate_visible_bias_gradient(
            numx.array([[1, 1, 1, 0], [0, 1, 0, 1]]))
        target = numx.array([[1., 2., 1., 1.]])
        assert numx.all(target == deltaBv)
        print(' successfully passed!')
        sys.stdout.flush()

    def test__calculate_hidden_bias_gradient(self):
        """Hidden bias gradient equals the column sum of the hidden batch."""
        sys.stdout.write(
            'BinaryBinaryRBM -> Performing calculate_hidden_bias_gradient test ...'
        )
        sys.stdout.flush()
        deltaBh = self.bbrbm._calculate_hidden_bias_gradient(
            numx.array([[0, 1, 0, 1], [0, 1, 1, 0]]))
        target = numx.array([[0., 2., 1., 1.]])
        assert numx.all(target == deltaBh)
        print(' successfully passed!')
        sys.stdout.flush()

    def test_calculate_gradients(self):
        """calculate_gradients bundles the three individual gradients."""
        sys.stdout.write(
            'BinaryBinaryRBM -> Performing calculate_gradients test ...')
        sys.stdout.flush()
        deltaW = self.bbrbm._calculate_weight_gradient(
            numx.array([[1, 1, 1, 0], [0, 1, 0, 1]]),
            numx.array([[0, 1, 0, 1], [0, 1, 1, 0]]))
        deltaBv = self.bbrbm._calculate_visible_bias_gradient(
            numx.array([[1, 1, 1, 0], [0, 1, 0, 1]]))
        deltaBh = self.bbrbm._calculate_hidden_bias_gradient(
            numx.array([[0, 1, 0, 1], [0, 1, 1, 0]]))
        deltas = self.bbrbm.calculate_gradients(
            numx.array([[1, 1, 1, 0], [0, 1, 0, 1]]),
            numx.array([[0, 1, 0, 1], [0, 1, 1, 0]]))
        assert numx.all(deltaW == deltas[0])
        assert numx.all(deltaBv == deltas[1])
        assert numx.all(deltaBh == deltas[2])
        print(' successfully passed!')
        sys.stdout.flush()

    def test_sample_v(self):
        """sample_v is deterministic at p=0/1 and unbiased at p=0.5."""
        sys.stdout.write('BinaryBinaryRBM -> Performing sample_v test ...')
        sys.stdout.flush()
        assert numx.all(self.bbrbm.sample_v(numx.ones((10000, 4))) == 1.0)
        assert numx.all(self.bbrbm.sample_v(numx.zeros((10000, 4))) == 0.0)
        numx.random.seed(42)
        samples = self.bbrbm.sample_v(numx.ones((10000, 4)) * 0.5)
        # Each entry is exactly 0 or 1 iff (count != 0) + (count != 1)
        # equals the total number of entries (40000).
        assert numx.sum(samples != 0.0) + numx.sum(samples != 1.0) == 40000
        # Empirical mean close to 0.5 for p=0.5 inputs.
        assert numx.abs(numx.sum(samples) / 40000.0 - 0.5) < 0.01
        print(' successfully passed!')
        sys.stdout.flush()

    def test_sample_h(self):
        """sample_h is deterministic at p=0/1 and unbiased at p=0.5."""
        sys.stdout.write('BinaryBinaryRBM -> Performing sample_h test ...')
        sys.stdout.flush()
        assert numx.all(self.bbrbm.sample_h(numx.ones((10000, 4))) == 1.0)
        assert numx.all(self.bbrbm.sample_h(numx.zeros((10000, 4))) == 0.0)
        numx.random.seed(42)
        samples = self.bbrbm.sample_h(numx.ones((10000, 4)) * 0.5)
        # Same binary/unbiasedness checks as in test_sample_v.
        assert numx.sum(samples != 0.0) + numx.sum(samples != 1.0) == 40000
        assert numx.abs(numx.sum(samples) / 40000.0 - 0.5) < 0.01
        print(' successfully passed!')
        sys.stdout.flush()

    def test_probability_v_given_h(self):
        """P(v|h) matches the precomputed table for the fixture data."""
        sys.stdout.write(
            'BinaryBinaryRBM -> Performing probability_v_given_h test ...')
        sys.stdout.flush()
        probs = self.bbrbm.probability_v_given_h(self.bbrbmData)
        target = numx.array(
            [[4.38973669e-09, 1.73832475e-06, 9.99998256e-01, 1.00000000e+00],
             [4.38973669e-09, 1.73832475e-06, 9.99998256e-01, 1.00000000e+00],
             [6.93234181e-09, 9.99998583e-01, 1.42771991e-06, 1.00000000e+00],
             [9.99999993e-01, 1.41499740e-06, 9.99998571e-01, 0.00000000e+00],
             [9.56375764e-08, 0.00000000e+00, 1.82363957e-07, 1.13445207e-07],
             [9.99999903e-01, 1.00000000e+00, 9.99999817e-01, 9.99999883e-01],
             [9.99999996e-01, 9.99998259e-01, 1.74236911e-06, 0.00000000e+00],
             [9.99999996e-01, 9.99998259e-01, 1.74236911e-06, 0.00000000e+00]])
        assert numx.all(numx.abs(probs - target) < self.epsilon)
        print(' successfully passed!')
        sys.stdout.flush()

    def test_probability_h_given_v(self):
        """P(h|v) matches the precomputed table for the fixture data."""
        sys.stdout.write(
            'BinaryBinaryRBM -> Performing probability_h_given_v test ...')
        sys.stdout.flush()
        probs = self.bbrbm.probability_h_given_v(self.bbrbmData)
        target = numx.array(
            [[4.72138994e-01, 1.00000000e+00, 4.66719883e-01, 9.76996262e-15],
             [4.72138994e-01, 1.00000000e+00, 4.66719883e-01, 9.76996262e-15],
             [4.54918124e-01, 1.00000000e+00, 3.68904907e-12, 1.00000000e+00],
             [5.45166211e-01, 2.24265051e-14, 1.00000000e+00, 1.97064587e-14],
             [5.53152448e-01, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00],
             [4.46931620e-01, 2.33146835e-14, 2.46841436e-12, 2.83661983e-14],
             [5.27945768e-01, 0.00000000e+00, 5.33398782e-01, 1.00000000e+00],
             [5.27945768e-01, 0.00000000e+00, 5.33398782e-01, 1.00000000e+00]])
        assert numx.all(numx.abs(probs - target) < self.epsilon)
        print(' successfully passed!')
        sys.stdout.flush()

    def test_energy(self):
        """Energy of each (v, h) pair matches the precomputed values."""
        sys.stdout.write('BinaryBinaryRBM -> Performing energy test ...')
        sys.stdout.flush()
        energies = self.bbrbm.energy(self.bbrbmData, self.bbrbmData)
        target = numx.array([[0.], [0.], [32.49137574], [32.50603837],
                             [0.93641873], [0.91694445], [0.03276686],
                             [0.03276686]])
        assert numx.all(numx.abs(energies - target) < self.epsilon)
        print(' successfully passed!')
        sys.stdout.flush()

    def test_unnormalized_log_probability_v(self):
        """Unnormalized log P(v) matches the precomputed values."""
        sys.stdout.write(
            'BinaryBinaryRBM -> Performing unnormalized_log_probability_v test ...'
        )
        sys.stdout.flush()
        probs = self.bbrbm.unnormalized_log_probability_v(self.bbrbmData)
        target = numx.array([[58.28860656], [58.28860656], [57.59545755],
                             [57.59545757], [57.59545753], [57.59545756],
                             [58.28860656], [58.28860656]])
        assert numx.all(numx.abs(probs - target) < self.epsilon)
        print(' successfully passed!')
        sys.stdout.flush()

    def test_unnormalized_log_probability_h(self):
        """Unnormalized log P(h) matches the precomputed values."""
        sys.stdout.write(
            'BinaryBinaryRBM -> Performing unnormalized_log_probability_h test ...'
        )
        sys.stdout.flush()
        probs = self.bbrbm.unnormalized_log_probability_h(self.bbrbmData)
        target = numx.array([[57.00318742], [57.00318742], [56.98879586],
                             [56.98864114], [56.90941665], [56.90945961],
                             [57.00333938], [57.00333938]])
        assert numx.all(numx.abs(probs - target) < self.epsilon)
        print(' successfully passed!')
        sys.stdout.flush()

    def test_log_probability_v(self):
        """log P(v), normalized with the true log Z, matches the targets."""
        sys.stdout.write(
            'BinaryBinaryRBM -> Performing log_probability_v test ...')
        sys.stdout.flush()
        probs = self.bbrbm.log_probability_v(self.bbrbmTruelogZ,
                                             self.bbrbmData)
        target = numx.array([[-1.38629541], [-1.38629541], [-2.07944442],
                             [-2.07944441], [-2.07944444], [-2.07944441],
                             [-1.38629541], [-1.38629541]])
        assert numx.all(numx.abs(probs - target) < self.epsilon)
        print(' successfully passed!')
        sys.stdout.flush()

    def test_log_probability_h(self):
        """log P(h), normalized with the true log Z, matches the targets."""
        sys.stdout.write(
            'BinaryBinaryRBM -> Performing log_probability_h test ...')
        sys.stdout.flush()
        probs = self.bbrbm.log_probability_h(self.bbrbmTruelogZ,
                                             self.bbrbmData)
        target = numx.array([[-2.67171456], [-2.67171456], [-2.68610611],
                             [-2.68626083], [-2.76548532], [-2.76544237],
                             [-2.67156259], [-2.67156259]])
        assert numx.all(numx.abs(probs - target) < self.epsilon)
        print(' successfully passed!')
        sys.stdout.flush()

    def test_log_probability_v_h(self):
        """Joint log P(v, h) matches the targets for h = P(h|v)."""
        sys.stdout.write(
            'BinaryBinaryRBM -> Performing log_probability_v_h test ...')
        sys.stdout.flush()
        h = self.bbrbm.probability_h_given_v(self.bbrbmData)
        probs = self.bbrbm.log_probability_v_h(self.bbrbmTruelogZ,
                                               self.bbrbmData, h)
        target = numx.array([[-2.76881973], [-2.76881973], [-2.76852132],
                             [-2.76850605], [-2.76693057], [-2.76694846],
                             [-2.76879441], [-2.76879441]])
        assert numx.all(numx.abs(probs - target) < self.epsilon)
        print(' successfully passed!')
        sys.stdout.flush()

    def test__base_log_partition(self):
        """Base log partition matches known values after shifting bv_base."""
        sys.stdout.write(
            'BinaryBinaryRBM -> Performing base_log_partition test ...')
        sys.stdout.flush()
        localmodel = copy.deepcopy(self.bbrbm)
        localmodel.bv_base += 1.0
        # Expected values differ depending on whether the visible base
        # distribution is used (True) or not (False).
        assert numx.abs(localmodel._base_log_partition(False) -
                        5.54517744448) < self.epsilon
        assert numx.abs(localmodel._base_log_partition(True) -
                        8.02563547231) < self.epsilon
        print(' successfully passed!')
        sys.stdout.flush()

    def test__getbasebias(self):
        """Base bias equals ln(mean/(1-mean)) for the halved data mean."""
        sys.stdout.write('BinaryBinaryRBM -> Performing getbasebias test ...')
        sys.stdout.flush()
        # The base bias is zero when the data mean is 0.5, so halve the
        # mean (0.5 -> 0.25) to get a non-trivial value:
        # ln(0.25 / 0.75) = -ln(3) ~= -1.09861229.
        localmodel = copy.deepcopy(self.bbrbm)
        localmodel._data_mean = localmodel._data_mean / 2.0
        assert numx.all(
            numx.abs(localmodel._getbasebias() + 1.09861229) < self.epsilon)
        print(' successfully passed!')
        sys.stdout.flush()
示例#4
0
class TestEstimator(unittest.TestCase):
    """Tests for the RBM Estimator module on a known 4x4 BinaryBinaryRBM
    whose exact log partition function and log-likelihood are available.

    Fixes over the previous revision: ``test_log_likelihood_h`` now calls
    ``Estimator.log_likelihood_h`` (not ``log_likelihood_v``) for the list
    input, and ``test_partition_function_factorize_h`` prints the correct
    test name.
    """
    # Known model: bars & stripes data with duplicated uniform patterns.
    bbrbmData = generate_bars_and_stripes_complete(2)
    bbrbmData = numx.vstack((bbrbmData[0], bbrbmData, bbrbmData[5]))
    bbrbmw = numx.array(
        [[0.12179488, 2.95950177, 0.33513356, 35.05380642],
         [0.20318085, -28.62372894, 26.52611278, 28.41793445],
         [-0.19105386, -28.58530584, -26.52747507, 28.78447320],
         [0.08953740, -59.82556859, -0.06665933, -27.71723459]])
    bbrbmbv = numx.array(
        [[-19.24399659, -13.26258696, 13.25909850, 43.74408543]])
    bbrbmbh = numx.array(
        [[-0.11155958, 57.02097584, -0.13331758, -32.25991501]])
    bbrbm = Model.BinaryBinaryRBM(4, 4, bbrbmData, bbrbmw, bbrbmbv, bbrbmbh,
                                  0.0, 0.0)
    # Ground-truth quantities for this exact parameterization.
    bbrbmTruelogZ = 59.6749019726
    bbrbmTrueLL = -1.7328699078
    bbrbmBestLLPossible = -1.732867951

    # Numeric tolerance for exact-method comparisons.
    epsilon = 0.00001

    def test_reconstruction_error(self):
        """Reconstruction error is ~0 for data the model represents exactly,
        for states/probabilities, absolute/squared error, k=1 and k=10,
        and for list-of-arrays input."""
        sys.stdout.write(
            'RBM Estimator -> Performing reconstruction_error test ...')
        sys.stdout.flush()
        numx.random.seed(42)
        # Sweep the use_states / absolut_error / k parameter combinations.
        for k, use_states, absolut_error in [(1, True, False),
                                             (1, False, False),
                                             (1, True, True),
                                             (1, False, True),
                                             (10, False, False)]:
            rec = Estimator.reconstruction_error(self.bbrbm,
                                                 self.bbrbmData,
                                                 k=k,
                                                 beta=1.0,
                                                 use_states=use_states,
                                                 absolut_error=absolut_error)
            assert numx.all(numx.abs(rec) < self.epsilon)
        # The estimator also accepts a list of single-row arrays.
        testList = [self.bbrbmData[i].reshape(1, 4)
                    for i in range(self.bbrbmData.shape[0])]
        rec = Estimator.reconstruction_error(self.bbrbm,
                                             testList,
                                             k=10,
                                             beta=1.0,
                                             use_states=False,
                                             absolut_error=False)
        assert numx.all(numx.abs(rec) < self.epsilon)
        print(' successfully passed!')
        sys.stdout.flush()

    def test_log_likelihood_v(self):
        """Mean visible log-likelihood matches the known true value for
        both array and list input."""
        sys.stdout.write(
            'RBM Estimator -> Performing log_likelihood_v test ...')
        sys.stdout.flush()
        numx.random.seed(42)
        ll = numx.mean(
            Estimator.log_likelihood_v(self.bbrbm, self.bbrbmTruelogZ,
                                       self.bbrbmData, 1.0))
        assert numx.all(numx.abs(ll - self.bbrbmTrueLL) < self.epsilon)
        # Same value when the data is given as a list of single-row arrays.
        testList = [self.bbrbmData[i].reshape(1, 4)
                    for i in range(self.bbrbmData.shape[0])]
        ll = numx.mean(
            Estimator.log_likelihood_v(self.bbrbm, self.bbrbmTruelogZ,
                                       testList, 1.0))
        assert numx.all(numx.abs(ll - self.bbrbmTrueLL) < self.epsilon)
        print(' successfully passed!')
        sys.stdout.flush()

    def test_log_likelihood_h(self):
        """Mean hidden log-likelihood matches the known value for both
        array and list input."""
        sys.stdout.write(
            'RBM Estimator -> Performing log_likelihood_h test ...')
        sys.stdout.flush()
        numx.random.seed(42)
        # Low-probability hidden states: threshold-inverted P(h|v).
        hdata = numx.float64(
            self.bbrbm.probability_h_given_v(self.bbrbmData) < 0.5)
        ll = numx.mean(
            Estimator.log_likelihood_h(self.bbrbm, self.bbrbmTruelogZ, hdata,
                                       1.0))
        assert numx.all(numx.abs(ll + 9.55929166739) < self.epsilon)
        # Same value for list input. Previously this branch mistakenly
        # called log_likelihood_v (copy-paste error); it must exercise
        # log_likelihood_h, whose ground truth the assertion encodes.
        testList = [hdata[i].reshape(1, 4) for i in range(hdata.shape[0])]
        ll = numx.mean(
            Estimator.log_likelihood_h(self.bbrbm, self.bbrbmTruelogZ,
                                       testList, 1.0))
        assert numx.all(numx.abs(ll + 9.55929166739) < self.epsilon)
        print(' successfully passed!')
        sys.stdout.flush()

    def test_partition_function_factorize_v(self):
        """Exact log Z via factorizing over v is batchsize-invariant."""
        sys.stdout.write(
            'RBM Estimator -> Performing partition_function_factorize_v test ...'
        )
        sys.stdout.flush()
        # 'AUTO' and any explicit exponent (including absurdly large ones,
        # which are clamped internally) must give the same exact result.
        for batchsize_exponent in ['AUTO', 0, 3, 555]:
            LogZ = Estimator.partition_function_factorize_v(
                self.bbrbm,
                beta=None,
                batchsize_exponent=batchsize_exponent,
                status=False)
            assert numx.all(numx.abs(LogZ - self.bbrbmTruelogZ) < self.epsilon)
        print(' successfully passed!')
        sys.stdout.flush()

    def test_partition_function_factorize_h(self):
        """Exact log Z via factorizing over h is batchsize-invariant."""
        sys.stdout.write(
            'RBM Estimator -> Performing partition_function_factorize_h test ...'
        )
        sys.stdout.flush()
        for batchsize_exponent in ['AUTO', 0, 3, 555]:
            LogZ = Estimator.partition_function_factorize_h(
                self.bbrbm,
                beta=None,
                batchsize_exponent=batchsize_exponent,
                status=False)
            assert numx.all(numx.abs(LogZ - self.bbrbmTruelogZ) < self.epsilon)
        print(' successfully passed!')
        sys.stdout.flush()

    def test_annealed_importance_sampling(self):
        """AIS log Z estimate tightens as the number of betas grows."""
        sys.stdout.write(
            'RBM Estimator -> Performing annealed_importance_sampling test ...'
        )
        sys.stdout.flush()
        numx.random.seed(42)
        # More intermediate temperatures -> tighter tolerance.
        for betas, tolerance in [(100, 0.5), (1000, 0.05), (10000, 0.005)]:
            LogZ = Estimator.annealed_importance_sampling(self.bbrbm,
                                                          num_chains=100,
                                                          k=1,
                                                          betas=betas,
                                                          status=False)
            assert numx.all(numx.abs(LogZ[0] - self.bbrbmTruelogZ) < tolerance)
        print(' successfully passed!')
        sys.stdout.flush()

    def test_reverse_annealed_importance_sampling(self):
        """Reverse AIS log Z estimate tightens as the number of betas grows."""
        sys.stdout.write(
            'RBM Estimator -> Performing reverse_annealed_importance_sampling test ...'
        )
        sys.stdout.flush()
        numx.random.seed(42)
        for betas, tolerance in [(100, 0.5), (1000, 0.05), (10000, 0.005)]:
            LogZ = Estimator.reverse_annealed_importance_sampling(
                self.bbrbm,
                num_chains=100,
                k=1,
                betas=betas,
                status=False)
            assert numx.all(numx.abs(LogZ[0] - self.bbrbmTruelogZ) < tolerance)
        print(' successfully passed!')
        sys.stdout.flush()
示例#5
0
import pydeep.dbm.binary3Layer.trainer as TRAINER
import pydeep.dbm.binary3Layer.estimator as ESTIMATOR
from pydeep.base.activationfunction import Sigmoid
import pydeep.misc.toyproblems as TOY
import pydeep.misc.visualization as VIS

# Set the same seed value for all algorithms so runs are reproducible
numx.random.seed(42)

# Set dimensions: layer sizes are given as (height x width) image patches
v11 = v12 = 2
v21 = v22 = 4
v31 = v32 = 2

# Generate data: all bars-and-stripes patterns for 2x2 images
train_set = TOY.generate_bars_and_stripes_complete(v11)

# Flattened unit counts of the three DBM layers
N = v11 * v12
M = v21 * v22
O = v31 * v32

# Training parameters: full-batch training, CD-like steps for the
# positive (k_pos) and negative (k_neg) phases
batch_size = train_set.shape[0]
epochs = 100000
k_pos = 3
k_neg = 5
# One learning rate per trainable parameter group of the 3-layer DBM
epsilon = 0.005 * numx.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])

# 'DDD' presumably selects a data-driven offset for each of the three
# layers — TODO confirm against BinaryBinaryDBM's offset_typ handling
offset_typ = 'DDD'
dbm = MODEL.BinaryBinaryDBM(N, M, O, offset_typ, train_set)
示例#6
0
    def test_trainer(self):
        ''' Checks if Auto encoder converges in terms of rec error.

        Covers every activation/cost-function combination plus each
        individual training feature in isolation (centering, momentum,
        L1/L2 regularization, sparseness, contractive and slowness
        penalties, and gradient restriction per Mat/Rows/Cols).
        '''
        sys.stdout.write(
            'Auto encoder -> Performing trainer convergences check ...')
        sys.stdout.flush()
        data = generate_bars_and_stripes_complete(4)
        data_next = numx.random.permutation(
            generate_bars_and_stripes_complete(4))

        all_act_fcts = [AFct.Identity, AFct.SoftSign, AFct.Rectifier,
                        AFct.SoftPlus, AFct.Sigmoid, AFct.HyperbolicTangent]

        def build_ae(act_vis, act_hid, cost, **overrides):
            # Construct a 16x20 auto encoder with the common 'AUTO'
            # initialization; keyword overrides replace any default.
            kwargs = dict(number_visibles=16,
                          number_hiddens=20,
                          data=data,
                          visible_activation_function=act_vis,
                          hidden_activation_function=act_hid,
                          cost_function=cost,
                          initial_weights='AUTO',
                          initial_visible_bias='AUTO',
                          initial_hidden_bias='AUTO',
                          initial_visible_offsets='AUTO',
                          initial_hidden_offsets='AUTO')
            kwargs.update(overrides)
            return MODEL.AutoEncoder(**kwargs)

        def train(ae, **overrides):
            # Run perform_training with every penalty switched off unless
            # explicitly overridden by the caller.
            kwargs = dict(data=data,
                          epsilon=0.01,
                          momentum=0.0,
                          update_visible_offsets=0.0,
                          update_hidden_offsets=0.0,
                          corruptor=None,
                          reg_L1Norm=0.0,
                          reg_L2Norm=0.0,
                          reg_sparseness=0.0,
                          desired_sparseness=0.0,
                          reg_contractive=0.0,
                          reg_slowness=0.0,
                          data_next=None,
                          restrict_gradient=0.0,
                          restriction_norm='Cols')
            kwargs.update(overrides)
            self.perform_training(ae=ae, **kwargs)

        # Cross entropy error: visible activation restricted to Sigmoid.
        for act_hid in all_act_fcts:
            for act_vis in [AFct.Sigmoid]:
                train(build_ae(act_vis, act_hid, CFct.CrossEntropyError))

        # Squared error: all combinations of activation functions.
        for act_hid in all_act_fcts:
            for act_vis in all_act_fcts:
                train(build_ae(act_vis, act_hid, CFct.SquaredError))

        # Absolute error: all combinations, smaller step size.
        for act_hid in all_act_fcts:
            for act_vis in all_act_fcts:
                train(build_ae(act_vis, act_hid, CFct.AbsoluteError),
                      epsilon=0.005)

        # Normal (no data statistics, zero offsets)
        ae = build_ae(AFct.Sigmoid, AFct.Sigmoid, CFct.CrossEntropyError,
                      data=None,
                      initial_visible_offsets=0,
                      initial_hidden_offsets=0,
                      dtype=numx.float64)
        train(ae,
              desired_sparseness=0.1,
              data_next=data_next,
              restrict_gradient=None,
              restriction_norm='Mat')
        # Centered
        ae = build_ae(AFct.Sigmoid, AFct.Sigmoid, CFct.CrossEntropyError,
                      dtype=numx.float64)
        train(ae,
              update_visible_offsets=0.01,
              update_hidden_offsets=0.01,
              desired_sparseness=0.1,
              data_next=data_next,
              restrict_gradient=None,
              restriction_norm='Mat')
        # Momentum
        ae = build_ae(AFct.Sigmoid, AFct.Sigmoid, CFct.CrossEntropyError,
                      data=None,
                      dtype=numx.float64)
        train(ae,
              momentum=0.9,
              update_visible_offsets=0.01,
              update_hidden_offsets=0.01,
              desired_sparseness=0.1,
              data_next=data_next,
              restrict_gradient=None,
              restriction_norm='Mat')
        # L1 L2 Norm
        ae = build_ae(AFct.Sigmoid, AFct.Sigmoid, CFct.CrossEntropyError,
                      dtype=numx.float64)
        train(ae,
              update_visible_offsets=0.01,
              update_hidden_offsets=0.01,
              reg_L1Norm=0.0002,
              reg_L2Norm=0.0002,
              desired_sparseness=0.1,
              data_next=data_next,
              restrict_gradient=None,
              restriction_norm='Mat')
        # Sparse
        ae = build_ae(AFct.Sigmoid, AFct.Sigmoid, CFct.CrossEntropyError,
                      dtype=numx.float64)
        train(ae,
              update_visible_offsets=0.01,
              update_hidden_offsets=0.01,
              reg_sparseness=0.1,
              desired_sparseness=0.1,
              data_next=data_next,
              restrict_gradient=None,
              restriction_norm='Mat')
        # Contractive
        ae = build_ae(AFct.Sigmoid, AFct.Sigmoid, CFct.CrossEntropyError,
                      dtype=numx.float64)
        train(ae,
              update_visible_offsets=0.01,
              update_hidden_offsets=0.01,
              desired_sparseness=0.1,
              reg_contractive=0.1,
              data_next=data_next,
              restrict_gradient=None,
              restriction_norm='Mat')
        # Slowness
        ae = build_ae(AFct.Sigmoid, AFct.Sigmoid, CFct.CrossEntropyError,
                      dtype=numx.float64)
        train(ae,
              update_visible_offsets=0.01,
              update_hidden_offsets=0.01,
              reg_slowness=0.1,
              data_next=data_next,
              restrict_gradient=None,
              restriction_norm='Mat')
        # Restrict Mat
        ae = build_ae(AFct.Sigmoid, AFct.Sigmoid, CFct.CrossEntropyError,
                      dtype=numx.float64)
        train(ae,
              update_visible_offsets=0.01,
              update_hidden_offsets=0.01,
              data_next=data_next,
              restrict_gradient=0.1,
              restriction_norm='Mat')
        # Restrict rows
        ae = build_ae(AFct.Sigmoid, AFct.Sigmoid, CFct.CrossEntropyError,
                      dtype=numx.float64)
        train(ae,
              update_visible_offsets=0.01,
              update_hidden_offsets=0.01,
              data_next=data_next,
              restrict_gradient=0.1,
              restriction_norm='Rows')
        # Restrict Cols
        ae = build_ae(AFct.Sigmoid, AFct.Sigmoid, CFct.CrossEntropyError,
                      dtype=numx.float64)
        train(ae,
              update_visible_offsets=0.01,
              update_hidden_offsets=0.01,
              data_next=data_next,
              restrict_gradient=0.1,
              restriction_norm='Cols')
        print(' successfully passed!')
        sys.stdout.flush()
import pydeep.rbm.estimator as ESTIMATOR

import pydeep.misc.statistics as STATISTICS
import pydeep.misc.toyproblems as TOY_DATA
import pydeep.misc.visualization as VISUALIZATION
import pydeep.misc.measuring as MEASURE

# Set random seed (optional) so the run is reproducible
numx.random.seed(42)

# Input and hidden dimensionality (height x width image patches)
v1 = v2 = 3
h1 = h2 = 4

# Generate the complete bars-and-stripes toy data set for 3x3 images
# (NOTE(review): the earlier comment mentioned MNIST, but this code
# actually builds the bars-and-stripes toy problem)
train_data = TOY_DATA.generate_bars_and_stripes_complete(v1)

# Training parameters: full-batch training
batch_size = train_data.shape[0]
epochs = 20000

# Create trainer and model: binary-binary RBM trained with PCD
rbm = MODEL.BinaryBinaryRBM(number_visibles=v1 * v2,
                            number_hiddens=h1 * h2,
                            data=train_data)
trainer = TRAINER.PCD(rbm, batch_size)

# Stopwatch for measuring training time
measurer = MEASURE.Stopwatch()

# Train model