Example 1
 def test_DropoutPerLayer(self):
     nn = MLPR(layers=[L("Rectifier", units=8, dropout=0.25), L("Linear")],
               regularize='dropout',
               n_iter=1)
     assert_equal(nn.regularize, 'dropout')
     self._run(nn)
     assert_in('Using `dropout` for regularization.', self.output.getvalue())
Example 2

    def run_EqualityTest(self, copier, asserter):
        for activation in ["Rectifier", "Sigmoid", "Maxout", "Tanh"]:
            nn1 = MLPR(layers=[L(activation, units=16, pieces=2), L("Linear", units=1)], random_state=1234)
            nn1._initialize(self.a_in, self.a_out)

            nn2 = copier(nn1, activation)
            asserter(numpy.all(nn1.predict(self.a_in) == nn2.predict(self.a_in)))
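These snippets come from a test suite and omit their imports. Below is a minimal sketch of the setup they appear to assume; every alias (MLPR, MLPC, MLP, L, C, N, ly) is reconstructed from how the names are used in the examples, not from any import shown in the excerpts, so treat it as an educated guess:

    # Assumed imports, inferred from usage across the examples.
    import io
    import numpy
    import scipy.sparse
    from nose.tools import assert_equal, assert_true, assert_in

    # scikit-neuralnetwork estimators and layer types, aliased as in the tests.
    from sknn.mlp import Regressor as MLPR, Classifier as MLPC
    from sknn.mlp import Layer as L, Convolution as C, Native as N
    from sknn.mlp import MultiLayerPerceptron as MLP  # used in Example 24; name assumed
    import lasagne.layers as ly  # Native(ly.GaussianNoiseLayer) in Example 11

    # Typical fixture data; shapes are a guess consistent with the asserts.
    a_in = numpy.random.uniform(0.0, 1.0, (8, 16))
    a_out = numpy.random.uniform(0.0, 1.0, (8, 4))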
Example 3
 def test_RegularizeExplicitL1(self):
     nn = MLPR(layers=[L("Tanh", units=8), L("Linear",)],
               regularize='L1',
               n_iter=1)
     assert_equal(nn.regularize, 'L1')
     self._run(nn)
     assert_in('Using `L1` for regularization.', self.output.getvalue())
Example 4
 def test_RegularizeCustomParam(self):
     nn = MLPR(layers=[L("Tanh", units=8), L("Linear",)],
               weight_decay=0.01,
               n_iter=1)
     assert_equal(nn.weight_decay, 0.01)
     self._run(nn)
     assert_in('Using `L2` for regularization.', self.output.getvalue())
Example 5
 def test_DropoutPerLayer(self):
     nn = MLPR(layers=[L("Maxout", units=8, pieces=2, dropout=0.25), L("Linear")],
               regularize='dropout',
               n_iter=1)
     assert_equal(nn.regularize, 'dropout')
     self._run(nn)
     assert_true(nn.cost is not None)
Example 6
 def test_DropoutExplicit(self):
     nn = MLPR(layers=[L("Tanh", units=8), L("Linear",)],
               regularize='dropout',
               n_iter=1)
     assert_equal(nn.regularize, 'dropout')
     self._run(nn)
     assert_in('Using `dropout` for regularization.', self.output.getvalue())
Example 7
 def test_DropoutExplicit(self):
     nn = MLPR(layers=[L("Tanh", units=8), L("Linear",)],
               regularize='dropout',
               n_iter=1)
     assert_equal(nn.regularize, 'dropout')
     self._run(nn)
     assert_true(nn.cost is not None)
Example 8
 def test_RegularizeCustomParam(self):
     nn = MLPR(layers=[L("Tanh", units=8), L("Linear",)],
               weight_decay=0.01,
               n_iter=1)
     assert_equal(nn.weight_decay, 0.01)
     self._run(nn)
     assert_true(nn.cost is not None)
Example 9
 def test_RegularizeExplicitL2(self):
     nn = MLPR(layers=[L("Sigmoid", units=8), L("Softmax",)],
               regularize='L2',
               n_iter=1)
     assert_equal(nn.regularize, 'L2')
     self._run(nn)
     assert_in('Using `L2` for regularization.', self.output.getvalue())
Example 10
 def test_DropoutAsFloat(self):
     nn = MLPR(layers=[L("Tanh", units=8), L("Linear",)],
               dropout_rate=0.25,
               n_iter=1)
     assert_equal(nn.regularize, 'dropout')
     assert_equal(nn.dropout_rate, 0.25)
     self._run(nn)
     assert_true(nn.cost is not None)
Example 11
 def test_GaussianNoise(self):
     nn = MLPR(layers=[
         L("Rectifier", units=12),
         N(ly.GaussianNoiseLayer),
         L("Linear")
     ],
               n_iter=1)
     self._run(nn)
Example 12
 def test_DropoutAsFloat(self):
     nn = MLPR(layers=[L("Tanh", units=8), L("Linear",)],
               dropout_rate=0.25,
               n_iter=1)
     assert_equal(nn.regularize, 'dropout')
     assert_equal(nn.dropout_rate, 0.25)
     self._run(nn)
     assert_in('Using `dropout` for regularization.', self.output.getvalue())
Example 13
 def test_RandomLayerParams(self):
     clf = RandomizedSearchCV(
         self.__estimator__(layers=[L("Softmax", units=12),
                                    L("Linear")],
                            n_iter=1),
         param_distributions={'hidden0__units': randint(4, 12)},
         n_iter=2)
     clf.fit(self.a_in, self.a_out)
Example 14
 def test_AutomaticRegularize(self):
     nn = MLPR(
         layers=[L("Tanh", units=8, weight_decay=0.0001),
                 L("Linear")],
         n_iter=1)
     self._run(nn)
     assert_in('Using `L2` for regularization, auto-enabled from layers.',
               self.output.getvalue())
Example 15
 def ctor(_, activation):
     nn = MLPR(layers=[
         L(activation, units=16, pieces=2),
         L("Linear", units=1)
     ],
               random_state=1234)
     nn._initialize(self.a_in, self.a_out)
     return nn
Example 16
 def setUp(self):
     self.nn = MLPR(layers=[
         L("Rectifier", units=16),
         L("Sigmoid", dropout=0.2, units=12),
         L("ExpLin", weight_decay=0.001, units=8),
         L("Tanh", normalize='batch', units=4),
         L("Linear")
     ],
                    n_iter=1)
Example 17
 def setUp(self):
     self.nn = MLPR(
         layers=[
             L("Rectifier", units=16),
             L("Sigmoid", units=12),
             L("ExpLin", units=8),
             L("Tanh", units=4),
             L("Linear")],
         n_iter=1)
Example 18
    def test_RegressorLayerParams(self):
        a_in = numpy.random.uniform(0.0, 1.0, (64, 16))
        a_out = numpy.zeros((64, 1))

        clf = GridSearchCV(MLPR(layers=[L("Rectifier", units=12),
                                        L("Linear")],
                                n_iter=1),
                           param_grid={'hidden0__units': [4, 8, 12]})
        clf.fit(a_in, a_out)
Example 19
 def test_RegularizePerLayer(self):
     nn = MLPR(layers=[
         L("Rectifier", units=8, weight_decay=0.01),
         L("Linear", weight_decay=0.001)
     ],
               n_iter=1)
     self._run(nn)
     assert_in('Using `L2` for regularization, auto-enabled from layers.',
               self.output.getvalue())
Example 20
 def test_RandomMultipleJobs(self):
     clf = RandomizedSearchCV(
         self.__estimator__(
             layers=[L("Sigmoid", units=12),
                     L(self.__output__)], n_iter=1),
         param_distributions={'hidden0__units': randint(4, 12)},
         n_iter=4,
         n_jobs=4)
     clf.fit(self.a_in, self.a_out)
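Examples 13, 18 and 20 all drive scikit-learn's hyperparameter search through sknn's nested parameter names, where 'hidden0__units' addresses the `units` attribute of the first hidden layer via the double-underscore convention. A self-contained sketch of the same pattern (the sklearn.model_selection import path is the modern one; releases contemporary with sknn used sklearn.grid_search):

    import numpy
    from scipy.stats import randint
    from sklearn.model_selection import RandomizedSearchCV
    from sknn.mlp import Regressor as MLPR, Layer as L

    X = numpy.random.uniform(0.0, 1.0, (64, 16))
    y = numpy.random.uniform(0.0, 1.0, (64, 1))

    # Each draw of 'hidden0__units' resizes the first hidden layer before fitting.
    search = RandomizedSearchCV(
        MLPR(layers=[L("Sigmoid", units=12), L("Linear")], n_iter=1),
        param_distributions={'hidden0__units': randint(4, 12)},
        n_iter=4)
    search.fit(X, y)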
Example 21
 def setUp(self):
     self.nn = MLPR(layers=[
         L("Rectifier", units=16),
         L("Sigmoid", units=12),
         L("Maxout", units=16, pieces=2),
         L("Tanh", units=4),
         L("Linear")
     ],
                    n_iter=1)
Example 22
 def make(self, activation, seed=1234, train=False, **keywords):
     nn = MLPR(
         layers=[L(activation, units=16, **keywords),
                 L("Linear", units=1)],
         random_state=seed,
         n_iter=1)
     if train:
         nn.fit(self.a_in, self.a_out)
     else:
         nn._initialize(self.a_in, self.a_out)
     return nn
Example 23
    def test_BatchNormPerLayer(self):
        nn = MLPR(layers=[
            C("Sigmoid", normalize='batch', channels=2, kernel_shape=(3, 3)),
            L("Rectifier", normalize='batch', units=8),
            L("Linear", )
        ],
                  n_iter=1)
        self._run(nn)
        assert_in('Using `batch` normalization, auto-enabled from layers.',
                  self.output.getvalue())

        assert_in('Reshaping input array', self.buf.getvalue())
        self.buf = io.StringIO()
Example 24
    def test_TrainConstantOneEpoch(self):
        for t in ['csr_matrix', 'csc_matrix']:
            sparse_matrix = getattr(scipy.sparse, t)
            X_s, y_s = sparse_matrix((8, 16), dtype=numpy.float32), sparse_matrix((8, 16), dtype=numpy.float32)
            X, y = X_s.toarray(), y_s.toarray()
            
            nn1 = MLP(layers=[L("Linear")], n_iter=1, random_state=1234)
            nn1._fit(X, y)

            nn2 = MLP(layers=[L("Linear")], n_iter=1, random_state=1234)
            nn2._fit(X_s, y_s)

            # Dense- and sparse-trained networks should produce identical predictions.
            assert_true(numpy.all(nn1._predict(X_s) == nn2._predict(X_s)))
Example 25
    def run_EqualityTest(self, copier, asserter):
        # Only PyLearn2 supports Maxout.
        extra = ["Maxout"] if sknn.backend.name == 'pylearn2' else []
        for activation in ["Rectifier", "Sigmoid", "Tanh", "ExpLin"] + extra:
            nn1 = MLPR(layers=[L(activation, units=16), L("Linear", units=1)], random_state=1234)
            nn1._initialize(self.a_in, self.a_out)

            nn2 = copier(nn1, activation)
            print('activation', activation)
            a_out1 = nn1.predict(self.a_in)
            a_out2 = nn2.predict(self.a_in)
            print(a_out1, a_out2)
            asserter(numpy.all(numpy.abs(a_out1 - a_out2) < 1E-6))
Example 26
    def test_SetLayerParamsDict(self):
        nn = MLPR(layers=[L("Sigmoid", units=32), L("Linear", name='abcd')])
        a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, 4))
        nn._initialize(a_in, a_out)

        weights = numpy.random.uniform(-1.0, +1.0, (32, 4))
        biases = numpy.random.uniform(-1.0, +1.0, (4, ))
        nn.set_parameters({'abcd': (weights, biases)})

        p = nn.get_parameters()
        assert_true((
            p[1].weights.astype('float32') == weights.astype('float32')).all())
        assert_true(
            (p[1].biases.astype('float32') == biases.astype('float32')).all())
Example 27
    def test_BatchNormExplicit(self):
        nn = MLPR(layers=[
            C("Tanh", channels=2, kernel_shape=(3, 3)),
            L("Sigmoid", units=8),
            L("Linear", )
        ],
                  normalize='batch',
                  n_iter=1)
        assert_equal(nn.normalize, 'batch')
        self._run(nn)
        assert_in('Using `batch` normalization.', self.output.getvalue())

        assert_in('Reshaping input array', self.buf.getvalue())
        self.buf = io.StringIO()
Example 28
 def test_HorizontalKernel(self):
     self._run(
         MLPR(layers=[
             C("Rectifier", channels=4, kernel_shape=(1, 16)),
             L("Linear")
         ],
              n_iter=1))
Example 29
 def test_VerboseClassifier(self):
     nn = MLPC(layers=[L("Softmax")], verbose=1, n_iter=1)
     a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, 1), dtype=numpy.int32)
     nn.fit(a_in, a_out)
     assert_in("Epoch       Training Error       Validation Error       Time", self.buf.getvalue())
     assert_in("    1       ", self.buf.getvalue())
     assert_in("    N/A     ", self.buf.getvalue())
Example 30
 def test_VerboseRegressor(self):
     nn = MLPR(layers=[L("Linear")], verbose=1, n_iter=1)
     a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, 4))
     nn.fit(a_in, a_out)
     assert_in("Epoch       Training Error       Validation Error       Time", self.buf.getvalue())
     assert_in("    1       ", self.buf.getvalue())
     assert_in("    N/A     ", self.buf.getvalue())