def testGroupConvolutionGrad(self):
     np.random.seed(1701)
     output_blob = base.Blob()
     checker = gradcheck.GradChecker(1e-3)
     shapes = [(1, 5, 5, 4)]
     num_kernels = 1
     group = 2
     params = [(3, 1, 'valid'), (3, 1, 'same'), (3, 1, 'full'),
               (2, 1, 'valid'), (2, 1, 'full'), (3, 2, 'valid'),
               (3, 2, 'same'), (3, 2, 'full')]
     for shape in shapes:
         for ksize, stride, mode in params:
             print(ksize, stride, mode, shape)
             input_blob = base.Blob(shape,
                                    filler=fillers.GaussianRandFiller())
             layer = core_layers.GroupConvolutionLayer(
                 name='gconv',
                 ksize=ksize,
                 stride=stride,
                 mode=mode,
                 num_kernels=num_kernels,
                 group=group,
                 filler=fillers.GaussianRandFiller())
             result = checker.check(layer, [input_blob], [output_blob])
             self.assertEqual(output_blob.data().shape[-1],
                              num_kernels * group)
             print(result)
             self.assertTrue(result[0])
      # check that a mismatched input (3 channels, not divisible by group) raises an exception
     input_blob = base.Blob((1, 5, 5, 3),
                            filler=fillers.GaussianRandFiller())
     self.assertRaises(RuntimeError, checker.check, layer, [input_blob],
                       [output_blob])
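
All of the gradient tests in this file go through gradcheck.GradChecker, which compares a layer's analytic gradient against a numerical estimate. As a rough illustration of the underlying finite-difference idea only (plain numpy; `numerical_gradient` is a hypothetical helper, not decaf's actual checker):

import numpy as np

def numerical_gradient(f, x, eps=1e-5):
    """Central-difference estimate of the gradient of a scalar-valued f.
    Illustrative sketch, not decaf code."""
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + eps
        f_plus = f(x)
        x[idx] = orig - eps
        f_minus = f(x)
        x[idx] = orig
        grad[idx] = (f_plus - f_minus) / (2. * eps)
        it.iternext()
    return grad

# usage: for f(x) = 0.5 * (x ** 2).sum() the analytic gradient is x itself
x = np.random.randn(4, 3)
numeric = numerical_gradient(lambda v: 0.5 * (v ** 2).sum(), x)
assert np.allclose(numeric, x, atol=1e-4)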
Example #2
 def testSquaredLossGrad(self):
     np.random.seed(1701)
     shapes = [(4, 3), (1, 10), (4, 3, 2)]
     layer = core_layers.SquaredLossLayer(name='squared')
     checker = gradcheck.GradChecker(1e-6)
     for shape in shapes:
         input_blob = base.Blob(shape, filler=fillers.GaussianRandFiller())
         target_blob = base.Blob(shape, filler=fillers.GaussianRandFiller())
         result = checker.check(layer, [input_blob, target_blob], [],
                                check_indices=[0])
         print(result)
         self.assertTrue(result[0])
         # also, check if weight works.
         self._testWeight(layer, [input_blob, target_blob])
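
For reference, the quantity being checked here can be written out directly. A minimal numpy sketch of a squared loss and its gradient, assuming a 0.5 * MSE convention with averaging over the batch; decaf's exact scaling may differ:

import numpy as np

def squared_loss(pred, target):
    """0.5 * mean squared error and its gradient w.r.t. pred.
    Illustrative sketch, not decaf code."""
    diff = pred - target
    loss = 0.5 * (diff ** 2).sum() / pred.shape[0]
    grad = diff / pred.shape[0]
    return loss, grad

pred = np.random.randn(4, 3)
target = np.random.randn(4, 3)
loss, grad = squared_loss(pred, target)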
Example #3
 def _testSolver(self, solver):
      # We test whether the solver correctly handles the MPI case where
      # multiple nodes host different data. To this end we create a dummy
      # regression problem which, when run under MPI with more than one
      # node, produces a different result from a single-node run.
     np.random.seed(1701)
     X = base.Blob((10, 1),
                   filler=fillers.GaussianRandFiller(mean=mpi.RANK,
                                                     std=0.01))
     Y = base.Blob((10, 1),
                   filler=fillers.ConstantFiller(value=mpi.RANK + 1.))
     decaf_net = base.Net()
     decaf_net.add_layer(core_layers.InnerProductLayer(name='ip',
                                                       num_output=1),
                         needs='X',
                         provides='pred')
     decaf_net.add_layer(core_layers.SquaredLossLayer(name='loss'),
                         needs=['pred', 'Y'])
     decaf_net.finish()
     solver.solve(decaf_net, previous_net={'X': X, 'Y': Y})
     w, b = decaf_net.layers['ip'].param()
      print(w.data(), b.data())
     if mpi.SIZE == 1:
         # If size is 1, we are fitting y = 0 * x + 1
         np.testing.assert_array_almost_equal(w.data(), 0., 2)
         np.testing.assert_array_almost_equal(b.data(), 1., 2)
     else:
         # if size is not 1, we are fitting y = x + 1
         np.testing.assert_array_almost_equal(w.data(), 1., 2)
         np.testing.assert_array_almost_equal(b.data(), 1., 2)
     self.assertTrue(True)
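
The expected coefficients in the comments follow from ordinary least squares on the pooled data: each rank contributes X clustered tightly around its rank value and Y = rank + 1, so a multi-node fit recovers roughly y = x + 1. A standalone numpy check of that claim, simulating the pooled data without MPI (illustrative only):

import numpy as np

np.random.seed(1701)
ranks = np.arange(4)  # pretend we have 4 MPI nodes
X = np.concatenate([np.random.normal(r, 0.01, 10) for r in ranks])
Y = np.concatenate([np.full(10, r + 1.) for r in ranks])
# least-squares fit of y = w * x + b over the pooled data
A = np.stack([X, np.ones_like(X)], axis=1)
w, b = np.linalg.lstsq(A, Y, rcond=None)[0]
print(w, b)  # approximately 1.0 and 1.0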
Example #4
    def testMultinomialLogisticLossGrad(self):
        np.random.seed(1701)
        layer = core_layers.MultinomialLogisticLossLayer(name='loss')
        checker = gradcheck.GradChecker(1e-6)
        shape = (10, 5)
        # check index input
        input_blob = base.Blob(shape, filler=fillers.GaussianRandFiller())
        target_blob = base.Blob(shape[:1],
                                dtype=np.int,
                                filler=fillers.RandIntFiller(high=shape[1]))
        result = checker.check(layer, [input_blob, target_blob], [],
                               check_indices=[0])
        print(result)
        self.assertTrue(result[0])
        # also, check if weight works.
        self._testWeight(layer, [input_blob, target_blob])

        # check full input
        target_blob = base.Blob(shape, filler=fillers.RandFiller())
        # normalize target
        target_data = target_blob.data()
        target_data /= target_data.sum(1)[:, np.newaxis]
        result = checker.check(layer, [input_blob, target_blob], [],
                               check_indices=[0])
        print(result)
        self.assertTrue(result[0])
        # also, check if weight works.
        self._testWeight(layer, [input_blob, target_blob])
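
Both target formats exercised here, class indices and a full probability table, reduce to the same cross-entropy when the dense targets are one-hot. A hedged numpy sketch, assuming the loss is taken on already-normalized probabilities (decaf may fold the softmax in differently):

import numpy as np

def cross_entropy(prob, target):
    """Mean negative log-likelihood; target is either an index vector or a
    (num, classes) array of probabilities. Illustrative sketch, not decaf code."""
    eps = 1e-12
    if target.ndim == 1:
        nll = -np.log(prob[np.arange(len(target)), target] + eps)
    else:
        nll = -(target * np.log(prob + eps)).sum(axis=1)
    return nll.mean()

prob = np.random.rand(10, 5)
prob /= prob.sum(1, keepdims=True)
idx_target = np.random.randint(5, size=10)
dense_target = np.eye(5)[idx_target]
# the two formats agree when the dense target is one-hot
assert np.isclose(cross_entropy(prob, idx_target),
                  cross_entropy(prob, dense_target))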
Example #5
 def testConvolutionGrad(self):
     np.random.seed(1701)
     output_blob = base.Blob()
     checker = gradcheck.GradChecker(1e-4)
     shapes = [(1,5,5,1), (1,5,5,3)]
     num_kernels = 2
     params = [(3,1,'valid'), (3,1,'same'), (3,1,'full'), (2,1,'valid'), (2,1,'full'),
               (3,2,'valid'), (3,2,'same'), (3,2,'full')]
     for shape in shapes:
         for ksize, stride, mode in params:
             print(ksize, stride, mode, shape)
             input_blob = base.Blob(shape, filler=fillers.GaussianRandFiller())
             layer = core_layers.ConvolutionLayer(
                 name='conv', ksize=ksize, stride=stride, mode=mode,
                 num_kernels=num_kernels,
                 filler=fillers.GaussianRandFiller())
             result = checker.check(layer, [input_blob], [output_blob])
             print(result)
             self.assertTrue(result[0])
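
The 'valid', 'same' and 'full' modes only differ in how much implicit padding is applied before the strided convolution. The usual output-size bookkeeping, as a small standalone helper (an illustration of the standard formula, not decaf's code):

def conv_output_size(in_size, ksize, stride, mode):
    """Spatial output size for a strided convolution, standard convention.
    Illustrative sketch, not decaf code."""
    if mode == 'valid':
        padded = in_size
    elif mode == 'same':
        padded = in_size + ksize - 1      # output matches input at stride 1
    elif mode == 'full':
        padded = in_size + 2 * (ksize - 1)
    else:
        raise ValueError('unknown mode: %s' % mode)
    return (padded - ksize) // stride + 1

# e.g. a 5-pixel edge with a 3-pixel kernel at stride 1:
# valid -> 3, same -> 5, full -> 7
assert [conv_output_size(5, 3, 1, m) for m in ('valid', 'same', 'full')] == [3, 5, 7]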
Example #6
 def testReLUGrad(self):
     np.random.seed(1701)
     shapes = [(4, 3), (1, 10), (2, 5, 5, 1), (2, 5, 5, 3)]
     output_blob = base.Blob()
     layer = core_layers.ReLULayer(name='relu')
     checker = gradcheck.GradChecker(1e-5)
     for shape in shapes:
         input_blob = base.Blob(shape, filler=fillers.GaussianRandFiller())
         result = checker.check(layer, [input_blob], [output_blob])
         print(result)
         self.assertTrue(result[0])
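
The ReLU gradient being verified is just an elementwise mask on the input sign; for reference, in plain numpy (illustrative helpers, not decaf's layer):

import numpy as np

def relu_forward(x):
    # elementwise max with zero
    return np.maximum(x, 0)

def relu_backward(x, top_grad):
    # gradient flows only where the input was positive
    return top_grad * (x > 0)

x = np.random.randn(4, 3)
y = relu_forward(x)
dx = relu_backward(x, np.ones_like(y))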
Example #7
    def testDropoutGrad(self):
        np.random.seed(1701)
        input_blob = base.Blob((4, 3), filler=fillers.GaussianRandFiller())
        output_blob = base.Blob()
        checker = gradcheck.GradChecker(1e-5)

        layer = core_layers.DropoutLayer(name='dropout',
                                         ratio=0.5,
                                         debug_freeze=True)
        result = checker.check(layer, [input_blob], [output_blob])
        print(result)
        self.assertTrue(result[0])
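
debug_freeze=True matters here because dropout is stochastic: the checker can only compare repeated forward passes if the mask stays fixed. A sketch of inverted dropout with a frozen mask (FrozenDropout is an illustrative class, not decaf's API, and the 1 / (1 - ratio) rescaling is an assumption about the convention used):

import numpy as np

class FrozenDropout(object):
    """Dropout whose mask is drawn once and then reused on every call."""
    def __init__(self, ratio, shape, seed=0):
        rng = np.random.RandomState(seed)
        # keep each unit with probability 1 - ratio, rescale the survivors
        self.mask = (rng.rand(*shape) >= ratio) / (1. - ratio)

    def forward(self, x):
        return x * self.mask

    def backward(self, top_grad):
        return top_grad * self.mask

layer = FrozenDropout(ratio=0.5, shape=(4, 3))
x = np.random.randn(4, 3)
y = layer.forward(x)
dx = layer.backward(np.ones_like(y))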
 def testSplitGrad(self):
     np.random.seed(1701)
     output_blobs = [base.Blob(), base.Blob()]
     checker = gradcheck.GradChecker(1e-5)
     shapes = [(5, 4), (5, 1), (1, 5), (1, 5, 5), (1, 5, 5, 3),
               (1, 5, 5, 1)]
     for shape in shapes:
         input_blob = base.Blob(shape, filler=fillers.GaussianRandFiller())
         layer = base.SplitLayer(name='split')
         result = checker.check(layer, [input_blob], output_blobs)
         print(result)
         self.assertTrue(result[0])
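
A split layer fans one blob out to several consumers, so its backward pass is simply the sum of the incoming gradients. A minimal numpy sketch of that behavior (illustrative helpers, not decaf's SplitLayer):

import numpy as np

def split_forward(x, num_outputs=2):
    # each consumer gets its own copy of the input
    return [x.copy() for _ in range(num_outputs)]

def split_backward(output_grads):
    # the input gradient is the sum over all consumers
    return sum(output_grads)

x = np.random.randn(5, 4)
tops = split_forward(x)
grad = split_backward([np.ones_like(t) for t in tops])
assert np.all(grad == 2.)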
Example #9
 def testMeanNormalizeLayer(self):
     np.random.seed(1701)
     output_blob = base.Blob()
     checker = gradcheck.GradChecker(1e-5)
     shapes = [(1,5,5,1), (1,5,5,3), (5,5), (1,5)]
     for shape in shapes:
         input_blob = base.Blob(shape, filler=fillers.GaussianRandFiller())
         layer = core_layers.MeanNormalizeLayer(
             name='normalize')
         result = checker.check(layer, [input_blob], [output_blob])
         print(result)
         self.assertTrue(result[0])
 def testPaddingGrad(self):
     np.random.seed(1701)
     output_blob = base.Blob()
     checker = gradcheck.GradChecker(1e-5)
     shapes = [(1, 5, 5, 1), (1, 5, 5, 3), (1, 4, 3, 1), (1, 4, 3, 3)]
     pads = [1, 2, 3]
     for pad in pads:
         for shape in shapes:
             input_blob = base.Blob(shape,
                                    filler=fillers.GaussianRandFiller())
             layer = core_layers.PaddingLayer(name='padding', pad=pad)
             result = checker.check(layer, [input_blob], [output_blob])
             print(result)
             self.assertTrue(result[0])
 def testSoftmaxGrad(self):
     np.random.seed(1701)
     input_blob = base.Blob((10,5), filler=fillers.GaussianRandFiller())
     output_blob = base.Blob()
     layer = core_layers.SoftmaxLayer(name='softmax')
     checker = gradcheck.GradChecker(1e-5)
     result = checker.check(layer, [input_blob], [output_blob])
     print(result)
     self.assertTrue(result[0])
      # Also, check the forward output against a direct softmax computation
     pred = input_blob.data()
     prob = np.exp(pred) / np.exp(pred).sum(1)[:, np.newaxis]
     np.testing.assert_array_almost_equal(
         output_blob.data(), prob)
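
The reference probabilities above are computed with a raw exp, which is fine for small Gaussian inputs; for larger scores, the max-subtracted form is the numerically safer way to get the same result (illustrative helper, not decaf's layer):

import numpy as np

def softmax(pred):
    """Row-wise softmax with the usual max subtraction for stability."""
    shifted = pred - pred.max(axis=1, keepdims=True)
    e = np.exp(shifted)
    return e / e.sum(axis=1, keepdims=True)

pred = np.random.randn(10, 5)
prob = softmax(pred)
np.testing.assert_array_almost_equal(prob.sum(1), np.ones(10))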
Example #12
 def testLogisticLossGrad(self):
     np.random.seed(1701)
     layer = core_layers.LogisticLossLayer(name='logistic')
     checker = gradcheck.GradChecker(1e-6)
     input_blob = base.Blob((10, 1), filler=fillers.GaussianRandFiller())
     target_blob = base.Blob((10, ),
                             dtype=np.int,
                             filler=fillers.RandIntFiller(high=2))
     result = checker.check(layer, [input_blob, target_blob], [],
                            check_indices=[0])
     print(result)
     self.assertTrue(result[0])
     # also, check if weight works.
     self._testWeight(layer, [input_blob, target_blob])
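
The logistic loss pairs one real-valued score per example with a binary label. A sketch of the usual sigmoid cross-entropy and its gradient (the mean reduction and sign conventions are assumptions, not read off decaf's implementation):

import numpy as np

def logistic_loss(score, label):
    """Mean sigmoid cross-entropy and its gradient w.r.t. score.
    Illustrative sketch, not decaf code."""
    prob = 1. / (1. + np.exp(-score))
    eps = 1e-12
    loss = -(label * np.log(prob + eps) +
             (1 - label) * np.log(1 - prob + eps)).mean()
    grad = (prob - label) / len(score)
    return loss, grad

score = np.random.randn(10)
label = np.random.randint(2, size=10)
loss, grad = logistic_loss(score, label)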
Example #13
 def testIm2colGrad(self):
     np.random.seed(1701)
     output_blob = base.Blob()
     checker = gradcheck.GradChecker(1e-4)
     shapes = [(1, 5, 5, 1), (1, 5, 5, 3), (1, 4, 3, 1), (1, 4, 3, 3)]
     params = [(2, 1), (2, 2), (3, 1), (3, 2)]
     for psize, stride in params:
         for shape in shapes:
             input_blob = base.Blob(shape,
                                    filler=fillers.GaussianRandFiller())
             layer = core_layers.Im2colLayer(name='im2col',
                                             psize=psize,
                                             stride=stride)
             result = checker.check(layer, [input_blob], [output_blob])
             print(result)
             self.assertTrue(result[0])
Example #14
 def testPoolingGrad(self):
     np.random.seed(1701)
     output_blob = base.Blob()
     checker = gradcheck.GradChecker(1e-4)
     shapes = [(1, 7, 7, 1), (2, 7, 7, 1), (1, 7, 7, 3), (1, 8, 8, 3),
               (1, 13, 13, 1), (1, 13, 13, 2)]
     params = [(3, 2, 'max'), (3, 2, 'ave'), (3, 3, 'max'), (3, 3, 'ave'),
               (5, 3, 'max'), (5, 3, 'ave'), (5, 5, 'max'), (5, 5, 'ave')]
     for shape in shapes:
         for psize, stride, mode in params:
             print(psize, stride, mode, shape)
             input_blob = base.Blob(shape,
                                    filler=fillers.GaussianRandFiller())
             layer = core_layers.PoolingLayer(name='pool',
                                              psize=psize,
                                              stride=stride,
                                              mode=mode)
             result = checker.check(layer, [input_blob], [output_blob])
             print(result)
             self.assertTrue(result[0])
Example #15
 def testIm2col(self):
     np.random.seed(1701)
     output_blob = base.Blob()
     shapes = [(1, 5, 5, 1), (1, 5, 5, 3), (1, 4, 3, 1), (1, 4, 3, 3),
               (3, 5, 5, 1), (3, 5, 5, 3), (3, 4, 3, 1), (3, 4, 3, 3)]
     params = [(2, 1), (2, 2), (3, 1), (3, 2)]
     for psize, stride in params:
         for shape in shapes:
             input_blob = base.Blob(shape,
                                    filler=fillers.GaussianRandFiller())
             layer = core_layers.Im2colLayer(name='im2col',
                                             psize=psize,
                                             stride=stride)
             layer.forward([input_blob], [output_blob])
              # compare against a naive implementation: output position
              # (i, j) should hold the flattened psize x psize patch of
              # the input starting at (i * stride, j * stride)
              for i in range(output_blob.data().shape[1]):
                  for j in range(output_blob.data().shape[2]):
                      np.testing.assert_array_almost_equal(
                          output_blob.data()[:, i, j].flatten(),
                          input_blob.data()[:, i * stride:i * stride + psize,
                                            j * stride:j * stride + psize,
                                            :].flatten())
Example #16
 def testPoolingGrad(self):
     np.random.seed(1701)
     output_blob = base.Blob()
     input_blob = base.Blob((1, 8, 8, 3),
                            filler=fillers.GaussianRandFiller())
     psize = 3
     stride = 2
     mode = 'max'
     layer = core_layers.PoolingLayer(name='pool',
                                      psize=psize,
                                      stride=stride,
                                      mode=mode)
     layer.forward([input_blob], [output_blob])
     img = input_blob.data()[0]
     output = output_blob.data()[0]
      print(img.shape, output.shape)
     for i in range(output.shape[0]):
         for j in range(output.shape[1]):
             for c in range(output.shape[2]):
                 self.assertAlmostEqual(
                     output[i, j, c],
                     img[i * stride:i * stride + psize,
                         j * stride:j * stride + psize, c].max())
     mode = 'ave'
     layer = core_layers.PoolingLayer(name='pool',
                                      psize=psize,
                                      stride=stride,
                                      mode=mode)
     layer.forward([input_blob], [output_blob])
     img = input_blob.data()[0]
     output = output_blob.data()[0]
      print(img.shape, output.shape)
     for i in range(output.shape[0]):
         for j in range(output.shape[1]):
             for c in range(output.shape[2]):
                 self.assertAlmostEqual(
                     output[i, j, c],
                     img[i * stride:i * stride + psize,
                         j * stride:j * stride + psize, c].mean())
    def testInnerproductGrad(self):
        np.random.seed(1701)
        input_blob = base.Blob((4, 3), filler=fillers.GaussianRandFiller())
        output_blob = base.Blob()
        checker = gradcheck.GradChecker(1e-5)

        ip_layer = core_layers.InnerProductLayer(
            name='ip',
            num_output=5,
            bias=True,
            filler=fillers.GaussianRandFiller(),
            bias_filler=fillers.GaussianRandFiller(),
            reg=None)
        result = checker.check(ip_layer, [input_blob], [output_blob])
        print(result)
        self.assertTrue(result[0])

        ip_layer = core_layers.InnerProductLayer(
            name='ip',
            num_output=5,
            bias=False,
            filler=fillers.GaussianRandFiller(),
            reg=None)
        result = checker.check(ip_layer, [input_blob], [output_blob])
        print(result)
        self.assertTrue(result[0])

        ip_layer = core_layers.InnerProductLayer(
            name='ip',
            num_output=5,
            bias=True,
            filler=fillers.GaussianRandFiller(),
            bias_filler=fillers.GaussianRandFiller(),
            reg=regularization.L2Regularizer(weight=0.1))
        result = checker.check(ip_layer, [input_blob], [output_blob])
        print(result)
        self.assertTrue(result[0])
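
The three configurations above only toggle the bias and an L2 penalty; the forward map and parameter gradients are otherwise the standard affine ones. A hedged numpy sketch (the regularizer's exact scaling in decaf is assumed, not verified):

import numpy as np

def inner_product_forward(x, W, b=None):
    """y = x W (+ b). Illustrative sketch, not decaf code."""
    y = x.dot(W)
    return y + b if b is not None else y

def inner_product_backward(x, W, top_grad, l2_weight=0.0):
    # L2 regularization adds a term proportional to W (exact scale assumed)
    dW = x.T.dot(top_grad) + l2_weight * W
    db = top_grad.sum(axis=0)
    dx = top_grad.dot(W.T)
    return dx, dW, db

x = np.random.randn(4, 3)
W = np.random.randn(3, 5)
b = np.random.randn(5)
y = inner_product_forward(x, W, b)
dx, dW, db = inner_product_backward(x, W, np.ones_like(y), l2_weight=0.1)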
def main():
    logging.getLogger().setLevel(logging.INFO)
    ######################################
    # First, let's create the decaf layer.
    ######################################
    logging.info('Loading data and creating the network...')
    decaf_net = base.Net()
    # add data layer
    dataset = mnist.MNISTDataLayer(name='mnist',
                                   rootfolder=ROOT_FOLDER,
                                   is_training=True)
    decaf_net.add_layer(dataset, provides=['image-all', 'label-all'])
    # add minibatch layer for stochastic optimization
    minibatch_layer = core_layers.BasicMinibatchLayer(name='batch',
                                                      minibatch=MINIBATCH)
    decaf_net.add_layer(minibatch_layer,
                        needs=['image-all', 'label-all'],
                        provides=['image', 'label'])
    # add the two_layer network
    decaf_net.add_layers([
        core_layers.FlattenLayer(name='flatten'),
        core_layers.InnerProductLayer(
            name='ip1',
            num_output=NUM_NEURONS,
            filler=fillers.GaussianRandFiller(std=0.1),
            bias_filler=fillers.ConstantFiller(value=0.1)),
        core_layers.ReLULayer(name='relu1'),
        core_layers.InnerProductLayer(
            name='ip2',
            num_output=NUM_CLASS,
            filler=fillers.GaussianRandFiller(std=0.3))
    ],
                         needs='image',
                         provides='prediction')
    # add loss layer
    loss_layer = core_layers.MultinomialLogisticLossLayer(name='loss')
    decaf_net.add_layer(loss_layer, needs=['prediction', 'label'])
    # finish.
    decaf_net.finish()
    ####################################
    # Decaf layer finished construction!
    ####################################

    # now, try to solve it
    if METHOD == 'adagrad':
        # The Adagrad Solver
        solver = core_solvers.AdagradSolver(base_lr=0.02,
                                            base_accum=1.e-6,
                                            max_iter=1000)
    elif METHOD == 'sgd':
        solver = core_solvers.SGDSolver(base_lr=0.1,
                                        lr_policy='inv',
                                        gamma=0.001,
                                        power=0.75,
                                        momentum=0.9,
                                        max_iter=1000)
    solver.solve(decaf_net)
    visualize.draw_net_to_file(decaf_net, 'mnist.png')
    decaf_net.save('mnist_2layers.decafnet')

    ##############################################
    # Now, let's load the net and run predictions
    ##############################################
    prediction_net = base.Net.load('mnist_2layers.decafnet')
    visualize.draw_net_to_file(prediction_net, 'mnist_test.png')
    # obtain the test data.
    dataset_test = mnist.MNISTDataLayer(name='mnist',
                                        rootfolder=ROOT_FOLDER,
                                        is_training=False)
    test_image = base.Blob()
    test_label = base.Blob()
    dataset_test.forward([], [test_image, test_label])
    # Run the net.
    pred = prediction_net.predict(image=test_image)['prediction']
    accuracy = (pred.argmax(1) == test_label.data()).sum() / float(
        test_label.data().size)
    print('Testing accuracy:', accuracy)
    print('Done.')