Example #1
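The snippets below read like methods of a unit-test class from the decaf library and are not self-contained. A minimal sketch of the imports they appear to assume, where every module path is my guess from the identifiers used:

# Hedged guess at the enclosing test module's imports; the actual decaf
# module paths may differ.
import numpy as np
from decaf import base
from decaf.layers import core_layers, fillers
from decaf.layers import identity, dropout  # assumed per-layer modules
from decaf.util import gradcheck            # assumed location of GradChecker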
    def testKLDivergenceLossGrad(self):
        np.random.seed(1701)
        layer = core_layers.KLDivergenceLossLayer(name='loss')
        checker = gradcheck.GradChecker(1e-6)
        shape = (4, 5)
        # For the input, we make sure it is not too close to 0 (which would
        # create numerical issues).
        input_blob = base.Blob(shape,
                               filler=fillers.RandFiller(min=0.1, max=0.9))
        # normalize input blob
        input_data = input_blob.data()
        input_data /= input_data.sum(1)[:, np.newaxis]
        # check index input
        target_blob = base.Blob(shape[:1],
                                dtype=int,
                                filler=fillers.RandIntFiller(high=shape[1]))
        result = checker.check(layer, [input_blob, target_blob], [],
                               check_indices=[0])
        print(result)
        self.assertTrue(result[0])
        # also, check if weight works.
        self._testWeight(layer, [input_blob, target_blob])

        # check full input
        target_blob = base.Blob(shape, filler=fillers.RandFiller())
        # normalize target
        target_data = target_blob.data()
        target_data /= target_data.sum(1)[:, np.newaxis]
        result = checker.check(layer, [input_blob, target_blob], [],
                               check_indices=[0])
        print(result)
        self.assertTrue(result[0])
        # also, check if weight works.
        self._testWeight(layer, [input_blob, target_blob])
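For reference, a hedged numpy sketch of the loss this test exercises. With a row-normalized prediction p, the KL divergence against a full target distribution t is the sum over classes of t * log(t / p); against an index target it reduces, up to a constant, to -log p[i, target[i]], which is why the test checks both target forms. The function below is illustrative only, and averaging over the batch is my assumption, not decaf's documented behavior:

def kl_divergence_loss(pred, target):
    """pred: (N, K) rows summing to 1; target: (N,) indices or (N, K) rows."""
    if target.ndim == 1:
        # An index target is a delta distribution: KL reduces to -log p[i, t_i].
        return -np.mean(np.log(pred[np.arange(len(pred)), target]))
    eps = 1e-12  # guard the log against zero target entries
    return np.mean(np.sum(target * (np.log(target + eps) - np.log(pred)),
                          axis=1))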
Example #2
    def testIdentityLayer(self):
        layer = identity.IdentityLayer(name='identity')
        np.random.seed(1701)
        filler = fillers.RandFiller()
        bottom = base.Blob((100, 4), filler=filler)
        top = base.Blob()
        # run the identity layer
        layer.forward([bottom], [top])
        # simulate a diff
        fillers.RandFiller().fill(top.init_diff())
        layer.backward([bottom], [top], True)
        np.testing.assert_array_equal(top.data(), bottom.data())
        np.testing.assert_array_equal(top.diff(), bottom.diff())
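The two assertions pin down the identity contract exactly: forward copies the bottom data to the top, and backward copies the top diff to the bottom. Illustratively (not decaf's implementation):

def identity_forward(bottom_data):
    return bottom_data.copy()   # top.data() == bottom.data()

def identity_backward(top_diff):
    return top_diff.copy()      # bottom.diff() == top.diff()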
Example #3
    def testMultinomialLogisticLossGrad(self):
        np.random.seed(1701)
        layer = core_layers.MultinomialLogisticLossLayer(name='loss')
        checker = gradcheck.GradChecker(1e-6)
        shape = (10, 5)
        # check index input
        input_blob = base.Blob(shape, filler=fillers.GaussianRandFiller())
        target_blob = base.Blob(shape[:1],
                                dtype=int,
                                filler=fillers.RandIntFiller(high=shape[1]))
        result = checker.check(layer, [input_blob, target_blob], [],
                               check_indices=[0])
        print(result)
        self.assertTrue(result[0])
        # also, check if weight works.
        self._testWeight(layer, [input_blob, target_blob])

        # check full input
        target_blob = base.Blob(shape, filler=fillers.RandFiller())
        # normalize target
        target_data = target_blob.data()
        target_data /= target_data.sum(1)[:, np.newaxis]
        result = checker.check(layer, [input_blob, target_blob], [],
                               check_indices=[0])
        print(result)
        self.assertTrue(result[0])
        # also, check if weight works.
        self._testWeight(layer, [input_blob, target_blob])
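GradChecker is the common thread in these tests: it presumably compares the layer's analytic gradient against a finite-difference estimate and returns a tuple whose first element is the pass flag, hence assertTrue(result[0]). The standard central-difference technique is sketched below for a scalar-valued function f; whether 1e-6 is decaf's step size or its error threshold is my assumption:

def numeric_gradient(f, x, h=1e-6):
    """Central-difference estimate of df/dx; x is perturbed in place and restored."""
    grad = np.zeros_like(x)
    flat, gflat = x.ravel(), grad.ravel()  # views into x and grad
    for i in range(flat.size):
        orig = flat[i]
        flat[i] = orig + h
        f_plus = f(x)
        flat[i] = orig - h
        f_minus = f(x)
        flat[i] = orig
        gflat[i] = (f_plus - f_minus) / (2 * h)
    return grad

A layer passes when the gradient produced by its backward pass matches this estimate within tolerance for every checked input.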
Example #4
    def testLocalResponseNormalizeLayer(self):
        np.random.seed(1701)
        output_blob = base.Blob()
        checker = gradcheck.GradChecker(1e-6)
        shapes = [(1, 10), (5, 10)]
        alphas = [1.0, 2.0]
        betas = [0.75, 1.0]
        for shape in shapes:
            for alpha in alphas:
                for beta in betas:
                    input_blob = base.Blob(shape, filler=fillers.RandFiller())
                    # odd size
                    layer = core_layers.LocalResponseNormalizeLayer(
                        name='normalize', k=1., alpha=alpha, beta=beta, size=5)
                    result = checker.check(layer, [input_blob], [output_blob])
                    print(result)
                    self.assertTrue(result[0])
                    layer = core_layers.LocalResponseNormalizeLayer(
                        name='normalize', k=2., alpha=alpha, beta=beta, size=5)
                    result = checker.check(layer, [input_blob], [output_blob])
                    print(result)
                    self.assertTrue(result[0])
                    # even size
                    layer = core_layers.LocalResponseNormalizeLayer(
                        name='normalize', k=1., alpha=alpha, beta=beta, size=6)
                    result = checker.check(layer, [input_blob], [output_blob])
                    print(result)
                    self.assertTrue(result[0])
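Local response normalization divides each activation by a power of a windowed sum of squares over neighboring channels, in Caffe-style notation b_i = a_i / (k + (alpha / n) * sum over the window of a_j^2)^beta. Whether decaf divides alpha by the window size n, and how it places an even-sized window, are my assumptions; a numpy sketch over the last axis:

def local_response_normalize(a, k=1., alpha=1., beta=0.75, size=5):
    """a: (N, C). Each channel is normalized by a size-wide window of squares."""
    num, channels = a.shape
    out = np.empty_like(a)
    half = size // 2
    for i in range(channels):
        lo = max(0, i - half)                 # window clipped at the edges
        hi = min(channels, i - half + size)
        scale = k + alpha / size * np.sum(a[:, lo:hi] ** 2, axis=1)
        out[:, i] = a[:, i] / scale ** beta
    return out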
Example #5
    def testResponseNormalizeLayer(self):
        np.random.seed(1701)
        output_blob = base.Blob()
        checker = gradcheck.GradChecker(1e-5)
        shapes = [(1, 5, 5, 1), (1, 5, 5, 3), (5, 5), (1, 5)]
        for shape in shapes:
            input_blob = base.Blob(shape,
                                   filler=fillers.RandFiller(min=0.1, max=1.))
            layer = core_layers.ResponseNormalizeLayer(name='normalize')
            result = checker.check(layer, [input_blob], [output_blob])
            print(result)
            self.assertTrue(result[0])
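This layer takes no window parameters, so it presumably normalizes each channel vector as a whole; the min=0.1 filler keeps the denominator safely away from zero. A sketch under the assumption that it divides by the sum over the last axis (an L2-norm variant would divide by the root of the sum of squares instead):

def response_normalize(a):
    # assumption: each vector along the last axis is scaled to sum to 1
    return a / a.sum(axis=-1, keepdims=True)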
Example #6
    def testAutoencoderLossGrad(self):
        np.random.seed(1701)
        shapes = [(4, 3), (1, 10), (4, 3, 2)]
        layer = core_layers.AutoencoderLossLayer(name='loss', ratio=0.5)
        checker = gradcheck.GradChecker(1e-5)
        for shape in shapes:
            input_blob = base.Blob(shape,
                                   filler=fillers.RandFiller(min=0.05,
                                                             max=0.95))
            result = checker.check(layer, [input_blob], [])
            print(result)
            self.assertTrue(result[0])
            # also, check if weight works.
            self._testWeight(layer, [input_blob])
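The filler keeps activations strictly inside (0.05, 0.95), which hints at log terms that blow up at both 0 and 1. One plausible reading of AutoencoderLossLayer(ratio=0.5), and it is only my assumption, is the sparse-autoencoder sparsity penalty: the KL divergence between a target activation ratio rho and the mean activation rho_hat of each unit:

def sparsity_loss(act, rho=0.5):
    """KL(rho || rho_hat) summed over hidden units; act values in (0, 1)."""
    rho_hat = act.reshape(act.shape[0], -1).mean(axis=0)  # per-unit batch mean
    return np.sum(rho * np.log(rho / rho_hat)
                  + (1 - rho) * np.log((1 - rho) / (1 - rho_hat)))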
Example #7
    def testDropoutLayer(self):
        layer = dropout.DropoutLayer(name='dropout', ratio=0.5)
        np.random.seed(1701)
        filler = fillers.RandFiller(min=1, max=2)
        bottom = base.Blob((100, 4), filler=filler)
        top = base.Blob()
        # run the dropout layer
        layer.forward([bottom], [top])
        # simulate a diff
        fillers.RandFiller().fill(top.init_diff())
        layer.backward([bottom], [top], True)
        # kept units are scaled by 1 / (1 - ratio) = 2 in the forward pass
        np.testing.assert_array_equal(top.data()[top.data() != 0] * 0.5,
                                      bottom.data()[top.data() != 0])
        # dropped units get zero diff; kept units get the diff scaled by 2
        np.testing.assert_array_equal(bottom.diff()[top.data() == 0], 0)
        np.testing.assert_array_equal(bottom.diff()[top.data() != 0],
                                      top.diff()[top.data() != 0] * 2.)
        # test if debug_freeze works: the same mask should be reused
        layer = dropout.DropoutLayer(name='dropout',
                                     ratio=0.5,
                                     debug_freeze=True)
        layer.forward([bottom], [top])
        snapshot = top.data().copy()
        layer.forward([bottom], [top])
        np.testing.assert_array_equal(snapshot, top.data())
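The assertions encode inverted dropout with ratio=0.5: kept units are scaled by 1 / (1 - ratio) = 2 in the forward pass, dropped units receive zero diff, and kept units receive the diff scaled by the same factor in the backward pass; debug_freeze makes the layer reuse its mask, so two forward passes agree. A minimal numpy sketch of that behavior (with a symmetric ratio of 0.5 the test cannot distinguish whether the mask keeps with probability ratio or 1 - ratio, so the choice below is an assumption):

def dropout_forward(x, ratio=0.5, rng=np.random):
    mask = rng.rand(*x.shape) >= ratio     # assumed: keep with prob. 1 - ratio
    return x * mask / (1. - ratio), mask   # inverted dropout: rescale kept units

def dropout_backward(top_diff, mask, ratio=0.5):
    return top_diff * mask / (1. - ratio)  # same mask and scale as forward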