Example No. 1
def test_bn_ext_ml_three_blobs(self):
    mean_blob = np.array([1., 2.])
    variance_blob = np.array([3., 4.])
    scale_blob = np.array([5.])
    blobs = [mean_blob, variance_blob, scale_blob]
    res = batch_norm_ext(FakeBNProtoLayer(10), FakeModelLayer(blobs))
    exp_res = {
        'type': 'BatchNormalization',
        'eps': 10,
        'infer': copy_shape_infer,
        # Expected statistics are the raw blobs scaled by 0.2 (= 1 / scale_blob[0]).
        'mean': mean_blob * 0.2,
        'variance': variance_blob * 0.2,
        'embedded_inputs': [
            (1, 'gamma', {'bin': 'gamma'}),
            (2, 'beta', {'bin': 'beta'}),
            (3, 'mean', {'bin': 'biases'}),
            (4, 'variance', {'bin': 'weights'}),
        ]
    }
    for i in exp_res:
        if i in ('mean', 'variance'):
            np.testing.assert_array_equal(res[i], exp_res[i])
        else:
            self.assertEqual(res[i], exp_res[i])
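The Fake* objects used above are test doubles defined elsewhere in the test suite. For orientation only, here is a minimal sketch of what this test appears to assume about them; the batch_norm_param/eps attribute path and the blobs attribute are guesses inferred from usage, not the actual fixtures:

from types import SimpleNamespace


class FakeBNProtoLayer:
    """Stand-in for a Caffe BatchNorm proto layer; only eps matters here."""
    def __init__(self, eps):
        # Attribute path is an assumption about how the extractor reads eps.
        self.batch_norm_param = SimpleNamespace(eps=eps)


class FakeModelLayer:
    """Stand-in for a Caffe model layer that just carries its weight blobs."""
    def __init__(self, blobs):
        self.blobs = blobs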
Example No. 2
def test_scale_ext(self):
    mean_blob = np.array([1., 2.])
    variance_blob = np.array([3., 4.])
    blobs = [mean_blob, variance_blob]
    params = {'type': 'Scale', 'axis': 0, 'bias_term': True}

    res = scale_ext(FakeProtoLayer(FakeMultiParam(params)),
                    FakeModelLayer(blobs))
    exp_res = {
        'op': 'ScaleShift',
        'type': 'ScaleShift',
        'axis': 0,
        'infer': copy_shape_infer,
        'weights': mean_blob,
        'biases': variance_blob,
        'embedded_inputs': [
            (1, 'weights', {'bin': 'weights'}),
            (2, 'biases', {'bin': 'biases'}),
        ]
    }
    for i in exp_res:
        if i in ('weights', 'biases'):
            np.testing.assert_array_equal(res[i], exp_res[i])
        else:
            self.assertEqual(res[i], exp_res[i])
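FakeProtoLayer and FakeMultiParam follow the same pattern: FakeMultiParam exposes a parameter dict as attributes, and FakeProtoLayer wraps it as a layer parameter group. A minimal sketch under that assumption; the scale_param and inner_product_param attribute names are guesses based on the Caffe layer types involved and are not taken from the real fixtures:

class FakeMultiParam:
    """Exposes a parameter dict as attributes, e.g. params['axis'] -> .axis."""
    def __init__(self, params):
        self.__dict__.update(params)


class FakeProtoLayer:
    """Wraps one parameter group; the attribute names below are assumptions."""
    def __init__(self, param):
        self.scale_param = param
        self.inner_product_param = param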
Example No. 3
def test_inner_product_ext(self):
    params = {'num_output': 10, 'bias_term': True}
    mean_blob = np.array([1., 2.])
    variance_blob = np.array([3., 4.])
    blobs = [mean_blob, variance_blob]
    res = inner_product_ext(FakeProtoLayer(FakeMultiParam(params)),
                            FakeModelLayer(blobs))
    exp_res = {
        'type': 'FullyConnected',
        'out-size': 10,
        'infer': caffe_inner_product,
        'weights': mean_blob,
        'biases': variance_blob,
        'embedded_inputs': [
            (1, 'weights', {'bin': 'weights'}),
            (2, 'biases', {'bin': 'biases'}),
        ]
    }
    for i in exp_res:
        if i in ('weights', 'biases'):
            np.testing.assert_array_equal(res[i], exp_res[i])
        else:
            self.assertEqual(res[i], exp_res[i])
Example No. 4
def test_bias(self, embed_input_mock):
    embed_input_mock.return_value = {}
    params = {'axis': 1}
    add_node = FakeNode(FakeBiasProtoLayer(FakeMultiParam(params)),
                        FakeModelLayer([1, 2, 3, 4, 5]))
    BiasToAdd.extract(add_node)

    exp_res = {'type': "Add", 'axis': 1}

    for key in exp_res.keys():
        self.assertEqual(add_node[key], exp_res[key])
Example No. 5
def test_weights_biases_layer_bias(self, embed_input_mock):
    weights_biases(True, FakeModelLayer([[1, 2], [3, 4]]))
    calls = [call({}, 1, 'weights', [1, 2]), call({}, 2, 'biases', [3, 4])]
    embed_input_mock.assert_has_calls(calls)
Example No. 6
def test_weights_biases_layer_no_bias(self, embed_input_mock):
    weights_biases(False, FakeModelLayer([[1, 2]]))
    calls = [call({}, 1, 'weights', [1, 2])]
    embed_input_mock.assert_has_calls(calls)
Example No. 7
def test_bn_ext_ml_one_blob(self):
    self.assertRaises(AssertionError, batch_norm_ext, FakeBNProtoLayer(10),
                      FakeModelLayer([np.array([1, 2])]))
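Read together, Examples No. 1 and No. 7 pin down how batch_norm_ext treats its blobs: with three blobs the mean and variance are rescaled by the reciprocal of the scale blob (1/5 = 0.2 above), and with fewer than two blobs an assertion fails. Below is a rough sketch of that behaviour, reconstructed from these tests alone and omitting the infer/embedded_inputs bookkeeping; it is not the actual Model Optimizer implementation:

def batch_norm_ext_sketch(proto_layer, model_layer):
    # Hypothetical reimplementation consistent with the tests above.
    blobs = model_layer.blobs
    assert len(blobs) >= 2, 'BatchNorm needs at least mean and variance blobs'
    mean, variance = blobs[0], blobs[1]
    if len(blobs) == 3:
        # Caffe stores unnormalized statistics plus a scale-factor blob.
        scale = blobs[2][0]
        factor = 1.0 / scale if scale != 0 else 0.0
        mean = mean * factor
        variance = variance * factor
    return {
        'type': 'BatchNormalization',
        'eps': proto_layer.batch_norm_param.eps,  # attribute path as sketched under Example No. 1
        'mean': mean,
        'variance': variance,
    }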