Example 1
    def test_add_layer_top(self):
        # Drop output layer so that layers can be added to top
        self.mh.drop_layer_top()
        layer1 = layer_factory.FullyConnected(name='fc1', num_hidden=5)
        assert 'fc1' not in list(self.mh.layer_type_dict.keys())

        outputs_pre = self.mh.symbol.get_internals().list_outputs()
        self.mh.add_layer_top([layer1])
        outputs_post = self.mh.symbol.get_internals().list_outputs()

        assert 'fc1' in list(self.mh.layer_type_dict.keys())
        assert outputs_post == outputs_pre + [
            'fc1_weight', 'fc1_bias', 'fc1_output'
        ]
Example 2
    def test_add_layer_bottom_list(self):
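        # Add two new layers directly above the data variable in a single call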
        layer1 = layer_factory.Convolution(name='conv1_1',
                                           kernel=(3, 3),
                                           num_filter=10)
        layer2 = layer_factory.FullyConnected(name='fc1', num_hidden=10)
        for layer_name in ['fc1', 'conv1_1']:
            assert layer_name not in list(self.mh.layer_type_dict.keys())

        outputs_pre = self.mh.symbol.get_internals().list_outputs()
        self.mh.add_layer_bottom([layer1, layer2])
        outputs_post = self.mh.symbol.get_internals().list_outputs()

        for layer_name in ['fc1', 'conv1_1']:
            assert layer_name in list(self.mh.layer_type_dict.keys())
        assert outputs_post == [
            self.data_name, 'conv1_1_weight', 'conv1_1_bias', 'conv1_1_output',
            'fc1_weight', 'fc1_bias', 'fc1_output'
        ] + outputs_pre[1:]
Example 3
    def test_add_layer_top_2(self):
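        # Drop the output layer, then stack two layers on top with consecutive calls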
        self.mh.drop_layer_top()
        layer1 = layer_factory.FullyConnected(name='fc1', num_hidden=5)
        layer2 = layer_factory.Convolution(name='conv1_1',
                                           kernel=(3, 3),
                                           num_filter=10)
        for layer_name in ['fc1', 'conv1_1']:
            assert layer_name not in list(self.mh.layer_type_dict.keys())

        outputs_pre = self.mh.symbol.get_internals().list_outputs()
        self.mh.add_layer_top([layer1])
        self.mh.add_layer_top([layer2])
        outputs_post = self.mh.symbol.get_internals().list_outputs()

        for layer_name in ['fc1', 'conv1_1']:
            assert layer_name in list(self.mh.layer_type_dict.keys())
        assert outputs_post == outputs_pre + [
            'fc1_weight', 'fc1_bias', 'fc1_output', 'conv1_1_weight',
            'conv1_1_bias', 'conv1_1_output'
        ]
Example 4
    def test_create_layer(self):
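        # Build a standalone FullyConnected symbol directly from the data variable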
        layer = layer_factory.FullyConnected(name='fc1',
                                             num_hidden=10,
                                             no_bias=False)
        sym = layer.create_layer(self.data)

        assert sorted(sym.attr_dict().keys()) == sorted(
            ['fc1', 'fc1_weight', 'fc1_bias'])
        assert sym.attr_dict()['fc1'] == {
            'num_hidden': '10',
            'no_bias': 'False'
        }
        assert sym.attr_dict()['fc1_weight'] == {
            'num_hidden': '10',
            'no_bias': 'False'
        }
        assert sym.attr_dict()['fc1_bias'] == {
            'num_hidden': '10',
            'no_bias': 'False'
        }
        assert sym.get_internals().list_outputs() == [
            'data', 'fc1_weight', 'fc1_bias', 'fc1_output'
        ]
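The assertions in Example 4 only inspect the symbol's attribute metadata; a quick way to confirm that the symbol returned by create_layer is well formed is to run MXNet shape inference on it. The sketch below is hypothetical: it assumes create_layer yields an ordinary mx.sym.FullyConnected node and that the data input is a plain mx.sym.Variable('data'); the input shape (2, 4) is arbitrary.

import mxnet as mx
# layer_factory is the same module used in the examples above; its import
# path is not shown there, so it is assumed to be available here.

data = mx.sym.Variable('data')
layer = layer_factory.FullyConnected(name='fc1', num_hidden=10, no_bias=False)
sym = layer.create_layer(data)

# Infer argument and output shapes from a dummy (batch=2, features=4) input.
arg_shapes, out_shapes, _ = sym.infer_shape(data=(2, 4))
print(dict(zip(sym.list_arguments(), arg_shapes)))
# expected: {'data': (2, 4), 'fc1_weight': (10, 4), 'fc1_bias': (10,)}
print(out_shapes)
# expected: [(2, 10)]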
Example 5
    def test_add_layer_top_model_error(self):
        # Assert a ModelError is raised when a layer is added above an output layer
        layer1 = layer_factory.FullyConnected(name='fc1', num_hidden=5)
        with self.assertRaises(model_handler.exceptions.ModelError):
            self.mh.add_layer_top([layer1])
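All five examples rely on a test fixture that is not shown: self.mh (a ModelHandler), self.data (the input symbol) and self.data_name. The setUp below is a minimal, hypothetical sketch of that fixture, assuming ModelHandler wraps an MXNet Module built from a small data -> FullyConnected -> SoftmaxOutput network; the ModelHandler constructor signature and the base network are assumptions, not taken from the examples.

import unittest
import mxnet as mx
# model_handler is the library module already referenced in Example 5; its
# import path is not shown there, so it is assumed to be available here.

class ModelHandlerTestCase(unittest.TestCase):
    # Hypothetical fixture: only self.mh, self.data and self.data_name are
    # referenced by the examples above; everything else here is assumed.
    def setUp(self):
        self.data_name = 'data'
        self.data = mx.sym.Variable(self.data_name)
        # Small base network; drop_layer_top() is expected to remove the
        # SoftmaxOutput layer so that new layers can be added on top.
        fc = mx.sym.FullyConnected(self.data, name='fc_out', num_hidden=3)
        out = mx.sym.SoftmaxOutput(fc, name='softmax')
        module = mx.mod.Module(out, data_names=[self.data_name],
                               label_names=['softmax_label'])
        # Assumed constructor: ModelHandler wrapping an MXNet Module.
        self.mh = model_handler.ModelHandler(module)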