def test_multiple_branch(self):
    """Build a triplet network from three copies of one shared CNN branch.

    Converts a sequential CNN to a functional branch, instantiates it on
    three separate inputs, merges the branch outputs into a single
    OutputLayer, and checks the compiled layer count and weight sharing.
    """
    from dlpy.sequential import Sequential

    backbone = Sequential(self.s, model_table='Simple_CNN')
    backbone.add(InputLayer(3, 48, 96, scale=1 / 255, random_mutation='none'))
    backbone.add(Conv2d(64, 7, include_bias=True, act='relu'))
    backbone.add(Pooling(2))
    # Three identical conv/pool stages.
    for _ in range(3):
        backbone.add(Conv2d(64, 3, include_bias=True, act='relu'))
        backbone.add(Pooling(2))
    backbone.add(Dense(16))
    backbone.add(OutputLayer(n=1, act='sigmoid'))

    # Functional branch that stops before the sequential model's output layer.
    branch = backbone.to_functional_model(stop_layers=backbone.layers[-1])

    # Apply the same branch to three independent input tensors.
    branch_inputs = []
    branch_outputs = []
    for _ in range(3):
        tensor_in = Input(**branch.layers[0].config)  # tensor
        branch_inputs.append(tensor_in)
        branch_outputs.append(branch(tensor_in))  # tensor

    triplet = OutputLayer(n=1)(branch_outputs[0] + branch_outputs[1] + branch_outputs[2])
    triplet_model = Model(self.s, inputs=branch_inputs, outputs=triplet)
    triplet_model.compile()
    triplet_model.print_summary()
    self.assertEqual(len(triplet_model.layers), 31)

    # The three branch copies should be able to share convolution weights.
    triplet_model.share_weights({'Convo.1': ['Convo.1_2', 'Convo.1_3']})
    triplet_model.compile()
def test_mix_cnn_rnn_network(self):
    """Connect a CNN feature extractor to an RNN via the functional API.

    The case is to test if CNN and RNN models can be connected using the
    functional API; the model_type is expected to be RNN (as of 19w47).
    Also verifies output-layer name handling: reusing the same functional
    RNN produces a de-duplicated layer name ('fixed_2').
    """
    from dlpy.applications import ResNet50_Caffe
    from dlpy import Sequential
    from dlpy.blocks import Bidirectional

    # CNN head: ResNet50 truncated before its output layer.
    cnn = ResNet50_Caffe(self.s)
    cnn_head = cnn.to_functional_model(stop_layers=cnn.layers[-1])

    # RNN tail: bidirectional stack with a named output layer.
    model_rnn = Sequential(conn=self.s, model_table='rnn')
    model_rnn.add(Bidirectional(n=100, n_blocks=2))
    model_rnn.add(OutputLayer('fixed'))

    def assemble(rnn_func):
        # Wire input -> CNN head -> RNN tail and compile the combined model.
        head_in = Input(**cnn_head.layers[0].config)
        features = cnn_head(head_in)
        rnn_out = rnn_func(features)
        combined = Model(self.s, head_in, rnn_out)
        combined.compile()
        return combined

    f_rnn = model_rnn.to_functional_model()
    cnn_rnn = assemble(f_rnn)
    # check type
    self.assertTrue(cnn_rnn.model_type == 'RNN')
    self.assertTrue(cnn_rnn.layers[-1].name == 'fixed')

    # Recreating f_rnn should keep the original layer name.
    f_rnn = model_rnn.to_functional_model()
    cnn_rnn = assemble(f_rnn)
    # it should be fixed if I create f_rnn again.
    self.assertTrue(cnn_rnn.layers[-1].name == 'fixed')

    # Reusing the SAME f_rnn a second time forces a de-duplicated name.
    cnn_rnn = assemble(f_rnn)
    self.assertTrue(cnn_rnn.layers[-1].name == 'fixed_2')
def test_sequential_conversion(self):
    """Convert a small sequential CNN to a functional model and compile it."""
    from dlpy.sequential import Sequential

    stack = [
        InputLayer(3, 224, 224),
        Conv2d(8, 7),
        Pooling(2),
        Conv2d(8, 7),
        Pooling(2),
        Dense(16),
        OutputLayer(act='softmax', n=2),
    ]
    seq_model = Sequential(self.s)
    for layer in stack:
        seq_model.add(layer)

    converted = seq_model.to_functional_model()
    converted.compile()
    converted.print_summary()
def test_stop_layers(self):
    """Truncate a sequential model at its output layer and attach a new head.

    Converts the model with stop_layers set to the final layer, feeds it a
    fresh Input built from the original input layer's config, and replaces
    the classifier with a Keypoints head.
    """
    from dlpy.sequential import Sequential

    seq_model = Sequential(self.s)
    seq_model.add(InputLayer(3, 224, 224))
    seq_model.add(Conv2d(8, 7))
    seq_model.add(Pooling(2))
    seq_model.add(Conv2d(8, 7))
    seq_model.add(Pooling(2))
    seq_model.add(Dense(16))
    seq_model.add(OutputLayer(act='softmax', n=2))

    # Rebuild an input tensor from the original input layer's configuration.
    tensor_in = Input(**seq_model.layers[0].config)

    # Drop the softmax output by stopping the conversion at the last layer.
    trunk = seq_model.to_functional_model(stop_layers=[seq_model.layers[-1]])
    features = trunk(tensor_in)
    keypoints_out = Keypoints(n=10)(features)

    keypoints_model = Model(self.s, tensor_in, keypoints_out)
    keypoints_model.compile()
    keypoints_model.print_summary()