def test_new_bidirectional4(self):
    """A Bidirectional layer can take its input from a named Recurrent layer.

    Builds input -> recurrent('rec1') -> bidirectional(src_layers=['rec1'])
    -> output, then prints the summary to make sure the graph resolves.
    """
    model = Sequential(self.s, model_table='new_table4')
    model.add(InputLayer())
    model.add(Recurrent(n=10, name='rec1'))
    # explicit wiring by name rather than implicit sequential chaining
    model.add(Bidirectional(n=20, src_layers=['rec1']))
    model.add(OutputLayer())
    model.print_summary()
def test_simple_cnn_seq2(self):
    """Small sequential CNN: two identical conv/pool stages, dense, softmax."""
    net = Sequential(self.s, model_table='table8')
    net.add(InputLayer(3, 224, 224))
    # two identical feature-extraction stages
    for _ in range(2):
        net.add(Conv2d(8, 7))
        net.add(Pooling(2))
    net.add(Dense(16))
    net.add(OutputLayer(act='softmax', n=2))
    net.print_summary()
def test_stride(self):
    """VGG-like stack with asymmetric pooling strides; checks summary numbers.

    Each stage is three (Conv2d -> BN) pairs followed by a pooling layer;
    the first two stages downsample vertically only (stride_vertical=2,
    stride_horizontal=1). Verifies the penultimate feature-map size and a
    parameter-count cell of the printed summary.
    """
    model = Sequential(self.s, model_table='Simple_CNN_3classes_cropped')
    # offsets (channel means) intentionally omitted here, as in the original
    model.add(InputLayer(1, width=36, height=144,
                         name='input1',
                         random_mutation='random',
                         random_flip='HV'))

    # stages with vertical-only downsampling: 144x36 -> 72x36 -> 36x36
    for filters in (64, 128):
        for _ in range(3):
            model.add(Conv2d(filters, 3, 3, include_bias=False, act='identity'))
            model.add(BN(act='relu'))
        model.add(Pooling(height=2, width=2,
                          stride_vertical=2, stride_horizontal=1, pool='max'))

    # stages with regular 2x2 max pooling: 36x36 -> 18x18 -> 9x9
    for filters in (256, 512):
        for _ in range(3):
            model.add(Conv2d(filters, 3, 3, include_bias=False, act='identity'))
            model.add(BN(act='relu'))
        model.add(Pooling(2, pool='max'))

    # final conv stage, then global 9x9 pooling down to 1x1
    for _ in range(3):
        model.add(Conv2d(1024, 3, 3, include_bias=False, act='identity'))
        model.add(BN(act='relu'))
    model.add(Pooling(9))

    model.add(Dense(256, dropout=0.5))
    model.add(OutputLayer(act='softmax', n=3, name='output1'))

    # the layer three from the end (the 9x9 pooling) should emit 1x1x1024
    self.assertEqual(model.summary['Output Size'].values[-3], (1, 1, 1024))
    model.print_summary()
    # 2d print summary numerical check
    self.assertEqual(model.summary.iloc[1, -1], 2985984)
def test_conv1d_model(self):
    """1-D CNN for time sequences; checks pooling output size and summary.

    Architecture adapted from:
    https://blog.goodaudience.com/introduction-to-1d-convolutional-neural-networks-in-keras-for-time-sequences-3a7ff801a2cf
    """
    seq_model = Sequential(self.s)
    seq_model.add(InputLayer(width=80 * 3, height=1, n_channels=1))
    seq_model.add(Conv1d(100, 10, act='relu'))
    seq_model.add(Conv1d(100, 10, act='relu'))
    seq_model.add(Pooling(3))
    seq_model.add(Conv1d(160, 10, act='relu'))
    seq_model.add(Conv1d(160, 10, act='relu'))
    seq_model.add(GlobalAveragePooling1D(dropout=0.5))
    seq_model.add(OutputLayer(n=6, act='softmax'))
    # layer index 3 is the max-pooling layer; verify its expected output size
    self.assertEqual(seq_model.layers[3].output_size, (1, 80, 100))
    seq_model.print_summary()
    # 1d print summary numerical check
    self.assertEqual(seq_model.summary.iloc[1, -1], 240000)
def test_model_crnn_bug(self):
    """CRNN (CNN + RNN) topology with a CTC output layer.

    Regression test for a summary/graph bug: builds the full convolutional
    feature extractor with vertical-only pooling, reshapes the spatial map
    into a sequence, and feeds it to a Recurrent layer before the CTC output.
    Size comments give depth x width x height after each step.
    """
    model = Sequential(self.s, model_table='crnn')
    model.add(InputLayer(3, 256, 16))
    model.add(Reshape(height=16, width=256, depth=3))

    model.add(Conv2d(64, 3, 3, stride=1, padding=1))       # 16 x 256 x 64
    model.add(Pooling(2, 2, 2))                            # 8 x 128 x 64

    model.add(Conv2d(128, 3, 3, stride=1, padding=1))      # 8 x 128 x 128
    model.add(Pooling(2, 2, 2))                            # 4 x 64 x 128

    model.add(Conv2d(256, 3, 3, stride=1, padding=1, act='IDENTITY'))  # 4 x 64 x 256
    model.add(BN(act='RELU'))                              # 4 x 64 x 256
    model.add(Conv2d(256, 3, 3, stride=1, padding=1))      # 4 x 64 x 256
    # vertical-only downsampling: halve the height, keep the width
    model.add(Pooling(1, 2, stride_horizontal=1, stride_vertical=2))   # 2 x 64 x 256

    model.add(Conv2d(512, 3, 3, stride=1, padding=1, act='IDENTITY'))  # 2 x 64 x 512
    model.add(BN(act='RELU'))
    model.add(Conv2d(512, 3, 3, stride=1, padding=1))      # 2 x 64 x 512
    model.add(Pooling(1, 2, stride_horizontal=1, stride_vertical=2))   # 1 x 64 x 512

    model.add(Conv2d(512, 3, 3, stride=1, padding=1, act='IDENTITY'))  # 1 x 64 x 512
    model.add(BN(act='RELU'))

    # fold the 1 x 64 x 512 feature map into a length-64 sequence of 512-dim steps
    model.add(Reshape(order='DWH', width=64, height=512, depth=1))
    model.add(Recurrent(512, output_type='SAMELENGTH'))
    model.add(OutputLayer(error='CTC'))
    model.print_summary()
def test_new_bidirectional3(self):
    """A stacked Bidirectional layer accepts per-block sizes via a list for n."""
    model = Sequential(self.s, model_table='new_table3')
    # three stacked bidirectional blocks with widths 10, 20, 30
    model.add(Bidirectional(n=[10, 20, 30], n_blocks=3))
    model.add(OutputLayer())
    model.print_summary()
def test_new_bidirectional1(self):
    """Minimal model: a single Bidirectional layer straight into the output."""
    model = Sequential(self.s, model_table='new_table1')
    model.add(Bidirectional(n=10))
    model.add(OutputLayer())
    model.print_summary()