from keras.models import Sequential
from keras.layers import (Embedding, SpatialDropout1D, SeparableConv1D,
                          MaxPooling1D, GRU, Dense)


def build_model(num_class, max_features=15000, dim=200, max_len=300,
                dropout_rate=0.2, gru_size=100):
    model = Sequential()
    model.add(Embedding(max_features + 1, dim, input_length=max_len))
    model.add(SpatialDropout1D(dropout_rate))
    model.add(SeparableConv1D(32, kernel_size=3, padding='same', activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(SeparableConv1D(64, kernel_size=3, padding='same', activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(GRU(gru_size))
    # Sigmoid + binary cross-entropy: one independent probability per class
    # (multi-label setup).
    model.add(Dense(num_class, activation='sigmoid', kernel_initializer='normal'))
    # total_acc and binary_acc are custom metric functions assumed to be
    # defined elsewhere in the module; they are not Keras built-ins.
    model.compile(loss='binary_crossentropy', optimizer='rmsprop',
                  metrics=[total_acc, binary_acc])
    return model
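
# A minimal smoke test for build_model, offered as a hedged sketch: it only
# runs if total_acc and binary_acc are already defined in scope, and the
# 6-class size and random input below are illustrative assumptions.
import numpy as np

model = build_model(num_class=6)
dummy = np.random.randint(1, 15001, size=(2, 300))  # two fake padded sequences
print(model.predict(dummy).shape)                    # expected: (2, 6)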

from keras.models import Model
from keras.layers import Input, SeparableConv1D, BatchNormalization, Activation


def C(filters, kernel_size, input_dim, conv_border_mode, conv_stride, dilation):
    # Layer C1: first layer
    """ Build a deep network for speech """
    # Main acoustic input: variable-length sequence of input_dim features
    input_data = Input(shape=(None, input_dim))
    # TODO: Specify the layers in your network
    conv = SeparableConv1D(filters=filters,
                           kernel_size=kernel_size,
                           strides=conv_stride,
                           padding=conv_border_mode,
                           dilation_rate=dilation,
                           depth_multiplier=1,
                           activation=None,
                           use_bias=True,
                           depthwise_initializer='glorot_uniform',
                           pointwise_initializer='glorot_uniform',
                           bias_initializer='zeros',
                           name='c')(input_data)
    conv_bn = BatchNormalization(name='batch_norm1')(conv)
    relu = Activation('relu', name='relu1')(conv_bn)
    # Specify the model
    model = Model(inputs=input_data, outputs=relu)
    # TODO: Specify model.output_length
    # model.output_length = lambda x: cnn_output_length(
    #     x, kernel_size, conv_border_mode, conv_stride, dilation)
    return model
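
# Hedged usage sketch for C: a single separable-conv front end over
# spectrogram-like frames. The 161 feature dim, 200 filters, and kernel
# size 11 are illustrative assumptions, not values from the original code.
acoustic_block = C(filters=200, kernel_size=11, input_dim=161,
                   conv_border_mode='same', conv_stride=2, dilation=1)
acoustic_block.summary()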

from keras.models import Model
from keras.layers import Input, SeparableConv1D


def Pointwise(filters, kernel_size, input_dim, conv_border_mode, conv_stride, dilation):
    """ Build a deep network for speech """
    # Main acoustic input
    input_data = Input(shape=(None, input_dim))
    # TODO: Specify the layers in your network
    # Note: conv_stride and dilation are accepted but unused; strides is
    # fixed at 1, and the layer is only truly pointwise when the caller
    # passes kernel_size=1.
    conv = SeparableConv1D(filters=filters,
                           kernel_size=kernel_size,
                           strides=1,
                           padding=conv_border_mode,
                           name='c')(input_data)
    # Specify the model
    model = Model(inputs=input_data, outputs=conv)
    # TODO: Specify model.output_length
    # model.output_length = lambda x: cnn_output_length(
    #     x, kernel_size, conv_border_mode, conv_stride, dilation)
    return model
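
# Hedged usage sketch for Pointwise: passing kernel_size=1 makes it an
# actual pointwise (1x1) channel projection; all values here are assumed.
projection = Pointwise(filters=256, kernel_size=1, input_dim=161,
                       conv_border_mode='same', conv_stride=1, dilation=1)
projection.summary()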

from keras.layers import SeparableConv1D, GlobalMaxPooling1D, Dense, Dropout


def create_channel(x, filter_size, feature_map):
    """
    Creates a layer, working channel wise

    Arguments:
        x           : Input for convolutional channel
        filter_size : Filter size for creating Conv1D
        feature_map : Feature map

    Returns:
        x : Channel including (Conv1D + GlobalMaxPooling + Dense + Dropout)
    """
    # hidden_units and dropout_rate are assumed to be module-level globals.
    x = SeparableConv1D(feature_map, kernel_size=filter_size, activation='relu',
                        strides=1, padding='same', depth_multiplier=4)(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(hidden_units)(x)
    x = Dropout(dropout_rate)(x)
    return x
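
# Hedged sketch of the multi-channel pattern create_channel is built for:
# one channel per filter size, merged with Concatenate. The globals,
# shapes, and filter sizes below are assumptions for illustration.
from keras.models import Model
from keras.layers import Input, Concatenate, Dense

hidden_units, dropout_rate = 64, 0.5        # globals create_channel expects
embedded = Input(shape=(300, 128))          # (sequence length, embedding dim)
channels = [create_channel(embedded, fs, feature_map=100) for fs in (3, 4, 5)]
merged = Concatenate()(channels)
output = Dense(1, activation='sigmoid')(merged)
model = Model(inputs=embedded, outputs=output)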

from keras.models import Model
from keras.layers import (Input, SeparableConv1D, BatchNormalization,
                          Activation, Add)


def tcsconv(filters, kernel_size, input_dim):
    # Layer B: output_length = input_length
    input_data = Input(shape=(None, input_dim))

    # x0: pointwise conv + batchnorm (residual branch)
    x0 = SeparableConv1D(filters,
                         kernel_size=1,
                         strides=1,
                         padding='same',
                         activation=None,
                         name='c1')(input_data)
    x0 = BatchNormalization(name='batch_norm1')(x0)

    # x1*1 .. x1*4: four repeats of (separable conv + batchnorm + relu)
    x1 = input_data
    for i in range(1, 5):
        x1 = SeparableConv1D(filters,
                             kernel_size,
                             strides=1,
                             padding='same',
                             data_format='channels_last',
                             dilation_rate=1,
                             depth_multiplier=1,
                             activation=None,
                             use_bias=True,
                             depthwise_initializer='glorot_uniform',
                             pointwise_initializer='glorot_uniform',
                             bias_initializer='zeros',
                             name='layer_separable_conv%d' % i)(x1)
        x1 = BatchNormalization(name='conv_batch_norm%d' % i)(x1)
        x1 = Activation('relu', name='relu%d' % i)(x1)

    # x2: separable conv + batchnorm (no activation before the add)
    x2 = SeparableConv1D(filters,
                         kernel_size,
                         strides=1,
                         padding='same',
                         data_format='channels_last',
                         dilation_rate=1,
                         depth_multiplier=1,
                         activation=None,
                         use_bias=True,
                         depthwise_initializer='glorot_uniform',
                         pointwise_initializer='glorot_uniform',
                         bias_initializer='zeros',
                         name='layer_separable_conv')(x1)
    x2 = BatchNormalization(name='conv_batch_norm')(x2)

    # out: out(x3) := (x1 -> x1 -> x1 -> x1 -> x2) + x0
    added = Add()([x0, x2])
    out = Activation('relu', name='relu')(added)

    tcsconv_B = Model(inputs=input_data, outputs=out)
    # tcsconv_B.output_length = lambda x: input_length
    return tcsconv_B
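
# Hedged usage sketch for tcsconv: one residual time-channel-separable
# block. The 256 filters, kernel size 33, and 64-channel input are
# illustrative assumptions, not values from the original code.
block = tcsconv(filters=256, kernel_size=33, input_dim=64)
block.summary()  # time length is preserved: padding='same', stride 1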