Example #1
    def init_model(self, input_shape, num_classes, **kwargs):
        inputs = Input(shape=input_shape)
        # bnorm_1 = BatchNormalization(axis=2)(inputs)
        lstm_1 = Bidirectional(CuDNNLSTM(64,
                                         name='blstm_1',
                                         return_sequences=True),
                               merge_mode='concat')(inputs)
        activation_1 = Activation('tanh')(lstm_1)
        dropout1 = SpatialDropout1D(0.5)(activation_1)
        attention_1 = Attention(8, 16)([dropout1, dropout1, dropout1])
        pool_1 = GlobalMaxPool1D()(attention_1)
        dropout2 = Dropout(rate=0.5)(pool_1)
        dense_1 = Dense(units=256, activation='relu')(dropout2)
        outputs = Dense(units=num_classes, activation='softmax')(dense_1)

        model = TFModel(inputs=inputs, outputs=outputs)
        optimizer = optimizers.Adam(
            # learning_rate=1e-3,
            lr=1e-3,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-08,
            decay=0.0002,
            amsgrad=True)
        model.compile(optimizer=optimizer,
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()
        self._model = model
        self.is_init = True
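CuDNNLSTM only runs on a GPU. A hedged portability sketch (assuming TF 2.x, where the layer lives under tf.compat.v1.keras.layers and where plain LSTM with default arguments already dispatches to the cuDNN kernel on GPU):

import tensorflow as tf
from tensorflow.keras.layers import LSTM

# Pick the cuDNN-backed layer only when a GPU is present.
if tf.config.list_physical_devices('GPU'):
    from tensorflow.compat.v1.keras.layers import CuDNNLSTM as RNNLayer
else:
    RNNLayer = LSTM   # CPU fallback; slower, same interface for this usage

lstm = RNNLayer(64, return_sequences=True)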
Example #2
    def init_model(self, input_shape, num_classes, **kwargs):
        inputs = Input(shape=input_shape)
        # bnorm_1 = BatchNormalization(axis=-1)(inputs)
        x = Bidirectional(CuDNNLSTM(96, name='blstm1', return_sequences=True),
                          merge_mode='concat')(inputs)
        # activation_1 = Activation('tanh')(lstm_1)
        x = SpatialDropout1D(0.1)(x)
        x = Attention(8, 16)([x, x, x])
        x1 = GlobalMaxPool1D()(x)
        x2 = GlobalAvgPool1D()(x)
        x = Concatenate(axis=-1)([x1, x2])
        x = Dense(units=128, activation='elu')(x)
        x = Dense(units=64, activation='elu')(x)
        x = Dropout(rate=0.4)(x)
        outputs = Dense(units=num_classes, activation='softmax')(x)

        model = TFModel(inputs=inputs, outputs=outputs)
        optimizer = optimizers.Adam(
            # learning_rate=1e-3,
            lr=1e-3,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-08,
            decay=0.0002,
            amsgrad=True)
        model.compile(optimizer=optimizer,
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()
        self._model = model
        self.is_init = True
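The Attention(8, 16) layer used throughout these examples is project-specific, not a stock Keras layer. Assuming it follows the usual multi-head self-attention pattern with arguments (n_head, size_per_head) and a [query, key, value] input list, a minimal compatible sketch might look like this (weight names and internals are illustrative):

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer

class Attention(Layer):
    """Illustrative multi-head self-attention; output dimension is
    n_head * size_per_head, e.g. Attention(8, 16) -> 128."""

    def __init__(self, n_head, size_per_head, **kwargs):
        self.n_head = n_head
        self.size_per_head = size_per_head
        self.output_dim = n_head * size_per_head
        super().__init__(**kwargs)

    def build(self, input_shape):
        # input is a [query, key, value] list of (batch, time, features)
        self.WQ = self.add_weight('WQ', (int(input_shape[0][-1]), self.output_dim))
        self.WK = self.add_weight('WK', (int(input_shape[1][-1]), self.output_dim))
        self.WV = self.add_weight('WV', (int(input_shape[2][-1]), self.output_dim))
        super().build(input_shape)

    def _split_heads(self, t):
        # (batch, time, n_head * size) -> (batch, n_head, time, size)
        t = K.reshape(t, (-1, K.shape(t)[1], self.n_head, self.size_per_head))
        return K.permute_dimensions(t, (0, 2, 1, 3))

    def call(self, qkv):
        q, k, v = qkv
        q = self._split_heads(K.dot(q, self.WQ))
        k = self._split_heads(K.dot(k, self.WK))
        v = self._split_heads(K.dot(v, self.WV))
        # scaled dot-product attention per head
        scores = tf.matmul(q, k, transpose_b=True) / (self.size_per_head ** 0.5)
        out = tf.matmul(K.softmax(scores), v)          # (batch, n_head, time, size)
        out = K.permute_dimensions(out, (0, 2, 1, 3))  # (batch, time, n_head, size)
        return K.reshape(out, (-1, K.shape(out)[1], self.output_dim))

    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], input_shape[0][1], self.output_dim)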
Example #3
    def init_model(self, input_shape, num_classes, **kwargs):
        inputs = Input(shape=input_shape)
        sequence_len = input_shape[0]
        lstm_units_array = np.array([32, 64, 128, 256, 512])
        lstm_units = lstm_units_array[np.argmin(
            np.abs(lstm_units_array - sequence_len))]
        lstm_1 = CuDNNLSTM(lstm_units, return_sequences=True)(inputs)
        activation_1 = Activation('tanh')(lstm_1)
        if num_classes >= 20:
            if num_classes < 30:
                dropout1 = SpatialDropout1D(0.5)(activation_1)
                attention_1 = Attention(8, 16)([dropout1, dropout1, dropout1])
            else:
                attention_1 = Attention(
                    8, 16)([activation_1, activation_1, activation_1])
            k_num = 10
            # k-max pooling: keep the 10 largest activations per channel
            kmaxpool_l = Lambda(
                lambda x: tf.reshape(
                    tf.nn.top_k(tf.transpose(x, [0, 2, 1]),
                                k=k_num, sorted=True)[0],
                    shape=[-1, k_num, 128]))(attention_1)
            flatten = Flatten()(kmaxpool_l)
            dropout2 = Dropout(rate=0.5)(flatten)
        else:
            dropout1 = SpatialDropout1D(0.5)(activation_1)
            attention_1 = Attention(8, 16)([dropout1, dropout1, dropout1])
            pool_l = GlobalMaxPool1D()(attention_1)
            dropout2 = Dropout(rate=0.5)(pool_l)
        dense_1 = Dense(units=256, activation='relu')(dropout2)
        # dense_1 = Dense(units=256, activation='softplus',
        #                 kernel_regularizer=regularizers.l2(0.01),
        #                 activity_regularizer=regularizers.l1(0.01))(dropout2)
        # dense_1 = DropConnect(Dense(units=256, activation='softplus'), prob=0.5)(dropout2)
        outputs = Dense(units=num_classes, activation='softmax')(dense_1)
        # label smoothing requires one-hot targets, not sparse labels
        loss_fun = CategoricalCrossentropy(label_smoothing=0.2)
        model = TFModel(inputs=inputs, outputs=outputs)
        optimizer = optimizers.Nadam(lr=0.002,
                                     beta_1=0.9,
                                     beta_2=0.999,
                                     epsilon=None,
                                     schedule_decay=0.004)
        model.compile(
            optimizer=optimizer,
            loss=loss_fun,
            # loss="sparse_categorical_crossentropy",
            metrics=['accuracy'])
        model.summary()
        self._model = model
        self.is_init = True
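Note that CategoricalCrossentropy(label_smoothing=0.2) above expects one-hot targets, unlike the commented-out sparse loss. A minimal hedged sketch of preparing labels accordingly (array values are illustrative):

import numpy as np
from tensorflow.keras.utils import to_categorical

# Hypothetical sparse integer labels; convert before fitting so that
# label smoothing can redistribute probability mass across classes.
y_sparse = np.array([0, 3, 1, 2])
y_onehot = to_categorical(y_sparse, num_classes=4)
# model.fit(train_x, y_onehot, ...)  # train_x assumed prepared elsewhere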
Example #4
    def init_model(self, config):

        max_len = config['max_len']
        num_classes = config['num_classes']

        inputs = Input(shape=(max_len, 96))
        x = inputs
        cnn1 = Conv1D(50,
                      kernel_size=1,
                      strides=1,
                      padding='same',
                      kernel_initializer='he_normal')(x)
        cnn1 = BatchNormalization(axis=-1)(cnn1)
        cnn1 = LeakyReLU()(cnn1)
        cnn1 = GlobalMaxPooling1D()(cnn1)  # CNN_Dynamic_MaxPooling(cnn1, 50, 2, 2)

        cnn2 = Conv1D(50,
                      kernel_size=3,
                      strides=1,
                      padding='same',
                      kernel_initializer='he_normal')(x)
        cnn2 = BatchNormalization(axis=-1)(cnn2)
        cnn2 = LeakyReLU()(cnn2)
        cnn2 = GlobalMaxPooling1D()(cnn2)

        cnn3 = Conv1D(50,
                      kernel_size=5,
                      strides=1,
                      padding='same',
                      kernel_initializer='he_normal')(x)
        cnn3 = BatchNormalization(axis=-1)(cnn3)
        cnn3 = LeakyReLU()(cnn3)
        cnn3 = GlobalMaxPooling1D()(cnn3)
        x = concatenate([cnn1, cnn2, cnn3], axis=-1)

        x = Dense(units=num_classes, activation='softmax')(x)
        model = TFModel(inputs=inputs, outputs=x)
        opt = optimizers.RMSprop(lr=0.0001, decay=1e-6)
        model.compile(optimizer=opt,
                      loss="sparse_categorical_crossentropy",
                      metrics=['acc'])
        model.summary()
        self._model = model
        self.is_init = True
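The three parallel branches act as n-gram detectors over the time axis with window sizes 1, 3 and 5; each contributes 50 pooled features, giving a 150-dimensional vector. A quick hedged shape check (the max_len of 400 is an assumed value):

from tensorflow.keras import Model
from tensorflow.keras.layers import Input, Conv1D, GlobalMaxPooling1D, concatenate

inp = Input(shape=(400, 96))   # (max_len, features); max_len assumed
branches = [GlobalMaxPooling1D()(Conv1D(50, k, padding='same')(inp))
            for k in (1, 3, 5)]
print(Model(inp, concatenate(branches, axis=-1)).output_shape)  # (None, 150)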
Example #5
    def init_model(self,
                   input_shape,
                   num_classes,
                   **kwargs):
        inputs = Input(shape=input_shape)
        lstm_1 = CuDNNLSTM(128, return_sequences=True)(inputs)
        activation_1 = Activation('tanh')(lstm_1)
        if num_classes >= 20:
            if num_classes < 30:
                dropout1 = SpatialDropout1D(0.5)(activation_1)
                attention_1 = Attention(8, 16)([dropout1, dropout1, dropout1])
            # no dropout, to retain more information when classifying
            # a large number of classes
            else:
                attention_1 = Attention(8, 16)(
                    [activation_1, activation_1, activation_1])
            k_num = 10
            kmaxpool_l = Lambda(
                lambda x: tf.reshape(tf.nn.top_k(tf.transpose(x, [0, 2, 1]), k=k_num, sorted=True)[0],
                                     shape=[-1, k_num, 128]))(attention_1)
            flatten = Flatten()(kmaxpool_l)
            dropout2 = Dropout(rate=0.5)(flatten)
        else:
            dropout1 = SpatialDropout1D(0.5)(activation_1)
            attention_1 = Attention(8, 16)([dropout1, dropout1, dropout1])
            pool_l = GlobalMaxPool1D()(attention_1)
            dropout2 = Dropout(rate=0.5)(pool_l)
        dense_1 = Dense(units=256, activation='softplus')(dropout2)
        outputs = Dense(units=num_classes, activation='softmax')(dense_1)

        model = TFModel(inputs=inputs, outputs=outputs)
        optimizer = optimizers.Nadam(
            lr=0.002,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=None,
            schedule_decay=0.004)
        model.compile(
            optimizer=optimizer,
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy'])
        model.summary()
        self._model = model
        self.is_init = True
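The Lambda above implements k-max pooling: instead of keeping only the per-channel maximum over time, it keeps the k largest activations per channel. (Its final tf.reshape interleaves the channel and k axes rather than transposing them back, which is harmless here because the result is flattened immediately.) A standalone sketch, assuming TF 2.x eager mode for the printout:

import tensorflow as tf

def k_max_pool(x, k=10):
    # x: (batch, time, channels) -> (batch, k, channels), keeping the
    # k largest values along the time axis for each channel
    x = tf.transpose(x, [0, 2, 1])               # (batch, channels, time)
    top_k = tf.nn.top_k(x, k=k, sorted=True)[0]  # (batch, channels, k)
    return tf.transpose(top_k, [0, 2, 1])

print(k_max_pool(tf.random.normal([2, 40, 128])).shape)  # (2, 10, 128)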
Example #6
    def init_model(self,
                   input_shape,
                   num_classes,
                   **kwargs):
        inputs = Input(shape=input_shape)
        # bnorm_1 = BatchNormalization(axis=2)(inputs)
        sequence_len = input_shape[0]
        lstm_units_array = np.array([32, 64, 128, 256, 512])
        lstm_units = lstm_units_array[np.argmin(
            np.abs(lstm_units_array - sequence_len))]
        lstm_1 = Bidirectional(CuDNNLSTM(lstm_units, name='blstm_1',
                                         return_sequences=True),
                               merge_mode='concat')(inputs)
        activation_1 = Activation('tanh')(lstm_1)
        dropout1 = SpatialDropout1D(0.5)(activation_1)
        attention_1 = Attention(8, 16)([dropout1, dropout1, dropout1])
        pool_1 = GlobalMaxPool1D()(attention_1)
        dropout2 = Dropout(rate=0.5)(pool_1)
        dense_1 = Dense(units=256, activation='relu')(dropout2)
        # dense_1 = Dense(units=256, activation='relu',
        #                 kernel_regularizer=regularizers.l2(0.01),
        #                 activity_regularizer=regularizers.l1(0.01))(dropout2)
        # dense_1 = DropConnect(Dense(units=256, activation='relu'), prob=0.5)(dropout2)
        outputs = Dense(units=num_classes, activation='softmax')(dense_1)

        model = TFModel(inputs=inputs, outputs=outputs)
        loss_fun = CategoricalCrossentropy(label_smoothing=0.2)
        optimizer = optimizers.Adam(
            # learning_rate=1e-3,
            lr=1e-3,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-08,
            decay=0.0002,
            amsgrad=True)
        model.compile(
            optimizer=optimizer,
            loss=loss_fun,
            #loss="sparse_categorical_crossentropy",
            metrics=['accuracy'])
        model.summary()
        self._model = model
        self.is_init = True
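The nearest-size lookup above snaps the LSTM width to the candidate closest to the sequence length. A small worked example (lengths are illustrative):

import numpy as np

lstm_units_array = np.array([32, 64, 128, 256, 512])
for sequence_len in (25, 100, 300):
    units = lstm_units_array[np.argmin(np.abs(lstm_units_array - sequence_len))]
    print(sequence_len, '->', units)   # 25 -> 32, 100 -> 128, 300 -> 256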
Example #7
    def init_model(self, input_shape, num_classes, **kwargs):
        # FIXME: the Keras Sequential model performs better than the
        # functional API here; why?
        inputs = Input(shape=input_shape)
        # dropout0 = SpatialDropout2D(rate=0.1, data_format='channels_last')(inputs)
        min_size = min(input_shape[:2])
        pool_l = None
        for i in range(5):
            if i == 0:
                conv_l = Conv2D(64,
                                3,
                                input_shape=input_shape,
                                padding='same',
                                data_format='channels_last')(inputs)
            else:
                conv_l = Conv2D(64, 3, padding='same')(pool_l)
            activation_l = Activation('relu')(conv_l)
            bn_l = BatchNormalization()(activation_l)
            pool_l = MaxPooling2D(pool_size=(2, 2))(bn_l)
            min_size //= 2
            if min_size < 2:
                break

        avgpool_l = GlobalAveragePooling2D(data_format='channels_last')(pool_l)
        maxpool_l = GlobalMaxPooling2D(data_format='channels_last')(pool_l)
        concat = Concatenate()([avgpool_l, maxpool_l])
        flatten = Flatten()(concat)  # no-op: concat is already 2-D
        bn1 = BatchNormalization()(flatten)
        dense1 = Dense(256, activation='relu')(bn1)
        bn2 = BatchNormalization()(dense1)
        dropout1 = Dropout(rate=0.5)(bn2)
        outputs = Dense(num_classes, activation='softmax')(dropout1)

        model = TFModel(inputs=inputs, outputs=outputs)
        # optimizer = tf.keras.optimizers.Adadelta()
        optimizer = tf.keras.optimizers.Adam()
        # optimizer = optimizers.SGD(lr=1e-3, decay=2e-4, momentum=0.9, clipvalue=5)
        model.compile(loss='sparse_categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])
        model.summary()
        self.is_init = True
        self._model = model
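The loop above keeps adding conv blocks while halving the smaller spatial side, stopping once it drops below 2. An illustrative trace for a 16-pixel side (value assumed):

min_size = 16
for i in range(5):
    # conv + relu + bn + 2x2 max-pool block i+1 is built here
    min_size //= 2
    if min_size < 2:
        break
# blocks built: 4 (min_size shrinks 8 -> 4 -> 2 -> 1, stopping after block 4)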
Example #8
    def init_model(self,
                   input_shape,
                   num_classes,
                   **kwargs):
        layers = 5
        filters_size = [64, 128, 256, 512, 512]
        kernel_size = (3, 3)
        pool_size = [(2, 2), (2, 2), (2, 2), (4, 1), (4, 1)]

        freq_axis = 2
        channel_axis = 3

        channel_size = 128
        min_size = min(input_shape[:2])
        melgram_input = Input(shape=input_shape)
        # x = ZeroPadding2D(padding=(0, 37))(melgram_input)

        x = Reshape((input_shape[0], input_shape[1], 1))(melgram_input)
        x = BatchNormalization(axis=freq_axis, name='bn_0_freq')(x)

        # Conv block 1
        x = Convolution2D(
            filters=filters_size[0],
            kernel_size=kernel_size,
            padding='same',
            name='conv1')(x)
        x = ELU()(x)
        x = BatchNormalization(axis=channel_axis, name='bn1')(x)
        x = MaxPooling2D(
            pool_size=pool_size[0],
            strides=pool_size[0],
            name='pool1')(x)
        x = Dropout(0.1, name='dropout1')(x)

        min_size = min_size // pool_size[0][0]

        for layer in range(1, layers):
            min_size = min_size // pool_size[layer][0]
            if min_size < 1:
                break
            x = Convolution2D(
                filters=filters_size[layer],
                kernel_size=kernel_size,
                padding='same',
                name='conv' + str(layer + 1))(x)
            x = ELU()(x)
            x = BatchNormalization(axis=channel_axis,
                                   name='bn' + str(layer + 1))(x)
            x = MaxPooling2D(
                pool_size=pool_size[layer],
                strides=pool_size[layer],
                name='pool' + str(layer + 1))(x)
            x = Dropout(0.1, name='dropout' + str(layer + 1))(x)

        x = Reshape((-1, channel_size))(x)

        gru_units = 32
        if num_classes > 32:
            gru_units = int(num_classes * 1.5)
        # GRU block 1, 2, output
        x = CuDNNGRU(gru_units, return_sequences=True, name='gru1')(x)
        x = CuDNNGRU(gru_units, return_sequences=False, name='gru2')(x)
        x = Dropout(0.3)(x)
        outputs = Dense(num_classes, activation='softmax', name='output')(x)

        model = TFModel(inputs=melgram_input, outputs=outputs)
        optimizer = optimizers.Adam(
            # learning_rate=1e-3,
            lr=1e-3,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-08,
            decay=1e-4,
            amsgrad=True)
        model.compile(
            optimizer=optimizer,
            loss="sparse_categorical_crossentropy",
            metrics=['accuracy'])
        model.summary()
        self._model = model
        self.is_init = True
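In this example the frequency axis shrinks by pool_size[layer][0] per block, and a block is skipped once min_size falls below 1. An illustrative trace for min(input_shape[:2]) == 128:

pool_size = [(2, 2), (2, 2), (2, 2), (4, 1), (4, 1)]
min_size = 128 // pool_size[0][0]   # after conv block 1 -> 64
for layer in range(1, 5):
    min_size //= pool_size[layer][0]
    if min_size < 1:
        break                       # this block is skipped
    print('block', layer + 1, 'runs, min_size =', min_size)
# -> blocks 2..5 all run: 32, 16, 4, 1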
Example #9
    def init_model(self, input_shape, num_classes, **kwargs):
        freq_axis = 2
        channel_axis = 3
        channel_size = 128
        min_size = min(input_shape[:2])
        melgram_input = Input(shape=input_shape)
        # x = ZeroPadding2D(padding=(0, 37))(melgram_input)
        # x = BatchNormalization(axis=freq_axis, name='bn_0_freq')(x)

        x = Reshape((input_shape[0], input_shape[1], 1))(melgram_input)
        # Conv block 1
        x = Convolution2D(64, 3, 1, padding='same', name='conv1')(x)
        x = BatchNormalization(axis=channel_axis, name='bn1')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
        x = Dropout(0.1, name='dropout1')(x)

        # Conv block 2
        x = Convolution2D(channel_size, 3, 1, padding='same', name='conv2')(x)
        x = BatchNormalization(axis=channel_axis, name='bn2')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(3, 3), strides=(3, 3), name='pool2')(x)
        x = Dropout(0.1, name='dropout2')(x)

        # Conv block 3
        x = Convolution2D(channel_size, 3, 1, padding='same', name='conv3')(x)
        x = BatchNormalization(axis=channel_axis, name='bn3')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(3, min_size // 6),
                         strides=(3, min_size // 6),
                         name='pool3')(x)
        x = Dropout(0.1, name='dropout3')(x)

        # if min_size // 24 >= 4:
        #     # Conv block 4
        #     x = Convolution2D(
        #         channel_size,
        #         3,
        #         1,
        #         padding='same',
        #         name='conv4')(x)
        #     x = BatchNormalization(axis=channel_axis, name='bn4')(x)
        #     x = ELU()(x)
        #     x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool4')(x)
        #     x = Dropout(0.1, name='dropout4')(x)

        x = Reshape((-1, channel_size))(x)
        avg_pool = GlobalAvgPool1D()(x)
        max_pool = GlobalMaxPool1D()(x)  # avoid shadowing the builtin max
        x = concatenate([avg_pool, max_pool], axis=-1)
        # x = Dense(max(int(num_classes*1.5), 128), activation='relu', name='dense1')(x)
        x = Dropout(0.3)(x)
        outputs = Dense(num_classes, activation='softmax', name='output')(x)

        model = TFModel(inputs=melgram_input, outputs=outputs)

        optimizer = optimizers.Nadam(lr=0.002,
                                     beta_1=0.9,
                                     beta_2=0.999,
                                     epsilon=None,
                                     schedule_decay=0.004)
        model.compile(optimizer=optimizer,
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()
        self._model = model
        self.is_init = True
Example #10
    def init_model(self, input_shape, num_classes, **kwargs):
        freq_axis = 2
        channel_axis = 3
        channel_size = 128
        min_size = min(input_shape[:2])
        melgram_input = Input(shape=input_shape)
        # x = ZeroPadding2D(padding=(0, 37))(melgram_input)
        # x = BatchNormalization(axis=freq_axis, name='bn_0_freq')(x)

        x = Reshape((input_shape[0], input_shape[1], 1))(melgram_input)
        # Conv block 1
        x = Convolution2D(64, 3, 1, padding='same', name='conv1')(x)
        x = BatchNormalization(axis=channel_axis, name='bn1')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
        x = Dropout(0.1, name='dropout1')(x)

        # Conv block 2
        x = Convolution2D(channel_size, 3, 1, padding='same', name='conv2')(x)
        x = BatchNormalization(axis=channel_axis, name='bn2')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(3, 3), strides=(3, 3), name='pool2')(x)
        x = Dropout(0.1, name='dropout2')(x)

        # Conv block 3
        x = Convolution2D(channel_size, 3, 1, padding='same', name='conv3')(x)
        x = BatchNormalization(axis=channel_axis, name='bn3')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool3')(x)
        x = Dropout(0.1, name='dropout3')(x)

        if min_size // 24 >= 4:
            # Conv block 4
            x = Convolution2D(channel_size, 3, 1, padding='same',
                              name='conv4')(x)
            x = BatchNormalization(axis=channel_axis, name='bn4')(x)
            x = ELU()(x)
            x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool4')(x)
            x = Dropout(0.1, name='dropout4')(x)

        x = Reshape((-1, channel_size))(x)

        gru_units = 128
        if num_classes > gru_units:
            gru_units = int(num_classes * 1.5)
        # GRU block 1, 2, output
        x = CuDNNGRU(gru_units, return_sequences=True, name='gru1')(x)
        x = CuDNNGRU(gru_units, return_sequences=False, name='gru2')(x)
        # x = Dense(max(int(num_classes*1.5), 128), activation='relu', name='dense1')(x)
        x = Dropout(0.3)(x)
        outputs = Dense(num_classes, activation='softmax', name='output')(x)

        model = TFModel(inputs=melgram_input, outputs=outputs)
        optimizer = optimizers.Adam(
            # learning_rate=1e-3,
            lr=1e-3,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-08,
            decay=1e-4,
            amsgrad=True)
        model.compile(optimizer=optimizer,
                      loss="sparse_categorical_crossentropy",
                      metrics=['accuracy'])
        model.summary()
        self._model = model
        self.is_init = True
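The recurrent head here is sized at a minimum of 128 units, growing to 1.5x the class count for large label sets; a quick check of the sizing rule (class counts illustrative):

for num_classes in (10, 200):
    gru_units = 128
    if num_classes > gru_units:
        gru_units = int(num_classes * 1.5)
    print(num_classes, '->', gru_units)   # 10 -> 128, 200 -> 300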
Example #11
    def init_model(self, input_shape, num_classes, **kwargs):

        freq_axis = 2
        channel_axis = 3
        channel_size = 128
        min_size = min(input_shape[:2])
        inputs = Input(shape=input_shape)
        # x = ZeroPadding2D(padding=(0, 37))(melgram_input)
        # x = BatchNormalization(axis=freq_axis, name='bn_0_freq')(x)

        x = Reshape((input_shape[0], input_shape[1], 1))(inputs)
        # Conv block 1
        x = Convolution2D(64, 3, 1, padding='same', name='conv1')(x)
        x = BatchNormalization(axis=channel_axis, name='bn1')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
        x = Dropout(0.1, name='dropout1')(x)

        # Conv block 2
        x = Convolution2D(channel_size, 3, 1, padding='same', name='conv2')(x)
        x = BatchNormalization(axis=channel_axis, name='bn2')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(3, 3), strides=(3, 3), name='pool2')(x)
        x = Dropout(0.1, name='dropout2')(x)

        # Conv block 3
        x = Convolution2D(channel_size, 3, 1, padding='same', name='conv3')(x)
        x = BatchNormalization(axis=channel_axis, name='bn3')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool3')(x)
        x = Dropout(0.1, name='dropout3')(x)

        if min_size // 24 >= 4:
            # Conv block 4
            x = Convolution2D(channel_size, 3, 1, padding='same',
                              name='conv4')(x)
            x = BatchNormalization(axis=channel_axis, name='bn4')(x)
            x = ELU()(x)
            x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool4')(x)
            x = Dropout(0.1, name='dropout4')(x)

        x = Reshape((-1, channel_size))(x)

        avg_pool = GlobalAvgPool1D()(x)
        max_pool = GlobalMaxPool1D()(x)  # avoid shadowing the builtin max
        x = concatenate([avg_pool, max_pool], axis=-1)
        # x = Dense(max(int(num_classes*1.5), 128), activation='relu', name='dense1')(x)
        x = Dropout(0.3)(x)
        outputs1 = Dense(num_classes, activation='softmax', name='output')(x)

        # bnorm_1 = BatchNormalization(axis=2)(inputs)
        lstm_1 = Bidirectional(CuDNNLSTM(64,
                                         name='blstm_1',
                                         return_sequences=True),
                               merge_mode='concat')(inputs)
        activation_1 = Activation('tanh')(lstm_1)
        dropout1 = SpatialDropout1D(0.5)(activation_1)
        attention_1 = Attention(8, 16)([dropout1, dropout1, dropout1])
        pool_1 = GlobalMaxPool1D()(attention_1)
        dropout2 = Dropout(rate=0.5)(pool_1)
        dense_1 = Dense(units=256, activation='relu')(dropout2)
        outputs2 = Dense(units=num_classes, activation='softmax')(dense_1)

        outputs = Average()([outputs1, outputs2])
        model = TFModel(inputs=inputs, outputs=outputs)
        optimizer = optimizers.Adam(
            # learning_rate=1e-3,
            lr=1e-3,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-08,
            decay=0.0002,
            amsgrad=True)
        model.compile(optimizer=optimizer,
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()
        self._model = model
        self.is_init = True
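This last example ensembles a CNN head and a BLSTM-attention head by averaging their softmax outputs with Average(). A hedged smoke test (the wrapper class name SpeechModel and the (128, 128) input shape are assumptions, and the CuDNN layers require a GPU):

import numpy as np

wrapper = SpeechModel()   # hypothetical class hosting init_model above
wrapper.init_model(input_shape=(128, 128), num_classes=10)
probs = wrapper._model.predict(np.random.rand(4, 128, 128).astype('float32'))
print(probs.shape)        # (4, 10): averaged class probabilities per clip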