Example #1
def test_globalpooling_1d_supports_masking():
    # Test that GlobalAveragePooling1D averages only over unmasked timesteps
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Masking, pooling

    model = Sequential()
    model.add(Masking(mask_value=0., input_shape=(3, 4)))
    model.add(pooling.GlobalAveragePooling1D())
    model.compile(loss='mae', optimizer='adam')

    # Zero out timesteps 1.. of the first sample so Masking drops them;
    # the pooled output must then equal that sample's first timestep
    model_input = np.random.randint(low=1, high=5, size=(2, 3, 4))
    model_input[0, 1:, :] = 0
    output = model.predict(model_input)
    assert np.array_equal(output[0], model_input[0, 0, :])
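For reference, a minimal NumPy sketch of the semantics this test relies on (deriving the mask from all-zero timesteps is an assumption matching mask_value=0. above): with masking, the average is taken only over the unmasked timesteps.

import numpy as np

x = np.random.randint(low=1, high=5, size=(2, 3, 4)).astype("float32")
x[0, 1:, :] = 0                                   # later timesteps of sample 0 are "masked"
mask = np.any(x != 0, axis=-1)                    # (batch, steps): True where the step is kept
masked_mean = (x * mask[..., None]).sum(axis=1) / mask.sum(axis=1, keepdims=True)
assert np.allclose(masked_mean[0], x[0, 0, :])    # reduces to the single unmasked step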
Example #2
def squeeze_excite_block(input):
    ''' Create a squeeze-excite block
    Args:
        input: input tensor (channels on the last axis)
    Returns: a keras tensor of the same shape as `input`
    '''
    from keras.layers import pooling, multiply, Reshape, Dense

    filters = input._keras_shape[-1]  # channel_axis = -1 for the TF backend

    # Squeeze: global average over the time axis, reshaped back to (1, filters)
    se = pooling.GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    # Excite: bottleneck (reduction ratio 16) then per-channel sigmoid gates
    se = Dense(filters // 16, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)
    # Rescale the input channels by the learned gates
    se = multiply([input, se])
    return se
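A minimal usage sketch (the input shape and filter count are illustrative, and this assumes an older Keras where layer outputs carry `_keras_shape`):

from keras.layers import Input, Conv1D
from keras.models import Model

inputs = Input(shape=(128, 16))                          # (timesteps, channels), illustrative
x = Conv1D(filters=64, kernel_size=3, padding="same")(inputs)
x = squeeze_excite_block(x)                              # recalibrate the 64 channels
model = Model(inputs=inputs, outputs=x)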
Example #3
    def creat_model(self):

        from keras.layers import Input, Conv1D, Dense
        from keras.layers import normalization, Activation, pooling
        from keras.models import Model 
        from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, TensorBoard
        from keras.layers.core import Dropout
        import os

        # Input layer
        input_layer = Input(self.input_shape, name="main_input")

        # ResNet Units
        n_filters = 64
        n_layers = 3
        kernel_sizes = [7, 5, 3]   # number of elements has to be equal to n_layers
        resnet_unit_input = input_layer
        for i in range(self.n_resnet_units):
            first_resnet_unit = (i == 0)
            resnet_unit_output = self.__create_resnet_unit(resnet_unit_input, n_filters=n_filters,
                                                           n_layers=n_layers, kernel_sizes=kernel_sizes,
                                                           first_resnet_unit=first_resnet_unit)
            resnet_unit_input = resnet_unit_output

        # Global pooling layer
        gap_layer_main = pooling.GlobalAveragePooling1D()(resnet_unit_output)

        # Output layer
        # Use softmax
        output_layers = []
        n_output_resnet_units = 0   # set > 0 to give each output head its own ResNet units
        for i in range(self.n_classes):
            ####################
            # Add resnet layer to each output before global avg pooling
            if n_output_resnet_units > 0:
                resnet_unit_input_j = resnet_unit_input
                for j in range(n_output_resnet_units):
                    resnet_unit_output_j = self.__create_resnet_unit(resnet_unit_input_j, n_filters=n_filters,
                                                                   n_layers=n_layers, kernel_sizes=kernel_sizes,
                                                                   first_resnet_unit=False)

                    resnet_unit_input_j = resnet_unit_output_j
                # Global pooling layer
                gap_layer = pooling.GlobalAveragePooling1D()(resnet_unit_output_j)
            else:
                gap_layer = gap_layer_main

            ####################
            output_layers.append(Dense(2, activation="softmax", name="output_"+str(i))(gap_layer))

        # Put all the model components together
        model = Model(inputs=input_layer, outputs=output_layers)

        # configure the model
        model.compile(loss=self.loss, optimizer=self.optimizer,
                      metrics=self.metrics, loss_weights=self.loss_weights)

        # Reduce the learning rate if plateau occurs on the loss curve
        reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=10, 
                                      min_lr=0.00001)

        # Save the model at certain checkpoints 
        fname = "weights.epoch_{epoch:02d}.val_loss_{val_loss:.2f}.hdf5"
        file_path = os.path.join(self.out_dir, fname)
        model_checkpoint = ModelCheckpoint(file_path, monitor='val_loss', save_best_only=False, period=5)
        
#        # For TensorBoard visualization
#        log_dir = os.path.join(self.out_dir, "logs")
#        tensorboard = TensorBoard(log_dir=log_dir, batch_size=self.batch_size, update_freq='epoch')

        self.callbacks = [reduce_lr, model_checkpoint]

        return model
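A hedged training sketch for this multi-head model (`builder`, the data shapes, and the labels are illustrative assumptions, not from the source): each of the `n_classes` softmax heads expects its own (n_samples, 2) one-hot target array.

import numpy as np

model = builder.creat_model()                  # `builder`: hypothetical instance of the class above
n_samples = 32
X = np.random.rand(n_samples, 128, 9).astype("float32")   # (samples, timesteps, channels), illustrative
y = [np.eye(2)[np.random.randint(0, 2, n_samples)] for _ in model.outputs]
model.fit(X, y, epochs=1, validation_split=0.25,          # val split so ModelCheckpoint sees val_loss
          callbacks=builder.callbacks)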
Example #4
    def creat_model(self):

        from keras.layers import Input, Conv1D, Dense, LSTM, Masking, Permute
        from keras.layers import normalization, Activation, pooling, concatenate
        from keras.models import Model 
        from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, TensorBoard
        from keras.layers.core import Dropout
        import os

        # Input layer 
        input_layer = Input(self.input_shape)

        # Permute the input layer
        # Note: (1, 2) keeps the axes in their original order, so this Permute is
        # effectively an identity; use Permute((2, 1)) to actually swap time and channels
        input_layer_shuffle = Permute((1,2))(input_layer)

        # Add masking: timesteps that are all zeros are skipped by the LSTM
        input_layer_masked = Masking(mask_value=0.0)(input_layer_shuffle)

        # LSTM
        lstm_layer = LSTM(32)(input_layer_masked)
        # Dropout
        lstm_layer = Dropout(0.1, seed=100)(lstm_layer)


        # Add CNN layer + squeeze-excite block
        conv_layer = Conv1D(filters=32, kernel_size=8, strides=1, padding="same",
                            kernel_initializer='he_uniform')(input_layer)
        conv_layer = normalization.BatchNormalization()(conv_layer)
        conv_layer = Activation(activation="relu")(conv_layer)
        conv_layer = squeeze_excite_block(conv_layer)   # defined in Example #2

        # Add CNN layer + squeeze-excite block
        conv_layer = Conv1D(filters=64, kernel_size=5, strides=1, padding="same",
                            kernel_initializer='he_uniform')(conv_layer)
        conv_layer = normalization.BatchNormalization()(conv_layer)
        conv_layer = Activation(activation="relu")(conv_layer)
        conv_layer = squeeze_excite_block(conv_layer)

        # Add CNN layer
        conv_layer = Conv1D(filters=32, kernel_size=3, strides=1, padding="same",
                            kernel_initializer='he_uniform')(conv_layer)
        conv_layer = normalization.BatchNormalization()(conv_layer)
        conv_layer = Activation(activation="relu")(conv_layer)

        # Global pooling layer
        conv_layer = pooling.GlobalAveragePooling1D()(conv_layer)
        concat_layer = concatenate([lstm_layer, conv_layer])


        # Softmax output layer
        output_layer = Dense(self.n_classes, activation="softmax")(concat_layer)


        # Put all the model components together
        model = Model(inputs=input_layer, outputs=output_layer)

        # configure the model
        model.compile(loss=self.loss, optimizer=self.optimizer,
                      metrics=self.metrics, loss_weights=self.loss_weights)

        # Reduce the learning rate if plateau occurs on the loss curve
        reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=10, 
                                      min_lr=0.00001)

        # Save the model at certain checkpoints 
        fname = "weights.epoch_{epoch:02d}.val_loss_{val_loss:.2f}.hdf5"
        file_path = os.path.join(self.out_dir, fname)
        model_checkpoint = ModelCheckpoint(file_path, monitor='val_loss', save_best_only=False, period=5)
        
        self.callbacks = [reduce_lr, model_checkpoint]

        return model
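A quick hedged shape check of the two-branch design (the input shape is an assumption): the LSTM branch and the pooled convolutional branch each contribute a 32-dimensional vector, so the softmax head sees 64 features.

from keras.layers import Input, LSTM, Masking, Conv1D, concatenate
from keras.layers import pooling
from keras.models import Model

inp = Input(shape=(128, 9))                       # illustrative (timesteps, channels)
lstm = LSTM(32)(Masking(mask_value=0.0)(inp))     # -> (None, 32)
conv = Conv1D(32, 8, padding="same")(inp)
conv = pooling.GlobalAveragePooling1D()(conv)     # -> (None, 32)
feat = concatenate([lstm, conv])                  # -> (None, 64)
print(Model(inp, feat).output_shape)              # (None, 64)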
Example #5
    def creat_model(self):

        from keras.layers import Input, Conv1D, Dense
        from keras.layers import normalization, Activation, pooling
        from keras.models import Model 
        from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, TensorBoard
        from keras.layers.core import Dropout
        import os

        # Input layer
        input_layer = Input(self.input_shape, name="main_input")

        # First CNN layer
        conv_layer = Conv1D(filters=64, kernel_size=7, strides=1, padding="same")(input_layer)
        conv_layer = normalization.BatchNormalization()(conv_layer)
        conv_layer = Activation(activation="relu")(conv_layer)
        conv_layer = Dropout(0.2, seed=100)(conv_layer)

        conv_layer = Conv1D(filters=64, kernel_size=7, strides=1, padding="same")(conv_layer)
        conv_layer = normalization.BatchNormalization()(conv_layer)
        conv_layer = Activation(activation="relu")(conv_layer)
        conv_layer = Dropout(0.2, seed=100)(conv_layer)

        # Second CNN layer
        conv_layer = Conv1D(filters=128, kernel_size=5, strides=1, padding="same")(conv_layer)
        conv_layer = normalization.BatchNormalization()(conv_layer)
        conv_layer = Activation(activation="relu")(conv_layer)
        conv_layer = Dropout(0.2, seed=100)(conv_layer)

        conv_layer = Conv1D(filters=128, kernel_size=5, strides=1, padding="same")(conv_layer)
        conv_layer = normalization.BatchNormalization()(conv_layer)
        conv_layer = Activation(activation="relu")(conv_layer)
        conv_layer = Dropout(0.2, seed=100)(conv_layer)

        conv_layer = Conv1D(filters=128, kernel_size=5, strides=1, padding="same")(conv_layer)
        conv_layer = normalization.BatchNormalization()(conv_layer)
        conv_layer = Activation(activation="relu")(conv_layer)
        conv_layer = Dropout(0.2, seed=100)(conv_layer)


        # Third CNN layer
        conv_layer = Conv1D(filters=64, kernel_size=3, strides=1, padding="same")(conv_layer)
        conv_layer = normalization.BatchNormalization()(conv_layer)
        conv_layer = Activation(activation="relu")(conv_layer)
        conv_layer = Dropout(0.2, seed=100)(conv_layer)

        conv_layer = Conv1D(filters=64, kernel_size=3, strides=1, padding="same")(conv_layer)
        conv_layer = normalization.BatchNormalization()(conv_layer)
        conv_layer = Activation(activation="relu")(conv_layer)

        # Global pooling layer
        gap_layer = pooling.GlobalAveragePooling1D()(conv_layer)

        # Output layer
        # Use softmax
        output_layers = []
        for i in range(self.n_classes):
             output_layers.append(Dense(2, activation="softmax", name="output_"+str(i))(gap_layer))

        # Put all the model components together
        model = Model(inputs=input_layer, outputs=output_layers)

        # configure the model
        model.compile(loss=self.loss, optimizer=self.optimizer,
                      metrics=self.metrics, loss_weights=self.loss_weights)

        # Reduce the learning rate if plateau occurs on the loss curve
        reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=10, 
                                      min_lr=0.00001)

        # Save the model at certain checkpoints 
        fname = "weights.epoch_{epoch:02d}.val_loss_{val_loss:.2f}.hdf5"
        file_path = os.path.join(self.out_dir, fname)
        model_checkpoint = ModelCheckpoint(file_path, monitor='val_loss', save_best_only=False, period=5)
        
#        # For TensorBoard visualization
#        log_dir = os.path.join(self.out_dir, "logs")
#        tensorboard = TensorBoard(log_dir=log_dir, batch_size=self.batch_size, update_freq='epoch')

        self.callbacks = [reduce_lr, model_checkpoint]

        return model