Example #1
    def _cnn_maxpool_multifilter(self, name: str) -> Model:
        """https://richliao.github.io/supervised/classification/2016/11/26/textclassifier-convolutional/
        """
        convs = []
        filter_sizes = [3, 4, 5]

        _inputs = Input((self.maxlen, ), name='input')
        l_embed = Embedding(input_dim=self.input_dim,
                            output_dim=self.embed_dim,
                            input_length=self.maxlen,
                            name='embedding')(_inputs)

        for fsz in filter_sizes:
            l_conv = Conv1D(filters=self.conv_filters,
                            kernel_size=fsz,
                            activation='relu')(l_embed)
            l_pool = MaxPool1D(self.conv_pool_size)(l_conv)
            convs.append(l_pool)

        l_merge = Concatenate(axis=1)(convs)
        l_cov1 = Conv1D(filters=self.conv_filters,
                        kernel_size=self.conv_kernel_size,
                        activation='relu')(l_merge)
        l_pool1 = MaxPool1D(pool_size=self.conv_pool_size)(l_cov1)
        l_cov2 = Conv1D(filters=self.conv_filters,
                        kernel_size=self.conv_kernel_size,
                        activation='relu')(l_pool1)
        l_pool2 = GlobalMaxPool1D()(l_cov2)
        l_flat = Flatten()(l_pool2)  # no-op here: GlobalMaxPool1D already returns a 2-D tensor
        l_dense = Dense(self.units, activation='relu')(l_flat)
        _preds = Dense(self.classes, activation='sigmoid', name='fc1')(l_dense)

        return Model(inputs=_inputs, outputs=_preds, name=name)
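Since every branch above shortens its sequence with MaxPool1D before the Concatenate, it helps to recall the pooling arithmetic. A minimal standalone sketch (not from the source):

import tensorflow as tf

# MaxPool1D with pool_size=p and the default strides=None (meaning strides=p)
# divides the time axis by p under the default padding='valid'.
x = tf.random.normal((1, 100, 8))               # (batch, steps, channels)
y = tf.keras.layers.MaxPool1D(pool_size=5)(x)
print(y.shape)                                  # (1, 20, 8)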
Example #2
def get_locnet(locnet_inputs, output_shape):
    # Define filters
    c1 = 128
    c2 = 64
    c3 = 64

    # CNN
    inputs = locnet_inputs
    x1 = Conv1D(c1, (8), padding='same')(inputs)
    x1 = BatchNormalization()(x1)
    x1 = Activation('relu')(x1)
    x1 = MaxPool1D(2)(x1)
    x1 = Conv1D(c2, (5), padding='same')(x1)
    x1 = BatchNormalization()(x1)
    x1 = Activation('relu')(x1)
    x1 = MaxPool1D(2)(x1)
    x1 = Conv1D(c3, (3), padding='same')(x1)
    x1 = BatchNormalization()(x1)
    x1 = Activation('relu')(x1)
    x1 = MaxPool1D(2)(x1)

    x1 = Flatten()(x1)
    # Output Layer
    locnet_outputs = Dense(output_shape, activation='tanh')(x1)

    # Define model
    locnet = Model(inputs=locnet_inputs,
                   outputs=locnet_outputs,
                   name="localization_network")  # no spaces: invalid in TF scope names

    return locnet
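A hedged usage sketch for get_locnet; the input length and output size below are illustrative assumptions, since the source shows no call site:

from tensorflow.keras.layers import Input

locnet_inputs = Input(shape=(128, 1))            # e.g. a length-128 single-channel signal
locnet = get_locnet(locnet_inputs, output_shape=6)
locnet.summary()    # time axis: 128 -> 64 -> 32 -> 16 after the three MaxPool1D(2)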
Example #3
 def _cnn_maxpool(self, name: str) -> Model:
     """https://richliao.github.io/supervised/classification/2016/11/26/textclassifier-convolutional/
     """
     return Sequential([
         InputLayer(input_shape=(self.maxlen, ), name='input'),
         Embedding(input_dim=self.input_dim,
                   output_dim=self.embed_dim,
                   input_length=self.maxlen,
                   name='embedding'),
         Conv1D(filters=self.conv_filters,
                kernel_size=self.conv_kernel_size,
                activation='relu'),
         MaxPool1D(pool_size=self.conv_pool_size),
         Conv1D(filters=self.conv_filters,
                kernel_size=self.conv_kernel_size,
                activation='relu'),
         MaxPool1D(pool_size=self.conv_pool_size),
         Conv1D(filters=self.conv_filters,
                kernel_size=self.conv_kernel_size,
                activation='relu'),
         GlobalMaxPool1D(),
          Flatten(),  # no-op here: GlobalMaxPool1D already returns a 2-D tensor
         Dense(self.units, activation='relu'),
         Dense(self.classes, activation='sigmoid', name='fc1'),
      ], name=name)
Example #4
def define_model():
    input_shape = (99, 13)
    output_shape = 35
    activation = 'relu'

    input_layer = keras.Input(shape=input_shape)

    h = Conv1D(256, 5, activation=activation, padding='same')(input_layer)
    h = BatchNormalization()(h)

    h = Conv1D(256, 5, activation=activation, padding='same')(h)
    #h = BatchNormalization()(h)
    h = MaxPool1D(3)(h)

    h = Conv1D(512, 5, activation=activation, padding='same')(h)
    #h = BatchNormalization()(h)
    h = Dropout(0.35)(h)

    h = Conv1D(512, 5, activation=activation, padding='same')(h)
    h = GlobalAveragePooling1D()(h)
    h = Dropout(0.5)(h)

    output_layer = Dense(output_shape, activation='softmax')(h)

    model = keras.Model(inputs=input_layer, outputs=output_layer)
    return model
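A hedged sketch of compiling and inspecting the model above; the optimizer and loss are assumptions, since the source stops at return model:

model = define_model()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',   # assumes one-hot labels over the 35 classes
              metrics=['accuracy'])
model.summary()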
Example #5
 def _cnn_bilstm_attention_dropout(self, name: str) -> Model:
     """https://qiita.com/fufufukakaka/items/4f9d42a4300392691bf3
     """
     _inputs = Input(shape=(self.maxlen, ), name='input')
     l_embed = Embedding(input_dim=self.input_dim,
                         output_dim=self.embed_dim,
                         input_length=self.maxlen,
                         name='embedding')(_inputs)
     l_drop1 = Dropout(0.2, name='input_dropout')(l_embed)
     l_cov1 = Conv1D(filters=self.conv_filters,
                     kernel_size=self.conv_kernel_size,
                     padding='same',
                     activation='relu')(l_drop1)
     l_pool1 = MaxPool1D(pool_size=self.conv_pool_size)(l_cov1)
     l_bilstm1 = Bidirectional(
         LSTM(units=self.units,
              dropout=0.2,
              recurrent_dropout=0.2,
              return_sequences=True,
              name='bilstm_dropout'))(l_pool1)
     l_flat = Flatten()(self.__attention_3d_block(
         l_bilstm1, int(l_pool1.shape[1])))  # .shape[1].value is TF1-only; int(...) works in TF2
     l_drop2 = Dropout(0.5, name='hidden_dropout')(l_flat)
     _preds = Dense(self.classes, activation='sigmoid', name='fc1')(l_drop2)
     return Model(inputs=_inputs, outputs=_preds, name=name)
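The private __attention_3d_block helper is not included in the snippet. Its call signature (inputs, time_steps) matches the widely copied Keras attention recipe (e.g. philipperemy/keras-attention-mechanism, which the linked article adapts); below is a hedged sketch of that pattern, not necessarily the author's exact code:

from tensorflow.keras.layers import Dense, Multiply, Permute

def attention_3d_block(inputs, time_steps):
    """Per-timestep soft attention over a (batch, time_steps, features) tensor."""
    a = Permute((2, 1))(inputs)                     # (batch, features, time_steps)
    a = Dense(time_steps, activation='softmax')(a)  # attention weights along time
    a = Permute((2, 1), name='attention_vec')(a)    # back to (batch, time_steps, features)
    return Multiply()([inputs, a])                  # re-weight the sequence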
Example #6
 def init_model(self,
                input_shape,
                num_classes,
                **kwargs):
     # New model
     model = Sequential()
      model.add(
          Conv1D(256, 8, padding='same',
                 input_shape=(input_shape[0], 1)))  # input_shape[0] = number of feature columns per sample
     model.add(Activation('relu'))
     model.add(Conv1D(256, 8, padding='same'))
     model.add(BatchNormalization())
     model.add(Activation('relu'))
     model.add(Dropout(0.25))
     model.add(MaxPool1D(pool_size=8))
     model.add(Conv1D(128, 8, padding='same'))
     model.add(Activation('relu'))
     model.add(Conv1D(128, 8, padding='same'))
     model.add(Activation('relu'))
     model.add(Conv1D(128, 8, padding='same'))
     model.add(Activation('relu'))
     model.add(Conv1D(128, 8, padding='same'))
     model.add(BatchNormalization())
     model.add(Activation('relu'))
     model.add(Dropout(0.25))
     model.add(MaxPool1D(pool_size=8))
     model.add(Conv1D(64, 8, padding='same'))
     model.add(Activation('relu'))
     model.add(Conv1D(64, 8, padding='same'))
     model.add(Activation('relu'))
     model.add(Flatten())
     model.add(Dense(num_classes))  # Target class number
     model.add(Activation('softmax'))
     # opt = keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=1e-6, nesterov=False)
     # opt = keras.optimizers.Adam(lr=0.0001)
      opt = optimizers.RMSprop(learning_rate=0.0001, decay=1e-6)
     model.compile(
         optimizer=opt,
         loss="sparse_categorical_crossentropy",
         metrics=['acc'])
     model.summary()
     self._model = model
     self.is_init = True
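Note the input_shape=(input_shape[0], 1): the model expects each sample reshaped into a single-channel sequence. A minimal sketch of that preparation (the feature count is illustrative):

import numpy as np

X = np.random.rand(32, 180)     # 32 samples x 180 feature columns (illustrative)
X = X[..., np.newaxis]          # -> (32, 180, 1), as the Conv1D stack expects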
Example #7
 def add(self,
         pool_size=2,
         strides=None,
         padding='valid',
         data_format='channels_last',
         **kwargs):
     return self._add_layer(
         MaxPool1D(pool_size=pool_size,
                   strides=strides,
                   padding=padding,
                   data_format=data_format,
                   **kwargs))
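For context, the wrapper's defaults mirror MaxPool1D's own (pool_size=2, strides=None meaning strides=pool_size, padding='valid'); a quick standalone check:

import tensorflow as tf

a = tf.keras.layers.MaxPool1D()
b = tf.keras.layers.MaxPool1D(pool_size=2, strides=None, padding='valid',
                              data_format='channels_last')
x = tf.random.normal((1, 8, 3))
assert a(x).shape == b(x).shape == (1, 4, 3)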
Example #8
 def linear(self):
     inputs = Input(shape=(5, 4547), name='inputs')
     x = Conv1D(filters=6, kernel_size=3, strides=1,
                activation='relu')(inputs)
     x = MaxPool1D(pool_size=3, strides=2)(x)
     x = BatchNormalization()(x)
     # x = Conv1D(filters=6, kernel_size=3, strides=2, activation='relu')(x)
     # x = MaxPool1D(pool_size=3, strides=2)(x)
     x = Flatten(name='flattened')(x)
     x = Dense(units=100, activation='linear')(x)
      x = Dense(units=100, activation='linear')(x)  # note: two stacked linear layers collapse to one linear map
     handling = Dense(units=1, activation='linear', name='output')(x)
     model = Model(inputs=[inputs], outputs=[handling])
     model.compile(optimizer='adam',
                   loss={'output': 'mean_squared_error'},
                   loss_weights={'output': 0.05})
     return model
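A hedged usage sketch; linear is a method here, so the builder object below is hypothetical, and the dummy batch just confirms the output shape:

import numpy as np

model = builder.linear()                    # hypothetical owner of the method above
x = np.random.rand(2, 5, 4547).astype('float32')
print(model.predict(x).shape)               # (2, 1): one regression target per sample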
Example #9
 def _cnn_bilstm_dropout(self, name: str) -> Model:
     return Sequential([
         InputLayer(input_shape=(self.maxlen, ), name='input'),
         Embedding(input_dim=self.input_dim,
                   output_dim=self.embed_dim,
                   input_length=self.maxlen,
                   name='embedding'),
         Dropout(0.2, name='input_dropout'),
         Conv1D(filters=self.conv_filters,
                kernel_size=self.conv_kernel_size,
                padding='same',
                activation='relu'),
         MaxPool1D(pool_size=self.conv_pool_size),
         Bidirectional(LSTM(units=self.units, name='bilstm')),
         Dropout(0.5, name='hidden_dropout'),
         Dense(self.classes, activation='sigmoid', name='fc1'),
      ], name=name)
Example #10
    def __generate_base_model(self) -> Model:
        sequence_input = Input(shape=(3000, 1))

        # two convolutional layers
        sequence = Convolution1D(filters=32,
                                 kernel_size=self.__kernel_size,
                                 padding=self.__padding_valid,
                                 activation=activations.relu)(sequence_input)
        sequence = Convolution1D(filters=32,
                                 kernel_size=self.__kernel_size,
                                 padding=self.__padding_valid,
                                 activation=activations.relu)(sequence)

        for filters in [32, 32, 256]:
            # max pool and dropout
            sequence = MaxPool1D(pool_size=self.__pool_size,
                                 padding=self.__padding_valid)(sequence)
            sequence = SpatialDropout1D(rate=self.__dropout_rate)(sequence)

            # two more convolutional layers
            sequence = Convolution1D(filters=filters,
                                     kernel_size=self.__kernel_size,
                                     padding=self.__padding_valid,
                                     activation=activations.relu)(sequence)
            sequence = Convolution1D(filters=filters,
                                     kernel_size=self.__kernel_size,
                                     padding=self.__padding_valid,
                                     activation=activations.relu)(sequence)
        # final block
        sequence = GlobalMaxPool1D()(sequence)
        sequence = Dropout(rate=self.__dropout_rate)(sequence)

        sequence = Dense(units=64, activation=activations.relu)(sequence)

        # last dropout and model generation
        model = models.Model(
            inputs=sequence_input,
            outputs=Dropout(rate=self.__dropout_rate)(sequence))

        # compile model
        model.compile(optimizer=optimizers.Adam(),
                      loss=losses.sparse_categorical_crossentropy,
                      metrics=self.__metrics)
        return model
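This "base" model ends in a 64-unit dropout feature vector rather than class scores, even though it compiles with a classification loss; that pattern usually means the model is later wrapped over a sequence of windows (e.g. sleep staging on 3000-sample epochs). A hedged sketch of such a wrapper, with an illustrative class count:

from tensorflow.keras import layers, models

base = model                                      # the base Model returned above
seq_in = layers.Input(shape=(None, 3000, 1))      # a variable-length run of 3000-sample windows
feats = layers.TimeDistributed(base)(seq_in)      # (batch, windows, 64)
preds = layers.TimeDistributed(
    layers.Dense(5, activation='softmax'))(feats) # 5 classes here is illustrative
wrapper = models.Model(seq_in, preds)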
Example #11
File: CNN.py  Project: yemx21/ECG
def deep_cnnblocks(inputdim, inputshape):
    if inputdim < 8:
        return (Conv1D(8,
                       2,
                       activation=tf.nn.relu,
                       input_shape=inputshape,
                       name='input'), BatchNormalization())
    elif inputdim < 16:
        return (Conv1D(16,
                       2,
                       activation=tf.nn.relu,
                       input_shape=inputshape,
                       name='input'), BatchNormalization())
    elif inputdim < 32:
        return (Conv1D(16,
                       3,
                       activation=tf.nn.relu,
                       input_shape=inputshape,
                       name='input'), Conv1D(16, 3, activation=tf.nn.relu),
                BatchNormalization(), MaxPool1D(2))
    elif inputdim < 64:
        return (Conv1D(16,
                       3,
                       activation=tf.nn.relu,
                       input_shape=inputshape,
                       name='input'), Conv1D(24, 3, activation=tf.nn.relu),
                BatchNormalization(), MaxPool1D(2))
    elif inputdim < 128:
        return (Conv1D(16,
                       3,
                       activation=tf.nn.relu,
                       input_shape=inputshape,
                       name='input'), Conv1D(16, 3, activation=tf.nn.relu),
                BatchNormalization(), MaxPool1D(3), Dropout(0.5),
                Conv1D(16, 3, activation=tf.nn.relu),
                Conv1D(16, 3, activation=tf.nn.relu), BatchNormalization(),
                MaxPool1D(2))
    else:
        return (Conv1D(16,
                       3,
                       activation=tf.nn.relu,
                       input_shape=inputshape,
                       name='input'), Conv1D(16, 3, activation=tf.nn.relu),
                BatchNormalization(), MaxPool1D(3), Dropout(0.5),
                Conv1D(32, 3, activation=tf.nn.relu),
                Conv1D(16, 3, activation=tf.nn.relu), BatchNormalization(),
                MaxPool1D(2))
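deep_cnnblocks returns a tuple of layers rather than a model, so a caller presumably unpacks it into a Sequential and adds a head; a hedged sketch (shapes and class count are illustrative):

from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Flatten

blocks = deep_cnnblocks(inputdim=64, inputshape=(64, 1))   # hits the `< 128` branch
model = Sequential([*blocks, Flatten(), Dense(4, activation='softmax')])
model.summary()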
Example #12
def test_delete_channels_maxpooling1d(channel_index):
    layer = MaxPool1D(2)
    layer_test_helper_flatten_1d(layer, channel_index)
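The helper body isn't shown, but the property it exercises is easy to state: max pooling is channel-independent, so deleting a channel commutes with MaxPool1D. A standalone check of that invariant (not the test's actual code):

import tensorflow as tf

x = tf.random.normal((1, 10, 4))
pool = tf.keras.layers.MaxPool1D(2)
kept = [0, 2, 3]                                 # drop channel 1
lhs = pool(tf.gather(x, kept, axis=-1))          # delete, then pool
rhs = tf.gather(pool(x), kept, axis=-1)          # pool, then delete
assert bool(tf.reduce_all(lhs == rhs))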
Example #13
def build_text_cnn(num_words, word_embedding=None):
    # NOTE: the top of this snippet is missing in the source; the signature and
    # this first branch are a reconstruction (assumed) from the else-branch below.
    inputs = Input(shape=(SEQUENCE_LENGTH, ), name='input')
    net = inputs
    if word_embedding is not None:
        net = Embedding(input_dim=num_words,
                        output_dim=EMBEDDING_DIM,
                        weights=[word_embedding],
                        input_length=SEQUENCE_LENGTH,
                        trainable=False)(net)
    else:
        net = Embedding(input_dim=num_words,
                        output_dim=EMBEDDING_DIM,
                        input_length=SEQUENCE_LENGTH)(net)

    pathway1 = Conv1D(kernel_size=3,
                      strides=1,
                      filters=64,
                      padding='same',
                      activation='relu',
                      name='conv_1')(net)
    pathway1 = MaxPool1D(pool_size=SEQUENCE_LENGTH)(pathway1)
    pathway2 = Conv1D(kernel_size=4,
                      strides=1,
                      filters=64,
                      padding='same',
                      activation='relu',
                      name='conv_2')(net)
    pathway2 = MaxPool1D(pool_size=SEQUENCE_LENGTH)(pathway2)
    pathway3 = Conv1D(kernel_size=5,
                      strides=1,
                      filters=64,
                      padding='same',
                      activation='relu',
                      name='conv_3')(net)
    pathway3 = MaxPool1D(pool_size=SEQUENCE_LENGTH)(pathway3)
    net = concatenate([pathway1, pathway2, pathway3], axis=2)
Example #14
    def __init__(self, params=None, is_training=False):
        super(Resnet10, self).__init__()

        self.is_training = is_training
        self.n_cnn_filters = params['n_cnn_filters']
        self.n_cnn_kernels = params['n_cnn_kernels']
        self.n_classes = params['n_classes']

        # Block 1
        self.conv1_1 = Conv1D(self.n_cnn_filters[0],
                              self.n_cnn_kernels[0],
                              activation=None,
                              padding='same',
                              name='conv1_1')
        self.bn1_1 = BatchNormalization(name='batchnorm1_1')
        self.relu1_1 = Activation(activation='relu', name='relu1_1')

        self.conv1_2 = Conv1D(self.n_cnn_filters[0],
                              self.n_cnn_kernels[1],
                              activation=None,
                              padding='same',
                              name='conv1_2')
        self.bn1_2 = BatchNormalization(name='batchnorm1_2')
        self.relu1_2 = Activation(activation='relu', name='relu1_2')

        self.conv1_3 = Conv1D(self.n_cnn_filters[0],
                              self.n_cnn_kernels[2],
                              activation=None,
                              padding='same',
                              name='conv1_3')
        self.bn1_3 = BatchNormalization(name='batchnorm1_3')

        self.shortcut1 = Conv1D(self.n_cnn_filters[0],
                                1,
                                activation=None,
                                padding='same',
                                name='shortcut1')
        self.bn_shortcut1 = BatchNormalization(name='batchnorm_shortcut1')

        self.out_block1 = Activation(activation='relu', name='out_block1')

        # Block 2
        self.conv2_1 = Conv1D(self.n_cnn_filters[1],
                              self.n_cnn_kernels[0],
                              activation=None,
                              padding='same',
                              name='conv2_1')
        self.bn2_1 = BatchNormalization(name='batchnorm2_1')
        self.relu2_1 = Activation(activation='relu', name='relu2_1')

        self.conv2_2 = Conv1D(self.n_cnn_filters[1],
                              self.n_cnn_kernels[1],
                              activation=None,
                              padding='same',
                              name='conv2_2')
        self.bn2_2 = BatchNormalization(name='batchnorm2_2')
        self.relu2_2 = Activation(activation='relu', name='relu2_2')

        self.conv2_3 = Conv1D(self.n_cnn_filters[1],
                              self.n_cnn_kernels[2],
                              activation=None,
                              padding='same',
                              name='conv2_3')
        self.bn2_3 = BatchNormalization(name='batchnorm2_3')

        self.shortcut2 = Conv1D(self.n_cnn_filters[1],
                                1,
                                activation=None,
                                padding='same',
                                name='shortcut2')
        self.bn_shortcut2 = BatchNormalization(name='batchnorm_shortcut2')

        self.out_block2 = Activation(activation='relu', name='out_block2')

        # Block 3
        self.conv3_1 = Conv1D(self.n_cnn_filters[2],
                              self.n_cnn_kernels[0],
                              activation=None,
                              padding='same',
                              name='conv3_1')
        self.bn3_1 = BatchNormalization(name='batchnorm3_1')
        self.relu3_1 = Activation(activation='relu', name='relu3_1')

        self.conv3_2 = Conv1D(self.n_cnn_filters[2],
                              self.n_cnn_kernels[1],
                              activation=None,
                              padding='same',
                              name='conv3_2')
        self.bn3_2 = BatchNormalization(name='batchnorm3_2')
        self.relu3_2 = Activation(activation='relu', name='relu3_2')

        self.conv3_3 = Conv1D(self.n_cnn_filters[2],
                              self.n_cnn_kernels[2],
                              activation=None,
                              padding='same',
                              name='conv3_3')
        self.bn3_3 = BatchNormalization(name='batchnorm3_3')

        self.bn_shortcut3 = BatchNormalization(name='batchnorm_shortcut3')
        self.out_block3 = Activation(activation='relu', name='out_block3')

        # Pool & flatten
        self.pool = MaxPool1D(2, 2, name='pool')
        self.flatten = Flatten(name='flatten')

        # FC
        self.fc1 = Dense(1024, activation='relu', name='fc1')
        self.fc2 = Dense(1024, activation='relu', name='fc2')
        self.fc3 = Dense(self.n_classes, activation=None, name='fc3')
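The snippet stops at __init__; the call method is not shown. Judging from the layer names (two conv-BN-ReLU stages, a third conv-BN, a projected shortcut, then an out_block ReLU), each block is presumably a residual unit. A hedged sketch of that wiring, inferred rather than taken from the source:

    def call(self, x, training=False):
        # Sketch only; assumes n_cnn_filters[1] == n_cnn_filters[2] so that
        # Block 3's identity shortcut lines up channel-wise.
        # Block 1: conv-BN-ReLU x2, conv-BN, plus a 1x1 projected shortcut
        h = self.relu1_1(self.bn1_1(self.conv1_1(x), training=training))
        h = self.relu1_2(self.bn1_2(self.conv1_2(h), training=training))
        h = self.bn1_3(self.conv1_3(h), training=training)
        s = self.bn_shortcut1(self.shortcut1(x), training=training)
        x = self.pool(self.out_block1(h + s))
        # Block 2: same pattern at the second filter count
        h = self.relu2_1(self.bn2_1(self.conv2_1(x), training=training))
        h = self.relu2_2(self.bn2_2(self.conv2_2(h), training=training))
        h = self.bn2_3(self.conv2_3(h), training=training)
        s = self.bn_shortcut2(self.shortcut2(x), training=training)
        x = self.pool(self.out_block2(h + s))
        # Block 3: no shortcut conv is declared, only a BN on the identity
        h = self.relu3_1(self.bn3_1(self.conv3_1(x), training=training))
        h = self.relu3_2(self.bn3_2(self.conv3_2(h), training=training))
        h = self.bn3_3(self.conv3_3(h), training=training)
        s = self.bn_shortcut3(x, training=training)
        x = self.pool(self.out_block3(h + s))
        # Head
        x = self.flatten(x)
        return self.fc3(self.fc2(self.fc1(x)))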
Example #15
    # per-sample min-max normalization (loop head reconstructed; the snippet
    # starts mid-loop in the source)
    for i in range(len(X_valid)):
        X_min = np.min(X_valid[i])
        X_valid[i] = (X_valid[i] - X_min) / (np.max(X_valid[i]) - X_min)

    # reshape
    X_train = X_train.reshape(-1, 1200, 1)
    X_valid = X_valid.reshape(-1, 1200, 1)

    # model definition
    model = keras.models.Sequential()
    model.add(
        Conv1D(16,
               30,
               activation='relu',
               input_shape=(1200, 1),
               padding="same"))
    model.add(MaxPool1D(pool_size=3, strides=3))
    model.add(Conv1D(32, 20, strides=1, activation='relu', padding='same'))
    model.add(MaxPool1D(pool_size=3, strides=3))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    model.summary()
    sgd = keras.optimizers.SGD(lr=0.01,
                               decay=1e-4,
                               momentum=0.9,
                               nesterov=True)
    model.compile(optimizer=sgd,  # fix: the string 'sgd' would ignore the SGD configured above
                  loss='categorical_crossentropy',  # assumed: the call is truncated in the source
                  metrics=['accuracy'])