def DetectNet(lr, input_shape, filter_num, lstm_units, kernel_size, drop_ratio, lstm_drop_ratio, dense_units):
    '''
        Network architecture of DetectNet: two Conv1D layers whose flattened
        output is reduced back to the input length, stacked with the raw input
        as an extra channel, and fed through a two-layer LSTM classifier.
    '''
    ConvInput = Input(input_shape)
    x1 = Convolution1D(filter_num, kernel_size, padding='same', data_format='channels_first', activation='relu')(ConvInput)
    x1 = Dropout(rate=drop_ratio)(x1)
    x2 = Convolution1D(filter_num, kernel_size, padding='same', data_format='channels_first', activation='relu')(x1)
    x2 = Dropout(rate=drop_ratio)(x2)
    x3 = Flatten()(x2)
    # dimension reduction back to the input length
    x4 = Dense(input_shape[-1], activation='linear')(x3)
    x4 = Reshape(target_shape=(1, input_shape[-1]))(x4)

    LSTMInput = concatenate([x4, ConvInput], axis=1)

    # the input_shape kwarg is ignored in the functional API, so it is omitted here
    y1 = LSTM(units=lstm_units, return_sequences=True, recurrent_dropout=lstm_drop_ratio)(LSTMInput)
    y2 = LSTM(units=lstm_units, dropout=lstm_drop_ratio)(y1)  # returns (batch, lstm_units), already flat
    y3 = Dense(dense_units, activation='relu')(y2)

    predictions = Dense(2, activation='softmax')(y3)
    model = Model(inputs=ConvInput, outputs=predictions)
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=lr), metrics=['accuracy'])
    model.summary()
    return model
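A hedged instantiation sketch: every hyperparameter value below is illustrative rather than taken from the original repository, and the usual Keras imports (Input, Convolution1D, LSTM, Reshape, concatenate, Model, Adam, ...) are assumed to be in scope.

# All values are placeholders for illustration only.
model = DetectNet(lr=1e-3,
                  input_shape=(2, 300),   # channels_first: (channels, timesteps)
                  filter_num=32,
                  lstm_units=64,
                  kernel_size=5,
                  drop_ratio=0.2,
                  lstm_drop_ratio=0.2,
                  dense_units=32)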
def get_model():
    nclass = 1
    inp = Input(shape=(187, 1))
    img_1 = Convolution1D(32,
                          kernel_size=5,
                          activation=activations.relu,
                          padding="valid")(inp)
    for i in range(5):
        img_1 = res_block(img_1, 32, dropout=0.2)
    img_1 = Convolution1D(32,
                          kernel_size=3,
                          activation=activations.relu,
                          padding="same")(img_1)
    img_1 = Convolution1D(32,
                          kernel_size=3,
                          activation=activations.relu,
                          padding="same",
                          name="final_conv")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    dense_1 = Dense(32, activation=activations.relu, name="dense_1")(img_1)
    dense_1 = Dense(32, activation=activations.relu, name="dense_2")(dense_1)
    dense_1 = Dense(nclass,
                    activation=activations.sigmoid,
                    name="dense_3_mitbih")(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)

    model.compile(optimizer=opt,
                  loss=losses.binary_crossentropy,
                  metrics=['acc'])
    model.summary()
    return model
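res_block is not defined in this snippet; the identity_block near the end of this page has a compatible signature, so a standalone smoke test could alias it. The dummy shapes below simply follow the (187, 1) Input layer.

import numpy as np

res_block = identity_block  # assumed stand-in; the original res_block is not shown

model = get_model()
X = np.random.rand(32, 187, 1).astype('float32')   # dummy beats
y = np.random.randint(0, 2, size=(32,))            # binary labels for the sigmoid head
model.fit(X, y, epochs=1, batch_size=8)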
def CNN(lr, input_shape, filter_num, kernel_size, drop_ratio, dense_units):
    ConvInput = Input(input_shape)
    # input_shape is fixed by the Input layer; the kwarg on Conv1D is ignored
    # in the functional API, so it is omitted.
    x1 = Convolution1D(filter_num,
                       kernel_size,
                       padding='same',
                       data_format='channels_first',
                       activation='relu')(ConvInput)
    x1 = Dropout(rate=drop_ratio)(x1)
    x2 = Convolution1D(filter_num,
                       kernel_size,
                       padding='same',
                       data_format='channels_first',
                       activation='relu')(x1)
    x2 = Dropout(rate=drop_ratio)(x2)
    x2 = Flatten()(x2)

    x3 = Dense(dense_units, activation='relu')(x2)
    x3 = Dropout(rate=drop_ratio)(x3)
    predictions = Dense(2, activation='softmax')(x3)
    model = Model(inputs=ConvInput, outputs=predictions)

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=lr),
                  metrics=['accuracy'])
    model.summary()
    return model
def train_cnn(X_train, y_train, X_test, y_test, X_val, y_val):

    # Build a single Sequential model. The original re-created
    # keras.Sequential() before every block, which silently discarded the
    # earlier layers. Convolutions and pools use padding='same' below so
    # that four stride-2 blocks still fit the 48-step input.
    model = keras.Sequential()
    model.add(
        Convolution1D(filters=1500,
                      kernel_size=2,
                      strides=2,
                      padding='same',
                      activation='relu',
                      input_shape=(48, 272)))
    model.add(MaxPooling1D(2, padding='same'))
    model.add(Dropout(0.2))

    for _ in range(3):
        model.add(
            Convolution1D(filters=500,
                          kernel_size=2,
                          strides=2,
                          padding='same',
                          activation='relu'))
        model.add(MaxPooling1D(2, padding='same'))
        model.add(Dropout(0.2))

    model.add(Flatten())

    # range(0, 1) runs once; raise the upper bound to stack more dense blocks
    for i in range(0, 1):
        model.add(Dense(256, activation="relu"))
        model.add(Dropout(0.2))
    model.add(Dense(2, activation="softmax"))
    # mil_squared_error is a custom metric assumed to be defined elsewhere in
    # the original repository; the original also referenced an undefined
    # lowercase `adam`, replaced here with the string identifier.
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy', mil_squared_error])

    print("Fit model on training data")
    history = model.fit(X_train,
                        y_train,
                        batch_size=64,
                        epochs=150,
                        validation_data=(X_val, y_val),
                        verbose=1)

    y_preds = [np.argmax(val) for val in model.predict(X_test)]
    y_trues = [np.argmax(val) for val in y_test]
    print(accuracy_score(y_trues, y_preds))

    model.save('../models/cnn.hdf5')

    return model
def maxpool_block(X, filters, kernel_size=5, dropout=0.1, pool_size=2):
    # Two same-padded convolutions with a residual shortcut, then max-pooling.
    # Note: the dropout argument is accepted but never applied in this block.
    img_1 = Convolution1D(filters, kernel_size=kernel_size, activation='relu', padding='same')(X)
    img_1 = Convolution1D(filters, kernel_size=kernel_size, activation='relu', padding='same')(img_1)
    img_1 = Add()([X, img_1])
    img_1 = Activation('relu')(img_1)
    img_1 = MaxPool1D(pool_size=pool_size)(img_1)  # was hard-coded to 2, ignoring the argument
    return img_1
 def create_model(self):
     # Build the network with the functional (Model) API
     main_input = Input(shape=(maxlen, ), dtype='float64')
     embedder = Embedding(max_words + 1, 300, input_length=maxlen)
     embed = embedder(main_input)
     # convolution windows of width 3, 4 and 5
     cnn1 = Convolution1D(256,
                          3,
                          padding='same',
                          strides=1,
                          activation='relu')(embed)
     cnn1 = MaxPool1D(pool_size=4)(cnn1)
     cnn2 = Convolution1D(256,
                          4,
                          padding='same',
                          strides=1,
                          activation='relu')(embed)
     cnn2 = MaxPool1D(pool_size=4)(cnn2)
     cnn3 = Convolution1D(256,
                          5,
                          padding='same',
                          strides=1,
                          activation='relu')(embed)
     cnn3 = MaxPool1D(pool_size=4)(cnn3)
     # concat
     cnn = concatenate([cnn1, cnn2, cnn3], axis=-1)
     flat = Flatten()(cnn)
     drop = Dropout(0.5)(flat)
     main_output = Dense(1, activation='sigmoid')(drop)
     model = Model(inputs=main_input, outputs=main_output)
     model.compile(loss='binary_crossentropy',
                   optimizer=Adam(lr=1e-3),
                   metrics=['accuracy'])
     return model
    def create_encoder_network(self, batch_norm=True, encoding_dropout=0.2, lstm_units=320, encoding_d=256):
        input_seqs = Input(shape=(None,), name="input_seqs")  # (batch, seq_len)
        x = Embedding(input_dim=self.vocabulary_size,
                      output_dim=self.vocabulary_size - 1,
                      input_length=None, mask_zero=True, trainable=True)(
            input_seqs)  # (batch, seq_len, vocabulary_size - 1)

        x = Convolution1D(filters=320, kernel_size=26, activation='relu',
                          data_format="channels_last", name="lstm_conv_1")(
            x)  # (batch, seq_len - 25, 320)
        if batch_norm:
            x = BatchNormalization(center=True, scale=True, name="conv1_batch_norm")(x)
        x = MaxPooling1D(pool_size=13, padding="same")(x)
        x = Dropout(encoding_dropout)(x)

        x = Convolution1D(filters=192, kernel_size=6, activation='relu', name="lstm_conv_2")(x)
        if batch_norm:
            x = BatchNormalization(center=True, scale=True, name="conv2_batch_norm")(x)
        x = MaxPooling1D(pool_size=3, padding="same")(x)
        x = Dropout(encoding_dropout)(x)

        x = Bidirectional(LSTM(lstm_units, return_sequences=False, return_state=False),
                          merge_mode='concat')(x)  # (batch, 2 * lstm_units)
        x = Dropout(encoding_dropout)(x)

        x = Dense(encoding_d, activation='linear', name="encoder_output")(x)
        if batch_norm:
            x = BatchNormalization(center=True, scale=True, name="encoder_output_normalized")(x)

        return Model(input_seqs, x, name="encoder_model")
    def compile(self):
        self.model = Sequential()

        self.model.add(
            Embedding(self.max_words,
                      self.embedding_dims,
                      input_length=self.sequence_length,
                      weights=[self.embedding_weight_matrix],
                      trainable=False))
        self.model.add(Dropout(0.5))
        self.model.add(
            Convolution1D(self.n_filters,
                          self.filter_size,
                          padding='valid',
                          activation='relu'))
        self.model.add(MaxPooling1D())

        self.model.add(
            Convolution1D(self.n_filters,
                          self.filter_size,
                          padding='valid',
                          activation='relu'))
        self.model.add(MaxPooling1D())
        self.model.add(Flatten())

        self.model.add(Dense(self.hidden_dims, activation='relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(self.n_classes, activation='softmax'))

        self.model.compile(loss='categorical_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
def create_cnn(num_classes: int = 2) -> tf.keras.Model:
    x = Input(shape=(256, ), dtype="int64")
    h = Embedding(en2vec.corpus_size + 1, 128, input_length=256)(x)

    conv1 = Convolution1D(filters=256, kernel_size=10, activation="tanh")(h)
    conv2 = Convolution1D(filters=256, kernel_size=7, activation="tanh")(h)
    conv3 = Convolution1D(filters=256, kernel_size=5, activation="tanh")(h)
    conv4 = Convolution1D(filters=256, kernel_size=3, activation="tanh")(h)

    h = Concatenate()([
        GlobalMaxPooling1D()(conv1),
        GlobalMaxPooling1D()(conv2),
        GlobalMaxPooling1D()(conv3),
        GlobalMaxPooling1D()(conv4),
    ])

    h = Dense(1024, activation="selu", kernel_initializer="lecun_normal")(h)
    h = AlphaDropout(0.1)(h)
    h = Dense(1024, activation="selu", kernel_initializer="lecun_normal")(h)
    h = AlphaDropout(0.1)(h)

    y = Dense(num_classes, activation="softmax")(h)

    model = Model(x, y)
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy", AUC()])
    return model
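create_cnn reads the vocabulary size from an external en2vec object. For a standalone smoke test in the same module one might stub it; corpus_size below is purely illustrative, and the Keras layers and metrics used above are assumed to be imported.

from types import SimpleNamespace

en2vec = SimpleNamespace(corpus_size=50000)  # stub for the external embedding helper
model = create_cnn(num_classes=2)
model.summary()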
 def __init__(self,
              output_channels: int,
              input_channels: Optional[int] = None,
              kernel_size: int = 3,
              pooling_size: int = 1,
              batch_normalization: bool = True,
              dropout_rate: float = 0.0,
              l2_regularization: float = 0.0):
     super().__init__()
     leaky_relu = LeakyReLU(alpha=0.01)
     dimension_decrease_factor = 4
     if batch_normalization:
         self.batch_normalization = BatchNormalization(scale=False)
         self.batch_normalization1 = BatchNormalization(scale=False)
         self.batch_normalization2 = BatchNormalization(scale=False)
     else:
         self.batch_normalization = None
         self.batch_normalization1 = None
         self.batch_normalization2 = None
     if l2_regularization > 0:
         l2_regularizer = L2(l2_regularization)
     else:
         l2_regularizer = None
     self.dimension_decrease_layer = Convolution1D(
         output_channels // dimension_decrease_factor,
         kernel_size=1,
         activation=leaky_relu,
         kernel_regularizer=l2_regularizer)
     self.convolutional_layer = Convolution1D(
         output_channels // dimension_decrease_factor,
         kernel_size=kernel_size,
         activation=leaky_relu,
         padding='same',
         kernel_regularizer=l2_regularizer)
     self.dimension_increase_layer = Convolution1D(
         output_channels,
         kernel_size=1,
         activation=leaky_relu,
         kernel_regularizer=l2_regularizer)
     if pooling_size > 1:
         self.pooling_layer = MaxPooling1D(pool_size=pooling_size,
                                           padding='same')
     else:
         self.pooling_layer = None
     if input_channels is not None and output_channels != input_channels:
         if output_channels < input_channels:
             raise NotImplementedError(
                 f'Residual blocks with fewer output channels than input '
                 f'channels are not implemented. Output channels was '
                 f'{output_channels} and input was {input_channels}.')
         self.dimension_change_permute0 = Permute((2, 1))
         self.dimension_change_layer = ZeroPadding1D(
             padding=(0, output_channels - input_channels))
         self.dimension_change_permute1 = Permute((2, 1))
     else:
         self.dimension_change_layer = None
     if dropout_rate > 0:
         self.dropout_layer = SpatialDropout1D(rate=dropout_rate)
     else:
         self.dropout_layer = None
 def __init__(self, num_points):
     super(Conv2048, self).__init__()
     self.num_points = num_points
     self.conv1 = Convolution1D(64, 1, activation='relu')
     self.conv2 = Convolution1D(128, 1, activation='relu')
     self.conv3 = Convolution1D(1024, 1, activation='relu')
     self.conv4 = Convolution1D(2048, 1, activation='relu')
     self.max_pooling = MaxPooling1D(pool_size=self.num_points)
     self.flatten = Flatten()
def cnn_features_model(n_timesteps, n_features, n_outputs, nb_features):
    input = Input(shape=(n_timesteps, n_features))
    # The per-layer input_shape kwargs are ignored in the functional API
    # (the Input layer fixes the shape), so they are omitted below.
    conv_1 = Convolution1D(12, 5, padding='same', activation='relu')(input)
    bat_1 = BatchNormalization()(conv_1)
    # drop_1 = Dropout(0.3)(bat_1)
    maxp_1 = MaxPooling1D(pool_size=2, padding='same', strides=2)(bat_1)

    conv_2 = Convolution1D(12, 5, padding='same', activation='relu')(maxp_1)
    bat_2 = BatchNormalization()(conv_2)
    # drop_2 = Dropout(0.3)(conv_2)
    maxp_2 = MaxPooling1D(pool_size=2, padding='same', strides=2)(bat_2)

    conv_3 = Convolution1D(20, 5, padding='same', activation='relu')(maxp_2)
    bat_3 = BatchNormalization()(conv_3)
    drop_3 = Dropout(0.3)(bat_3)
    maxp_3 = MaxPooling1D(pool_size=2, padding='same', strides=2)(drop_3)

    conv_4 = Convolution1D(16, 5, padding='same', activation='relu')(maxp_3)
    bat_4 = BatchNormalization()(conv_4)
    # drop_4 = Dropout(0.3)(conv_4)
    maxp_4 = MaxPooling1D(pool_size=2, padding='same', strides=2)(bat_4)

    # drop = Dropout(0.3)(maxp_4)
    seq_features = GlobalAveragePooling1D()(maxp_4)

    other_features = Input(shape=(nb_features, ))
    model = Concatenate()([seq_features, other_features])
    model = Dense(n_outputs,
                  activation='softmax',
                  kernel_regularizer=regularizers.l2(0.2))(model)
    model = Model([input, other_features], model)
    model.compile(
        optimizer=Adam(lr=0.0003),
        # loss='categorical_crossentropy',
        # loss=focal_loss(gamma=2,alpha=1),
        loss=ghm.ghm_class_loss,
        metrics=['categorical_accuracy'])
    return model
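Because the graph has two Input layers, fit must receive the inputs as a list in the order [input, other_features]. A dummy-data sketch with illustrative shapes; ghm.ghm_class_loss is an external custom loss that the snippet assumes is importable.

import numpy as np

model = cnn_features_model(n_timesteps=128, n_features=6, n_outputs=4, nb_features=10)
X_seq = np.random.rand(8, 128, 6).astype('float32')                # sequence branch
X_other = np.random.rand(8, 10).astype('float32')                  # hand-crafted features
y = np.eye(4)[np.random.randint(0, 4, size=8)].astype('float32')   # one-hot labels
model.fit([X_seq, X_other], y, epochs=1)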
 def __init__(self, num_points):
     super(Conv64, self).__init__()
     self.num_points = num_points
     self.conv1 = Convolution1D(64, 1, input_shape=(self.num_points, 3), activation='relu')
     self.conv2 = Convolution1D(64, 1, activation='relu')  # input shape inferred from conv1's output
        def conv_bn_relu_3_sandwich(x, filters, kernel_size):
            first_x = x
            for _ in range(3):
                x = Convolution1D(filters, kernel_size, padding='same',
                                  kernel_initializer=weightinit,
                                  kernel_regularizer=l2(regularization))(x)
                x = BatchNormalization()(x)
                x = ReLU()(x)

            # Project the saved input (not the transformed x) for the shortcut;
            # the original applied this 1x1 conv to x, breaking the residual.
            first_x = Convolution1D(filters, kernel_size=1, padding='same',
                                    kernel_initializer=weightinit,
                                    kernel_regularizer=l2(regularization))(first_x)
            x = Add()([x, first_x])
            return x
def get_model():
    nclass = 5
    inp = Input(shape=(187, 1))
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(inp)
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    dense_1 = Dense(64, activation=activations.relu, name="dense_1")(img_1)
    dense_1 = Dense(64, activation=activations.relu, name="dense_2")(dense_1)
    dense_1 = Dense(nclass, activation=activations.softmax, name="dense_3_mitbih")(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)

    model.compile(optimizer=opt, loss=losses.sparse_categorical_crossentropy, metrics=['acc'])
    #model.summary()
    return model
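This variant compiles with sparse_categorical_crossentropy, so the labels are integer class ids rather than one-hot rows. A dummy-data smoke test whose shapes follow the (187, 1) Input layer:

import numpy as np

model = get_model()
X = np.random.rand(16, 187, 1).astype('float32')  # dummy heartbeats
y = np.random.randint(0, 5, size=(16,))           # integer ids for the 5 classes
model.fit(X, y, epochs=1, batch_size=4)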
 def __init__(self, num_points):
     super(FeatureTransNet, self).__init__()
     self.num_points = num_points
     self.conv1 = Convolution1D(64, 1, activation='relu')
     self.conv2 = Convolution1D(128, 1, activation='relu')
     self.conv3 = Convolution1D(1024, 1, activation='relu')
     self.max_pooling = MaxPooling1D(pool_size=self.num_points)
     self.dense1 = Dense(512, activation='relu')
     self.dense2 = Dense(256, activation='relu')
     self.dense3 = Dense(64 * 64,
                         weights=[
                             np.zeros([256, 64 * 64]),
                             np.eye(64).flatten().astype(np.float32)
                         ])
 def __init__(self,
              output_channels: int,
              input_channels: Optional[int] = None,
              kernel_size: int = 3,
              pooling_size: int = 1,
              dropout_rate: float = 0.0):
     super().__init__()
     dimension_decrease_factor = 4
     kernel_initializer = LecunNormal()
     self.dimension_decrease_layer = Convolution1D(
         output_channels // dimension_decrease_factor,
         kernel_size=1,
         activation=selu,
         kernel_initializer=kernel_initializer)
     self.convolutional_layer = Convolution1D(
         output_channels // dimension_decrease_factor,
         kernel_size=kernel_size,
         activation=selu,
         padding='same',
         kernel_initializer=kernel_initializer)
     self.dimension_increase_layer = Convolution1D(
         output_channels,
         kernel_size=1,
         activation=selu,
         kernel_initializer=kernel_initializer)
     if pooling_size > 1:
         self.pooling_layer = AveragePooling1D(pool_size=pooling_size,
                                               padding='same')
     else:
         self.pooling_layer = None
     if input_channels is not None and output_channels != input_channels:
         if output_channels < input_channels:
             raise NotImplementedError(
                 f'Residual blocks with fewer output channels than input '
                 f'channels are not implemented. Output channels was '
                 f'{output_channels} and input was {input_channels}.')
         self.dimension_change_permute0 = Permute((2, 1))
         self.dimension_change_layer = ZeroPadding1D(
             padding=(0, output_channels - input_channels))
         self.dimension_change_permute1 = Permute((2, 1))
     else:
         self.dimension_change_layer = None
     if dropout_rate > 0:
         self.dropout_layer = AlphaDropout(rate=dropout_rate,
                                           noise_shape=(50, 1,
                                                        output_channels))
     else:
         self.dropout_layer = None
def cnn_rnn_1_conv_1_lstm(win_len, regression=False):

    forward_lstm = LSTM(units=320, return_sequences=True)
    brnn = Bidirectional(forward_lstm)

    # CNN
    model = Sequential()
    model.add(
        Convolution1D(activation="relu",
                      input_shape=(win_len, 4),
                      padding="valid",
                      strides=1,
                      filters=16,
                      kernel_size=30))
    model.add(MaxPooling1D(strides=15, pool_size=15))
    model.add(Dropout(0.2))

    # RNN
    model.add(brnn)
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(128, activation='relu'))

    if regression:
        model.add(Dense(1, kernel_initializer='normal', activation='linear'))
    else:
        model.add(Dense(2, activation='softmax'))

    return model
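cnn_rnn_1_conv_1_lstm returns an uncompiled model, leaving the loss to the caller. A sketch of how the classification branch might be wired up; win_len=3000 follows the heuristic noted in cnn_1_conv_2_fcc further down, and the optimizer choice is illustrative.

model = cnn_rnn_1_conv_1_lstm(win_len=3000)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',   # e.g. 'mse' when regression=True
              metrics=['accuracy'])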
def train_model(data, model_path):
    x = data['x']
    y = data['y']
    (x_train, x_val, y_train, y_val) = train_test_split(x, y, test_size=0.3,
            random_state=SEED)

    print('Building model...')

    n_timesteps = x_train.shape[1]
    n_features = x_train.shape[2]

    # The original body mixed 2D kernel/pool tuples and an image input shape
    # into the Convolution1D calls; rewritten consistently for 1D sequences.
    model = models.Sequential()
    model.add(Convolution1D(64, FILTER_LENGTH, activation='relu',
                            input_shape=(n_timesteps, n_features)))
    model.add(MaxPooling1D(2))
    model.add(layers.Convolution1D(64, 3, activation='relu'))
    model.add(layers.MaxPooling1D(2))
    model.add(layers.Convolution1D(128, 3, activation='relu'))
    model.add(layers.MaxPooling1D(2))
    model.add(layers.Convolution1D(128, 3, activation='relu'))
    model.add(layers.MaxPooling1D(2))
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.3))
    model.add(layers.Dense(1, activation='sigmoid'))

    return model
    def __call__(self, inputs):
        if self.dilations is None:
            self.dilations = [1, 2, 4, 8, 16, 32]
        x = inputs
        x = Convolution1D(self.nb_filters,
                          1,
                          padding=self.padding,
                          name=self.name + '_initial_conv')(x)
        skip_connections = []
        for s in range(self.nb_stacks):
            for i in self.dilations:
                x, skip_out = residual_block(x,
                                             s,
                                             i,
                                             self.activation,
                                             self.nb_filters,
                                             self.kernel_size,
                                             self.padding,
                                             self.dropout_rate,
                                             name=self.name)
                skip_connections.append(skip_out)
        if self.use_skip_connections:
            x = tensorflow.keras.layers.add(skip_connections)
        x = Activation('relu')(x)

        if not self.return_sequences:
            output_slice_index = -1
            x = Lambda(lambda tt: tt[:, output_slice_index, :])(x)
        return x
def dilated_tcn(num_feat, num_classes, nb_filters,
                kernel_size, dilations, nb_stacks, max_len,
                activation='norm_relu', use_skip_connections=True,
                return_param_str=False, output_slice_index=None,
                regression=False, lr=0.00007):
    """
    dilation_depth : number of layers per stack
    nb_stacks : number of stacks.
    """
    input_layer = Input(name='input_layer', shape=(max_len, num_feat))
    x = input_layer
    x = Convolution1D(nb_filters, kernel_size, padding='causal', name='initial_conv')(x)

    skip_connections = []
    for s in range(nb_stacks):
        for i in dilations:
            x, skip_out = residual_block(x, s, i, activation, nb_filters, kernel_size)
            skip_connections.append(skip_out)

    if use_skip_connections:
        x = keras.layers.add(skip_connections)
    x = Activation('relu')(x)

    if output_slice_index is not None:  # can test with 0 or -1.
        if output_slice_index == 'last':
            output_slice_index = -1
        if output_slice_index == 'first':
            output_slice_index = 0
        x = Lambda(lambda tt: tt[:, output_slice_index, :])(x)

    print('x.shape=', x.shape)

    if not regression:
        # classification
        x = Dense(num_classes)(x)
        x = Activation('softmax', name='output_softmax')(x)
        output_layer = x
        print(f'model.x = {input_layer.shape}')
        print(f'model.y = {output_layer.shape}')
        model = Model(input_layer, output_layer)

        adam = optimizers.Adam(lr=lr, clipnorm=1.)
        model.compile(adam, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        print('Adam with norm clipping.')
    else:
        # regression
        x = Dense(1)(x)
        x = Activation('linear', name='output_dense')(x)
        output_layer = x
        print(f'model.x = {input_layer.shape}')
        print(f'model.y = {output_layer.shape}')
        model = Model(input_layer, output_layer)
        adam = optimizers.Adam(lr=lr, clipnorm=1.)
        model.compile(adam, loss='mean_squared_error', metrics=['mae','acc'])

    if return_param_str:
        param_str = 'D-TCN_C{}_B{}_L{}'.format(2, nb_stacks, dilations)
        return model, param_str
    else:
        return model
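A hedged construction sketch for dilated_tcn; all hyperparameter values below are illustrative, and residual_block must come from the original repository (it is not part of this snippet).

model = dilated_tcn(num_feat=40, num_classes=10, nb_filters=64,
                    kernel_size=8, dilations=[1, 2, 4, 8],
                    nb_stacks=2, max_len=128,
                    output_slice_index='last')
# With output_slice_index='last' the softmax is taken at the final timestep,
# yielding one prediction per sequence.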
def build_simple_model(x_train):
    print('Building simple model...')

    n_features = x_train.shape[2]
    input_shape = (None, n_features)
    model_input = Input(input_shape, name='input')
    layer = model_input
    layer = Convolution1D(filters=CONV_FILTER_COUNT, kernel_size=FILTER_LENGTH, name='convolution_1')(layer)
    layer = Dense(256, activation='relu')(layer)
    layer = Dropout(0.5)(layer)
    layer = Dense(len(GENRES))(layer)
    merge_layer = Lambda(function=lambda x: K.mean(x, axis=1), output_shape=lambda shape: (shape[0],) + shape[2:],
                         name='output_merged')
    layer = merge_layer(layer)
    layer = Activation('softmax', name='output_realtime')(layer)
    model_output = layer

    model = Model(model_input, model_output)
    opt = Adam(lr=0.001)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=opt,
        metrics=['accuracy']
    )

    print(model.summary())
    return model
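The Lambda 'output_merged' layer averages the per-frame class scores over the time axis; on unmasked input, GlobalAveragePooling1D computes the same reduction, as this standalone check illustrates:

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant(np.random.rand(2, 7, 10).astype('float32'))
print(np.allclose(K.mean(x, axis=1).numpy(),
                  tf.keras.layers.GlobalAveragePooling1D()(x).numpy()))  # True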
def build_model(x_train):
    print('Building model...')

    n_features = x_train.shape[2]
    input_shape = (None, n_features)
    model_input = Input(input_shape, name='input')
    layer = model_input
    for i in range(N_LAYERS):
        layer = Convolution1D(filters=CONV_FILTER_COUNT, kernel_size=FILTER_LENGTH,
                              name='convolution_' + str(i + 1))(layer)
        layer = BatchNormalization(momentum=0.9)(layer)
        layer = Activation('relu')(layer)
        layer = MaxPooling1D(2)(layer)
        layer = Dropout(0.5)(layer)

    layer = Dense(len(GENRES))(layer)
    time_distributed_merge_layer = Lambda(
        function=lambda x: K.mean(x, axis=1),
        output_shape=lambda shape: (shape[0],) + shape[2:],
        name='output_merged'
    )
    layer = time_distributed_merge_layer(layer)
    layer = Activation('softmax', name='output_realtime')(layer)
    model_output = layer
    model = Model(model_input, model_output)
    opt = Adam(lr=0.001)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=opt,
        metrics=['accuracy']
    )

    print(model.summary())
    return model
    def build_model(cls, n_features):
        n_labels = len(cls.label_names)

        print('Building model...')
        input_shape = (None, n_features)
        model_input = Input(input_shape, name='input')
        layer = model_input
        for i in range(N_LAYERS):
            layer = Convolution1D(filters=CONV_FILTER_COUNT,
                                  kernel_size=FILTER_LENGTH,
                                  name='convolution_' + str(i + 1))(layer)
            layer = BatchNormalization(momentum=0.9)(layer)
            layer = Activation('relu')(layer)
            layer = MaxPooling1D(2)(layer)
            layer = Dropout(0.5)(layer)

        layer = TimeDistributed(Dense(n_labels))(layer)
        time_distributed_merge_layer = Lambda(
            function=lambda x: K.mean(x, axis=1),
            output_shape=lambda shape: (shape[0], ) + shape[2:],
            name='output_merged')
        layer = time_distributed_merge_layer(layer)
        layer = Activation('softmax', name='output_realtime')(layer)
        model_output = layer
        model = Model(model_input, model_output)
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(learning_rate=0.001),
                      metrics=[
                          'accuracy',
                          TopKCategoricalAccuracy(3, name='top3-accuracy')
                      ])
        model.summary()
        return model
    def build(self):
        conv_layers = [[256, 10], [256, 7], [256, 5], [256, 3]]
        fully_connected_layers = [1024, 1024]
        input = Input(shape=(self.max_sequence_len, ),
                      dtype='int32',
                      name='input')
        embedded_sequence = self.embedding_layer(input)

        convolution_output = []
        for num_filters, filter_width in conv_layers:
            conv = Convolution1D(filters=num_filters,
                                 kernel_size=filter_width,
                                 activation='tanh',
                                 name='Conv1D_{}_{}'.format(
                                     num_filters,
                                     filter_width))(embedded_sequence)
            pool = GlobalMaxPooling1D(name='MaxPoolingOverTime_{}_{}'.format(
                num_filters, filter_width))(conv)
            convolution_output.append(pool)
        x = Concatenate()(convolution_output)
        for fl in fully_connected_layers:
            x = Dense(fl, activation='selu',
                      kernel_initializer='lecun_normal')(x)
            x = AlphaDropout(0.5)(x)

        output = Dense(self.class_len, activation='sigmoid')(x)
        model = Model(inputs=input, outputs=output)
        return model
 def testOutput(self):
     with self.assertRaises(ValueError) as cm:
         self.classifier.add(Convolution1D(64, 0, padding="same", input_shape=(32, 32, 1), activation='relu'))
     print(cm.expected)
     with self.assertRaises(ValueError) as cm:
         self.classifier.add(Convolution2D(64, 0, padding="same", input_shape=(32, 32, 1), activation='relu'))
     print(cm.expected)
     with self.assertRaises(ValueError) as cm:
         self.classifier.add(Convolution3D(64, 0, padding="same", input_shape=(32, 32, 1), activation='relu'))
     print(cm.expected)
     with self.assertRaises(ValueError) as cm:
         self.classifier.add(Conv2DTranspose(64, 0, padding="same", input_shape=(32, 32, 1), activation='relu'))
     print(cm.expected)
     with self.assertRaises(ValueError) as cm:
         self.classifier.add(Conv3DTranspose(64, 0, padding="same", input_shape=(32, 32, 1), activation='relu'))
     print(cm.expected)
     with self.assertRaises(ValueError) as cm:
         self.classifier.add(SeparableConv1D(64, 0, padding="same", input_shape=(32, 32, 1), activation='relu'))
     print(cm.expected)
     with self.assertRaises(ValueError) as cm:
         self.classifier.add(SeparableConv2D(64, 0, padding="same", input_shape=(32, 32, 1), activation='relu'))
     print(cm.expected)
     with self.assertRaises(ValueError) as cm:
         self.classifier.add(DepthwiseConv2D(0, padding="same", input_shape=(32, 32, 1), activation='relu'))
     print(cm.expected)
    def fit(self, train_x, train_y):
        # It is hard to pass the tokenizer into the model to compute vocab_size,
        # so the vectorization is built directly inside this method.
        max_features = 20000
        maxlen = 50
        self.tokenizer = tokenizer = Tokenizer(num_words=max_features)
        tokenizer.fit_on_texts(list(train_x))
        list_tokenized_train = tokenizer.texts_to_sequences(train_x)

        vectorized_train_x = pad_sequences(list_tokenized_train,
                                           maxlen=maxlen,
                                           padding='post')
        vocab_size = len(tokenizer.word_index)

        for idx, cls in enumerate(self.classes):
            print("Building Model")

            model = tf.keras.models.Sequential()
            model.add(Embedding(vocab_size + 1, 100, input_length=50))
            model.add(Convolution1D(256, 5, padding='same'))
            model.add(MaxPooling1D(3, 3, padding='same'))
            model.add(Convolution1D(128, 5, padding='same'))
            model.add(MaxPooling1D(3, 3, padding='same'))
            model.add(Convolution1D(64, 3, padding='same'))
            model.add(Flatten())
            model.add(Dropout(0.1))
            model.add(BatchNormalization())
            model.add(Dense(256, activation='relu'))
            model.add(Dropout(0.1))
            model.add(Dense(2, activation='softmax'))
            model.compile(optimizer='adam',
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])

            self.models[cls] = model

            class_labels = train_y[:, idx]
            # Binarizing this way is recommended by one of the errors Keras returns.
            class_labels_binarized_the_way_keras_wants = to_categorical(
                class_labels)
            print("fitting model")

            self.models[cls].fit(vectorized_train_x,
                                 class_labels_binarized_the_way_keras_wants,
                                 epochs=5,
                                 batch_size=800)
            print("done fitting")
def cnn_1_conv_2_fcc(win_len, regression=False):

    # =================  Heuristic-based efficient architectures (win_len=3000) =================
    # - conv: Conv1D
    # - mp: MaxPooling1D
    # - dp: Dropout

    # >>> conv[32@50]_mp[5@15]_fcc[128]

    # CNN
    model = Sequential()
    model.add(
        Convolution1D(activation="relu",
                      input_shape=(win_len, 4),
                      padding="valid",
                      strides=1,
                      filters=32,
                      kernel_size=50))  #starting default: 30, 60 (ok)
    model.add(MaxPooling1D(strides=5, pool_size=50))

    model.add(
        Convolution1D(activation="relu",
                      input_shape=(win_len, 4),
                      padding="valid",
                      strides=1,
                      filters=64,
                      kernel_size=50))  #starting default: 30, 60 (ok)
    model.add(MaxPooling1D(strides=5, pool_size=50))

    #model.add(Convolution1D(activation="relu",
    #			input_shape=(win_len, 4),
    #			padding="valid", strides=1,
    #			filters=32, kernel_size=40)) #starting default: 30, 60 (ok)
    #model.add(MaxPooling1D(strides=5, pool_size=40))

    # FCC
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    #model.add(Dense(128, activation='relu'))
    #model.add(Dropout(0.2))

    if regression:
        model.add(Dense(1, kernel_initializer='normal', activation='linear'))
    else:
        model.add(Dense(2, activation='softmax'))

    return model
 def f(input_):
     residual = input_
     tanh_out = Convolution1D(n_atrous_filters,
                              atrous_filter_size,
                              dilation_rate=atrous_rate,
                              padding='same',
                              activation='tanh')(input_)
     sigmoid_out = Convolution1D(n_atrous_filters,
                                 atrous_filter_size,
                                 dilation_rate=atrous_rate,
                                 padding='same',
                                 activation='sigmoid')(input_)
     merged = Multiply()([tanh_out, sigmoid_out])
     skip_out = Convolution1D(1, 1, activation='relu',
                              padding='same')(merged)
     out = Add()([skip_out, residual])
     return out, skip_out
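The closure above is the gated residual unit of a WaveNet-style model; n_atrous_filters, atrous_filter_size and atrous_rate are captured from an enclosing factory that the snippet does not show. A chaining sketch, with wavenet_block as a hypothetical name for that factory:

from tensorflow.keras.layers import Input, Add, Activation
from tensorflow.keras.models import Model

# wavenet_block is a hypothetical factory assumed to return the f defined above.
inp = Input(shape=(1024, 1))   # one channel, so the residual Add matches skip_out
x = inp
skips = []
for rate in [1, 2, 4, 8, 16]:
    x, skip = wavenet_block(64, 2, rate)(x)
    skips.append(skip)
out = Activation('relu')(Add()(skips))
model = Model(inp, out)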
def identity_block(X, filters, kernel_size=5, dropout=0.1, pool_size=2):
    img_1 = Convolution1D(filters,
                          kernel_size=kernel_size,
                          activation='relu',
                          padding='same')(X)
    img_1 = Convolution1D(filters,
                          kernel_size=kernel_size,
                          activation='relu',
                          padding='same')(img_1)
    img_1 = Convolution1D(filters,
                          kernel_size=kernel_size,
                          activation='relu',
                          padding='same')(img_1)
    img_1 = Add()([X, img_1])
    img_1 = Activation('relu')(img_1)
    img_1 = Dropout(rate=dropout)(img_1)
    return img_1
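identity_block preserves both the temporal length (padding='same') and the channel count, so blocks stack freely as long as filters equals the incoming channel count (otherwise the Add fails). A minimal smoke test with illustrative shapes:

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inp = Input(shape=(187, 32))       # channel count must equal `filters`
x = identity_block(inp, filters=32)
x = identity_block(x, filters=32)
print(Model(inp, x).output_shape)  # (None, 187, 32)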