Example no. 1
    def build_model_CNN(self):
        ''' CNN text classifier on frozen pretrained embeddings '''

        model = Sequential()
        embedding_layer = Embedding(self.vocab_length,
                                    300,
                                    weights=[self.embedding_matrix],
                                    input_length=self.length_long_sentence,
                                    trainable=False)
        model.add(embedding_layer)
        model.add(
            Conv1D(filters=150,
                   kernel_regularizer=l2(0.01),
                   kernel_size=5,
                   strides=1,
                   padding='valid'))
        model.add(MaxPooling1D(2, padding='valid'))
        model.add(
            Conv1D(filters=150,
                   kernel_regularizer=l2(0.01),
                   kernel_size=5,
                   strides=1,
                   padding='valid'))
        model.add(MaxPooling1D(2, padding='valid'))
        model.add(Flatten())
        model.add(Dense(80, kernel_regularizer=l2(0.01), activation='relu'))
        model.add(Dense(40, kernel_regularizer=l2(0.01), activation='relu'))
        model.add(Dense(20, kernel_regularizer=l2(0.01), activation='relu'))
        model.add(Dense(2, activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer=RMSprop(learning_rate=0.001),
                      metrics=['accuracy'])
        model.summary()
        self.model = model  # the original built the model but never stored or returned it
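
This snippet (like Examples no. 17, 18, 22, and 26 below) assumes a precomputed embedding matrix holding pretrained vectors. A minimal sketch of how such a matrix is typically assembled, assuming a word_index from a fitted Keras Tokenizer and a hypothetical glove dict mapping words to 300-d vectors (neither is part of the original snippet):

import numpy as np

def build_embedding_matrix(word_index, glove, dim=300):
    # Row 0 is left as zeros for the padding index.
    matrix = np.zeros((len(word_index) + 1, dim))
    for word, i in word_index.items():
        vec = glove.get(word)
        if vec is not None:
            matrix[i] = vec  # out-of-vocabulary words keep the zero vector
    return matrix
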
Example no. 2
def cnn_trans(local_steps, global_steps, n_features, lr, loc_f1, loc_k1,
              loc_p1, glob_f1, glob_k1, glob_p1, glob_f2, glob_k2, glob_p2,
              n_neurons1, n_neurons2, n_neurons3, drop1, drop2, num_heads,
              key_dim):
    inputA = Input(shape=(local_steps, n_features))
    inputB = Input(shape=(global_steps, n_features))
    local = Conv1D(loc_f1, loc_k1, padding='same', activation='relu')(inputA)
    local = Conv1D(loc_f1, loc_k1, padding='same', activation='relu')(local)
    local = MaxPooling1D(loc_p1)(local)

    glob = Conv1D(glob_f1, glob_k1, padding='same', activation='relu')(inputB)
    glob = Conv1D(glob_f1, glob_k1, padding='same', activation='relu')(glob)
    glob = MaxPooling1D(glob_p1)(glob)
    glob = Conv1D(glob_f2, glob_k2, padding='same', activation='relu')(glob)
    glob = MaxPooling1D(glob_p2)(glob)

    # Cross-attention: the local branch is the query, the global branch the value.
    joined = MultiHeadAttention(num_heads, key_dim,
                                kernel_regularizer='l2')(local, glob)
    joined = Flatten()(joined)
    joined = Dense(n_neurons1, activation='relu',
                   kernel_regularizer='l2')(joined)
    joined = Dropout(drop1)(joined)
    joined = Dense(n_neurons2, activation='relu',
                   kernel_regularizer=None)(joined)
    joined = Dropout(drop2)(joined)
    joined = Dense(n_neurons3, activation='relu',
                   kernel_regularizer=None)(joined)
    out = Dense(1, activation='sigmoid')(joined)
    model = Model(inputs=[inputA, inputB], outputs=out)
    opt = Adam(learning_rate=lr)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=metrics)
    return model
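
A minimal usage sketch for cnn_trans, assuming a module-level metrics list (the snippet references metrics without defining it); all shapes and hyperparameter values here are illustrative:

import numpy as np

metrics = ['accuracy']  # assumed module-level name used by model.compile above
X_local = np.random.rand(256, 64, 1)    # (samples, local_steps, n_features)
X_global = np.random.rand(256, 256, 1)  # (samples, global_steps, n_features)
y = np.random.randint(0, 2, size=(256,))

model = cnn_trans(64, 256, 1, 1e-3,
                  loc_f1=16, loc_k1=5, loc_p1=2,
                  glob_f1=16, glob_k1=5, glob_p1=2,
                  glob_f2=32, glob_k2=5, glob_p2=2,
                  n_neurons1=64, n_neurons2=32, n_neurons3=16,
                  drop1=0.3, drop2=0.3, num_heads=4, key_dim=16)
model.fit([X_local, X_global], y, epochs=1, batch_size=32)
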
Example no. 3
    def encode_embedding_input(input_layer,
                               type='subject',
                               reduce_size=False,
                               reduce_size_n=32
                               ):

        conv1_size = CONV_SIZES[type][0]

        if noise:
            input_layer = GaussianNoise(stddev=.001)(input_layer)

        if normalization:
            input_layer = BatchNormalization()(input_layer)

        if reduce_size:
            input_layer = Dense(reduce_size_n, activation="sigmoid")(input_layer)

        conv1 = Conv1D(conv1_size, (2,), activation=mish, padding='same')(input_layer)
        pool1 = MaxPooling1D((2,), padding='same')(conv1)

        if type in ['subject', 'object']:
            return Flatten()(pool1)

        conv2_size = CONV_SIZES[type][1]
        conv2 = Conv1D(conv2_size, (2,), activation=mish, padding='same')(pool1)
        pool2 = MaxPooling1D((2,), padding='same')(conv2)
        return Flatten()(pool2)
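
mish is referenced above (and as the string 'mish' in Examples no. 10 and 15 below) but its definition is elided, and older Keras versions did not ship it. A minimal sketch of the standard definition, mish(x) = x * tanh(softplus(x)), registered so the string form also resolves:

from tensorflow.keras import backend as K
from tensorflow.keras.utils import get_custom_objects

def mish(x):
    # mish(x) = x * tanh(softplus(x))
    return x * K.tanh(K.softplus(x))

get_custom_objects().update({'mish': mish})  # lets activation='mish' resolve by name
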
Example no. 4
def create_2_layer_CNN():
    model = Sequential()
    model.add(
        Conv1D(nodes,
               kernel_size=3,
               strides=1,
               activation='relu',
               input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling1D(pool_size=2))
    model.add(Dropout(dropRate))
    #
    model.add(Conv1D(32, 3, activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling1D(pool_size=2))
    model.add(Dropout(dropRate))

    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    # model.add(BatchNormalization())

    model.add(Dense(128, activation='relu'))
    # model.add(BatchNormalization())

    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])
    return model
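
create_2_layer_CNN closes over several module-level names (nodes, input_shape, dropRate, num_classes). A usage sketch with illustrative values for those names (they are not defined in the original snippet):

nodes, dropRate, num_classes = 64, 0.25, 6
input_shape = (128, 9)  # e.g. 128 timesteps, 9 sensor channels

model = create_2_layer_CNN()
model.summary()
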
Example no. 5
def build_sep_cnn_lstm(n_steps,
                       n_features,
                       lr,
                       n_filters,
                       n_filters2,
                       k_size,
                       p_size,
                       n_neurons1,
                       n_neurons2,
                       n_neurons3,
                       drop1,
                       drop2,
                       reg=None):  # was reg='None' (a string), which Keras cannot interpret as a regularizer
    inp = Input(shape=(n_steps, n_features))
    rnn = LSTM(n_neurons1, return_sequences=True, kernel_regularizer=reg)(inp)
    rnn = LSTM(n_neurons2)(rnn)
    rnn = Dropout(drop1)(rnn)
    cnn = Conv1D(n_filters, k_size, padding='same', activation='relu')(inp)
    cnn = MaxPooling1D(p_size)(cnn)
    cnn = Conv1D(n_filters2,
                 k_size,
                 padding='same',
                 activation='relu',
                 kernel_regularizer=reg)(cnn)
    cnn = MaxPooling1D(p_size)(cnn)
    cnn = Flatten()(cnn)
    cnn = Dropout(drop2)(cnn)
    rnn = concatenate([rnn, cnn])
    rnn = Dense(n_neurons3, activation='relu', kernel_regularizer=reg)(rnn)
    out = Dense(1, activation='sigmoid')(rnn)
    model = Model(inp, out)
    opt = Adam(learning_rate=lr)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=metrics)
    return model
Example no. 6
def build_cnn_lstm_attn2(local_steps, global_steps, n_features, lr, loc_f1,
                         loc_k1, loc_p1, loc_drop1, glob_f1, glob_k1, glob_p1,
                         glob_f2, glob_k2, glob_p2, glob_drop1, n_neurons1,
                         n_neurons2, n_neurons3, n_neurons4, n_neurons5, drop1,
                         drop2, reg):
    inputA = Input(shape=(local_steps, n_features))
    inputB = Input(shape=(global_steps, n_features))
    local = Conv1D(loc_f1, loc_k1, padding='same', activation='relu')(inputA)
    local = Conv1D(loc_f1, loc_k1, padding='same', activation='relu')(local)
    local = MaxPooling1D(loc_p1)(local)

    glob = Conv1D(glob_f1, glob_k1, padding='same', activation='relu')(inputB)
    glob = Conv1D(glob_f1, glob_k1, padding='same', activation='relu')(glob)
    glob = MaxPooling1D(glob_p1)(glob)
    glob = Conv1D(glob_f2, glob_k2, padding='same', activation='relu')(glob)
    glob = Conv1D(glob_f2, glob_k2, padding='same', activation='relu')(glob)
    glob = MaxPooling1D(glob_p2)(glob)

    joined = Attention()([local, glob])  # dot-product attention: local is the query, glob the value
    #joined = LSTM(n_neurons1, kernel_regularizer=l2(reg), return_sequences=True)(joined)
    joined = LSTM(n_neurons2, kernel_regularizer=l2(reg))(joined)
    joined = Dense(n_neurons3, activation='relu')(joined)
    joined = Dropout(drop1)(joined)
    joined = Dense(n_neurons4, activation='relu',
                   kernel_regularizer=None)(joined)
    joined = Dropout(drop2)(joined)
    joined = Dense(n_neurons5, activation='relu',
                   kernel_regularizer=None)(joined)
    out = Dense(1, activation='sigmoid')(joined)
    model = Model(inputs=[inputA, inputB], outputs=out)
    opt = Adam(learning_rate=lr)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=metrics)
    return model
Example no. 7
def _cnn(x_shape):
    """https://github.com/lykaust15/Deep_learning_examples/blob/master/
    8.RBP_prediction_CNN/RBP_binding_site_prediction.ipynb"""
    model = Sequential([
        Conv1D(128, (10, ),
               activation='relu',
               input_shape=(x_shape[1], x_shape[2])),
        Dropout(0.25),
        MaxPooling1D(pool_size=(3, ), strides=(1, )),
        Conv1D(128, (10, ), activation='relu', padding='same'),
        Dropout(0.25),
        MaxPooling1D(pool_size=(3, ), strides=(1, )),
        Conv1D(256, (5, ), activation='relu', padding='same'),
        Dropout(0.25),
        GlobalAveragePooling1D(),
        Dropout(0.25),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(1, activation='sigmoid')
    ])

    model.compile(loss='binary_crossentropy',
                  optimizer=Adadelta(),
                  metrics=['accuracy'])

    return model
Example no. 8
def build_model() -> Model:
    """
    This is the code for the "NYU" model from "Character Level Based Detection of DGA Domain Names"
    (http://faculty.washington.edu/mdecock/papers/byu2018a.pdf)
    which is itself adapted from X. Zhang, J. Zhao, and Y. LeCun, “Character-level convolutional networks for text
    classification,” in Advances in Neural Information Processing Systems, vol. 28, 2015, pp. 649–657.
    """
    # a constant here representing the maximum expected length of a domain.
    max_length = 75
    main_input = Input(shape=(max_length,), dtype="int32", name="main_input")
    embedding = Embedding(input_dim=128, output_dim=128, input_length=max_length)(main_input)
    conv1 = Conv1D(filters=128, kernel_size=3, padding="same", strides=1)(embedding)
    thresh1 = ThresholdedReLU(1e-6)(conv1)
    max_pool1 = MaxPooling1D(pool_size=2, padding="same")(thresh1)
    conv2 = Conv1D(filters=128, kernel_size=2, padding="same", strides=1)(max_pool1)
    thresh2 = ThresholdedReLU(1e-6)(conv2)
    max_pool2 = MaxPooling1D(pool_size=2, padding="same")(thresh2)
    flatten = Flatten()(max_pool2)
    fc = Dense(64)(flatten)
    thresh_fc = ThresholdedReLU(1e-6)(fc)
    drop = Dropout(0.5)(thresh_fc)
    output = Dense(1, activation="sigmoid")(drop)
    model = Model(inputs=main_input, outputs=output)
    precision = as_keras_metric(tf.metrics.precision)
    recall = as_keras_metric(tf.metrics.recall)
    model.compile(
        loss="binary_crossentropy",
        optimizer="adam",
        metrics=["mae", "mean_squared_error", "acc", precision, recall],
    )
    return model
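
as_keras_metric is used above but not defined in the snippet. A common TF1-era recipe (an assumption here, not necessarily the original's code) wraps the streaming tf.metrics.* functions, which return a (value, update_op) pair, so Keras can treat them as ordinary metrics:

import functools
import tensorflow as tf
from tensorflow.keras import backend as K

def as_keras_metric(method):
    @functools.wraps(method)
    def wrapper(y_true, y_pred, **kwargs):
        value, update_op = method(y_true, y_pred, **kwargs)
        K.get_session().run(tf.local_variables_initializer())
        # Force the update op to run whenever the metric value is read.
        with tf.control_dependencies([update_op]):
            value = tf.identity(value)
        return value
    return wrapper
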
Example no. 9
def build_astronet2(len_local, len_global, n_features, drop1, lr):
    inputA = Input(shape=(len_local, n_features))
    inputB = Input(shape=(len_global, n_features))

    loc = Conv1D(16, 5, activation='relu', padding='same')(inputA)
    loc = Conv1D(16, 5, activation='relu', padding='same')(loc)
    loc = MaxPooling1D(5, strides=2)(loc)
    loc = Conv1D(32, 5, activation='relu', padding='same')(loc)
    loc = Conv1D(32, 5, activation='relu', padding='same')(loc)
    loc = MaxPooling1D(5, strides=2)(loc)
    loc = Flatten()(loc)

    glob = Conv1D(16, 5, activation='relu', padding='same')(inputB)
    glob = Conv1D(16, 5, activation='relu', padding='same')(glob)
    glob = MaxPooling1D(5, strides=2)(glob)
    glob = Conv1D(32, 5, activation='relu', padding='same')(glob)
    glob = Conv1D(32, 5, activation='relu', padding='same')(glob)
    glob = MaxPooling1D(5, strides=2)(glob)
    glob = Conv1D(64, 5, activation='relu', padding='same')(glob)
    glob = Conv1D(64, 5, activation='relu', padding='same')(glob)
    glob = MaxPooling1D(5, strides=2)(glob)
    glob = Flatten()(glob)
    joined = concatenate([loc, glob])
    joined = Dense(64, activation='relu')(joined)
    joined = Dropout(drop1)(joined)
    joined = Dense(32, activation='relu')(joined)
    out = Dense(1, activation='sigmoid')(joined)
    model = Model(inputs=[inputA, inputB], outputs=out)
    opt = Adam(learning_rate=lr, epsilon=1e-8)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=metrics)
    return model
Example no. 10
    def encode_embedding_input(input_layer):
        conv1_size, conv2_size = (CONV_SIZES[2], CONV_SIZES[1])

        input_layer = GaussianNoise(stddev=.1)(input_layer)
        conv1 = Conv1D(conv1_size, (2,), activation='mish', padding='same')(input_layer)
        pool1 = MaxPooling1D((2,), padding='same')(conv1)
        conv2 = Conv1D(conv2_size, (2,), activation='mish', padding='same')(pool1)
        pool2 = MaxPooling1D((2,), padding='same')(conv2)
        return Flatten()(pool2)
Example no. 11
    def graph(self,
              type='shallow',
              filter_size=3,
              filter_num=100,
              num_filter_block=None,
              dropout_rate=0):
        def ConvBlock(model, filters, kernel_size=filter_size):
            model.add(BatchNormalization())
            model.add(
                Conv1D(filters=filters,
                       kernel_size=kernel_size,
                       padding='same',
                       activation='relu'))
            model.add(BatchNormalization())
            model.add(
                Conv1D(filters=filters,
                       kernel_size=kernel_size,
                       padding='same',
                       activation='relu'))

        if type == 'shallow' or type == 'scnn':
            self.model.add(
                Conv1D(kernel_size=filter_size,
                       strides=1,
                       filters=filter_num,
                       padding='valid',
                       activation='relu'))
            self.model.add(
                MaxPooling1D(pool_size=self.data_sequence - filter_size,
                             strides=1,
                             padding='valid'))
            self.model.add(Flatten())
            self.model.add(Dropout(rate=dropout_rate))
            self.model.add(Dense(self.num_labels, activation='softmax'))

        elif type == 'deep' or type == 'dcnn':
            self.model.add(
                Conv1D(kernel_size=filter_size,
                       filters=filter_num,
                       padding='same',
                       activation='relu'))

            cur_filter_num = filter_num
            for num_blocks in num_filter_block:
                for _ in range(num_blocks):
                    ConvBlock(model=self.model,
                              kernel_size=filter_size,
                              filters=cur_filter_num)
                self.model.add(MaxPooling1D(pool_size=3, strides=2))
                cur_filter_num *= 2
            self.model.add(Flatten())

            self.model.add(Dense(2048, activation='relu', name='Dense1'))
            self.model.add(Dense(2048, activation='relu', name='Dense2'))
            self.model.add(
                Dense(self.num_labels, activation='softmax', name='Output'))
            self.model.summary()
Example no. 12
def create_convolutional_encoder(input_tensor):
    # The argument is an input tensor, not a shape tuple; renamed accordingly. The
    # redundant input_shape kwargs are dropped: in the functional API the shape comes from the tensor.
    reshape = Reshape(target_shape=(INPUT_LENGTH, 1))(input_tensor)
    conv1 = Conv1D(filters=32, kernel_size=30, activation='relu')(reshape)
    conv1 = MaxPooling1D(pool_size=10)(conv1)
    conv1 = BatchNormalization()(conv1)
    conv2 = Conv1D(filters=64, kernel_size=30, activation='relu')(conv1)
    conv2 = MaxPooling1D(pool_size=10)(conv2)
    conv2 = BatchNormalization()(conv2)

    return conv2
Example no. 13
    def create(self):
        """ Creates CNN model.

        Returns
        -------
        model: Model
            A Convolutional Neural Network model
        """

        model = Sequential()
        model.add(
            Reshape((self.input_shape[0], 1), input_shape=self.input_shape))

        if isinstance(self.filters, int):
            model.add(
                Conv1D(self.filters,
                       self.kernel_size,
                       strides=self.strides,
                       padding='valid',
                       activation='relu',
                       kernel_regularizer=self.kernel_regularizer))
            if self.pool > 0:
                model.add(MaxPooling1D(self.pool))
        else:
            for c, n_filters in enumerate(self.filters, start=1):
                model.add(
                    Conv1D(n_filters,
                           self.kernel_size,
                           strides=self.strides,
                           padding='valid',
                           kernel_regularizer=self.kernel_regularizer))
                if self.batch_normalization:
                    model.add(BatchNormalization())
                    model.add(Activation('relu'))
                elif self.dropout:
                    model.add(Activation('relu'))
                    model.add(Dropout(self.dropout_rate))
                else:
                    model.add(Activation('relu'))

                if self.pool > 0 and len(self.filters) != c:
                    model.add(MaxPooling1D(self.pool))

        model.add(GlobalAveragePooling1D())
        if self.include_top:
            model.add(
                Dense(self.num_classes,
                      activation=self.last_activation,
                      kernel_regularizer=self.kernel_regularizer))
        if self.summary:
            model.summary()
        return model
Example no. 14
def get_cnn_model(input_shape):
    """
    Creates a Sequential model with Conv and MaxPooling layers
    Prints summary of model
    :param input_shape: tuple
    :return: Sequential
    """
    model = Sequential()
    model.add(Conv1D(64, 15, activation='relu', input_shape=input_shape))
    model.add(MaxPooling1D(4))
    model.add(Conv1D(128, 15, activation='relu'))
    model.add(MaxPooling1D(4))
    model.summary()  # the docstring promises a printed summary; the original omitted this call
    return model
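
get_cnn_model returns an uncompiled convolutional trunk with no classification head. A minimal sketch of finishing it (the head sizes and the binary task are illustrative, not from the original):

from tensorflow.keras.layers import Dense, Flatten

model = get_cnn_model((1024, 1))  # e.g. 1024-sample windows, 1 channel
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
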
Example no. 15
    def encode_embedding_input(input_layer, large=False):
        conv1_size, conv2_size = (CONV_SIZES[2], CONV_SIZES[1]) if large else (CONV_SIZES[1], CONV_SIZES[0])

        if noise:
            input_layer = GaussianNoise(stddev=.001)(input_layer)
        
        if normalization:
            input_layer = BatchNormalization()(input_layer)
        
        conv1 = Conv1D(conv1_size, (2,), activation='mish', padding='same')(input_layer)
        pool1 = MaxPooling1D((2,), padding='same')(conv1)
        conv2 = Conv1D(conv2_size, (2,), activation='mish', padding='same')(pool1)
        pool2 = MaxPooling1D((2,), padding='same')(conv2)
        return Flatten()(pool2)
Example no. 16
def _cnnpar(x_shape,
            conv_filters1=50,
            kernel_sizes=None,
            maps_per_kernel=2,
            dropout1=0.1,
            dropout2=0.5,
            pool_size=3,
            conv_filters2=150,
            dense_units=60,
            lr=0.001):
    """https://github.com/emzodls/neuripp/blob/master/models.py"""
    if kernel_sizes is None:
        kernel_sizes = [3, 4, 5]

    inpt = Input(shape=(x_shape[1], x_shape[2]))
    convs = []

    for kernel_size in kernel_sizes:

        for map_n in range(maps_per_kernel):
            conv = Conv1D(filters=conv_filters1,
                          kernel_size=kernel_size,
                          padding='valid',
                          activation='relu',
                          kernel_initializer='glorot_normal',
                          strides=1)(inpt)
            conv_drop = Dropout(dropout1)(conv)
            max_pool = MaxPooling1D(pool_size)(conv_drop)
            convs.append(max_pool)

    merge = keras.layers.Concatenate(axis=1)(convs)
    mix = Conv1D(filters=conv_filters2,
                 kernel_size=kernel_sizes[0],
                 padding='valid',
                 activation='relu',
                 kernel_initializer='glorot_normal',
                 strides=1)(merge)
    max_pool = MaxPooling1D(3)(mix)
    flatten = Flatten()(max_pool)
    dense = Dense(dense_units, activation='relu')(flatten)
    drop = Dropout(dropout2)(dense)
    output = Dense(2, activation='softmax')(drop)  # softmax (original had sigmoid), as the sparse_categorical_crossentropy loss below expects a distribution
    model = keras.Model(inputs=inpt, outputs=output)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=keras.optimizers.Adam(lr),
                  metrics=['accuracy'])

    return model
Example no. 17
def build_model(embedding_weights,
                embedding_dim,
                num_words,
                input_length,
                num_classes=20):
    """ Builds a Keras model. It sets embeddings layer trainable to False
    to keep the embeddings fixed

    Parameters
    ----------
    embedding_weights: np.ndarray
        A numpy array contains embedding weights
    embedding_dim: int
        Embeddings dimension
    num_words: int
        Number of words in the dataset
    input_length: int
        Maximum sequence length
    num_classes: int
        Number of classes in the dataset

    Returns
    -------
    model: Model
        A compiled Keras model instance
    """

    embedding = Embedding(num_words,
                          embedding_dim,
                          embeddings_initializer=Constant(embedding_weights),
                          input_length=input_length,
                          trainable=False)

    seq_input = Input(shape=(input_length, ), dtype='int32')
    embedded_sequences = embedding(seq_input)
    x = Conv1D(128, 5, activation='relu')(embedded_sequences)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(num_classes, activation='softmax')(x)

    model = Model(seq_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc'])

    return model
Example no. 18
    def get_cnn_rnn_model(self, pre_embeddings, dp_rate=0.0, use_lstm=False, filter_sizes=[2, 3, 4]):
        """
        First a CNN produces a pooled feature sequence, then an RNN runs over that sequence.
        :param pre_embeddings: pre-trained embedding weights
        :param dp_rate: dropout rate
        :param use_lstm: use an LSTM unit if True, a GRU unit otherwise
        :return: the model
        """
        # The embedding part could use multiple channels, as in the original paper
        embedding_layer = Embedding(self.max_features,  # vocabulary size
                                    self.embedding_dims,  # word-vector dimension
                                    weights=[pre_embeddings],  # pre-trained word vectors
                                    input_length=self.maxlen,  # maximum sentence length
                                    trainable=False  # do not update the word vectors during training
                                    )
        inputs = Input((self.maxlen,))
        embedding = embedding_layer(inputs)

        # add a convolution layer
        c = Conv1D(NUM_FILTERS, 3, padding='valid', activation='relu')(embedding)
        cc = MaxPooling1D()(c)

        if dp_rate > 0:
            # add a dropout layer
            cc = Dropout(dp_rate)(cc)

        if use_lstm:
            x = CuDNNLSTM(RNN_DIM)(cc)
        else:
            x = CuDNNGRU(RNN_DIM)(cc)

        output = Dense(self.class_num, activation=self.last_activation)(x)
        model = Model(inputs=inputs, outputs=output)

        return model
Example no. 19
def _rnncnn(x_shape, lr, conv_filters, kernel_size, pool_size, lstm_units,
            dropout1, dropout2):
    """https://github.com/lykaust15/Deep_learning_examples/blob/master/2.CNN_
    RNN_sequence_analysis/DNA_sequence_function_prediction.ipynb"""
    model = Sequential([
        Convolution1D(
            activation='relu',
            input_shape=(x_shape[1], x_shape[2]),
            padding='valid',
            filters=conv_filters,
            kernel_size=kernel_size,
        ),
        MaxPooling1D(pool_size=pool_size, strides=13),
        Dropout(dropout1),
        Bidirectional(LSTM(lstm_units, return_sequences=True)),
        Dropout(dropout2),
        Flatten(),
        Dense(units=925),
        Activation('relu'),
        Dense(units=1),
        Activation('sigmoid'),
    ])

    model.compile(loss='binary_crossentropy',
                  optimizer=keras.optimizers.Adam(lr),
                  metrics=['acc'])

    return model
Example no. 20
def create_model(sequence_length):
    input_shape = (sequence_length, embedding_dim)

    model_input = Input(shape=input_shape)

    z = model_input

    z = Dropout(dropout_prob[0])(z)

    # Convolutional block
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                     kernel_size=sz,
                     padding="valid",
                     activation="relu",
                     strides=1)(z)
        conv = MaxPooling1D(pool_size=2)(conv)
        conv = Flatten()(conv)
        conv_blocks.append(conv)

    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]

    z = Dropout(dropout_prob[1])(z)
    z = Dense(hidden_dims, activation="relu")(z)
    model_output = Dense(1, activation="sigmoid")(z)

    model = Model(model_input, model_output)
    model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])

    model.summary()  # summary() prints itself and returns None
    return model
Example no. 21
    def build(self):
        n_steps, n_length = 4, 32  # 4 subsequences x 32 steps = 128 timesteps per window

        self.train_X = self.train_X.reshape(
            (self.train_X.shape[0], n_steps, n_length, self.n_features)) # self.n_features = 6
        self.test_X = self.test_X.reshape(
            (self.test_X.shape[0], n_steps, n_length, self.n_features))

        model = Sequential()
        model.add(TimeDistributed(Conv1D(filters=64, kernel_size=3,
                                         activation='relu'), input_shape=(None, n_length, self.n_features)))
        model.add(TimeDistributed(
            Conv1D(filters=16, kernel_size=3, activation='relu')))
        model.add(TimeDistributed(Dropout(0.5)))
        model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
        model.add(TimeDistributed(Flatten()))
        model.add(LSTM(100))
        model.add(Dropout(0.5))
        model.add(Dense(100, activation='relu'))
        model.add(Dense(self.n_outputs, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                           optimizer='adam', metrics=['accuracy'])

        # print(model.summary())
        # input()

        super().build(model)
Example no. 22
def ConvolutionalNet(vocabulary_size, embedding_dimension, input_length, embedding_weights=None):
    
    model = Sequential()
    if embedding_weights is None:
        model.add(Embedding(vocabulary_size, embedding_dimension, input_length=input_length, trainable=False))
    else:
        model.add(Embedding(vocabulary_size, embedding_dimension, input_length=input_length, weights=[embedding_weights], trainable=False))

    model.add(Convolution1D(32, 2, kernel_regularizer=l2(0.005)))
    model.add(BatchNormalization())
    model.add(Activation(activations.relu))

    model.add(Convolution1D(32, 2, kernel_regularizer=l2(0.001)))
    model.add(BatchNormalization())
    model.add(Activation(activations.relu))

    model.add(Convolution1D(32, 2, kernel_regularizer=l2(0.001)))
    model.add(BatchNormalization())
    model.add(Activation(activations.relu))

    model.add(MaxPooling1D(17))
    model.add(Flatten())

    model.add(Dense(1, use_bias=True, kernel_regularizer=l2(0.001)))
    model.add(BatchNormalization())
    model.add(Activation(activations.sigmoid))

    return model
Example no. 23
def model_v2(top_layer_units):
    model = Sequential()

    model.add(
        TimeDistributed(Conv1D(filters=16,
                               kernel_size=4,
                               padding='same',
                               activation=tf.nn.relu,
                               data_format='channels_last'),
                        input_shape=(NUM_MFCC, NUM_FRAMES, 1)))

    model.add(
        TimeDistributed(
            Conv1D(filters=8,
                   kernel_size=2,
                   padding='same',
                   activation=tf.nn.relu)))
    model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(50, return_sequences=True))
    model.add(Dropout(0.3))
    model.add(Flatten())
    model.add(Dense(units=512, activation=tf.nn.tanh))
    model.add(Dense(units=256, activation=tf.nn.tanh))
    model.add(
        Dense(units=top_layer_units,
              activation=tf.nn.softmax,
              name='top_layer'))
    return model
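
model_v2 is returned uncompiled, and NUM_MFCC/NUM_FRAMES are elided module constants. A compile-and-inspect sketch with illustrative values (the loss assumes integer class labels):

NUM_MFCC, NUM_FRAMES = 13, 98  # illustrative values for the elided module constants

model = model_v2(top_layer_units=10)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
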
Example no. 24
def evaluate_model(trainX, trainy, testX, testy):
    global best_accuracy
    verbose, epochs, batch_size = 0, 10, 32
    # n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]
    model = Sequential()
    #     model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps,n_features)))  (x_train.shape[1],1)
    model.add(
        Conv1D(filters=64,
               kernel_size=3,
               activation='relu',
               input_shape=(trainX.shape[1], 1)))
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
    model.add(Dropout(0.5))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dense(trainy.shape[1], activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # fit network
    model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)
    # evaluate model
    _, accuracy = model.evaluate(testX,
                                 testy,
                                 batch_size=batch_size,
                                 verbose=0)
    if accuracy > best_accuracy:
        best_accuracy = accuracy
        model.save(BestModleFilePath)
    return accuracy
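
A driver sketch for evaluate_model, assuming the module-level best_accuracy and BestModleFilePath the snippet references (identifier spelling kept as in the source) and data already loaded as trainX/trainy/testX/testy; the repeat count is illustrative:

best_accuracy = 0.0
BestModleFilePath = 'best_model.h5'

# Repeat the stochastic training run and keep the best checkpoint.
scores = [evaluate_model(trainX, trainy, testX, testy) for _ in range(10)]
print('mean=%.3f best=%.3f' % (sum(scores) / len(scores), best_accuracy))
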
Example no. 25
    def _create(self):
        EMBEDDING_DIMS = 50
        GRU_DIMS = 64
        DROPOUT_FC = 0.2
        DROPOUT_GRU = 0.2
        DROPOUT_EMB = 0.2
        # Convolution
        kernel_size = 3
        filters = 32
        pool_size = 2

        print('Creating Model...')
        # EMBEDDING
        input_play = Input(shape=(SEQ_LEN, ), dtype='int32', name='input_play')

        # Reserve 2 extra embedding rows beyond EMBEDDING_CLASSES for padding and "other" tokens
        embedding_layer = Embedding(input_dim=(EMBEDDING_CLASSES + 2),
                                    output_dim=EMBEDDING_DIMS,
                                    input_length=SEQ_LEN,
                                    mask_zero=False,
                                    trainable=True,
                                    name='emb')(input_play)
        drop_emb = Dropout(DROPOUT_EMB, name='dropout_emb')(embedding_layer)

        conv = Conv1D(filters,
                      kernel_size,
                      padding='same',
                      activation='relu',
                      strides=1,
                      name='conv1')(drop_emb)

        maxpool = MaxPooling1D(pool_size=pool_size, name='maxpool1')(conv)

        gru = GRU(GRU_DIMS, dropout=DROPOUT_GRU, name='gru1')(maxpool)

        # TIME_OF_DAY OHE
        ohe1 = Input(shape=(TOTAL_TOD_BINS, ), name='time_of_day_ohe')

        # DAY_OF_WEEK OHE
        ohe2 = Input(shape=(TOTAL_DOW_BINS, ), name='day_of_wk_ohe')

        # MERGE LAYERS
        print('Merging features...')

        merged = concatenate([gru, ohe1, ohe2], axis=1, name='concat')

        # FULLY CONNECTED LAYERS
        dense = Dense(128, activation='relu', name='main_dense')(merged)
        bn = BatchNormalization(name='bn_fc1')(dense)
        drop = Dropout(DROPOUT_FC, name='dropout1')(bn)
        dense = Dense(64, activation='relu', name='dense2')(drop)
        drop = Dropout(DROPOUT_FC, name='dropout2')(dense)
        dense = Dense(32, activation='relu', name='dense3')(drop)
        pred = Dense(TARGET_CLASSES, activation='softmax',
                     name='output')(dense)

        self.model = Model(inputs=[input_play, ohe1, ohe2], outputs=[pred])
        self.model.summary()  # summary() prints itself and returns None

        return self.model
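
_create wires three named inputs but never compiles the model it returns. A fitting sketch, assuming model is the returned Model and the arrays (hypothetical names) match the input layers; SEQ_LEN and the *_BINS/TARGET_CLASSES constants are elided module config:

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit({'input_play': X_play,        # (n, SEQ_LEN) int32 sequences
           'time_of_day_ohe': X_tod,    # (n, TOTAL_TOD_BINS) one-hot
           'day_of_wk_ohe': X_dow},     # (n, TOTAL_DOW_BINS) one-hot
          y_onehot,                     # (n, TARGET_CLASSES) one-hot labels
          epochs=5, batch_size=64)
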
Example no. 26
    def build_parametric_model(self, parameters):
        """
            The method creates the model when an object
            is instantiated.
        """
        learning_rate = parameters["learning_rate"]

        self.model = Sequential()
        self.model.add(Embedding(self.top_words, self.embedding_dim, input_length=self.max_words))
        self.model.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))
        self.model.add(MaxPooling1D(pool_size=2))
        self.model.add(Flatten())
        self.model.add(Dense(250, activation='relu'))
        self.model.add(Dense(1, activation='sigmoid'))

        # Setting the embedding matrix as the weights of the Embedding Layer.
        self.model.layers[0].set_weights([self.embedding_matrix])
        self.model.layers[0].trainable = False
        
        self.model.compile(
            loss='binary_crossentropy', 
            optimizer=Adam(learning_rate), 
            metrics=['accuracy'])
        
        self.model.summary()
        
        return
Example no. 27
def build_cnn_model(n_steps,
                    n_features,
                    lr,
                    n_filters,
                    k_size,
                    p_size,
                    n_neurons1,
                    n_neurons2,
                    n_neurons3,
                    drop1,
                    reg=None):  # was reg='None' (a string), which Keras cannot interpret as a regularizer
    model = Sequential()
    model.add(
        Conv1D(n_filters,
               k_size,
               input_shape=(n_steps, n_features),
               activation='relu',
               padding='same'))
    model.add(MaxPooling1D(p_size))  # default strides equal pool_size, not 1
    model.add(Dropout(drop1))
    model.add(Flatten())
    model.add(Dense(n_neurons1, activation='relu', kernel_regularizer=reg))
    # model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid', kernel_regularizer=reg))
    opt = Adam(learning_rate=lr)
    model.compile(optimizer=opt, loss='binary_crossentropy',
                  metrics=['accuracy'])  # 'val_acc' is not a metric name; Keras adds the val_ prefix itself
    return model
Example no. 28
    def create_model(self):
        K.clear_session()
        input0 = Input(shape=(self.c['sentencepad'], self.c['wordvectdim']))
        input1 = Input(shape=(self.c['sentencepad'], self.c['wordvectdim']))
        Convolt_Layer = []
        MaxPool_Layer = []
        Flatten_Layer = []
        for kernel_size, filters in self.c['cnnfilters'].items():
            Convolt_Layer.append(
                Convolution1D(filters=filters,
                              kernel_size=kernel_size,
                              padding='valid',
                              activation=self.c['cnnactivate'],
                              kernel_initializer=self.c['cnninitial']))
            MaxPool_Layer.append(
                MaxPooling1D(pool_size=int(self.c['sentencepad'] -
                                           kernel_size + 1)))
            Flatten_Layer.append(Flatten())
        Convolted_tensor0 = []
        Convolted_tensor1 = []
        for channel in range(len(self.c['cnnfilters'])):
            Convolted_tensor0.append(Convolt_Layer[channel](input0))
            Convolted_tensor1.append(Convolt_Layer[channel](input1))
        MaxPooled_tensor0 = []
        MaxPooled_tensor1 = []
        for channel in range(len(self.c['cnnfilters'])):
            MaxPooled_tensor0.append(MaxPool_Layer[channel](
                Convolted_tensor0[channel]))
            MaxPooled_tensor1.append(MaxPool_Layer[channel](
                Convolted_tensor1[channel]))
        Flattened_tensor0 = []
        Flattened_tensor1 = []
        for channel in range(len(self.c['cnnfilters'])):
            Flattened_tensor0.append(Flatten_Layer[channel](
                MaxPooled_tensor0[channel]))
            Flattened_tensor1.append(Flatten_Layer[channel](
                MaxPooled_tensor1[channel]))
        if len(self.c['cnnfilters']) > 1:
            Flattened_tensor0 = concatenate(Flattened_tensor0)
            Flattened_tensor1 = concatenate(Flattened_tensor1)
        else:
            Flattened_tensor0 = Flattened_tensor0[0]
            Flattened_tensor1 = Flattened_tensor1[0]
        absDifference = Lambda(lambda X: K.abs(X[0] - X[1]))(
            [Flattened_tensor0, Flattened_tensor1])
        mulDifference = multiply([Flattened_tensor0, Flattened_tensor1])
        allDifference = concatenate([absDifference, mulDifference])
        for densedimension in self.c['densedimension']:
            allDifference = Dense(
                units=int(densedimension),
                activation=self.c['denseactivate'],
                kernel_initializer=self.c['denseinitial'])(allDifference)
        output = Dense(
            name='output',
            units=self.c['num_classes'],
            activation='softmax',
            kernel_initializer=self.c['denseinitial'])(allDifference)
        self.model = Model(inputs=[input0, input1], outputs=output)
        self.model.compile(loss='mean_squared_error',
                           optimizer=self.c['optimizer'])
Example no. 29
def get_time_series_cnn_model(n_timesteps,
                              n_features,
                              n_outputs,
                              conv_layers=[(64, 3), (64, 3)],
                              dense_size=100,
                              quantized=False,
                              prune_params=None):
    keras_model = KerasModel(model=None,
                             quantized=quantized,
                             prune_params=prune_params)
    graph, session = keras_model.graph, keras_model.session
    with graph.as_default():
        with session.as_default():
            model = tf.keras.Sequential()
            filters, kernel_size = conv_layers[0]
            model.add(
                Conv1D(filters=filters,
                       kernel_size=kernel_size,
                       activation='relu',
                       input_shape=(n_timesteps, n_features)))
            if len(conv_layers) > 1:
                filters, kernel_size = conv_layers[1]
                model.add(
                    Conv1D(filters=filters,
                           kernel_size=kernel_size,
                           activation='relu'))
            model.add(Dropout(0.5))
            model.add(MaxPooling1D(pool_size=2))
            if len(conv_layers) > 2:
                filters, kernel_size = conv_layers[2]
                # (input_shape dropped: only the first layer needs it)
                model.add(
                    Conv1D(filters=filters,
                           kernel_size=kernel_size,
                           activation='relu'))
                model.add(MaxPooling1D(pool_size=2))
            model.add(Flatten())
            model.add(Dense(dense_size, activation='relu'))
            model.add(Dense(n_outputs, activation='softmax'))
            if prune_params:
                model = prune.prune_low_magnitude(model, **prune_params)
            model.compile(loss='categorical_crossentropy',
                          optimizer='adam',
                          metrics=['accuracy'])
            keras_model.model = model
            return keras_model
Example no. 30
def sepcnn_model(blocks,
                 filters,
                 kernel_size,
                 embedding_dim,
                 dropout_rate,
                 pool_size,
                 input_shape,
                 num_tags,
                 num_features,
                 use_pretrained_embedding=False,
                 is_embedding_trainable=True,
                 embedding_matrix=None):

    mod_units, mod_activation = last_layer_units(num_tags)
    model = models.Sequential()

    model.add(
        Embedding(
            input_dim=num_features,
            output_dim=embedding_dim,
            input_length=input_shape[0],
        ))

    for _ in range(blocks - 1):
        model.add(Dropout(rate=dropout_rate))
        model.add(
            SeparableConv1D(filters=filters,
                            kernel_size=kernel_size,
                            activation='relu',
                            bias_initializer='random_uniform',
                            depthwise_initializer='random_uniform',
                            padding='same'))
        model.add(
            SeparableConv1D(filters=filters,
                            kernel_size=kernel_size,
                            activation='relu',
                            bias_initializer='random_uniform',
                            depthwise_initializer='random_uniform',
                            padding='same'))
        model.add(MaxPooling1D(pool_size=pool_size))

    model.add(
        SeparableConv1D(filters=filters * 2,
                        kernel_size=kernel_size,
                        activation='relu',
                        bias_initializer='random_uniform',
                        depthwise_initializer='random_uniform',
                        padding='same'))
    model.add(
        SeparableConv1D(filters=filters * 2,
                        kernel_size=kernel_size,
                        activation='relu',
                        bias_initializer='random_uniform',
                        depthwise_initializer='random_uniform',
                        padding='same'))
    model.add(GlobalAveragePooling1D())
    model.add(Dropout(rate=dropout_rate))
    model.add(Dense(mod_units, activation=mod_activation))
    return model
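
last_layer_units is not defined in the snippet; a plausible sketch (an assumption, following the usual binary-vs-multiclass rule for the output layer):

def last_layer_units(num_tags):
    # Binary task: one sigmoid unit; otherwise one softmax unit per class.
    if num_tags == 2:
        return 1, 'sigmoid'
    return num_tags, 'softmax'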