Example #1
    def build_model_CNN(self):
        ''' CNN '''

        model = Sequential()
        embedding_layer = Embedding(self.vocab_length,
                                    300,
                                    weights=[self.embedding_matrix],
                                    input_length=self.length_long_sentence,
                                    trainable=False)
        model.add(embedding_layer)
        model.add(
            Conv1D(filters=150,
                   kernel_regularizer=l2(0.01),
                   kernel_size=5,
                   strides=1,
                   padding='valid'))
        model.add(MaxPooling1D(2, padding='valid'))
        model.add(
            Conv1D(filters=150,
                   kernel_regularizer=l2(0.01),
                   kernel_size=5,
                   strides=1,
                   padding='valid'))
        model.add(MaxPooling1D(2, padding='valid'))
        model.add(Flatten())
        model.add(Dense(80, kernel_regularizer=l2(0.01), activation='relu'))
        model.add(Dense(40, kernel_regularizer=l2(0.01), activation='relu'))
        model.add(Dense(20, kernel_regularizer=l2(0.01), activation='relu'))
        model.add(Dense(2, activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer=RMSprop(lr=0.001),
                      metrics=['accuracy'])
        model.summary()
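This builder is a class method and its imports are not shown; with a recent tf.keras they would be roughly the following (an assumption about the surrounding file; newer releases spell lr as learning_rate):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, Conv1D, MaxPooling1D, Flatten, Dense
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import RMSprop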
Example #2
def DenseNetLayer(x, b, f, n=1):
    """
    Implementation of a DenseNetLayer
    :param x: input
    :param b: number of elementary blocks in the layer
    :param f: features generated at every block
    :param n: unique identifier of the DenseNetLayer, used to name its scope
    :return: output tensor with b * f feature channels concatenated onto the input
    """
    with tf.name_scope(f"denseNet_{n}"):
        for _ in range(b):
            y = BatchNormalization()(x)
            y = Conv1D(filters=4 * f,
                       kernel_size=1,
                       padding='same',
                       data_format='channels_last')(y)
            y = Activation('relu')(y)
            y = BatchNormalization()(y)
            y = Conv1D(filters=f,
                       kernel_size=8,
                       padding='same',
                       data_format='channels_last')(y)
            y = Activation('relu')(y)
            x = Concatenate()([x, y])
    return x
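A minimal usage sketch with assumed shapes (not from the original project). Each elementary block concatenates f new channels, so one layer adds b * f channels to its input:

from tensorflow.keras.layers import Input

inputs = Input(shape=(128, 16))            # (timesteps, channels), assumed
x = DenseNetLayer(inputs, b=4, f=8, n=1)   # 16 + 4 * 8 = 48 channels
x = DenseNetLayer(x, b=4, f=8, n=2)        # 48 + 4 * 8 = 80 channels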
Example #3
    def build(self):
        n_steps, n_length = 4, 32

        self.train_X = self.train_X.reshape(
            (self.train_X.shape[0], n_steps, n_length, self.n_features)) # self.n_features = 6
        self.test_X = self.test_X.reshape(
            (self.test_X.shape[0], n_steps, n_length, self.n_features))

        model = Sequential()
        model.add(TimeDistributed(Conv1D(filters=64, kernel_size=3,
                                         activation='relu'), input_shape=(None, n_length, self.n_features)))
        model.add(TimeDistributed(
            Conv1D(filters=16, kernel_size=3, activation='relu')))
        model.add(TimeDistributed(Dropout(0.5)))
        model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
        model.add(TimeDistributed(Flatten()))
        model.add(LSTM(100))
        model.add(Dropout(0.5))
        model.add(Dense(100, activation='relu'))
        model.add(Dense(self.n_outputs, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                           optimizer='adam', metrics=['accuracy'])

        # print(model.summary())
        # input()

        super().build(model)
Example #4
def build_astronet2(len_local, len_global, n_features, drop1, lr):
    inputA = Input(shape=(len_local, n_features))
    inputB = Input(shape=(len_global, n_features))

    loc = Conv1D(16, 5, activation='relu', padding='same')(inputA)
    loc = Conv1D(16, 5, activation='relu', padding='same')(loc)
    loc = MaxPooling1D(5, strides=2)(loc)
    loc = Conv1D(32, 5, activation='relu', padding='same')(loc)
    loc = Conv1D(32, 5, activation='relu', padding='same')(loc)
    loc = MaxPooling1D(5, strides=2)(loc)
    loc = Flatten()(loc)

    glob = Conv1D(16, 5, activation='relu', padding='same')(inputB)
    glob = Conv1D(16, 5, activation='relu', padding='same')(glob)
    glob = MaxPooling1D(5, strides=2)(glob)
    glob = Conv1D(32, 5, activation='relu', padding='same')(glob)
    glob = Conv1D(32, 5, activation='relu', padding='same')(glob)
    glob = MaxPooling1D(5, strides=2)(glob)
    glob = Conv1D(64, 5, activation='relu', padding='same')(glob)
    glob = Conv1D(64, 5, activation='relu', padding='same')(glob)
    glob = MaxPooling1D(5, strides=2)(glob)
    glob = Flatten()(glob)
    joined = concatenate([loc, glob])
    joined = Dense(64, activation='relu')(joined)
    joined = Dropout(drop1)(joined)
    joined = Dense(32, activation='relu')(joined)
    out = Dense(1, activation='sigmoid')(joined)
    model = Model(inputs=[inputA, inputB], outputs=out)
    opt = Adam(lr=lr, epsilon=(10**(-8)))
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=metrics)
    return model
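build_astronet2 compiles against a module-level metrics list that is not shown; a hedged call sketch (the 201/2001 local/global lengths echo the AstroNet paper and are assumptions here):

metrics = ['accuracy']   # stand-in for the module-level list the source expects
model = build_astronet2(len_local=201, len_global=2001, n_features=1,
                        drop1=0.3, lr=1e-3)
model.summary()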
Example #5
File: train.py Project: nkzhangheng/degas
def build_model() -> Model:
    """
    This is the code for the "NYU" model from "Character Level Based Detection of DGA Domain Names"
    (http://faculty.washington.edu/mdecock/papers/byu2018a.pdf)
    which is itself adapted from X. Zhang, J. Zhao, and Y. LeCun, “Character-level convolutional networks for text
    classification,” in Advances in Neural Information Processing Systems, vol. 28, 2015, pp. 649–657.
    """
    # a constant here representing the maximum expected length of a domain.
    max_length = 75
    main_input = Input(shape=(max_length,), dtype="int32", name="main_input")
    embedding = Embedding(input_dim=128, output_dim=128, input_length=max_length)(main_input)
    conv1 = Conv1D(filters=128, kernel_size=3, padding="same", strides=1)(embedding)
    thresh1 = ThresholdedReLU(1e-6)(conv1)
    max_pool1 = MaxPooling1D(pool_size=2, padding="same")(thresh1)
    conv2 = Conv1D(filters=128, kernel_size=2, padding="same", strides=1)(max_pool1)
    thresh2 = ThresholdedReLU(1e-6)(conv2)
    max_pool2 = MaxPooling1D(pool_size=2, padding="same")(thresh2)
    flatten = Flatten()(max_pool2)
    fc = Dense(64)(flatten)
    thresh_fc = ThresholdedReLU(1e-6)(fc)
    drop = Dropout(0.5)(thresh_fc)
    output = Dense(1, activation="sigmoid")(drop)
    model = Model(inputs=main_input, outputs=output)
    precision = as_keras_metric(tf.metrics.precision)
    recall = as_keras_metric(tf.metrics.recall)
    model.compile(
        loss="binary_crossentropy",
        optimizer="adam",
        metrics=["mae", "mean_squared_error", "acc", precision, recall],
    )
    return model
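Embedding(input_dim=128, ...) implies the inputs are sequences of ASCII code points padded to max_length; a rough preprocessing sketch (the exact encoding is an assumption, since the project's vectorizer is not shown):

import numpy as np

def encode_domains(domains, max_length=75):
    X = np.zeros((len(domains), max_length), dtype='int32')
    for i, d in enumerate(domains):
        codes = [ord(c) % 128 for c in d[:max_length]]
        X[i, :len(codes)] = codes
    return X

# X = encode_domains(['example.com', 'qwxz93k1.biz']); model.predict(X)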
Example #6
def _cnn(x_shape):
    """https://github.com/lykaust15/Deep_learning_examples/blob/master/
    8.RBP_prediction_CNN/RBP_binding_site_prediction.ipynb"""
    model = Sequential([
        Conv1D(128, (10, ),
               activation='relu',
               input_shape=(x_shape[1], x_shape[2])),
        Dropout(0.25),
        MaxPooling1D(pool_size=(3, ), strides=(1, )),
        Conv1D(128, (10, ), activation='relu', padding='same'),
        Dropout(0.25),
        MaxPooling1D(pool_size=(3, ), strides=(1, )),
        Conv1D(256, (5, ), activation='relu', padding='same'),
        Dropout(0.25),
        GlobalAveragePooling1D(),
        Dropout(0.25),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(1, activation='sigmoid')
    ])

    model.compile(loss='binary_crossentropy',
                  optimizer=Adadelta(),
                  metrics=['accuracy'])

    return model
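The linked notebook predicts RNA-binding protein sites from one-hot sequences; a smoke-test sketch with an assumed input shape:

x_shape = (None, 101, 4)   # (batch, sequence length, one-hot channels), assumed
model = _cnn(x_shape)
model.summary()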
Example #7
def build_cnn_lstm_attn(local_steps, global_steps, n_features, lr, loc_f1,
                        loc_k1, loc_p1, loc_drop1, glob_f1, glob_k1, glob_p1,
                        glob_f2, glob_k2, glob_p2, glob_drop1, n_neurons1,
                        n_neurons2, n_neurons3, n_neurons4, n_neurons5, drop1,
                        drop2, reg):
    inputA = Input(shape=(local_steps, n_features))
    inputB = Input(shape=(global_steps, n_features))
    local = Conv1D(loc_f1, loc_k1, padding='same', activation='relu')(inputA)
    local = Conv1D(loc_f1, loc_k1, padding='same', activation='relu')(local)
    local = MaxPooling1D(loc_p1)(local)
    local = LSTM(n_neurons1, dropout=loc_drop1,
                 kernel_regularizer=l2(reg))(local)

    glob = Conv1D(glob_f1, glob_k1, padding='same', activation='relu')(inputB)
    glob = Conv1D(glob_f1, glob_k1, padding='same', activation='relu')(glob)
    glob = MaxPooling1D(glob_p1)(glob)
    glob = Conv1D(glob_f2, glob_k2, padding='same', activation='relu')(glob)
    glob = MaxPooling1D(glob_p2)(glob)
    glob = LSTM(n_neurons2, dropout=glob_drop1,
                kernel_regularizer=l2(reg))(glob)
    joined = Attention()([local, glob])
    joined = Dense(n_neurons3, activation='relu',
                   kernel_regularizer=None)(joined)
    joined = Dropout(drop1)(joined)
    joined = Dense(n_neurons4, activation='relu',
                   kernel_regularizer=None)(joined)
    joined = Dropout(drop2)(joined)
    joined = Dense(n_neurons5, activation='relu',
                   kernel_regularizer=None)(joined)
    out = Dense(1, activation='sigmoid')(joined)
    model = Model(inputs=[inputA, inputB], outputs=out)
    opt = Adam(lr=lr)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=metrics)
    return model
Example #8
def identity_block_1d(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    conv_name_base = 'res_1d' + str(stage) + block + '_branch'
    bn_name_base = 'bn_1d' + str(stage) + block + '_branch'

    x = Conv1D(filters1, (1), name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv1D(filters2,
               kernel_size,
               padding='same',
               name=conv_name_base + '2b')(x)
    x = BatchNormalization(name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv1D(filters3, (1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    return x
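Because the shortcut adds input_tensor back unchanged, the last entry of filters must equal the input's channel count; a minimal sketch with assumed shapes:

from tensorflow.keras.layers import Input

inputs = Input(shape=(64, 256))                # 256 channels, assumed
x = identity_block_1d(inputs, kernel_size=3,
                      filters=[64, 64, 256],   # filters3 matches the input channels
                      stage=2, block='a')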
Example #9
def build_sep_cnn_lstm(n_steps,
                       n_features,
                       lr,
                       n_filters,
                       n_filters2,
                       k_size,
                       p_size,
                       n_neurons1,
                       n_neurons2,
                       n_neurons3,
                       drop1,
                       drop2,
                       reg=None):
    inp = Input(shape=(n_steps, n_features))
    rnn = LSTM(n_neurons1, return_sequences=True, kernel_regularizer=reg)(inp)
    rnn = LSTM(n_neurons2)(rnn)
    rnn = Dropout(drop1)(rnn)
    cnn = Conv1D(n_filters, k_size, padding='same', activation='relu')(inp)
    cnn = MaxPooling1D(p_size)(cnn)
    cnn = Conv1D(n_filters2,
                 k_size,
                 padding='same',
                 activation='relu',
                 kernel_regularizer=reg)(cnn)
    cnn = MaxPooling1D(p_size)(cnn)
    cnn = Flatten()(cnn)
    cnn = Dropout(drop2)(cnn)
    rnn = concatenate([rnn, cnn])
    rnn = Dense(n_neurons3, activation='relu', kernel_regularizer=reg)(rnn)
    out = Dense(1, activation='sigmoid')(rnn)
    model = Model(inp, out)
    opt = Adam(lr=lr)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=metrics)
    return model
Example #10
def cnn_trans(local_steps, global_steps, n_features, lr, loc_f1, loc_k1,
              loc_p1, glob_f1, glob_k1, glob_p1, glob_f2, glob_k2, glob_p2,
              n_neurons1, n_neurons2, n_neurons3, drop1, drop2, num_heads,
              key_dim):
    inputA = Input(shape=(local_steps, n_features))
    inputB = Input(shape=(global_steps, n_features))
    local = Conv1D(loc_f1, loc_k1, padding='same', activation='relu')(inputA)
    local = Conv1D(loc_f1, loc_k1, padding='same', activation='relu')(local)
    local = MaxPooling1D(loc_p1)(local)

    glob = Conv1D(glob_f1, glob_k1, padding='same', activation='relu')(inputB)
    glob = Conv1D(glob_f1, glob_k1, padding='same', activation='relu')(glob)
    glob = MaxPooling1D(glob_p1)(glob)
    glob = Conv1D(glob_f2, glob_k2, padding='same', activation='relu')(glob)
    glob = MaxPooling1D(glob_p2)(glob)

    joined = MultiHeadAttention(num_heads, key_dim,
                                kernel_regularizer='l2')(local, glob)
    joined = Flatten()(joined)
    joined = Dense(n_neurons1, activation='relu',
                   kernel_regularizer='l2')(joined)
    joined = Dropout(drop1)(joined)
    joined = Dense(n_neurons2, activation='relu',
                   kernel_regularizer=None)(joined)
    joined = Dropout(drop2)(joined)
    joined = Dense(n_neurons3, activation='relu',
                   kernel_regularizer=None)(joined)
    out = Dense(1, activation='sigmoid')(joined)
    model = Model(inputs=[inputA, inputB], outputs=out)
    opt = Adam(lr=lr)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=metrics)
    return model
Example #11
    def _cnn_maxpool_multifilter(self, name: str) -> Model:
        """https://richliao.github.io/supervised/classification/2016/11/26/textclassifier-convolutional/
        """
        convs = []
        filter_sizes = [3, 4, 5]

        _inputs = Input((self.maxlen, ), name='input')
        l_embed = Embedding(input_dim=self.input_dim,
                            output_dim=self.embed_dim,
                            input_length=self.maxlen,
                            name='embedding')(_inputs)

        for fsz in filter_sizes:
            l_conv = Conv1D(filters=self.conv_filters,
                            kernel_size=fsz,
                            activation='relu')(l_embed)
            l_pool = MaxPool1D(self.conv_pool_size)(l_conv)
            convs.append(l_pool)

        l_merge = Concatenate(axis=1)(convs)
        l_cov1 = Conv1D(filters=self.conv_filters,
                        kernel_size=self.conv_kernel_size,
                        activation='relu')(l_merge)
        l_pool1 = MaxPool1D(pool_size=self.conv_pool_size)(l_cov1)
        l_cov2 = Conv1D(filters=self.conv_filters,
                        kernel_size=self.conv_kernel_size,
                        activation='relu')(l_pool1)
        l_pool2 = GlobalMaxPool1D()(l_cov2)
        l_flat = Flatten()(l_pool2)
        l_dense = Dense(self.units, activation='relu')(l_flat)
        _preds = Dense(self.classes, activation='sigmoid', name='fc1')(l_dense)

        return Model(inputs=_inputs, outputs=_preds, name=name)
Example #12
def crnn_model(width=100,
               n_vars=6,
               n_classes=7,
               conv_kernel_size=5,
               conv_filters=3,
               lstm_units=3):
    input_shape = (width, n_vars)
    model = Sequential()
    model.add(
        Conv1D(filters=conv_filters,
               kernel_size=conv_kernel_size,
               padding='valid',
               activation='relu',
               input_shape=input_shape))
    model.add(
        Conv1D(filters=conv_filters,
               kernel_size=conv_kernel_size,
               padding='valid',
               activation='relu'))
    model.add(LSTM(units=lstm_units, dropout=0.1, recurrent_dropout=0.1))
    model.add(Dense(n_classes, activation="softmax"))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    return model
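A quick smoke test with the builder's defaults (the batch data here is arbitrary):

import numpy as np

model = crnn_model()                              # width=100, n_vars=6, n_classes=7
x = np.random.rand(2, 100, 6).astype('float32')
print(model.predict(x).shape)                     # (2, 7)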
Example #13
def define_model():
    input_shape = (99, 13)
    output_shape = 35
    activation = 'relu'

    input_layer = keras.Input(shape=input_shape)

    h = Conv1D(256, 5, activation=activation, padding='same')(input_layer)
    h = BatchNormalization()(h)

    h = Conv1D(256, 5, activation=activation, padding='same')(h)
    #h = BatchNormalization()(h)
    h = MaxPool1D(3)(h)

    h = Conv1D(512, 5, activation=activation, padding='same')(h)
    #h = BatchNormalization()(h)
    h = Dropout(0.35)(h)

    h = Conv1D(512, 5, activation=activation, padding='same')(h)
    h = GlobalAveragePooling1D()(h)
    h = Dropout(0.5)(h)

    output_layer = Dense(output_shape, activation='softmax')(h)

    model = keras.Model(inputs=input_layer, outputs=output_layer)
    return model
Example #14
def get_locnet(locnet_inputs, output_shape):
    # Define filters
    c1 = 128
    c2 = 64
    c3 = 64

    # CNN
    inputs = locnet_inputs
    x1 = Conv1D(c1, (8), padding='same')(inputs)
    x1 = BatchNormalization()(x1)
    x1 = Activation('relu')(x1)
    x1 = MaxPool1D(2)(x1)
    x1 = Conv1D(c2, (5), padding='same')(x1)
    x1 = BatchNormalization()(x1)
    x1 = Activation('relu')(x1)
    x1 = MaxPool1D(2)(x1)
    x1 = Conv1D(c3, (3), padding='same')(x1)
    x1 = BatchNormalization()(x1)
    x1 = Activation('relu')(x1)
    x1 = MaxPool1D(2)(x1)

    x1 = Flatten()(x1)
    # Output Layer
    locnet_outputs = Dense(output_shape, activation='tanh')(x1)

    # Define model
    locnet = Model(inputs=locnet_inputs,
                   outputs=locnet_outputs,
                   name="Localization Network")

    return locnet
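The tanh output and the model name suggest the localization branch of a spatial transformer network; a call sketch (the input dimensions and six-parameter output are assumptions):

from tensorflow.keras.layers import Input

locnet_in = Input(shape=(256, 1))               # single-channel signal, assumed
locnet = get_locnet(locnet_in, output_shape=6)  # e.g. six affine transform parameters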
Example #15
def evaluate_model(trainX, trainy, testX, testy):
    global best_accuracy
    verbose, epochs, batch_size = 0, 10, 32
    # n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]
    model = Sequential()
    #     model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps,n_features)))  (x_train.shape[1],1)
    model.add(
        Conv1D(filters=64,
               kernel_size=3,
               activation='relu',
               input_shape=(trainX.shape[1], 1)))
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
    model.add(Dropout(0.5))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dense(trainy.shape[1], activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # fit network
    model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)
    # evaluate model
    _, accuracy = model.evaluate(testX,
                                 testy,
                                 batch_size=batch_size,
                                 verbose=0)
    if accuracy > best_accuracy:
        best_accuracy = accuracy
        model.save(BestModleFilePath)
    return accuracy
Example #16
def model_v2(top_layer_units):
    model = Sequential()

    model.add(
        TimeDistributed(Conv1D(filters=16,
                               kernel_size=4,
                               padding='same',
                               activation=tf.nn.relu,
                               data_format='channels_last'),
                        input_shape=(NUM_MFCC, NUM_FRAMES, 1)))

    model.add(
        TimeDistributed(
            Conv1D(filters=8,
                   kernel_size=2,
                   padding='same',
                   activation=tf.nn.relu)))
    model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(50, return_sequences=True))
    model.add(Dropout(0.3))
    model.add(Flatten())
    model.add(Dense(units=512, activation=tf.nn.tanh))
    model.add(Dense(units=256, activation=tf.nn.tanh))
    model.add(
        Dense(units=top_layer_units,
              activation=tf.nn.softmax,
              name='top_layer'))
    return model
Example #17
    def encode_embedding_input(input_layer,
                               type='subject',
                               reduce_size=False,
                               reduce_size_n=32
                               ):

        conv1_size = CONV_SIZES[type][0]

        if noise:
            input_layer = GaussianNoise(stddev=.001)(input_layer)

        if normalization:
            input_layer = BatchNormalization()(input_layer)

        if reduce_size:
            input_layer = Dense(reduce_size_n, activation="sigmoid")(input_layer)

        conv1 = Conv1D(conv1_size, (2,), activation=mish, padding='same')(input_layer)
        pool1 = MaxPooling1D((2,), padding='same')(conv1)

        if type in ['subject', 'object']:
            return Flatten()(pool1)

        conv2_size = CONV_SIZES[type][1]
        conv2 = Conv1D(conv2_size, (2,), activation=mish, padding='same')(pool1)
        pool2 = MaxPooling1D((2,), padding='same')(conv2)
        return Flatten()(pool2)
Example #18
def create_2_layer_CNN():
    model = Sequential()
    model.add(
        Conv1D(nodes,
               kernel_size=3,
               strides=1,
               activation='relu',
               input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling1D(pool_size=2))
    model.add(Dropout(dropRate))
    #
    model.add(Conv1D(32, 3, activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling1D(pool_size=2))
    model.add(Dropout(dropRate))

    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    # model.add(BatchNormalization())

    model.add(Dense(128, activation='relu'))
    # model.add(BatchNormalization())

    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])
    return model
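create_2_layer_CNN reads nodes, input_shape, dropRate, and num_classes from module scope; a sketch with placeholder values (all four are assumptions):

from tensorflow import keras

nodes, dropRate, num_classes = 64, 0.25, 10
input_shape = (187, 1)   # e.g. fixed-length single-channel segments
model = create_2_layer_CNN()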
Example #19
    def _cnn_maxpool(self, name: str) -> Model:
        """https://richliao.github.io/supervised/classification/2016/11/26/textclassifier-convolutional/
        """
        return Sequential([
            InputLayer(input_shape=(self.maxlen, ), name='input'),
            Embedding(input_dim=self.input_dim,
                      output_dim=self.embed_dim,
                      input_length=self.maxlen,
                      name='embedding'),
            Conv1D(filters=self.conv_filters,
                   kernel_size=self.conv_kernel_size,
                   activation='relu'),
            MaxPool1D(pool_size=self.conv_pool_size),
            Conv1D(filters=self.conv_filters,
                   kernel_size=self.conv_kernel_size,
                   activation='relu'),
            MaxPool1D(pool_size=self.conv_pool_size),
            Conv1D(filters=self.conv_filters,
                   kernel_size=self.conv_kernel_size,
                   activation='relu'),
            GlobalMaxPool1D(),
            Flatten(),
            Dense(self.units, activation='relu'),
            Dense(self.classes, activation='sigmoid', name='fc1'),
        ], name=name)
Example #20
def make_cnn_model(vocab_size=10000, embed_dim=8, input_seq_length=20):
    """
    I am the builder function for the CNN Model.
    :param vocab_size: size of the vocabulary of the embedding, should be size of vocab of the vectorizer
    :param embed_dim: how many dimensions to use for the vector embedding
    :param input_seq_length: how long the sequence of inputs will be
    :return: Keras Model
    """
    x = inp = Input(shape=(None, ), dtype="int64")
    x = Embedding(
        input_dim=vocab_size,
        output_dim=embed_dim,
        input_length=input_seq_length,
    )(x)
    x = Conv1D(filters=64, kernel_size=3, strides=2, activation="linear")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv1D(filters=64, kernel_size=3, strides=2, activation="linear")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv1D(filters=64, kernel_size=3, strides=2, activation="linear")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(units=128, activation="linear")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    out = Dense(1, activation="sigmoid")(x)
    return Model(inputs=[inp], outputs=[out], name="cnn_model")
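With the default settings the three stride-2 convolutions shrink a 20-token sequence to 9, 4, then 1 step before global pooling; a smoke-test sketch:

import numpy as np

model = make_cnn_model()
tokens = np.random.randint(0, 10000, size=(4, 20))
print(model.predict(tokens).shape)   # (4, 1)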
Example #21
    def decode_embedding_input(latent, shape, name, large=False):
        conv1_size = 128 if large else 64

        latent = Reshape((1, INNER_SIZE))(latent)
        conv1 = Conv1D(conv1_size, (1,), activation='mish', padding='same', name=name + '_conv1')(latent)
        up1 = UpSampling1D(shape[0], name=name + '_up1')(conv1)
        conv2 = Conv1D(shape[1], (6,), activation='mish', padding='same', name=name + '_conv2')(up1)
        return conv2
Example #22
def res_block(x_in, num_filters, momentum=0.8, training=False):
    x = Conv1D(num_filters, kernel_size=3, padding='same')(x_in)
    x = BatchNormalization(momentum=momentum)(x, training=training)
    x = PReLU(shared_axes=[1, 2])(x)
    x = Conv1D(num_filters, kernel_size=3, padding='same')(x)
    x = BatchNormalization(momentum=momentum)(x, training=training)
    x = Add()([x_in, x])
    return x
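The Add shortcut requires num_filters to equal the channel count of x_in, and the training flag is forwarded to both batch-norm calls; a sketch with assumed shapes:

from tensorflow.keras.layers import Input

x_in = Input(shape=(256, 64))         # 64 channels, assumed
x = res_block(x_in, num_filters=64)   # must match the input channels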
Example #23
def create_convolutional_decoder(encoder):
    conv1 = Conv1D(filters=32, kernel_size=42, activation='relu')(encoder)
    conv1 = UpSampling1D(size=10)(conv1)
    conv1 = BatchNormalization()(conv1)
    conv2 = Conv1D(filters=64, kernel_size=59, activation='relu')(conv1)
    conv2 = MaxPooling1D(pool_size=10)(conv2)
    conv2 = BatchNormalization()(conv2)

    return conv2
Example #24
File: cnn.py Project: mattscchan/551p4
    def graph(self,
              type='shallow',
              filter_size=3,
              filter_num=100,
              num_filter_block=None,
              dropout_rate=0):
        def ConvBlock(model, filters, kernel_size=filter_size):
            model.add(BatchNormalization())
            model.add(
                Conv1D(filters=filters,
                       kernel_size=kernel_size,
                       padding='same',
                       activation='relu'))
            model.add(BatchNormalization())
            model.add(
                Conv1D(filters=filters,
                       kernel_size=kernel_size,
                       padding='same',
                       activation='relu'))

        if type == 'shallow' or type == 'scnn':
            self.model.add(
                Conv1D(kernel_size=filter_size,
                       strides=1,
                       filters=filter_num,
                       padding='valid',
                       activation='relu'))
            self.model.add(
                MaxPooling1D(pool_size=self.data_sequence - filter_size,
                             strides=1,
                             padding='valid'))
            self.model.add(Flatten())
            self.model.add(Dropout(rate=dropout_rate))
            self.model.add(Dense(self.num_labels, activation='softmax'))

        elif type == 'deep' or type == 'dcnn':
            self.model.add(
                Conv1D(kernel_size=filter_size,
                       filters=filter_num,
                       padding='same',
                       activation='relu'))

            cur_filter_num = filter_num
            for _, num_blocks in enumerate(num_filter_block):
                for i in range(num_blocks):
                    ConvBlock(model=self.model,
                              kernel_size=filter_size,
                              filters=cur_filter_num)
                self.model.add(MaxPooling1D(pool_size=3, strides=2))
                cur_filter_num *= 2
            self.model.add(Flatten())

            self.model.add(Dense(2048, activation='relu', name='Dense1'))
            self.model.add(Dense(2048, activation='relu', name='Dense2'))
            self.model.add(
                Dense(self.num_labels, activation='softmax', name='Output'))
            self.model.summary()
Example #25
    def __init__(self,
                 units,
                 input_size,
                 niter,
                 kbinmap,
                 args,
                 droprate=0.05,
                 regrate=1e-5):
        super(RIMHybrid, self).__init__()
        self.nc, self.bs = args.nc, args.bs
        nc, bs = args.nc, args.bs
        self.niter = niter
        print("number of iterations : ", niter)
        self.beta_1, self.beta_2 = 0.9, 0.999
        self.lr = 0.1
        self.eps = 1e-7
        self.kbinmap = tf.constant(kbinmap)
        self.kbinmapflat = tf.constant(kbinmap.flatten())

        self.layers_in = [
            Dense(units, activation='tanh', kernel_regularizer=l2reg(regrate))
            for i in range(niter)
        ]
        self.drop_in = [Dropout(rate=droprate) for i in range(niter)]
        self.layers_out = [
            Dense(2 * nc,
                  activation='linear',
                  kernel_regularizer=l2reg(regrate)) for i in range(niter)
        ]
        self.drop_out = [Dropout(rate=droprate) for i in range(niter)]
        self.conv_in = [
            Conv1D(1, 1, activation='linear') for i in range(niter)
        ]
        self.conv_out = [Conv1D(1, 1) for i in range(niter)]
        self.conv_relu = [
            Conv1D(1, 1, activation='relu') for i in range(niter)
        ]

        self.layers1d = [[self.layers_in[i], self.layers_out[i], \
                         self.drop_in[i], self.drop_out[i],
                         self.conv_in[i], self.conv_out[i],
                         self.conv_relu[i]] for i in range(niter)]

        cell_size = input_size
        self.input_layer = Conv3D(input_size,
                                  kernel_size=5,
                                  trainable=True,
                                  padding='SAME',
                                  input_shape=(None, nc, nc, nc, 2),
                                  activation='tanh')

        self.cell = ConvLSTM3DCell(cell_size, kernel_size=5, padding='SAME')
        self.output_layer = Conv3D(1,
                                   kernel_size=5,
                                   trainable=True,
                                   padding='SAME',
                                   input_shape=(None, nc, nc, nc, cell_size))
Example #26
    def encode_embedding_input(input_layer):
        conv1_size, conv2_size = (CONV_SIZES[2], CONV_SIZES[1])

        input_layer = GaussianNoise(stddev=.1)(input_layer)
        conv1 = Conv1D(conv1_size, (2,), activation='mish', padding='same')(input_layer)
        pool1 = MaxPooling1D((2,), padding='same')(conv1)
        conv2 = Conv1D(conv2_size, (2,), activation='mish', padding='same')(pool1)
        pool2 = MaxPooling1D((2,), padding='same')(conv2)
        return Flatten()(pool2)
Example #27
def mlstm_fcn(config: Union[str,
                            SoccerSageConfig] = SoccerSageConfig()) -> Model:
    '''Load and train the MLSTM-FCN model.
    Source: https://github.com/titu1994/MLSTM-FCN/blob/master/eeg_model.py

    Args:
        config: The configuration data.

    Returns:
        Model: A model in TensorFlow.
    '''
    if isinstance(config, str):
        config = SoccerSageConfig.from_yaml(config)

    n_classes = config.num_results
    if config.classifier_type == 'total_goals':
        n_classes = config.num_goals

    ip = Input(shape=(config.time_steps, config.feature_size))

    # The Permute/Masking preprocessing from the MLSTM-FCN source is left
    # unused here: the LSTM branch reads the raw input directly.
    # x = Permute((2, 1))(ip)
    #x = Masking(mask_value=config.masking_value)(x)
    x = LSTM(64)(ip)
    x = Dropout(0.8)(x)

    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(ip)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(n_classes, activation='softmax')(x)

    classifier = Model(ip, out)

    classifier.compile(
        loss='categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(lr=config.learning_rate),
        metrics=['accuracy', ranked_probability_score])

    if config.pretrained_classifier:
        files = SoccerSageFiles(config)
        classifier.load_weights(files.model_weights)

    return classifier
Example #28
    def __init__(self, vocab_size, embedding_dim=20):
        super().__init__()
        self.embedding = Embedding(vocab_size + 1, embedding_dim)
        self.conv1d_32 = Conv1D(32, 3, activation='relu')
        self.conv1d_64 = Conv1D(64, 3, activation='relu')
        self.conv1d_128 = Conv1D(128, 3, activation='relu')
        self.max_pooling = MaxPooling1D(4)
        self.global_max_pooling = GlobalMaxPooling1D()
        self.dense = Dense(4, activation='softmax')
Example #29
def get_cnn1d_tx_relu(output_size, img_height, img_width, show=True):

    # value with same Tx would use in same channel
    model_input = Input(shape=(img_height * img_width, ), name='Main_input')
    x = Reshape((img_height, img_width), name='Reshape_1')(model_input)
    x = Permute((2, 1), name='Permute_1')(x)
    x = Conv1D(filters=256,
               kernel_size=11,
               strides=1,
               padding='same',
               activation='relu',
               name='Conv1D_relu_1')(x)
    x = Conv1D(filters=128,
               kernel_size=5,
               strides=1,
               padding='same',
               activation='relu',
               name='Conv1D_relu_2')(x)
    x = Conv1D(filters=64,
               kernel_size=3,
               strides=1,
               padding='same',
               activation='relu',
               name='Conv1D_relu_3')(x)
    x = Conv1D(filters=32,
               kernel_size=3,
               strides=1,
               padding='same',
               activation='relu',
               name='Conv1D_relu_4')(x)
    x = Conv1D(filters=16,
               kernel_size=3,
               strides=1,
               padding='same',
               activation='relu',
               name='Conv1D_relu_5')(x)
    # x = Conv1D(filters=8, kernel_size=48, strides=1,
    #            padding='same', activation='relu',
    #            name='Conv1D_relu_6')(x)
    x = Flatten(name='Flatten_1')(x)
    x = Dense(512, activation='relu', name='Dense_relu_1')(x)
    x = Dense(512, activation='relu', name='Dense_relu_2')(x)
    # x = Dense(512, activation='relu', name='Dense_relu_3')(x)
    cnn1d_output = Dense(output_size,
                         activation='linear',
                         name='Output_Dense_linear')(x)
    cnn1d = Model(inputs=model_input,
                  outputs=cnn1d_output,
                  name='CNN1D_Tx_relu')

    if show:
        print('CNN1D Tx summary:')
        cnn1d.summary()
        print()

    return cnn1d
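After the Reshape/Permute pair the model treats img_width as the time axis and img_height as the channel axis; a call sketch with assumed dimensions:

model = get_cnn1d_tx_relu(output_size=10, img_height=8, img_width=64, show=False)
# expects inputs flattened to shape (batch, 8 * 64)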
Example #30
def create_convolutional_encoder(input_tensor):

    reshape = Reshape(target_shape=(INPUT_LENGTH, 1), input_shape=(INPUT_LENGTH,))(input_tensor)
    conv1 = Conv1D(filters=32, kernel_size=30, activation='relu', input_shape=(INPUT_LENGTH, 1))(reshape)
    conv1 = MaxPooling1D(pool_size=10)(conv1)
    conv1 = BatchNormalization()(conv1)
    conv2 = Conv1D(filters=64, kernel_size=30, activation='relu')(conv1)
    conv2 = MaxPooling1D(pool_size=10)(conv2)
    conv2 = BatchNormalization()(conv2)

    return conv2