def baseline_model(seq_dim=3):
    input_1 = Input(shape=(None, seq_dim))
    input_2 = Input(shape=(None, seq_dim))

    base_model = encoder(seq_dim=seq_dim)

    x1 = base_model(input_1)
    x2 = base_model(input_2)

    x1 = Concatenate(axis=-1)([GlobalMaxPool1D()(x1), GlobalAvgPool1D()(x1)])
    x2 = Concatenate(axis=-1)([GlobalMaxPool1D()(x2), GlobalAvgPool1D()(x2)])

    x3 = Subtract()([x1, x2])
    x3 = Multiply()([x3, x3])

    x = Multiply()([x1, x2])

    x = Concatenate(axis=-1)([x, x3])
    x = Dropout(0.1)(x)
    x = Dense(100, activation="relu")(x)
    x = Dropout(0.1)(x)
    out = Dense(1, activation="sigmoid")(x)

    model = Model([input_1, input_2], out)

    model.compile(loss="binary_crossentropy", metrics=[acc], optimizer=Adam(0.0001))

    model.summary()

    return model
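This snippet assumes an encoder factory and an acc metric defined elsewhere in the module; a minimal sketch of plausible stand-ins (layer sizes and the metric alias are assumptions, not the original definitions):

from tensorflow.keras.layers import Conv1D, Input
from tensorflow.keras.models import Model
from tensorflow.keras.metrics import binary_accuracy as acc  # assumed alias

def encoder(seq_dim=3):
    # Hypothetical encoder: stacked 1D convolutions that preserve the time
    # axis, so the global max/average pooling above still applies.
    inp = Input(shape=(None, seq_dim))
    x = Conv1D(64, 3, padding="same", activation="relu")(inp)
    x = Conv1D(128, 3, padding="same", activation="relu")(x)
    return Model(inp, x)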
Example No. 2
    def __init__(self,
                 vocab_size,
                 embedd_dim,
                 n_classes,
                 n_filters,
                 maxlen,
                 n_units,
                 dropout,
                 training=False,
                 name="dcnn"):

        super(DCNN, self).__init__(name=name)
        self.embedding = Embedding(vocab_size, embedd_dim)
        self.dropout = Dropout(dropout)
        self.conv_1 = Conv1D(n_filters,
                             kernel_size=2,
                             padding="valid",
                             activation="relu")
        self.pool_1 = GlobalMaxPool1D()
        self.conv_2 = Conv1D(n_filters,
                             kernel_size=3,
                             padding="valid",
                             activation="relu")
        self.pool_2 = GlobalMaxPool1D()
        self.conv_3 = Conv1D(n_filters,
                             kernel_size=4,
                             padding="valid",
                             activation="relu")
        self.pool_3 = GlobalMaxPool1D()
        self.dense_1 = Dense(n_units, activation="relu")
        if n_classes == 2:
            self.outputs_layer = Dense(1, activation="sigmoid")
        else:
            self.outputs_layer = Dense(n_classes, activation="softmax")
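Only the constructor is shown; a hedged sketch of a matching call method (the wiring is an assumption following the usual parallel multi-kernel pattern, and tensorflow is assumed imported as tf):

    def call(self, inputs, training=False):
        # Assumed forward pass: embed, run the three kernel sizes in
        # parallel, global-max-pool each branch, then concatenate and classify.
        x = self.embedding(inputs)
        x_1 = self.pool_1(self.conv_1(x))
        x_2 = self.pool_2(self.conv_2(x))
        x_3 = self.pool_3(self.conv_3(x))
        merged = tf.concat([x_1, x_2, x_3], axis=-1)
        merged = self.dropout(merged, training=training)
        merged = self.dense_1(merged)
        return self.outputs_layer(merged)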
Example No. 3
def model_def(x, y, z):
    input_1 = Input(shape=(x, ))
    embedding_1 = Embedding(y, 128, input_length=x)(input_1)

    conv_1 = Conv1D(32, 1, use_bias=True, padding='valid',
                    activation='relu')(embedding_1)
    normalized_1 = BatchNormalization()(conv_1)
    drop_out_1 = Dropout(0.5)(normalized_1)
    pooling_1 = GlobalMaxPool1D()(drop_out_1)

    conv_2 = Conv1D(32, 2, use_bias=True, padding='valid',
                    activation='relu')(embedding_1)
    normalized_2 = BatchNormalization()(conv_2)
    drop_out_2 = Dropout(0.5)(normalized_2)
    pooling_2 = GlobalMaxPool1D()(drop_out_2)

    conv_3 = Conv1D(32, 3, use_bias=True, padding='valid',
                    activation='relu')(embedding_1)
    normalized_3 = BatchNormalization()(conv_3)
    drop_out_3 = Dropout(0.5)(normalized_3)
    pooling_3 = GlobalMaxPool1D()(drop_out_3)

    merged_1 = concatenate([pooling_1, pooling_2, pooling_3])

    dense_1 = Dense(2 * len(z), activation='relu', use_bias=True)(merged_1)
    dense_2 = Dense(2 * len(z), activation='relu', use_bias=True)(dense_1)
    dense_3 = Dense(2 * len(z), activation='relu', use_bias=True)(dense_2)

    dense_4 = Dense(len(z), activation='softmax', use_bias=True)(dense_3)
    final_model = Model(inputs=[input_1], outputs=[dense_4])
    final_model.compile(optimizer='adam', loss='categorical_crossentropy')

    return final_model
Example No. 4
    def build(self, input_shape) -> None:
        self.embedding: Embedding = Embedding(input_dim=self.vocabulary_size,
                                              output_dim=self.embedding_size,
                                              input_length=self.sentence_len,
                                              trainable=True)
        self.conv_1: Conv1D = Conv1D(filters=self.conv_filter,
                                     kernel_size=3,
                                     activation="relu",
                                     name="conv_1")
        self.conv_2: Conv1D = Conv1D(filters=self.conv_filter,
                                     kernel_size=4,
                                     activation="relu",
                                     name="conv_2")
        self.conv_3: Conv1D = Conv1D(filters=self.conv_filter,
                                     kernel_size=5,
                                     activation="relu",
                                     name="conv_3")

        if not self.global_max_pool:
            self.pool_1: MaxPool1D = MaxPool1D(pool_size=self.pool_size,
                                               strides=1,
                                               name="pool_1")
            self.pool_2: MaxPool1D = MaxPool1D(pool_size=self.pool_size,
                                               strides=1,
                                               name="pool_2")
            self.pool_3: MaxPool1D = MaxPool1D(pool_size=self.pool_size,
                                               strides=1,
                                               name="pool_3")
        else:
            self.pool_1: GlobalMaxPool1D = GlobalMaxPool1D(name="pool_1")
            self.pool_2: GlobalMaxPool1D = GlobalMaxPool1D(name="pool_2")
            self.pool_3: GlobalMaxPool1D = GlobalMaxPool1D(name="pool_3")

        self.concatenate: Concatenate = Concatenate(axis=1)
        self.flatten: Flatten = Flatten()

        self.dropout_1: Dropout = Dropout(self.drop_rate, name="dropout_1")
        self.dense1 = Dense(self.dense_size,
                            activation="sigmoid",
                            kernel_regularizer=regularizers.l1_l2(
                                self.l1_regularization,
                                self.l2_regularization))
        self.dropout_2: Dropout = Dropout(self.drop_rate, name="dropout_2")
        self.dense: Dense = Dense(self.class_num,
                                  activation="softmax",
                                  kernel_regularizer=regularizers.l1_l2(
                                      self.l1_regularization,
                                      self.l2_regularization))
        super(TextCNN, self).build(input_shape)
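build() defines the layers but call() is not shown; a minimal sketch of a forward pass consistent with those layers (an assumption, not the original):

    def call(self, inputs, training=False):
        # Assumed forward pass: embed, run the three convolutions in
        # parallel, pool each branch, concatenate, then the two dense layers.
        x = self.embedding(inputs)
        pooled = self.concatenate([self.pool_1(self.conv_1(x)),
                                   self.pool_2(self.conv_2(x)),
                                   self.pool_3(self.conv_3(x))])
        x = self.flatten(pooled)
        x = self.dropout_1(x, training=training)
        x = self.dense1(x)
        x = self.dropout_2(x, training=training)
        return self.dense(x)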
Example No. 5
    def build_model(self):
        input = Input((self.max_len,))
        query_embedding = Embedding(self.max_features, self.embedding_dims, input_length=self.max_len)(input)
        value_embedding = Embedding(self.max_features, self.embedding_dims, input_length=self.max_len)(input)
        # The original loop over [3, 3, 3] overwrote query_conv/value_conv on
        # every pass, so only the last (identical) pair was used; a single
        # kernel_size=3 convolution pair is equivalent.
        query_conv = Conv1D(128, 3, activation='relu')(query_embedding)
        value_conv = Conv1D(128, 3, activation='relu')(value_embedding)
        query_value_attention = Attention(self.max_len)([query_conv, value_conv])
        query_encoding = GlobalMaxPool1D()(query_conv)
        query_value_attention = GlobalMaxPool1D()(query_value_attention)
        concate = Concatenate(axis=-1)([query_encoding, query_value_attention])
        output = Dense(self.class_num, activation=self.activation)(concate)
        model = Model(inputs=input, outputs=output)
        return model
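Example No. 6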
    def __init__(self, specs):
        super(CNNmodel, self).__init__()
        n_cnn, kernel_sizes, filters, n_classes = specs
        self.model = tf.keras.Sequential()
        for i in range(n_cnn):
            self.model.add(
                Conv1D(filters[i],
                       kernel_size=kernel_sizes[i],
                       activation=tf.keras.activations.relu,
                       padding='valid'))
            self.model.add(
                Conv1D(filters[i],
                       kernel_size=kernel_sizes[i],
                       activation=tf.keras.activations.relu,
                       padding='valid'))
            if i < (n_cnn - 1):
                self.model.add(MaxPool1D(pool_size=2))
                self.model.add(Dropout(0.1))
            else:
                self.model.add(GlobalMaxPool1D())
                self.model.add(Dropout(0.2))
        self.ffl_block = tf.keras.Sequential(name='ffl_block')
        self.ffl_block.add(
            Dense(64, activation=tf.keras.activations.relu, name="dense_1"))
        self.ffl_block.add(
            Dense(64, activation=tf.keras.activations.relu, name="dense_2"))
        self.ffl_block.add(
            Dense(n_classes,
                  activation=tf.keras.activations.sigmoid,
                  name="dense_3_ptbdb"))
    def _build_character_block(self,
                               block,
                               dropout=0.3,
                               filters=[64, 100],
                               kernel_size=[3, 3],
                               pool_size=[2, 2],
                               padding='valid',
                               activation='relu',
                               kernel_initializer='glorot_normal'):
        """
        Build block of neural network with convolutional layers to extract character level features

        Parameters
        ----------
        block : tensor
            output of the trainable Embedding layer block for character embeddings

        dropout : float
            dropout rate to use with the Dropout layer for reducing over-fitting

        filters : list of int
            number of output feature maps produced by the convolution with each kernel

        kernel_size : list of int
            sizes of the 1D kernels/convolution windows used in the convolution operations

        pool_size : list of int, optional
            pooling window sizes for the max-pooling applied after the convolutions

        padding : str
            mode by which to pad inputs before the convolution op. Default value = 'valid'

        activation : str
            activation function to use with the convolution layers. Default value = 'relu'

        kernel_initializer : str
            initializer applied to the kernel weights matrix. Default value = 'glorot_normal'


        Returns
        -------
        block : tensor
            output of the final Dense (fully-connected) layer, after the convolution
            and max-pooling ops have been applied to the input

        """

        for i in range(len(filters)):
            block = Conv1D(filters=filters[i],
                           kernel_size=kernel_size[i],
                           padding=padding,
                           activation=activation,
                           kernel_initializer=kernel_initializer)(block)

        block = Dropout(dropout)(block)
        # i retains the last loop index here, so the final pool_size entry is used
        block = MaxPooling1D(pool_size=pool_size[i])(block)

        block = GlobalMaxPool1D()(block)
        block = Dense(128, activation='relu')(block)
        return block
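A hedged usage sketch from inside the same class; the character-set size and word length here are placeholder assumptions:

        # Hypothetical call site; the block argument is the output of a
        # trainable character Embedding layer.
        chars_in = Input(shape=(50,), dtype='int32')    # assumed max word length
        chars_emb = Embedding(70, 32)(chars_in)         # assumed charset of 70, 32-d embeddings
        char_features = self._build_character_block(chars_emb)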
Example No. 8
def Conv1d_Model(weights, vocab_size, embedding_size, max_sen_len, num_classes):
    return Sequential(
        [
            Embedding(
                vocab_size,
                embedding_size,
                weights=[weights],
                trainable=False,
                input_shape=(max_sen_len,),
            ),
            Conv1D(128, 3, strides=1, padding="SAME", activation="relu"),
            Dropout(0.1),
            Conv1D(256, 3, strides=1, padding="SAME", activation="relu"),
            BatchNormalization(),
            Dropout(0.1),
            GlobalMaxPool1D(),
            BatchNormalization(),
            Dropout(0.2),
            Dense(64, activation="relu", name="relu_dens1"),
            Dropout(0.3),
            BatchNormalization(),
            Dense(32, activation="relu", name="relu_dense2"),
            Dense(num_classes, activation="softmax", name="softmax_dense"),
        ]
    )
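The factory returns an uncompiled Sequential; a hedged usage sketch (the vocabulary size and embedding weights are placeholders, not from the original):

import numpy as np

weights = np.zeros((10000, 300), dtype="float32")  # placeholder pretrained vectors
model = Conv1d_Model(weights, vocab_size=10000, embedding_size=300,
                     max_sen_len=100, num_classes=5)
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])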
Example No. 9
    def __init__(
        self,
        kernel_sizes: List[int] = None,
        filters: int = DEFAULT_FILTERS,
        dropout_rate: float = DEFAULT_DROPOUT_RATE,
        dense_layers: int = DEFAULT_DENSE_LAYERS,
        activation: str = DEFAULT_ACTIVATION,
        classes: int = DEFAULT_CLASSES,
    ) -> None:
        if kernel_sizes is None:
            kernel_sizes = copy(ConvolutionalNGramsModel.DEFAULT_KERNEL_SIZES)

        super(ConvolutionalNGramsModel, self).__init__()
        self._convolutions: List[Conv1D] = [
            Conv1D(filters=filters,
                   kernel_size=kernel_size,
                   activation=activation) for kernel_size in kernel_sizes
        ]
        self._pools: List[GlobalMaxPool1D] = [
            GlobalMaxPool1D() for _ in range(len(self._convolutions))
        ]
        self._stack: Concatenate = Concatenate(axis=1)
        self._dropout: Dropout = Dropout(rate=dropout_rate)
        self._dense: List[Dense] = ([
            Dense(units=(filters * len(kernel_sizes)) // (2**layer))
            for layer in range(1, dense_layers)
        ] if dense_layers > 1 else [])
        self._classification: Dense = Dense(units=classes,
                                            activation=activation)
        self._softmax: Softmax = Softmax(axis=1)
Example No. 10
    def __init__(self, args: dict):
        super(CnnYoonKim, self).__init__()

        self.embed_input = args['embed_input']
        self.embed_out = args['embedding']
        self.cnn_filters = args['cnn_filters']
        self.dropout_rate = args['dropout']
        self.hidden_units = args['hidden_units']
        self.out_dim = args['out_dim']

        self.embedding = Embedding(self.embed_input,
                                   self.embed_out,
                                   mask_zero=True)
        self.convolutions = [
            Conv1D(
                self.cnn_filters,
                x,
                activation='relu',
                kernel_constraint=tf.keras.constraints.MaxNorm(max_value=3.),
                padding='same') for x in args['kernel_size']
        ]
        self.gmp = GlobalMaxPool1D()
        self.dropout = Dropout(self.dropout_rate)
        self.fcn = Dense(self.hidden_units, 'relu')
        self.out_fcn = Dense(self.out_dim, 'softmax')
Example No. 11
    def __init__(self, model_config, training_config):
        super().__init__(model_config, training_config)
        self.update_parameters(model_config, training_config)
        nb_classes = len(model_config.list_classes)

        input_layer = Input(shape=(self.parameters["maxlen"],
                                   self.parameters["embed_size"]), )
        x = Bidirectional(
            GRU(self.parameters["recurrent_units"],
                return_sequences=True,
                dropout=self.parameters["dropout_rate"],
                recurrent_dropout=self.parameters["recurrent_dropout_rate"]))(
                    input_layer)
        x = Dropout(self.parameters["dropout_rate"])(x)
        x = Bidirectional(
            LSTM(self.parameters["recurrent_units"],
                 return_sequences=True,
                 dropout=self.parameters["dropout_rate"],
                 recurrent_dropout=self.parameters["recurrent_dropout_rate"]))(
                     x)
        x_a = GlobalMaxPool1D()(x)
        x_b = GlobalAveragePooling1D()(x)
        x = concatenate([x_a, x_b])
        x = Dense(self.parameters["dense_size"], activation="relu")(x)
        output_layer = Dense(nb_classes, activation="sigmoid")(x)
        self.model = Model(inputs=input_layer, outputs=output_layer)
Example No. 12
    def build(self, args, num_words, num_chars):
        self.scale = tf.constant(math.sqrt(args.attention_dim / args.heads))

        word_embeddings = tf.keras.Input(shape=[None, MorphoDataset.Dataset.EMBEDDING_SIZE], dtype=tf.float32)
        charseqs = tf.keras.Input(shape=[None], dtype=tf.int32)
        charseq_ids = tf.keras.Input(shape=[None], dtype=tf.int32)
        positional_encoding = tf.keras.Input(shape=[None, args.attention_dim], dtype=tf.float32)

        chars_embedded = Embedding(input_dim=num_chars, output_dim=args.cle_dim, mask_zero=False)(charseqs)
        convoluted = []
        for width in range(2, args.cnn_max_width + 1):
            hidden = chars_embedded
            for _ in range(args.cle_layers):
                hidden = Conv1D(args.cnn_filters, kernel_size=width, strides=1, padding='valid', activation=tf.nn.relu)(hidden)
            convoluted.append(GlobalMaxPool1D()(hidden))
        chars_hidden = concatenate(convoluted, axis=1)
        chars_hidden = Dense(args.we_dim, activation=tf.nn.tanh)(chars_hidden)
        char_embedding = Lambda(lambda tensors: tf.gather(*tensors))([chars_hidden, charseq_ids])  # avoid shadowing the outer args

        embedded = concatenate([word_embeddings, char_embedding], axis=2)
        embedded = Dense(args.attention_dim, activation=tf.nn.tanh)(embedded)
        embedded = add([embedded, positional_encoding])
        embedded = SpatialDropout1D(args.input_dropout)(embedded)

        x = embedded
        for _ in range(args.layers):
            x = self.attention_layer(x)

        predictions = []
        for tag in range(MorphoDataset.TAGS):
            bias_init = tf.constant_initializer(self.bias(tag))
            tag_prediction = Dense(units=MorphoDataset.TAG_SIZES[tag]-1, activation=None, bias_initializer=bias_init)(x)
            predictions.append(tag_prediction)

        self.model = tf.keras.Model(inputs=[word_embeddings, charseq_ids, charseqs, positional_encoding], outputs=predictions)
Example No. 13
def build_model_bert_lstm(max_seq_length):
    in_idA = Input(shape=(max_seq_length, ), name="input_idsA")
    in_maskA = Input(shape=(max_seq_length, ), name="input_masksA")
    in_segmentA = Input(shape=(max_seq_length, ), name="segment_idAs")
    bert_inputsA = [in_idA, in_maskA, in_segmentA]
    bert_outputA = BH.BertLayer(n_fine_tune_layers=3,
                                name='bert_inputA')(bert_inputsA)

    in_idB = Input(shape=(max_seq_length, ), name="input_idsB")
    in_maskB = Input(shape=(max_seq_length, ), name="input_masksB")
    in_segmentB = Input(shape=(max_seq_length, ), name="segment_idsB")
    bert_inputsB = [in_idB, in_maskB, in_segmentB]
    bert_outputB = BH.BertLayer(n_fine_tune_layers=3,
                                name='bert_inputB')(bert_inputsB)

    # bert_output = Multiply()([bert_outputA, bert_outputB])
    bert_output = Add()([bert_outputA, bert_outputB])
    bert_output = Reshape((-1, 1))(bert_output)
    bert_lstm = Bidirectional(
        LSTM(128, return_sequences=True, dropout=0.2,
             recurrent_dropout=0.1))(bert_output)
    bert_lstm = GlobalMaxPool1D()(bert_lstm)
    # bert_lstm = Dropout(0.2)(bert_lstm)
    dense = Dense(64, activation='relu')(bert_lstm)
    pred = Dense(2, activation='softmax')(dense)

    model = Model(
        inputs=[in_idA, in_maskA, in_segmentA, in_idB, in_maskB, in_segmentB],
        outputs=pred)
    # model.compile(loss=F1score.loss(), optimizer='adam', metrics=['accuracy'])
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # print(model.summary())
    return model
Example No. 14
def buildModel():
    inp = Input((MAXIMUM_SEQ_LEN, ))
    # use pretrained embeddings
    emb = Embedding(VOCAB_LENGTH,
                    EMBEDDING_DIM,
                    weights=[embedding_matrix],
                    trainable=False)(inp)
    # SpatialDropout1D drops whole embedding channels rather than individual cells
    emb = SpatialDropout1D(0.2)(emb)
    # generate 100 (fwd) + 100 (bwd) hidden states per timestep
    hidden_states = Bidirectional(
        LSTM(100, return_sequences=True, dropout=0.1,
             recurrent_dropout=0.1))(emb)
    # slide 64 kernels of size 3 over the hidden-state sequence
    conv = Conv1D(64,
                  kernel_size=3,
                  padding="valid",
                  kernel_initializer="glorot_uniform")(hidden_states)
    # take the max and the average over time for each channel
    x1 = GlobalMaxPool1D()(conv)
    x2 = GlobalAvgPool1D()(conv)
    # concatenate both poolings
    x = Concatenate()([x1, x2])
    x = Dropout(0.2)(x)
    x = Dense(50, activation='relu')(x)
    x = Dropout(0.1)(x)
    out = Dense(6, activation='sigmoid')(x)
    model = Model(inp, out)

    model.compile(loss="binary_crossentropy",
                  optimizer="adam",
                  metrics=[AUC(name="auc")])
Example No. 15
def get_model(embedding_matrix, learning_rate=0.001):
    optimizer = Adam(learning_rate=learning_rate)

    nb_words, embed_size = embedding_matrix.shape
    inp = Input(shape=(MAX_LEN, ))
    x = Embedding(nb_words,
                  embed_size,
                  weights=[embedding_matrix],
                  trainable=False)(inp)
    x = Bidirectional(
        LSTM(20, return_sequences=True, dropout=0.4, recurrent_dropout=0.2))(x)
    x = GlobalMaxPool1D()(x)
    x = Dense(5, activation="relu")(x)
    x = Dropout(0.3)(x)
    x = Dense(1, activation="sigmoid")(x)
    model = Model(inputs=inp, outputs=x)
    model.compile(
        loss='binary_crossentropy',
        optimizer=optimizer,
        metrics=[
            'accuracy',
            #tf.keras.metrics.AUC(),
            tf.keras.metrics.Precision(),
            tf.keras.metrics.Recall()
        ])
    return model
Example No. 16
def prepare_model(embed_dim, l2_weight, vocab_size, conv_layers):
    seq = Input((None, ), dtype="int32")
    x = Embedding(
        vocab_size,
        embed_dim,
        embeddings_regularizer=tf.keras.regularizers.l2(l2_weight))(seq)

    conv_outputs = []
    for i, l in enumerate(conv_layers):
        c = Conv1D(
            filters=l[0],
            kernel_size=l[1],
            activation=None,
            kernel_regularizer=tf.keras.regularizers.l2(l2_weight),
        )(x)
        c = Activation("tanh", name="tanh_%d" % i)(c)
        c = GlobalMaxPool1D()(c)
        conv_outputs.append(c)

    x = Concatenate(axis=-1)(conv_outputs)
    x = Dense(
        2,
        activation=None,
        kernel_regularizer=tf.keras.regularizers.l2(l2_weight),
    )(x)  # from_logits

    return Model(seq, x)
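The head returns raw logits (activation=None, flagged by the # from_logits comment), so the loss must be built with from_logits=True; a hedged usage sketch with placeholder hyperparameters:

model = prepare_model(embed_dim=128, l2_weight=1e-5, vocab_size=20000,
                      conv_layers=[(64, 3), (64, 5)])  # assumed settings
model.compile(optimizer="adam",
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=["accuracy"])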
Example No. 17
def conv1d_v1(input_shape, n_classes):

    X_input = Input(shape=input_shape)

    X = Lambda(lambda q: expand_dims(q, -1), name='expand_dims')(X_input)

    X = Conv1D(16, 9, activation=relu, padding='valid')(X)
    X = Conv1D(16, 9, activation=relu, padding='valid')(X)
    X = MaxPool1D(16)(X)
    X = Dropout(0.1)(X)

    X = Conv1D(32, 3, activation=relu, padding='valid')(X)
    X = Conv1D(32, 3, activation=relu, padding='valid')(X)
    X = MaxPool1D(4)(X)
    X = Dropout(0.1)(X)

    X = Conv1D(32, 3, activation=relu, padding='valid')(X)
    X = Conv1D(32, 3, activation=relu, padding='valid')(X)
    X = MaxPool1D(4)(X)
    X = Dropout(0.1)(X)

    X = Conv1D(256, 3, activation=relu, padding='valid')(X)
    X = Conv1D(256, 3, activation=relu, padding='valid')(X)
    X = GlobalMaxPool1D()(X)

    X = Dense(64, activation=relu)(X)
    X = Dense(128, activation=relu)(X)

    X = Dense(n_classes, activation=softmax)(X)

    model = Model(inputs=X_input, outputs=X)

    return model
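Example No. 18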
    def define_model(self, embedding_matrix, saved_model=None):
        if saved_model is None:
            print("--- Initializing Model ---")
            num_labels = self.y_train.shape[1]
            VOCAB_SIZE = embedding_matrix.shape[0]
            model = Sequential()
            embedding_layer = Embedding(VOCAB_SIZE,
                                        100,
                                        weights=[embedding_matrix],
                                        input_length=max_length,
                                        trainable=False)
            model.add(embedding_layer)
            model.add(LSTM(60, return_sequences=True, name='lstm_layer'))
            model.add(GlobalMaxPool1D())
            model.add(Dense(32, activation="relu"))
            model.add(Dropout(0.2))
            model.add(Dense(16, activation="relu"))
            model.add(Dropout(0.2))
            model.add(Dense(num_labels, activation="sigmoid"))
            model.compile(loss='binary_crossentropy',
                          optimizer='adam',
                          metrics=['accuracy'])
            return model
        print("--- Loading Model from {} ---".format(saved_model))
        model = preproc.load_model(saved_model)
        if model is None:  # the filepath is wrong or the model hasn't actually been saved earlier
            print("--- no model found, initializing from scratch ---")
            return self.define_model(embedding_matrix, saved_model=None)
        return model
Example No. 19
def get_model():
    nclass = 5
    inp = Input(shape=(187, 1))
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(inp)
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    dense_1 = Dense(64, activation=activations.relu, name="dense_1")(img_1)
    dense_1 = Dense(64, activation=activations.relu, name="dense_2")(dense_1)
    dense_1 = Dense(nclass, activation=activations.softmax, name="dense_3_mitbih")(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)

    model.compile(optimizer=opt, loss=losses.sparse_categorical_crossentropy, metrics=['acc'])
    #model.summary()
    return model
Example No. 20
    def __init__(self, model_config, training_config):
        super().__init__(model_config, training_config)
        self.update_parameters(model_config, training_config)
        nb_classes = len(model_config.list_classes)

        input_layer = Input(shape=(self.parameters["maxlen"],
                                   self.parameters["embed_size"]), )
        x = LSTM(
            self.parameters["recurrent_units"],
            return_sequences=True,
            dropout=self.parameters["dropout_rate"],
            recurrent_dropout=self.parameters["dropout_rate"])(input_layer)
        x = Dropout(self.parameters["dropout_rate"])(x)

        x = Conv1D(filters=self.parameters["recurrent_units"],
                   kernel_size=2,
                   padding='same',
                   activation='relu')(x)
        x = Conv1D(filters=300,
                   kernel_size=5,
                   padding='valid',
                   activation='tanh',
                   strides=1)(x)
        x_a = GlobalMaxPool1D()(x)
        x_b = GlobalAveragePooling1D()(x)
        x = concatenate([x_a, x_b])
        x = Dense(self.parameters["dense_size"], activation="relu")(x)
        x = Dropout(self.parameters["dropout_rate"])(x)
        x = Dense(nb_classes, activation="sigmoid")(x)
        self.model = Model(inputs=input_layer, outputs=x)
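Example No. 21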
def proposed_CNN(input_shape, num_class):
    x = x_in = Input(input_shape, name='input')
    num_features = 128
    # stack se blocks
    k = 1
    dilation_rate = 4  # dilation rate
    kernel_size = 3
    NUM_BLOCKS = 7
    start = 3  # (729,256)
    end = 7  # (27,256)
    for i in range(NUM_BLOCKS):
        num_features *= 2 if (i == start) else 1
        if i >= start and i < end:
            # use a different dilation rate r for each block
            x = Strided_Conv(x, num_features, k)
            x = residual_block(x,
                               num_features,
                               kernel_size,
                               dilation_rate,
                               name=f'block_{i+1}_{k+1}')
            # x = SE_residual_block(x,num_features,kernel_size,dilation_rate,name=f'block_{i+1}_{k+1}')
        else:
            x = Strided_Conv(x, num_features, k)
        k += 1

    x = GlobalMaxPool1D(name='final_pool')(x)

    # the final two FCs
    x = Dense(x.shape[-1].value, name='fc1')(x)
    x = BatchNormalization(name='norm1')(x)
    x = Activation('relu', name='relu1')(x)
    x = Dropout(0.5, name='drop1')(x)
    x = Dense(num_class, activation='sigmoid', name='output')(x)

    return Model(inputs=x_in, outputs=x, name='sampleCNN')
Example No. 22
    def get_model(self):
        X_input = Input(shape=(187, 1))
        X = Conv1D(32, kernel_size=5, activation='relu')(X_input)

        X_after = Conv1D(32, kernel_size=5, activation='relu',
                         padding='same')(X)
        X = self.residual_block_32(X)
        X = self.residual_block_32(X)
        X = self.residual_block_32(X)
        X = self.residual_block_32(X)

        X = Conv1D(32, kernel_size=5, activation='relu', padding='same')(X)
        X = Add()([X, X_after])
        X = BatchNormalization()(X)
        X = Conv1D(256, kernel_size=3, activation='relu')(X)
        transfer_layer = GlobalMaxPool1D()(X)
        X = Dense(64, activation='relu', name="dense_1")(transfer_layer)
        X = Dense(64, activation='relu', name="dense_2")(X)
        X_out = Dense(self.output_dim,
                      activation=self.last_activation,
                      name='output_layer')(X)

        model = models.Model(inputs=X_input, outputs=X_out)
        return model
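Example No. 23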
def Sample_CNN(input_shape, num_class):
    x_in = Input(input_shape, name='input')
    num_features = 128
    x = Conv1D(num_features,
               kernel_size=3,
               strides=3,
               padding='same',
               kernel_regularizer=l2(WEIGHT_DECAY),
               name='conv0')(x_in)
    x = BatchNormalization(name='norm0')(x)
    x = Activation('relu', name='relu0')(x)
    # stack se blocks
    for i in range(NUM_BLOCKS):
        num_features *= 2 if (i == 2 or i == (NUM_BLOCKS - 1)) else 1
        x = se_block(x, num_features, name=f'block_{i+1}')
    x = GlobalMaxPool1D(name='final_pool')(x)

    # the final two FCs
    x = Dense(x.shape[-1].value, name='fc1')(x)
    x = BatchNormalization(name='norm1')(x)
    x = Activation('relu', name='relu1')(x)
    x = Dropout(0.5, name='drop1')(x)
    x = Dense(num_class, activation='sigmoid', name='output')(x)

    return Model(inputs=x_in, outputs=x, name='sampleCNN')
Example No. 24
    def define_discriminator(self) -> Sequential:
        # activation_func = tf.keras.layers.LeakyReLU(alpha=0.02)
        activation_func = tf.keras.layers.ReLU()
        self.d_model = Sequential()
        self.d_model.add(
            Conv1D(filters=self.vector_size,
                   kernel_size=20,
                   strides=4,
                   activation=activation_func,
                   batch_input_shape=(self.num_samples, self.vector_size, 1)))
        self.d_model.add(Dropout(.3))
        self.d_model.add(GlobalMaxPool1D())
        self.d_model.add(Flatten())
        self.d_model.add(
            Dense(self.vector_size // 2,  # units must be an integer
                  activation=activation_func,
                  kernel_initializer='he_uniform',
                  input_dim=self.vector_size))
        self.d_model.add(Dropout(.3))
        self.d_model.add(Dense(1, activation='sigmoid'))

        # compile model
        loss_func = tf.keras.losses.BinaryCrossentropy()
        opt_func = tf.keras.optimizers.RMSprop(learning_rate=0.0005)
        self.d_model.compile(loss=loss_func,
                             optimizer=opt_func,
                             metrics=['accuracy'])
        return self.d_model
Example No. 25
def baseline_model(seq_dim=3):
    input_1 = Input(shape=(None, seq_dim))

    base_model = encoder(seq_dim=seq_dim)

    x1 = base_model(input_1)

    x1 = Dropout(0.5)(x1)

    x1 = Concatenate(axis=-1)([GlobalMaxPool1D()(x1), GlobalAvgPool1D()(x1)])

    x = Dropout(0.5)(x1)
    x = Dense(100, activation="relu")(x)
    x = Dropout(0.5)(x)
    out = Dense(1, activation="sigmoid")(x)

    model = Model(input_1, out)

    model.compile(loss="binary_crossentropy",
                  metrics=[acc],
                  optimizer=Adam(0.0001))

    model.summary()

    return model
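Example No. 26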
def get_model():
    nclass = 1
    inp = Input(shape=(187, 1))
    img_1 = Convolution1D(32,
                          kernel_size=5,
                          activation=activations.relu,
                          padding="valid")(inp)
    for i in range(5):
        img_1 = res_block(img_1, 32, dropout=0.2)
    img_1 = Convolution1D(32,
                          kernel_size=3,
                          activation=activations.relu,
                          padding="same")(img_1)
    img_1 = Convolution1D(32,
                          kernel_size=3,
                          activation=activations.relu,
                          padding="same",
                          name="final_conv")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    dense_1 = Dense(32, activation=activations.relu, name="dense_1")(img_1)
    dense_1 = Dense(32, activation=activations.relu, name="dense_2")(dense_1)
    dense_1 = Dense(nclass,
                    activation=activations.sigmoid,
                    name="dense_3_mitbih")(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)

    model.compile(optimizer=opt,
                  loss=losses.binary_crossentropy,
                  metrics=['acc'])
    model.summary()
    return model
Example No. 27
    def __init__(self,
                 training_config,
                 architecture=None,
                 name="StepCOVNetArrowModel"):
        arrow_input = Input(shape=training_config.arrow_input_shape,
                            name="arrow_input",
                            dtype=tf.int32)
        arrow_mask = Input(shape=training_config.arrow_mask_shape,
                           name="arrow_mask",
                           dtype=tf.int32)
        model_input = [arrow_input, arrow_mask]

        if architecture is None:
            gpt2_model = PretrainedModels.gpt2_model()
            model_output = gpt2_model(arrow_input, attention_mask=arrow_mask)[0]
            # The GPT-2 model returns per-token feature maps; global max pooling
            # condenses them into a fixed-size vector. Might be replaceable with
            # another feature-extraction method in the future.
            model_output = GlobalMaxPool1D()(model_output)
        else:
            # TODO: Add support for existing arrow models
            raise NotImplementedError(
                "No support yet for existing architectures")

        super(ArrowModel, self).__init__(model_input=model_input,
                                         model_output=model_output,
                                         name=name)
Example No. 28
def get_model(embedding_matrix: np.ndarray = None,
              embedding_size: int = config.EMBEDDING_SIZE,
              max_sequence_length: int = config.MAX_SEQUENCE_LENGTH,
              max_features: int = config.MAX_FEATURES,
              dropout: float = config.DROPOUT,
              num_lstm_units: int = config.NUM_LSTM_UNITS,
              num_dense_units: int = config.NUM_DENSE_UNITS,
              learning_rate: float = config.LEARNING_RATE):
    """Returns a bidirectional LSTM model"""

    inp = Input(shape=(max_sequence_length, ))
    if embedding_matrix is not None:
        x = Embedding(max_features, embedding_size,
                      weights=[embedding_matrix])(inp)
    else:
        x = Embedding(max_features, embedding_size)(inp)
    x = Bidirectional(
        LSTM(num_lstm_units,
             return_sequences=True,
             dropout=dropout,
             recurrent_dropout=dropout))(x)
    x = GlobalMaxPool1D()(x)
    x = Dense(num_dense_units, activation="relu")(x)
    x = Dropout(rate=dropout)(x)
    x = Dense(6, activation="sigmoid")(x)
    model = Model(inputs=inp, outputs=x)
    model.compile(Adam(learning_rate=learning_rate),
                  loss="binary_crossentropy",
                  metrics=["accuracy"])

    return model
Example No. 29
    def __init__(self,
                 vocab_size,
                 embedding_dim,
                 filter_sizes,
                 num_filters,
                 hidden_dim,
                 dropout_p,
                 num_classes,
                 freeze_embeddings=False):
        super(TextCNN, self).__init__(name="cnn")

        # Embeddings
        self.embedding = Embedding(input_dim=vocab_size,
                                   output_dim=embedding_dim,
                                   trainable=not freeze_embeddings)

        # Conv & pool
        self.convs = []
        self.pools = []
        for filter_size in filter_sizes:
            conv = Conv1D(filters=num_filters,
                          kernel_size=filter_size,
                          padding='same',
                          activation='relu')
            pool = GlobalMaxPool1D(data_format='channels_last')
            self.convs.append(conv)
            self.pools.append(pool)

        # Concatenation
        self.concat = Concatenate(axis=1)

        # FC layers
        self.fc1 = Dense(units=hidden_dim, activation='relu')
        self.dropout = Dropout(rate=dropout_p)
        self.fc2 = Dense(units=num_classes, activation='softmax')
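As above, only the constructor is given; a hedged call sketch wiring the stored conv/pool pairs (an assumption, not the original):

    def call(self, inputs, training=False):
        # Assumed forward pass: embed, apply each conv/pool pair in
        # parallel, concatenate the pooled features, then classify.
        x = self.embedding(inputs)
        pooled = [pool(conv(x)) for conv, pool in zip(self.convs, self.pools)]
        z = self.concat(pooled)
        z = self.fc1(z)
        z = self.dropout(z, training=training)
        return self.fc2(z)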
Example No. 30
def build_cnn_model(num_classes, feat_size, name='cnn'):
    input_seq = Input(shape=(None, feat_size), dtype='float32')
    x = MaxPool1D(5, strides=2)(input_seq)

    conv1 = Conv1D(64, kernel_size=3, strides=1, activation='relu')(x)
    conv1 = GlobalMaxPool1D()(conv1)

    conv2 = Conv1D(64, kernel_size=5, strides=2, activation='relu')(x)
    conv2 = GlobalMaxPool1D()(conv2)

    conv3 = Conv1D(64, kernel_size=7, strides=3, activation='relu')(x)
    conv3 = GlobalMaxPool1D()(conv3)

    x = Concatenate()([conv1, conv2, conv3])
    x = Dense(num_classes, activation='softmax')(x)
    model = tf.keras.Model(inputs=input_seq, outputs=x, name=name)
    return model