Example #1
def model9(embeddingMatrix):
    embeddingLayer = Embedding(embeddingMatrix.shape[0],
                               EMBEDDING_DIM,
                               weights=[embeddingMatrix],
                               input_length=MAX_SEQUENCE_LENGTH,
                               trainable=True)
    model = Sequential()
    model.add(embeddingLayer)
    model.add(Conv1D(32, 3, padding='same', activation='relu'))
    model.add(MaxPool1D(2))
    model.add(GRU(LSTM_DIM))
    model.add(Dense(NUM_CLASSES, activation='softmax'))
    adam = optimizers.Adam(lr=LEARNING_RATE)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['acc'])
    model.summary()
    return model
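model9 relies on module-level constants and Keras imports that are not shown; a minimal setup sketch (the values are illustrative placeholders, not from the source):

# Hypothetical setup for model9; constant values are placeholders.
import numpy as np
from keras.models import Sequential
from keras.layers import Embedding, Conv1D, MaxPool1D, GRU, Dense
from keras import optimizers

EMBEDDING_DIM = 300
MAX_SEQUENCE_LENGTH = 100
LSTM_DIM = 128   # GRU width; the name is kept from the source
NUM_CLASSES = 4
LEARNING_RATE = 1e-3

embeddingMatrix = np.random.normal(size=(20000, EMBEDDING_DIM))
model = model9(embeddingMatrix)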
Example #2
def build_keras_convolutional_graph(inshape, outshape):
    inl = Input(inshape)  # (50 timesteps, 300 features)
    C = [
        Conv1D(50, kernel_size=7)(inl),  # (44, 50)
        Conv1D(50, kernel_size=5)(inl),  # (46, 50)
        Conv1D(50, kernel_size=3)(inl)   # (48, 50)
    ]
    CP = [Flatten()(MaxPool1D()(c)) for c in C]
    CA = Activation("relu")(Concatenate()(CP))  # 1100 + 1150 + 1200 = 3450
    CC = BatchNormalization()(CA)
    FF1 = Dropout(0.5)(Dense(360, activation="tanh")(CC))
    FF2 = Dense(120, activation="tanh")(FF1)
    O = Dense(outshape, activation="sigmoid")(FF2)
    model = Model(inl, O)
    model.compile(optimizer="adam",
                  loss="binary_crossentropy",
                  metrics=["acc"])
    return model
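Working backwards from the flattened sizes (1100 + 1150 + 1200 = 3450), the expected input is 50 timesteps of 300 features; a hypothetical call:

# Assumed shapes: 50 timesteps x 300 features, 12 output units.
model = build_keras_convolutional_graph((50, 300), 12)
model.summary()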
Example #3
def RNNModel(lstm=False):
    model = Sequential()
    model.add(Embedding(input_dim=num_most_freq_words_to_include,
                        output_dim=embedding_vector_length,
                        input_length=MAX_REVIEW_LENGTH_FOR_KERAS_RNN))

    model.add(Dropout(0.2))
    model.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))
    model.add(MaxPool1D(pool_size=2))
    if lstm:
        model.add(LSTM(100))
    else:
        model.add(GRU(100))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    
    return model
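RNNModel reads three globals defined elsewhere; illustrative values and a call (assumptions, not from the source):

# Placeholder values for the globals the function expects.
num_most_freq_words_to_include = 5000
embedding_vector_length = 32
MAX_REVIEW_LENGTH_FOR_KERAS_RNN = 500

model = RNNModel(lstm=True)   # LSTM variant; RNNModel() uses a GRU instead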
Example #4
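            # Fragment of a larger model builder: `conv(filters, kernel_size, x)` is a
            # helper from the enclosing scope (not shown), and `ba` is the incoming tensor.
            # The block downsamples, applies an inner residual block, upsamples, and adds a skip.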
            def f():
                baa = conv(64, 1, ba)
                baa = conv(64, 3, baa)
                baa = conv(128, 1, baa)

                baa = MaxPool1D()(baa)

                def f():
                    baaa = conv(64, 1, baa)
                    baaa = conv(64, 3, baaa)
                    baaa = conv(128, 1, baaa)
                    return Add()([baa, baaa])

                baa = f()

                baa = UpSampling1D()(baa)

                return Add()([ba, baa])
Example #5
def get_cnn_model(input_dim):
    model = Sequential()
    model.add(
        Conv1D(32,
               3,
               padding='same',
               input_shape=(input_dim, 1),
               activation='relu'))
    model.add(Conv1D(32, 3, padding='same', activation='relu'))
    model.add(MaxPool1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(1, activation='linear'))
    model.compile(optimizer='adam',
                  loss='mean_squared_error',
                  metrics=['mean_squared_error'])
    return model
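A quick usage sketch, with an illustrative window length:

# Each sample is a univariate series of input_dim steps, shaped (input_dim, 1).
model = get_cnn_model(187)
model.summary()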
Example #6
    def _create_model(self):
        convs = []
        text_input = Input(shape=(self.max_document_length, ))
        x = Embedding(self.vocabulary_size, self.embedding_size)(text_input)
        for fsz in [3, 8]:
            conv = Conv1D(128, fsz, padding='valid', activation='relu')(x)
            pool = MaxPool1D()(conv)
            convs.append(pool)
        x = Concatenate(axis=1)(convs)
        x = Flatten()(x)
        x = Dense(128, activation='relu')(x)
        # Note: Keras Dropout takes the fraction to *drop*; despite its name,
        # dropout_keep_prob is used here as the drop rate.
        x = Dropout(self.dropout_keep_prob)(x)
        preds = Dense(3, activation='softmax')(x)
        model = Model(text_input, preds)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.Adam(lr=self.lr),
                      metrics=['accuracy'])
        return model
Example #7
    def resblock(self):
        inputs = Input(shape=(30, 2048))
        d = Dense(512, activation='tanh')(inputs)
        x = Conv1D(512,
                   kernel_size=3,
                   strides=1,
                   activation='tanh',
                   padding='same')(d)
        x = MaxPool1D(pool_size=3, strides=1, padding='same')(x)
        x = Conv1D(512,
                   kernel_size=3,
                   strides=1,
                   activation='tanh',
                   padding='same')(x)
        y = add([d, x])
        y = Dropout(0.7)(y)
        model = Model(inputs=inputs, outputs=y)
        return model
Example #8
    def build_CNN_model(self, emd_matrix, long_sent_size, vocab_len, number_of_classes):
        self.model = Sequential()
        self.model.add(Embedding(vocab_len,
                                 100,
                                 weights=[emd_matrix],
                                 trainable=False,
                                 input_length=long_sent_size))
        # Prevents overfitting
        self.model.add(Dropout(0.5))
        self.model.add(Conv1D(64, 5, activation='relu'))
        # Keep the most relevant features
        self.model.add(MaxPool1D(2, strides=2))
        # Flatten the feature maps for the dense classifier
        self.model.add(Flatten())
        self.model.add(Dense(number_of_classes, activation='softmax'))
        self.model.summary()
        self.model.compile(loss='categorical_crossentropy', optimizer='adam',
                           metrics=['accuracy'])
        return self.model
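A usage sketch with a random embedding matrix; `builder` (the object holding this method) and all shapes are assumptions:

import numpy as np

vocab_len, long_sent_size, number_of_classes = 10000, 120, 5
emd_matrix = np.random.normal(size=(vocab_len, 100)).astype('float32')
model = builder.build_CNN_model(emd_matrix, long_sent_size, vocab_len, number_of_classes)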
Example #9
    def define_full_model(self, vector=None):
        encoder_input = Input(shape=(self.n_input_len,))
        y_input = Input(shape=(None,))

        # encoder components
        mask = Lambda(lambda x0: K.cast(K.greater(K.expand_dims(x0, 2), 0), 'float32'))
        embedding = Embedding(self.n_input, char_size, weights=[vector], trainable=True)  # force a mapping to char_size (128) dims, akin to building word vectors
        encoder_conv = Conv1D(self.n_input, 5)
        encoder_pool = MaxPool1D(2, strides=2)
        encoder = Bidirectional(LSTM(self.n_uints//2, return_sequences=True))

        # encoder forward pass
        # print('input shape', encoder_input)
        encoder_layer1 = embedding(encoder_input)
        mask_y = mask(y_input)
        # print('embedding shape', encoder_layer1)
        encoder_layer2 = encoder_conv(encoder_layer1)
        encoder_layer3 = encoder_pool(encoder_layer2)
        # print('shape after pooling', encoder_layer3)
        encoder_output = encoder(encoder_layer3)
        # print('encoder output', encoder_output)

        # decoder components
        decoder_att = AttentionDecoder(self.n_uints, self.n_input,
                                       return_probabilities=True)  # sets the output sequence dimension
        dense_1 = Dense(512, activation='relu')
        dense_2 = Dense(self.n_output, activation='softmax')

        # decoder forward pass
        decoder_layer1 = decoder_att(encoder_output)
        decoder_layer2 = dense_1(decoder_layer1)
        decoder_output = dense_2(decoder_layer2)

        # loss: cross-entropy, with the padded positions masked out
        print('labels and predictions:', y_input, decoder_output)
        cross_entropy = K.sparse_categorical_crossentropy(y_input[:, 1:], decoder_output[:, :-1])
        loss = K.sum(cross_entropy * mask_y[:, 1:, 0]) / K.sum(mask_y[:, 1:, 0])

        model = Model([encoder_input, y_input], decoder_output)
        model.add_loss(loss)
        model.compile(optimizer='adam', metrics=['acc'])
        model.summary()
        return model
Example #10
def get_model():
    number_labels = 1
    inp = Input(shape=(187, 1))

    forward = Convolution1D(filters=32,
                            kernel_size=3,
                            strides=1,
                            padding='same',
                            activation=activations.relu)(inp)
    forward = Convolution1D(filters=64,
                            kernel_size=3,
                            strides=1,
                            padding='same',
                            activation=activations.relu)(forward)
    forward = MaxPool1D(pool_size=2)(forward)
    forward = Dropout(rate=0.2)(forward)

    forward = LSTM(units=32, activation='tanh', return_sequences=True)(forward)
    forward = BatchNormalization()(forward)
    forward = LSTM(units=32, activation='tanh',
                   return_sequences=False)(forward)
    forward = BatchNormalization()(forward)

    dense_1 = Dense(32, activation=activations.sigmoid,
                    name="dense_1")(forward)
    dense_1 = Dense(32, activation=activations.sigmoid,
                    name="dense_2")(dense_1)
    dense_1 = Dense(32, activation=activations.sigmoid,
                    name="dense_3")(dense_1)
    dense_1 = Dense(32, activation=activations.sigmoid,
                    name="dense_4")(dense_1)

    dense_1 = Dense(number_labels,
                    activation=activations.sigmoid,
                    name="dense_3_mitbih")(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)

    model.compile(optimizer=opt,
                  loss=losses.binary_crossentropy,
                  metrics=['acc'])
    model.summary()
    return model
Example #11
def get_model(layer, active_1, active_2, active_3, Dropout_1, Dropout_2):
    
    inp = Input(shape=(16384, 1))
    
    #---------------------------------------
    #convolution part
    
    #convolution1
    img_1 = Conv1D(128, kernel_size=5, activation=activations.relu, padding="valid")(inp)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    # img_1 = Dropout(rate=0.1)(img_1)
    
    #convolution2
    # img_1 = Conv1D(64, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    # img_1 = MaxPool1D(pool_size=2)(img_1)
    # img_1 = Dropout(rate=0.1)(img_1)
    
    #convolution3
    img_1 = Conv1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    # img_1 = Dropout(rate=0.1)(img_1)
    
    #---------------------------------------
    #fully connected part
    #layer1
    dense_1 = Dense(500, activation=active_1, name="dense_1")(img_1)
    dense_1 = Dropout(rate=Dropout_1)(dense_1)
    
    #layer2
    dense_1 = Dense(500, activation=active_2, name="dense_2")(dense_1)
    dense_1 = Dropout(rate=Dropout_2)(dense_1)
    
    #output layer
    dense_1 = Dense(1, activation=active_3, name="output_dense")(dense_1)

    #train model
    model = models.Model(inputs=inp, outputs=dense_1)
    # opt = optimizers.Adam(0.1)

    model.compile(optimizer='adam', 
                  loss='binary_crossentropy', 
                  metrics=['accuracy'])
    model.summary()
    return model
Example #12
def C_RNN_series(vocab_size, max_len, embedding_size):
    model = Sequential()
    model.add(Embedding(vocab_size, embedding_size, input_length=max_len))
    model.add(Convolution1D(32, 3, padding='same', strides=1))
    model.add(Activation('relu'))
    model.add(MaxPool1D(pool_size=2))

    model.add(
        GRU(32, implementation=2, return_sequences=True, go_backwards=False))
    model.add(
        GRU(32, implementation=2, return_sequences=True, go_backwards=True))
    model.add(
        Bidirectional(LSTM(32, return_sequences=True), merge_mode='concat'))
    model.add(Flatten())
    model.add(Dense(4, activation='softmax'))
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy", f1])
    return model
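The compile call references an f1 metric that is not defined in this snippet; a common batch-wise Keras-backend implementation looks like this (a sketch, not necessarily the source's definition):

from keras import backend as K

def f1(y_true, y_pred):
    # Batch-wise F1: counts predictions above 0.5 as positive.
    y_pred = K.round(K.clip(y_pred, 0, 1))
    tp = K.sum(y_true * y_pred)
    precision = tp / (K.sum(y_pred) + K.epsilon())
    recall = tp / (K.sum(y_true) + K.epsilon())
    return 2 * precision * recall / (precision + recall + K.epsilon())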
Example #13
def block_inception1d(inp, filters):
    tower_1 = conv1d_bn(inp, filters, 1, padding='same')
    tower_1 = conv1d_bn(tower_1, filters, 1, padding='same')

    tower_2 = conv1d_bn(inp, filters, 1, padding='same')
    tower_2 = conv1d_bn(tower_2, filters, 3, padding='same')
    tower_2 = conv1d_bn(tower_2, filters, 3, padding='same')

    tower_3 = conv1d_bn(inp, filters, 1, padding='same')
    tower_3 = conv1d_bn(tower_3, filters, 3, padding='same')
    tower_3 = conv1d_bn(tower_3, filters, 3, padding='same')
    tower_3 = conv1d_bn(tower_3, filters, 3, padding='same')

    tower_4 = MaxPool1D(3, strides=1, padding='same')(inp)
    tower_4 = conv1d_bn(tower_4, filters, 1, padding='same')

    x = concatenate([tower_1, tower_2, tower_3, tower_4])

    return x
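conv1d_bn is not shown; a typical conv + batch-norm + ReLU helper consistent with the calls above would be (a sketch):

from keras.layers import Conv1D, BatchNormalization, Activation

def conv1d_bn(x, filters, kernel_size, padding='same', strides=1):
    # Convolution without bias, then batch normalization, then ReLU.
    x = Conv1D(filters, kernel_size, strides=strides, padding=padding, use_bias=False)(x)
    x = BatchNormalization()(x)
    return Activation('relu')(x)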
Example #14
def gen_model(sequences):
    model = Sequential([
        sequences,  # sequences: Embedding Sequences
        Conv1D(256, 5, activation='relu'),
        AveragePooling1D(pool_size=5),
        Conv1D(128, 5, activation='relu'),
        AveragePooling1D(pool_size=5),
        Conv1D(64, 5, activation='relu'),
        MaxPool1D(pool_size=5),
        GlobalMaxPooling1D(),
        Dropout(0.3),
        Dense(64, activation='relu'),
        Dense(len(POLARITY_LABEL), activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc'])

    return model
Example #15
def CNN_model():
    model = Sequential()
    model.add(
        Conv1D(filters=1,
               kernel_size=3,
               strides=1,
               activation='relu',
               padding='valid',
               data_format='channels_last',
               input_shape=(153, 4)))
    model.add(MaxPool1D(pool_size=5, strides=1, padding='valid'))
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    return model
Example #16
def defineCovNetsModel(modelId, filterSize):
    
    global MAX_SEQUENCE_LENGTH, EMBEDDING_DIM
    
    modelInput = Input(shape=(MAX_SEQUENCE_LENGTH[modelId],EMBEDDING_DIM))
    embeddingLayer = modelInput
    #print("\n input layer>>>",embeddingLayer)
    
    convBlocks = []
    for eachFilter in filterSize:
        currFilter = int(np.rint(MAX_SEQUENCE_LENGTH[modelId]/(eachFilter)))
        if currFilter == 0:
            currFilter = 2
        singleConv = Conv1D(filters=currFilter, kernel_size=9, padding='valid', activation='relu', strides=1)(embeddingLayer)
        #print("\n convolution layer>>>", singleConv)

        singleConv = MaxPool1D(pool_size=1)(singleConv)  # pool_size=1 leaves the sequence unchanged
        #print("\n MaxPool1D layer>>>", singleConv)

        singleConv = Flatten()(singleConv)
        #print("\n Flatten layer>>>", singleConv)

        convBlocks.append(singleConv)

    transformLayer = Concatenate()(convBlocks) if len(convBlocks) > 1 else convBlocks[0]

    #conv = Dropout(0.5)(conv)
    transformLayer = Dense(10, activation='relu')(transformLayer)
    #print("\n 1st Dense layer>>>", transformLayer)

    modelOutput = Dense(2, activation='sigmoid')(transformLayer)
    #print("\n 2nd Dense layer>>>", modelOutput)

    model = Model(inputs=modelInput, outputs=modelOutput)
    #print("\n model>>>", model)

    lrAdam = Adam(lr=0.01, decay=0.001)
    #lrAdam = Adam(lr=0.01)
    model.compile(optimizer=lrAdam, loss='binary_crossentropy', metrics=['accuracy'])

    #model.summary()

    return model
Example #17
    def CNNGLU(self):
        layer = {}

        def GLU(x, dim, num):
            conv1 = Conv1D(filters=dim, kernel_size=1, padding='same')
            conv2 = Conv1D(filters=dim,
                           kernel_size=1,
                           padding='same',
                           activation='sigmoid')
            layer['glu_conv1_' + str(num)] = conv1
            layer['glu_conv2_' + str(num)] = conv2
            if self.load_weight:
                conv1_name = WEIGHT_FILE + self.lossname + 'glu_conv1_' + str(
                    num) + '_weight.npy'
                conv2_name = WEIGHT_FILE + self.lossname + 'glu_conv2_' + str(
                    num) + '_weight.npy'
                conv1_weight = np.load(conv1_name)
                conv1.set_weights(conv1_weight)
                conv2_weight = np.load(conv2_name)
                conv2.set_weights(conv2_weight)
            x1 = conv1(x)
            x2 = conv2(x)
            return Multiply()([x1, x2])

        x = Permute((2, 1))(self.embedding_layer)
        x = GLU(x, 400, 1)
        x = MaxPool1D(strides=2, padding='same')(x)
        x = GLU(x, 300, 2)
        x = GLU(x, 200, 3)
        x = GLU(x, 100, 4)
        x1 = GlobalMaxPooling1D()(x)
        x2 = GlobalAveragePooling1D()(x)

        self.cat_layers += [x1, x2]

        y = Concatenate()(self.cat_layers)
        output_layer = Dense(
            6,
            activation="sigmoid",
        )(y)
        self.result_model = Model(inputs=self.inputs, outputs=output_layer)
        self.set_loss(output_layer)
        return layer
Example #18
def model1(input_shape=(None, None, 2),
           conv_blocks=[{
               'nlayers': 2,
               'nfilters': 8,
               'kernel_size': 3
           }, {
               'nlayers': 2,
               'nfilters': 16,
               'kernel_size': 3
           }, {
               'nlayers': 3,
               'nfilters': 32,
               'kernel_size': 3
           }],
           dense_layers=[64, 8],
           nlabels=1,
           verbose=True):
    inp = x = Input(batch_shape=input_shape, name='input')

    for block_number, conv_block in enumerate(conv_blocks):
        for layer_number in range(conv_block['nlayers']):
            name = "conv_block_{}_layer_{}".format(block_number, layer_number)
            x = Convolution1D(conv_block['nfilters'],
                              conv_block['kernel_size'],
                              name=name,
                              strides=1,
                              activation='relu',
                              padding='same')(x)
        x = MaxPool1D(2, name="max_pooling_{}".format(block_number))(x)

    x = Dropout(0.25, name="dropout_1")(x)
    x = Flatten(name="flatten")(x)

    for layer_number, n_neurons in enumerate(dense_layers):
        name = "fc_{}".format(layer_number)
        x = Dense(n_neurons, activation='relu', name=name)(x)

    x = Dense(nlabels, activation='linear', name='predictions')(x)
    model = Model(inputs=inp, outputs=x)

    if verbose: model.summary()

    return model
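Note that Flatten needs a fixed timestep count, so the default input_shape=(None, None, 2) fails when the graph is built; pass a concrete length, e.g.:

# Sketch: batches of 1024-step, 2-channel windows (batch size left free).
model = model1(input_shape=(None, 1024, 2), verbose=False)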
Example #19
def train_model_cnn_w2v(word_index, input_length, labels, x_train, y_train,
                        x_validate, y_validate):
    '''
    Construct and train the model.
    '''
    embedding_layer = load_w2v_as_embedding(
        word_index=word_index,
        input_length=input_length)  # build the embedding layer from word2vec vectors
    # embedding_layer = Embedding(input_dim=len(word_index) + 1, output_dim=EMBEDDING_DIM, input_length=input_length)  # embedding layer without word2vec vectors

    model = Sequential()
    model.add(embedding_layer)
    model.add(Dropout(rate=0.2))
    model.add(
        Conv1D(filters=250,
               kernel_size=3,
               strides=1,
               padding='valid',
               activation='relu'))
    model.add(MaxPool1D(pool_size=3))
    model.add(Flatten())
    model.add(Dense(units=EMBEDDING_DIM, activation='relu'))
    model.add(Dense(units=labels.shape[1], activation='softmax'))
    model.summary()
    plot_model(model, to_file='model.png', show_shapes=True)
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['acc'])
    print(model.metrics_names)

    # If validation_split were set, the trailing 20% of the training data would be used as the validation set.
    # To enable TensorBoard, create the callback below and pass callbacks=[tb] to fit:
    # tb = TensorBoard(log_dir='/Users/yaochao/logs', histogram_freq=0, write_graph=True, write_images=True)
    # history = model.fit(x=x_train, y=y_train, validation_split=0.2, epochs=3, batch_size=128)
    history = model.fit(x=x_train,
                        y=y_train,
                        validation_data=(x_validate, y_validate),
                        epochs=2,
                        batch_size=100)
    plot_history(history, pre_filename='cnn_w2v')
    # model.save(TRAINED_MODEL)
    return model
Example #20
    def train_vanilla_CNN(self, features, labels, trainable_embeddings):
        embedding_size = 100

        if self.feature_type == 'word-embeddings':
            x_train = pad_sequences(features, maxlen=self.max_len, padding='post')
            pretrained_embeddings = Embeddings(self.name, embedding_size).vectors()
            vocab_size = pretrained_embeddings.shape[0]
            embedding_layer = Embedding(input_dim=vocab_size, output_dim=embedding_size,
                                input_length=self.max_len, trainable=trainable_embeddings, weights=[pretrained_embeddings])
        elif self.feature_type in ['tf-idf', 'bow']:
            x_train = features
            vocab_size = x_train.shape[1]
            embedding_layer = Embedding(input_dim=vocab_size, output_dim=embedding_size,
                                input_length=vocab_size, trainable=trainable_embeddings)
        else:
            raise Exception('Please select a valid feature')

        model = Sequential()
        model.add(embedding_layer)
        model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu'))
        model.add(MaxPool1D(pool_size=5))
        model.add(Dropout(rate=0.3))
        model.add(Flatten())
        model.add(Dense(self.num_classes, activation='softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
        print(model.summary())
        self.model = model

        numeric_labels = SentenceLabelEncoder().encode_numerical(labels)
        class_weights = class_weight.compute_class_weight('balanced', np.unique(numeric_labels), numeric_labels)
        y_train = SentenceLabelEncoder().encode_categorical(labels)

        self.model.fit(x_train, y_train, validation_split=0.2, epochs=15, batch_size=128,
                        verbose=2, shuffle=True, class_weight=class_weights)
        print('##########################\n\n\t Cross Validation completed \n\n\t##########################')

        self.model.fit(x_train, y_train, epochs=15, batch_size=128,
                        verbose=2, shuffle=True, class_weight=class_weights)
        loss, accuracy = self.model.evaluate(x_train, y_train)
        print('loss, accuracy:', loss, accuracy)

        self.labels_pred = SentenceLabelEncoder().decode(self.model.predict_classes(x_train))
Example #21
    def create_cnn_model(self,
                         contour_len,
                         num_classes,
                         kernel_size=5,
                         num_filters=20):
        """ Create 1D-CNN model for contour classification
        Args:
            contour_len (int): Input sequence length
            num_classes (int): Number of classes
        """
        self.model = Sequential()
        # self.model.add(Input(shape=))

        self.model.add(
            Conv1D(input_shape=(contour_len, 1),
                   filters=num_filters,
                   kernel_size=kernel_size,
                   padding="same",
                   name='conv1',
                   dilation_rate=self.dilation_rate))
        self.model.add(BatchNormalization())
        self.model.add(Activation(activation="relu"))
        self.model.add(Dropout(self.dropout_ratio))
        self.model.add(MaxPool1D(pool_size=2))
        # additional feature block(s)
        for i in range(1, self.num_feature_blocks):
            self.model.add(
                Conv1D(filters=num_filters,
                       kernel_size=kernel_size,
                       padding="same",
                       name="conv{}".format(i + 1),
                       dilation_rate=self.dilation_rate))
            self.model.add(BatchNormalization())
            self.model.add(Activation(activation="relu"))
            self.model.add(Dropout(self.dropout_ratio))
        self.model.add(Flatten())
        self.model.add(Dense(self.feature_dim, name='features'))
        self.model.add(Dense(num_classes, activation='softmax'))

        self.model.compile(loss='categorical_crossentropy',
                           optimizer=self.optimizer,
                           metrics=['accuracy'])
Example #22
File: model.py Project: tang3/FXY
def textcnn(max_len,
            input_dim,
            output_dim=None,
            weight_matrix=None,
            input_type='wordindex',
            class_num=1):
    kernel_size = [2, 3, 4, 5]
    if input_type == 'wordindex':
        my_input = Input(shape=(max_len, ))
        emb = Embedding(input_dim, output_dim, input_length=max_len)(my_input)
        emb = SpatialDropout1D(0.2)(emb)
    elif input_type == 'word2vec':
        my_input = Input(shape=(max_len, input_dim))
        emb = SpatialDropout1D(0.2)(my_input)
    elif input_type == 'word2vec_tunning':
        my_input = Input(shape=(max_len, ))
        emb = Embedding(input_dim,
                        output_dim,
                        input_length=max_len,
                        weights=[weight_matrix],
                        trainable=True)(my_input)
        emb = SpatialDropout1D(0.2)(emb)
    else:
        raise ValueError(
            "input_type must be one of: 'wordindex', 'word2vec', 'word2vec_tunning'")

    net = []
    for kernel in kernel_size:
        con = Conv1D(32, kernel, activation='relu', padding="same")(emb)
        con = MaxPool1D(2)(con)
        net.append(con)
    net = concatenate(net, axis=-1)
    net = Flatten()(net)
    net = Dropout(0.5)(net)
    net = Dense(64, activation='relu')(net)
    net = Dropout(0.5)(net)
    net = Dense(class_num, activation='sigmoid')(net)
    model = Model(inputs=my_input, outputs=net)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
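A hypothetical call for character-index inputs:

# Assumed: 100-step sequences over a 70-symbol vocabulary, 32-dim embeddings.
model = textcnn(max_len=100, input_dim=70, output_dim=32, input_type='wordindex')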
Example #23
def create_bubbleNet():

    nb_classes = 2
    nb_features = 20
    model = Sequential()
    model.add(
        Conv1D(filters=64,
               kernel_size=4,
               activation='relu',
               use_bias=True,
               input_shape=(nb_features, 1)))
    model.add(MaxPool1D(pool_size=2))
    model.add(Conv1D(filters=32, kernel_size=2, activation='relu'))
    model.add(Flatten())
    model.add(Dense(32))
    model.add(Activation('relu'))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    return model
Example #24
    def conv_block(self, model):
        """
        The DPCNN block: one MaxPool1D layer followed by two Conv1D layers.
        A shortcut connection and pre-activation are used in this block.
        """

        model1 = MaxPool1D(pool_size=self.pooling_size, strides=2)(model)
        model2 = Activation(self.conv1D_activation)(model1)
        model3 = Conv1D(self.num_filters,
                        kernel_size=self.kernel_size,
                        strides=1,
                        padding="same")(model2)
        model4 = Activation(self.conv1D_activation)(model3)
        model5 = Conv1D(self.num_filters,
                        kernel_size=self.kernel_size,
                        strides=1,
                        padding="same")(model4)
        model6 = add([model1, model5])

        return model6
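Since model1 (the pooled input) is reused as the shortcut, num_filters must equal the incoming channel count. In DPCNN the block is chained so each application halves the sequence length; a sketch of an assumed driver loop (x and num_blocks are not from the source):

# x: (batch, seq_len, num_filters) after the region embedding.
for _ in range(num_blocks):
    x = self.conv_block(x)   # each call halves seq_len via the strided pooling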
Example #25
def build_fn():
    model = Sequential()
    #model.add(LSTM(64, input_shape=X[0].shape))
    for i in range(3):
        model.add(
            Conv1D(32, kernel_size=3, padding='same', input_shape=X[0].shape))
        model.add(LeakyReLU())
        model.add(Conv1D(32, kernel_size=3, padding='same'))
        model.add(LeakyReLU())
        model.add(MaxPool1D())
    model.add(Flatten())
    model.add(Dense(1))

    model.compile(
        optimizer=Adam(),
        loss="mse",
        #metrics=['accuracy']
    )

    return model
Example #26
def label():
    visible = Input(shape=(50, 300))
    hidden1 = Dropout(0.2)(visible)
    hidden2 = LSTM(100, return_sequences=True)(hidden1)

    # CNN layers for classification
    c_hidden1 = Conv1D(filters=50, kernel_size=5)(hidden2)
    c_hidden2 = MaxPool1D(pool_size=5)(c_hidden1)

    c_extract = Reshape((50, 9))(c_hidden2)

    # Time distributed layers for labelling
    t_hidden1 = Dense(64, activation='relu')(hidden2)
    t_hidden2 = Dropout(0.2)(t_hidden1)
    t_hidden3 = concatenate([c_extract, t_hidden2])
    t_output = TimeDistributed(Dense(7, activation='softmax'))(t_hidden3)

    pred_model = Model(inputs=visible, outputs=t_output)

    return pred_model
Example #27
def cnn_w2v(word2idx):
    embeddings = np.zeros((len(word2idx) + 1, 100))
    # Approach with word2vec
    cnn_model = Sequential()

    cnn_model.add(
        Embedding(embeddings.shape[0],
                  embeddings.shape[1],
                  weights=[embeddings],
                  trainable=False,
                  input_length=52))
    # Prevents overfitting
    cnn_model.add(Dropout(0.5))
    cnn_model.add(Conv1D(64, 5, activation='relu'))
    # Get the most relevant features
    cnn_model.add(MaxPool1D(2, strides=2))
    # Transforms the input data to calculate the density
    cnn_model.add(Flatten())
    cnn_model.add(Dense(5, activation='softmax'))
    return cnn_model
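As written, the weight matrix is all zeros; in practice it would first be filled from trained vectors, e.g. with gensim (w2v_model is a hypothetical trained Word2Vec model):

import numpy as np

embeddings = np.zeros((len(word2idx) + 1, 100))
for word, idx in word2idx.items():
    if word in w2v_model.wv:
        embeddings[idx] = w2v_model.wv[word]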
Example #28
def get_model1():
    nclass = 4
    inp = Input(shape=(14, 1))
    img_1 = Convolution1D(8,
                          kernel_size=5,
                          activation=activations.relu,
                          padding='same')(inp)
    #    img_1 = Convolution1D(8, kernel_size=5, activation=activations.relu, padding="same")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    #    img_1 = Convolution1D(16, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    #    img_1 = Convolution1D(16, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    #    img_1 = MaxPool1D(pool_size=2)(img_1)
    #    img_1 = Dropout(rate=0.1)(img_1)
    #    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    #    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    #    img_1 = MaxPool1D(pool_size=2)(img_1)
    #    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(64,
                          kernel_size=3,
                          activation=activations.relu,
                          padding="same")(img_1)
    #    img_1 = Convolution1D(64, kernel_size=3, activation=activations.relu, padding="same")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    dense_1 = Dense(64, activation=activations.relu, name="dense_1")(img_1)
    dense_1 = Dense(64, activation=activations.relu, name="dense_2")(dense_1)
    dense_1 = Dense(nclass,
                    activation=activations.softmax,
                    name="dense_3_mitbih")(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)

    model.compile(optimizer=opt,
                  loss=losses.sparse_categorical_crossentropy,
                  metrics=['acc'])
    model.summary()
    return model
Example #29
def get_cnn_pair_rnn(vocab_size,
                     max_sequence_len,
                     embedding_dim,
                     num_classes,
                     embedding_matrix=None):
    """并联 cnn rnn
    # 模型结构:词嵌入-卷积池化-全连接 ---拼接-全连接
    #                -双向GRU-全连接
    :param vocab_size:
    :param max_sequence_len:
    :param embedding_dim:
    :param num_classes:
    :param embedding_matrix:
    :return:
    """
    weights = None
    # train_able=True
    if embedding_matrix:
        weights = np.asarray([embedding_matrix])
    model = Sequential()
    model.add(
        Embedding(vocab_size,
                  embedding_dim,
                  input_length=max_sequence_len,
                  weights=weights,
                  trainable=True))
    sentence_input = Input(shape=(max_sequence_len, ), dtype='float64')
    embed = Embedding(vocab_size, embedding_dim,
                      input_length=max_sequence_len)(sentence_input)
    cnn = Convolution1D(256, 3, padding='same', strides=1,
                        activation='relu')(embed)
    cnn = MaxPool1D(pool_size=4)(cnn)
    cnn = Flatten()(cnn)
    cnn = Dense(256)(cnn)
    rnn = Bidirectional(LSTM(256, dropout=0.2, recurrent_dropout=0.1))(embed)
    rnn = Dense(256)(rnn)
    con = concatenate([cnn, rnn], axis=-1)
    output = Dense(num_classes, activation='sigmoid')(con)
    model = Model(inputs=sentence_input, outputs=output)
    return model
Example #30
def compression_layer(compression, **kwargs):
    max_pool = ['max', 'max_pool', 'max-pool']
    mean_pool = [
        'mean', 'mean_pool', 'mean-pool', 'avg', 'avg_pool', 'avg-pool'
    ]
    convolution = ['conv', 'convolution', 'conv1d']
    dilated_convolution = [
        'dilated', 'dilated-conv', 'dilated-convolution',
        'dilated-convolutions'
    ]
    most_used = ['most', 'most_used', 'most-used']
    all_compressions = [
        'max_pool', 'mean_pool', 'convolution', 'dilated_convolution',
        'most-used'
    ]
    if isinstance(compression, str):
        compression = compression.lower()

    if compression in max_pool:
        layer = MaxPool1D(**kwargs)
    elif compression in mean_pool:
        layer = AvgPool1D(**kwargs)
    elif compression in convolution:
        assert 'filters' in kwargs or 'units' in kwargs, \
            'convolution-compression requires keyword argument `filters` (or `units`)'
        assert 'kernel_size' in kwargs, \
            'convolution-compression requires keyword argument `kernel_size`'
        # Pop the consumed arguments so Conv1D does not receive them twice.
        filters = kwargs.pop('filters', None) or kwargs.pop('units', None)
        kernel_size = kwargs.pop('kernel_size')

        layer = Conv1D(filters=filters, kernel_size=kernel_size, **kwargs)
    elif compression in dilated_convolution:
        raise NotImplementedError(
            '`dilated-convolution compression` is not implemented.')
    elif compression in most_used:
        raise NotImplementedError(
            '`most-used compression` is not implemented.')
    else:
        raise ValueError(f'unexpected compression: {compression}. '
                         f'Select from [{all_compressions}]')
    return layer
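Example selections (a sketch):

# Pooling compression with an explicit window.
pool = compression_layer('max', pool_size=2)
# Convolutional compression; `filters` and `kernel_size` are consumed before Conv1D is built.
conv = compression_layer('conv', filters=64, kernel_size=3)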