Example #1
def get_model():
    inp = Input(shape=(maxlen,))
    x = Embedding(max_words + 1,
                  embed_size * 2,
                  weights=[embeding_matrix],
                  trainable=False)(inp)
    x = SpatialDropout1D(0.2)(x)
    x = Bidirectional(CuDNNGRU(units, return_sequences=True))(x)
    # x shape is (batch_size, seqsize, units*2)
    pools = []
    for filter_size in filter_sizes:
        conv = Conv1D(num_filters, kernel_size=filter_size)(x)
        conv = bn()(conv)
        conv = Activation("relu")(conv)
        # conv shape (batch_size, seqsize-filter_size+1, num_filters)
        max_pool = GlobalMaxPooling1D()(conv)
        ave_pool = GlobalAveragePooling1D()(conv)
        pools.extend([max_pool, ave_pool])

    pool = concatenate(pools)
    # pool shape is (batch_size, num_filters * 2 * len(filter_sizes))
    pool = Dropout(0.3)(pool)

    z = Dense(400)(pool)
    z = bn()(z)
    z = Activation("relu")(z)
    z = Dropout(0.3)(z)
    oup = Dense(6, activation='sigmoid')(z)

    model = Model(inputs=inp, outputs=oup)
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=1e-3, decay=0.0),
                  metrics=['accuracy'])
    return model
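All four get_model variants reference module-level names that the excerpts do not show: tokenizer settings, the pretrained embedding matrix, and a bn alias for BatchNormalization. A minimal sketch of plausible definitions so the snippets run as written; every value below is an assumption, not the author's actual configuration:

import numpy as np
from keras.layers import (Input, Embedding, SpatialDropout1D, Bidirectional,
                          CuDNNGRU, CuDNNLSTM, Conv1D, Conv2D, Reshape,
                          Activation, Dropout, Dense, GlobalMaxPooling1D,
                          GlobalAveragePooling1D, GlobalMaxPooling2D,
                          Concatenate, concatenate)
from keras.layers import BatchNormalization as bn  # the `bn` alias used throughout
from keras.models import Model
from keras.optimizers import Adam

maxlen = 200              # padded sequence length (assumed value)
max_words = 100000        # vocabulary size (assumed value)
embed_size = 150          # dimension of each pretrained embedding (assumed value)
units = 64                # recurrent units per direction (assumed value)
filter_sizes = [1, 2, 3]  # convolution widths (assumed values)
num_filters = 64          # filters per convolution (assumed value)

# pretrained vectors; two embeddings are concatenated side by side,
# hence the embed_size * 2 columns
embeding_matrix = np.zeros((max_words + 1, embed_size * 2), dtype='float32')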
Example #2
def get_model():
    inp = Input(shape=(maxlen, ))
    x = Embedding(embeding_matrix.shape[0],
                  embed_size * 2,
                  weights=[embeding_matrix],
                  trainable=False)(inp)
    #x_1 = Embedding(max_words+1, embed_size, weights=[embeding_matrix_1], trainable=False)(inp)
    # (batch_size, seqsize, embed_size)
    x = SpatialDropout1D(0.1)(x)
    #x_1 = SpatialDropout1D(0.1)(x_1)
    x = Reshape((maxlen, embed_size * 2, 1))(x)
    #x_1 = Reshape((maxlen, embed_size, 1))(x_1)
    #x = concatenate([x,x_1],axis=3)

    ys = []
    # (batch_size, len, embed_size, channel)
    for filter_size in filter_sizes:
        conv = Conv2D(num_filters,
                      kernel_size=(filter_size, embed_size * 2),
                      kernel_initializer="normal")(x)
        # conv output-> (batch_size, len-filter_size+1, 1, num_filters)
        bnlayer = bn()(conv)
        relu = Activation("relu")(bnlayer)
        conv = Conv2D(num_filters,
                      kernel_size=(filter_size, 1),
                      kernel_initializer="normal")(relu)
        # conv output -> (batch_size, len-2*filter_size+2, 1, num_filters)
        bnlayer = bn()(conv)
        relu = Activation("relu")(bnlayer)

        # commented out: a third conv block did not improve local CV
        #         conv = Conv2D(num_filters, kernel_size= (filter_size, 1), kernel_initializer="normal")(relu)
        #         # conv output -> (batch_size, len-3*filter_size+3, 1, num_filters)
        #         bnlayer = bn()(conv)
        #         relu = Activation("relu")(bnlayer)

        maxpool = GlobalMaxPooling2D()(relu)
        # maxpool shape -> (batch_size, num_filters)
        ys.append(maxpool)

    z = Concatenate(axis=1)(ys)
    # z shape -> (batch_size, num_filters * len(filter_sizes))
    z = Dropout(0.2)(z)
    z = Dense(300)(z)
    z = bn()(z)
    z = Activation("relu")(z)
    z = Dropout(0.2)(z)

    z = Dense(200)(z)
    z = bn()(z)
    z = Activation("relu")(z)
    z = Dropout(0.2)(z)
    outp = Dense(6, activation="sigmoid")(z)

    model = Model(inputs=inp, outputs=outp)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    return model
Example #3
def get_model():
    inp = Input(shape=(maxlen,))
    x = Embedding(max_words + 1,
                  embed_size * 2,
                  weights=[embeding_matrix],
                  trainable=False)(inp)
    x = SpatialDropout1D(0.2)(x)
    x = Bidirectional(CuDNNLSTM(units, return_sequences=True))(x)
    x_1 = Bidirectional(CuDNNGRU(units, return_sequences=True))(x)
    # x and x_1 shapes are (batch_size, seqsize, units*2)

    max_pool = GlobalMaxPooling1D()(x)
    ave_pool = GlobalAveragePooling1D()(x)
    max_pool_1 = GlobalMaxPooling1D()(x_1)
    ave_pool_1 = GlobalAveragePooling1D()(x_1)
    pool = concatenate([max_pool, ave_pool, max_pool_1, ave_pool_1])
    # pool shape is (batch_size, units*8)
    pool = Dropout(0.3)(pool)

    z = Dense(800)(pool)
    z = bn()(z)
    z = Activation("relu")(z)
    z = Dropout(0.3)(z)

    z = Dense(500)(z)
    z = bn()(z)
    z = Activation("relu")(z)
    z = Dropout(0.3)(z)
    oup = Dense(6, activation='sigmoid')(z)

    model = Model(inputs=inp, outputs=oup)
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=1e-3, decay=0.0),
                  metrics=['accuracy'])
    return model
Example #4
def get_model():
    inp = Input(shape=(maxlen,))
    x = Embedding(max_words + 1,
                  embed_size * 2,
                  weights=[embeding_matrix],
                  trainable=False)(inp)
    x = SpatialDropout1D(0.2)(x)
    x = Bidirectional(CuDNNLSTM(units, return_sequences=True))(x)
    x_1 = Bidirectional(CuDNNGRU(units, return_sequences=True))(x)
    # x shape is (batch_size, seqsize, units*2)

    ###### simple pooling layer
    #     max_pool = GlobalMaxPooling1D()(x)
    #     ave_pool = GlobalAveragePooling1D()(x)
    #     max_pool_1 = GlobalMaxPooling1D()(x_1)
    #     ave_pool_1 = GlobalAveragePooling1D()(x_1)
    #     pool = concatenate([max_pool, ave_pool, max_pool_1, ave_pool_1])
    #     # pool shape is (batch_size, units*8)
    #     pool = Dropout(0.3)(pool)

    ###### attention implementation: att_w = softmax(tanh(h*W + b))
    #     t = TimeDistributed(Dense(units, activation="tanh"))(x_1)
    #     t = Lambda(lambda t: K.sum(t, axis=2), output_shape=(maxlen,))(t)  # t shape is (batch_size, seqsize)
    #     s = Activation("softmax")(t)  # s shape is (batch_size, seqsize)
    #     x_1 = Permute((2, 1))(x_1)  # transpose to (batch_size, units*2, seqsize); Reshape here would scramble the data
    #     aw = Multiply()([s, x_1])  # aw shape is (batch_size, units*2, seqsize)
    #     p = Lambda(lambda a: K.sum(a, axis=2), output_shape=(units*2,))(aw)  # p shape is (batch_size, units*2)

    ###### k-max pooling (KMaxPooling is a custom layer, not part of Keras; see the sketch after this example)
    x = KMaxPooling(3)(x)
    x_1 = KMaxPooling(3)(x_1)  # x and x_1 shapes are (batch_size, units*2*3)
    p = concatenate([x, x_1])
    p = Dropout(0.3)(p)

    z = Dense(1000)(p)
    z = bn()(z)
    z = Activation("relu")(z)
    z = Dropout(0.3)(z)

    z = Dense(400)(z)
    z = bn()(z)
    z = Activation("relu")(z)
    z = Dropout(0.3)(z)

    oup = Dense(6, activation='sigmoid')(z)

    model = Model(inputs=inp, outputs=oup)
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=1e-3, decay=0.0),
                  metrics=['accuracy'])
    return model
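Example #4 calls KMaxPooling, a custom layer whose definition the excerpt omits. A minimal sketch of such a layer, assuming a TensorFlow backend and using tf.nn.top_k to keep the k largest activations per channel along the time axis; the author's actual implementation may differ:

import tensorflow as tf
from keras.engine import Layer

class KMaxPooling(Layer):
    """Keeps the k largest activations per channel along the time axis.

    Input shape:  (batch_size, time, channels)
    Output shape: (batch_size, k * channels)
    """

    def __init__(self, k=1, **kwargs):
        super(KMaxPooling, self).__init__(**kwargs)
        self.k = k

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.k * input_shape[2])

    def call(self, inputs):
        # move time to the last axis so top_k selects the k largest per channel
        swapped = tf.transpose(inputs, [0, 2, 1])  # (batch, channels, time)
        top_k = tf.nn.top_k(swapped, k=self.k, sorted=True).values
        return tf.reshape(top_k, (-1, self.k * int(inputs.shape[2])))

    def get_config(self):
        config = super(KMaxPooling, self).get_config()
        config['k'] = self.k
        return config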
Example #5
def conv1_block(self, inp, dim):
    from keras.layers import BatchNormalization as bn, Activation, Conv2D
    # pre-activation block: BN -> activation -> 1x1 convolution
    x = bn()(inp)
    x = Activation(self.acti)(x)
    x = Conv2D(dim, (1, 1), padding='same',
               kernel_initializer=self.init)(x)
    return x
Example #6
def tconv_block(self, inp, dim):
    from keras.layers import BatchNormalization as bn, Activation, Conv2DTranspose
    # pre-activation upsampling block: BN -> activation -> stride-2 transposed convolution
    x = bn()(inp)
    x = Activation(self.acti)(x)
    x = Conv2DTranspose(dim,
                        2,
                        strides=2,
                        padding='same',
                        kernel_initializer=self.init)(x)
    return x
Example #7
def RCL_block(self, inp, dim):
    from keras.layers import BatchNormalization as bn, Activation, Conv2D, Add
    # recurrent convolutional block: self.ite unrolled steps, each with a
    # residual connection back to the first convolution's output
    RCL = Conv2D(dim, (3, 3), padding='same', kernel_initializer=self.init)
    conv = bn()(inp)
    conv = Activation(self.acti)(conv)
    conv = Conv2D(dim, (3, 3),
                  padding='same',
                  kernel_initializer=self.init)(conv)
    conv2 = bn()(conv)
    conv2 = Activation(self.acti)(conv2)
    conv2 = RCL(conv2)
    conv2 = Add()([conv, conv2])
    for _ in range(self.ite - 2):
        conv2 = bn()(conv2)
        conv2 = Activation(self.acti)(conv2)
        # weights= copies RCL's weights as the initial value; the unrolled
        # steps start identical but are trained independently
        conv2 = Conv2D(dim, (3, 3),
                       padding='same',
                       weights=RCL.get_weights())(conv2)
        conv2 = Add()([conv, conv2])
    return conv2
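Examples #5-#7 are methods excerpted from a model-builder class that is not shown; they rely on self.acti, self.init, and self.ite. A hypothetical host class, just enough to exercise the three blocks together (the attribute names follow the excerpts; the class itself and its values are assumptions):

from keras.layers import Input
from keras.models import Model

class BlockNet(object):    # hypothetical container, not from the source
    acti = 'relu'          # activation passed to every block (assumed)
    init = 'he_normal'     # kernel initializer (assumed)
    ite = 3                # unrolled iterations in RCL_block (assumed)

    # attach the excerpted functions as methods
    conv1_block = conv1_block
    tconv_block = tconv_block
    RCL_block = RCL_block

# usage: compose the blocks into a small model
net = BlockNet()
inp = Input(shape=(64, 64, 1))
x = net.conv1_block(inp, 16)   # 1x1 pre-activation conv
x = net.RCL_block(x, 16)       # recurrent convolutional block
x = net.tconv_block(x, 16)     # stride-2 upsampling
model = Model(inputs=inp, outputs=x)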