Example no. 1
from keras import backend as K
from keras.layers import (Input, Embedding, SpatialDropout1D, Bidirectional,
                          CuDNNGRU, CuDNNLSTM, GlobalAveragePooling1D,
                          GlobalMaxPool1D, Dense)
from keras.models import Model
from keras.optimizers import RMSprop


def gru_keras(max_features,
              maxlen,
              bidirectional,
              dropout_rate,
              embed_dim,
              rec_units,
              mtype='GRU',
              reduction=None,
              classes=4,
              lr=0.001):

    if K.backend() == 'tensorflow':
        K.clear_session()

    input_layer = Input(shape=(maxlen, ))
    embedding_layer = Embedding(max_features,
                                output_dim=embed_dim,
                                trainable=True)(input_layer)
    x = SpatialDropout1D(dropout_rate)(embedding_layer)

    if reduction:
        if mtype == 'GRU':
            if bidirectional:
                x = Bidirectional(
                    CuDNNGRU(units=rec_units, return_sequences=True))(x)
            else:
                x = CuDNNGRU(units=rec_units, return_sequences=True)(x)
        elif mtype == 'LSTM':
            if bidirectional:
                x = Bidirectional(
                    CuDNNLSTM(units=rec_units, return_sequences=True))(x)
            else:
                x = CuDNNLSTM(units=rec_units, return_sequences=True)(x)

        if reduction == 'average':
            x = GlobalAveragePooling1D()(x)
        elif reduction == 'maximum':
            x = GlobalMaxPool1D()(x)
    else:
        if mtype == 'GRU':
            if bidirectional:
                x = Bidirectional(
                    CuDNNGRU(units=rec_units, return_sequences=False))(x)
            else:
                x = CuDNNGRU(units=rec_units, return_sequences=False)(x)
        elif mtype == 'LSTM':
            if bidirectional:
                x = Bidirectional(
                    CuDNNLSTM(units=rec_units, return_sequences=False))(x)
            else:
                x = CuDNNLSTM(units=rec_units, return_sequences=False)(x)

    output_layer = Dense(classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=output_layer)
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(learning_rate=lr, clipvalue=1, clipnorm=1),
                  metrics=['acc'])
    return model
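A minimal usage sketch, with hypothetical hyperparameter values:

model = gru_keras(max_features=20000, maxlen=100, bidirectional=True,
                  dropout_rate=0.2, embed_dim=128, rec_units=64,
                  mtype='GRU', reduction='average')
model.summary()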
Example no. 2
def NN_huaweiv1(maxlen, embedding_matrix=None, class_num1=17, class_num2=12):
    # Assumes the usual Keras layer imports plus a custom Attention(maxlen)
    # layer defined elsewhere.
    emb_layer = Embedding(
        embedding_matrix.shape[0],
        embedding_matrix.shape[1],
        input_length=maxlen,
        weights=[embedding_matrix],
        trainable=False,
    )
    seq1 = Input(shape=(maxlen, ))

    x1 = emb_layer(seq1)
    sdrop = SpatialDropout1D(rate=0.2)
    lstm_layer = Bidirectional(CuDNNGRU(128, return_sequences=True))  # a GRU, despite the name
    gru_layer = Bidirectional(CuDNNGRU(128, return_sequences=True))
    cnn1d_layer = Conv1D(64,
                         kernel_size=3,
                         padding="same",
                         kernel_initializer="he_uniform")
    x1 = sdrop(x1)
    lstm1 = lstm_layer(x1)
    gru1 = gru_layer(lstm1)
    att_1 = Attention(maxlen)(lstm1)
    att_2 = Attention(maxlen)(gru1)
    cnn1 = cnn1d_layer(lstm1)

    avg_pool = GlobalAveragePooling1D()
    max_pool = GlobalMaxPooling1D()

    x1 = concatenate([
        att_1, att_2,
        Attention(maxlen)(cnn1),
        avg_pool(cnn1),
        max_pool(cnn1)
    ])

    x = Dropout(0.2)(Activation(activation="relu")(BatchNormalization()(
        Dense(128)(x1))))
    x = Activation(activation="relu")(BatchNormalization()(Dense(64)(x)))
    pred1_d = Dense(class_num1)(x)
    pred1 = Activation(activation='sigmoid', name='pred1')(pred1_d)

    y = concatenate([x1, x])
    y = Activation(activation="relu")(BatchNormalization()(Dense(64)(y)))
    pred2_d = Dense(class_num2)(y)
    pred2 = Activation(activation='sigmoid', name='pred2')(pred2_d)

    z = Dropout(0.2)(Activation(activation="relu")(BatchNormalization()(
        Dense(128)(x1))))
    z = concatenate([pred1_d, pred2_d, z])
    pred3 = Dense(class_num1 + class_num2, activation='sigmoid',
                  name='pred3')(z)
    model = Model(inputs=seq1, outputs=[pred1, pred2, pred3])
    return model
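A minimal compile sketch for the three named sigmoid heads (`emb` stands in for a pretrained embedding matrix):

model = NN_huaweiv1(maxlen=100, embedding_matrix=emb)
model.compile(optimizer='adam',
              loss={'pred1': 'binary_crossentropy',
                    'pred2': 'binary_crossentropy',
                    'pred3': 'binary_crossentropy'})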
Example no. 3
def Build_RNN_layer(Parametre_layer):

    if Parametre_layer["type_cell"] == "LSTM":
        cell = L.LSTM(units=Parametre_layer["units"],
                      dropout=Parametre_layer["dropout"],
                      recurrent_dropout=Parametre_layer["recurrent_dropout"],
                      return_sequences=True,
                      return_state=True,
                      stateful=Parametre_layer["stateful"],
                      unroll=Parametre_layer["unroll"])
    elif Parametre_layer["type_cell"] == "CuDNNGRU":
        # CuDNN layers do not accept dropout, recurrent_dropout, or unroll.
        cell = CuDNNGRU(units=Parametre_layer["units"],
                        return_sequences=True,
                        return_state=True,
                        stateful=Parametre_layer["stateful"])
    elif Parametre_layer["type_cell"] == "GRU":
        cell = L.GRU(units=Parametre_layer["units"],
                     return_sequences=True,
                     return_state=True,
                     stateful=Parametre_layer["stateful"])
    else:  # default: CuDNNLSTM
        cell = CuDNNLSTM(units=Parametre_layer["units"],
                         return_sequences=True,
                         return_state=True,
                         stateful=Parametre_layer["stateful"])

    return cell
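A minimal usage sketch with a hypothetical parameter dict:

params = {
    "type_cell": "LSTM",
    "units": 64,
    "dropout": 0.1,
    "recurrent_dropout": 0.1,
    "stateful": False,
    "unroll": False,
}
rnn = Build_RNN_layer(params)
# return_state=True, so applying `rnn` to a 3-D tensor yields (sequences, *states).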
Example no. 4
def NN_huaweiv1(maxlen, embedding_matrix=None, class_num1=17, class_num2=12):
    emb_layer = Embedding(
        embedding_matrix.shape[0],
        embedding_matrix.shape[1],
        input_length=maxlen,
        weights=[embedding_matrix],
        trainable=False,
    )
    seq1 = Input(shape=(maxlen, ))

    emb = emb_layer(seq1)
    sdrop = SpatialDropout1D(rate=0.2)
    lstm_layer = Bidirectional(CuDNNGRU(128, return_sequences=True))
    gru_layer = Bidirectional(CuDNNGRU(128, return_sequences=True))
    cnn1d_layer = Conv1D(64,
                         kernel_size=3,
                         padding="same",
                         kernel_initializer="he_uniform")
    sd = sdrop(emb)
    lstm1 = lstm_layer(sd)
    gru1 = gru_layer(lstm1)
    cnn1 = cnn1d_layer(gru1)
    gru1 = concatenate([lstm1, gru1, cnn1])
    att_1 = Attention(maxlen)(gru1)
    att_2 = Attention(maxlen)(gru1)
    att_3 = Attention(maxlen)(gru1)
    att_4 = Attention(maxlen)(gru1)

    x1 = Activation(activation="relu")(BatchNormalization()(Dense(128)(att_1)))
    x2 = Activation(activation="relu")(BatchNormalization()(Dense(128)(att_2)))
    x3 = Activation(activation="relu")(BatchNormalization()(Dense(128)(att_3)))
    x4 = Activation(activation="relu")(BatchNormalization()(Dense(128)(att_4)))

    # pred1 is assembled from two attention branches: (class_num1 - 10)
    # labels from x1 and the remaining 10 from x2.
    pred1_1 = Dense(class_num1 - 10, activation='sigmoid')(x1)
    pred1_2 = Dense(10, activation='sigmoid')(x2)
    pred1 = concatenate([pred1_1, pred1_2], axis=-1, name='pred1')

    pred2_1 = Dense(class_num2 - 9, activation='sigmoid')(x3)
    pred2_2 = Dense(9, activation='sigmoid')(x4)

    pred2 = concatenate(
        [pred2_1, pred2_2], axis=-1, name='pred2'
    )  # Dense(class_num2, activation='sigmoid',name='pred2')(y)

    model = Model(inputs=seq1, outputs=[pred1, pred2])
    return model
Example no. 5
def create_model():
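    # Assumes module-level globals: num_words, EMBEDDING_DIM, MAX_POST_LENGTH,
    # MAX_POSTS, embedding_matrix, i_data, and a custom AttentionWithContext
    # layer defined elsewhere.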
    embedding_layer = Embedding(num_words,
                                EMBEDDING_DIM,
                                input_length=MAX_POST_LENGTH,
                                weights=[embedding_matrix],
                                trainable=False)

    sequence_input = Input(shape=(MAX_POST_LENGTH, ))
    embedded_sequences = embedding_layer(sequence_input)
    l_lstm_sent = Bidirectional(CuDNNGRU(
        50, return_sequences=True))(embedded_sequences)
    l_lstm_sent = Dropout(0.2)(l_lstm_sent)
    l_lstm_sent = AttentionWithContext()(l_lstm_sent)
    l_lstm_sent = Dropout(0.2)(l_lstm_sent)
    preds = Dense(units=2, activation='softmax')(l_lstm_sent)
    sentEncoder = Model(sequence_input, preds)
    print(sentEncoder.summary())

    ana_input = Input(shape=(MAX_POSTS, len(i_data[0][0])))

    review_input = Input(shape=(MAX_POSTS, MAX_POST_LENGTH))
    l_lstm_sent = TimeDistributed(sentEncoder)(review_input)
    l_lstm_sent = concatenate([l_lstm_sent, ana_input
                               ])  # combine time series and categories
    l_lstm_sent = BatchNormalization()(l_lstm_sent)
    l_lstm_sent = Dropout(0.2)(l_lstm_sent)
    l_lstm_sent = Bidirectional(CuDNNGRU(16,
                                         return_sequences=True))(l_lstm_sent)
    l_lstm_sent = Dropout(0.2)(l_lstm_sent)
    l_lstm_sent = AttentionWithContext()(l_lstm_sent)
    l_lstm_sent = Dropout(0.2)(l_lstm_sent)
    preds = Dense(2, activation='softmax')(l_lstm_sent)
    model = Model([review_input, ana_input], preds)
    print(model.summary())

    # AdaMod is not shipped with keras.optimizers; it comes from a
    # third-party package. Adam is used here as a stand-in.
    from keras.optimizers import Adam

    adam = Adam()
    model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['acc'])

    return model
Example no. 6
def get_model(embedding_matrix, max_len, max_features, embed_size):
    # Assumes `import tensorflow as tf` plus the usual Keras layer imports
    # (Input, Embedding, CuDNNGRU, CuDNNLSTM, Bidirectional, the global
    # pooling layers, concatenate, Dense, Dropout, Model).
    inp = Input(shape=(max_len, ))
    x = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp)
    x = CuDNNGRU(64, return_sequences=True)(x)
    x = Bidirectional(CuDNNLSTM(64, return_sequences=True))(x)
    avg_pool = GlobalAveragePooling1D()(x)
    max_pool = GlobalMaxPooling1D()(x)
    conc = concatenate([avg_pool, max_pool])
    conc = Dense(64, activation="relu")(conc)
    conc = Dropout(0.1)(conc)
    outp = Dense(1, activation="sigmoid")(conc)

    model = Model(inputs=inp, outputs=outp)
    model.compile(loss='binary_crossentropy',
                  optimizer=tf.optimizers.Adam(learning_rate=0.005),
                  metrics=['AUC'])

    return model
Example no. 7
def new_lpcnet_model(rnn_units1=384,
                     rnn_units2=16,
                     nb_used_features=38,
                     training=False,
                     adaptation=False,
                     quantize=False):
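    # Relies on LPCNet module-level names defined elsewhere in the file:
    # embed_size, frame_size, pcm_levels, constraint, quant_regularizer,
    # MDense, PCMInit, tree_to_pdf_train and tree_to_pdf_infer.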
    pcm = Input(shape=(None, 3))
    feat = Input(shape=(None, nb_used_features))
    pitch = Input(shape=(None, 1))
    dec_feat = Input(shape=(None, 128))
    dec_state1 = Input(shape=(rnn_units1, ))
    dec_state2 = Input(shape=(rnn_units2, ))

    padding = 'valid' if training else 'same'
    fconv1 = Conv1D(128,
                    3,
                    padding=padding,
                    activation='tanh',
                    name='feature_conv1')
    fconv2 = Conv1D(128,
                    3,
                    padding=padding,
                    activation='tanh',
                    name='feature_conv2')

    embed = Embedding(256,
                      embed_size,
                      embeddings_initializer=PCMInit(),
                      name='embed_sig')
    cpcm = Reshape((-1, embed_size * 3))(embed(pcm))

    pembed = Embedding(256, 64, name='embed_pitch')
    cat_feat = Concatenate()([feat, Reshape((-1, 64))(pembed(pitch))])

    cfeat = fconv2(fconv1(cat_feat))

    fdense1 = Dense(128, activation='tanh', name='feature_dense1')
    fdense2 = Dense(128, activation='tanh', name='feature_dense2')

    cfeat = fdense2(fdense1(cfeat))

    rep = Lambda(lambda x: K.repeat_elements(x, frame_size, 1))

    quant = quant_regularizer if quantize else None

    if training:
        rnn = CuDNNGRU(rnn_units1,
                       return_sequences=True,
                       return_state=True,
                       name='gru_a',
                       recurrent_constraint=constraint,
                       recurrent_regularizer=quant)
        rnn2 = CuDNNGRU(rnn_units2,
                        return_sequences=True,
                        return_state=True,
                        name='gru_b',
                        kernel_constraint=constraint,
                        kernel_regularizer=quant)
    else:
        rnn = GRU(rnn_units1,
                  return_sequences=True,
                  return_state=True,
                  recurrent_activation="sigmoid",
                  reset_after=True,
                  name='gru_a',
                  recurrent_constraint=constraint,
                  recurrent_regularizer=quant)
        rnn2 = GRU(rnn_units2,
                   return_sequences=True,
                   return_state=True,
                   recurrent_activation="sigmoid",
                   reset_after=True,
                   name='gru_b',
                   kernel_constraint=constraint,
                   kernel_regularizer=quant)

    rnn_in = Concatenate()([cpcm, rep(cfeat)])
    md = MDense(pcm_levels, activation='sigmoid', name='dual_fc')
    gru_out1, _ = rnn(rnn_in)
    gru_out2, _ = rnn2(Concatenate()([gru_out1, rep(cfeat)]))
    ulaw_prob = Lambda(tree_to_pdf_train)(md(gru_out2))

    if adaptation:
        rnn.trainable = False
        rnn2.trainable = False
        md.trainable = False
        embed.trainable = False

    model = Model([pcm, feat, pitch], ulaw_prob)
    model.rnn_units1 = rnn_units1
    model.rnn_units2 = rnn_units2
    model.nb_used_features = nb_used_features
    model.frame_size = frame_size

    encoder = Model([feat, pitch], cfeat)

    dec_rnn_in = Concatenate()([cpcm, dec_feat])
    dec_gru_out1, state1 = rnn(dec_rnn_in, initial_state=dec_state1)
    dec_gru_out2, state2 = rnn2(Concatenate()([dec_gru_out1, dec_feat]),
                                initial_state=dec_state2)
    dec_ulaw_prob = Lambda(tree_to_pdf_infer)(md(dec_gru_out2))

    decoder = Model([pcm, dec_feat, dec_state1, dec_state2],
                    [dec_ulaw_prob, state1, state2])
    return model, encoder, decoder
Example no. 8
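# Snippet begins mid-model: `x` is the output of an earlier (first) Conv1D
# block that is not shown, and `labels` is defined elsewhere.

# Second Conv1D layer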
x = Conv1D(16, 11, padding='valid', activation='relu', strides=1)(x)
x = MaxPooling1D(3)(x)
x = Dropout(0.3)(x)

# Third Conv1D layer
x = Conv1D(32, 9, padding='valid', activation='relu', strides=1)(x)
x = MaxPooling1D(3)(x)
x = Dropout(0.3)(x)

x = BatchNormalization(axis=-1,
                       momentum=0.99,
                       epsilon=1e-3,
                       center=True,
                       scale=True)(x)

x = Bidirectional(CuDNNGRU(128, return_sequences=True), merge_mode='sum')(x)
x = Bidirectional(CuDNNGRU(128, return_sequences=True), merge_mode='sum')(x)
x = Bidirectional(CuDNNGRU(128, return_sequences=False), merge_mode='sum')(x)

x = BatchNormalization(axis=-1,
                       momentum=0.99,
                       epsilon=1e-3,
                       center=True,
                       scale=True)(x)

# Flatten layer
# x = Flatten()(x)

# Dense Layer 1
x = Dense(256, activation='relu')(x)
outputs = Dense(len(labels), activation="softmax")(x)
Example no. 9
import keras
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import (Embedding, Conv1D, MaxPooling1D, Dropout, CuDNNGRU,
                          Dense, LeakyReLU)

# `train` and `test` are assumed to hold pre-split (sequences, labels) pairs;
# NUM_WORDS and max_review_length are defined elsewhere.
xtr, ytr = train
xte, yte = test
xtr = sequence.pad_sequences(xtr, padding='pre', maxlen=max_review_length)
xte = sequence.pad_sequences(xte, padding='pre', maxlen=max_review_length)

embedding_vector_length = 64
bs = 32  # batch size
ep = 2   # epochs
num_classes = 2

ytr1 = keras.utils.to_categorical(ytr, num_classes)
yte1 = keras.utils.to_categorical(yte, num_classes)

rl = LeakyReLU(alpha=0.01)  # layer instance used as a callable activation

model = Sequential()
model.add(Embedding(NUM_WORDS, embedding_vector_length, input_length=max_review_length))
# model.add(Dropout(0.5))
model.add(Conv1D(filters=64, kernel_size=3, padding='same', activation=rl))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.5))
model.add(CuDNNGRU(50, return_sequences=True))
model.add(CuDNNGRU(50))
model.add(Dropout(0.3))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])


# print(model.summary())
model.fit(xtr, ytr1, epochs=ep, batch_size=bs, validation_data=(xte, yte1))