Example #1
def f(inputs):
    # bidirectional IndRNN over the sequence, keeping every timestep
    rnn2 = Bidirectional(IndRNN(rnn_dim, return_sequences=True))(inputs)
    drop2 = Dropout(0.75)(rnn2)
    # per-timestep projection to dense_dim before attention
    dense = TimeDistributed(Dense(dense_dim))(drop2)
    drop3 = Dropout(0.5)(dense)
    # attention pooling collapses the time axis into a single vector
    att = AttentionWithContext()(drop3)
    return att
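A minimal usage sketch for the block above, assuming `rnn_dim`, `dense_dim`, `IndRNN`, and `AttentionWithContext` come from the surrounding project; the input shape and class count here are hypothetical:

from keras.layers import Input, Dense
from keras.models import Model

rnn_dim, dense_dim = 128, 64           # assumed hyperparameters
seq_in = Input(shape=(100, 300))       # (timesteps, features), hypothetical
features = f(seq_in)                   # attention-pooled sequence vector
probs = Dense(2, activation='softmax')(features)
model = Model(inputs=seq_in, outputs=probs)
model.compile(loss='categorical_crossentropy', optimizer='adam')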
Example #2
def lstm_atten(sent_sequences, out_dim):
    #sent_sequences = Bidirectional(GRU(out_dim, return_sequences=True))(sent_sequences)
    #sent_sequences = IndRNN(out_dim, return_sequences=True)(sent_sequences)
    sent_sequences = Bidirectional(IndRNN(out_dim, return_sequences=True))(sent_sequences)
    #sent_sequences = TimeDistributed(Dense(2*out_dim))(sent_sequences)
    # ============= LSTM global attention mechanism ====================
    doc_presentation = atten(type="global", inputs=sent_sequences)
    return doc_presentation
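`atten` is not defined in this snippet; below is a minimal sketch of the "global" attention pooling it appears to perform. The scoring layer and all names are assumptions:

from keras.layers import Dense, Lambda
import keras.backend as K

def atten(type, inputs):
    # Assumed implementation (only the "global" variant is handled here):
    # score each timestep, softmax over the time axis, then return the
    # attention-weighted sum of the timesteps.
    scores = Dense(1)(inputs)                                   # (batch, T, 1)
    weights = Lambda(
        lambda s: K.exp(s) / K.sum(K.exp(s), axis=1, keepdims=True))(scores)
    return Lambda(lambda xw: K.sum(xw[0] * xw[1], axis=1))([inputs, weights])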
Example #3
print('Build model...')
#weights=[embedding_W]
model = Sequential()
model.add(Embedding(len(word_index) + 1, 300, input_shape=(maxlen,),
                    trainable=False, weights=[embedding_matrix]))
model.add(Dropout(0.5))
model.add(LAttenLayer())
model.add(Convolution1D(filters=32,
                        kernel_size=5,
                        strides=1,
                        padding='same'))

model.add(Activation('relu'))

model.add(MaxPooling1D(pool_size=2, strides=2, padding='same'))

model.add(Bidirectional(IndRNN(300, return_sequences=True)))
model.add(GAttenLayer())
model.add(Activation('relu'))
model.add(Dense(num_classes, activation='softmax'))

# try using different optimizers and different optimizer configs

model.compile(
        loss='categorical_crossentropy',
        optimizer='adadelta',
        metrics=['accuracy']
    )

model.summary()

print('Train...')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
x_val = sequence.pad_sequences(x_val, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('x_val shape:', x_val.shape)
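The snippet pads the splits but stops before the actual fit; a hedged continuation (epochs, batch size, and one-hot labels are assumptions):

model.fit(x_train, y_train,
          batch_size=64,            # assumed
          epochs=10,                # assumed
          validation_data=(x_val, y_val))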

Example #4
# configuration matches 4.47 Million parameters with `units=600` and `64 embedding dim`
print('Build model...')

inputs = Input(shape=(maxlen, ))
embed = Embedding(embed_size + 1, 128, input_shape=(maxlen, ))(inputs)
indRNN = Bidirectional(
    IndRNN(128,
           recurrent_clip_min=-1,
           recurrent_clip_max=-1,
           dropout=0.0,
           recurrent_dropout=0.0,
           return_sequences=True))(embed)
# attention
x_score = Convolution1D(filters=1,
                        kernel_size=3,
                        padding='same',
                        activation='sigmoid')(indRNN)
x_atten = Multiply()([x_score, embed])

# convolutional layer
cnn = Convolution1D(filters=32, kernel_size=5, strides=1,
                    padding='same')(x_atten)

ac = Activation('relu')(cnn)
pool = MaxPooling1D(pool_size=2, strides=2, padding='same')(ac)
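The snippet ends at the pooling layer; a hedged completion of the classification head (`Flatten` vs. global pooling and `num_classes` are assumptions consistent with the neighboring examples):

from keras.layers import Flatten, Dense
from keras.models import Model

flat = Flatten()(pool)
output = Dense(num_classes, activation='softmax')(flat)   # num_classes assumed
model = Model(inputs=inputs, outputs=output)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])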
Example #5
# configuration matches 4.47 Million parameters with `units=600` and `64 embedding dim`
print('Build model...')

# model = Sequential()
# model.add(Embedding(embed_size+1, 128, input_shape=(maxlen,)))
# model.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
#                  return_sequences=True))
# model.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
#                  return_sequences=False))
# model.add(Dense(10, activation='softmax'))

inputs = Input(shape=(maxlen, ))
embed = Embedding(embed_size + 1, 128, input_shape=(maxlen, ))(inputs)
first_ind = IndRNN(128,
                   recurrent_clip_min=-1,
                   recurrent_clip_max=-1,
                   dropout=0.0,
                   recurrent_dropout=0.0,
                   return_sequences=True)(embed)
second_ind = IndRNN(128,
                    recurrent_clip_min=-1,
                    recurrent_clip_max=-1,
                    dropout=0.0,
                    recurrent_dropout=0.0,
                    return_sequences=True)(first_ind)
x_atten = GAttenLayer()(second_ind)
output = Dense(10, activation='softmax')(x_atten)
model = Model(inputs=inputs, outputs=output)
# try using different optimizers and different optimizer configs
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
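A hedged training call for this model; the data names and settings below are hypothetical:

model.summary()
model.fit(x_train, y_train,
          batch_size=128, epochs=10,          # assumed settings
          validation_data=(x_val, y_val))     # splits assumed to exist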
Example #6
print('Pad sequences (samples x time)')

x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
x_val = sequence.pad_sequences(x_val, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('x_val shape:', x_val.shape)

# configuration matches 4.47 Million parameters with `units=600` and `64 embedding dim`
print('Build model...')

model = Sequential()
model.add(Embedding(max_features, 128, input_shape=(maxlen, )))
model.add(IndRNN(128, return_sequences=True))
model.add(BatchNormalization())
model.add(IndRNN(128, return_sequences=True))
model.add(BatchNormalization())
model.add(IndRNN(128, return_sequences=False))
model.add(BatchNormalization())
model.add(Dense(10, activation='softmax'))

# try using different optimizers and different optimizer configs
learning_rate = 2e-4
adam = Adam(lr=learning_rate)
model.compile(loss='categorical_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])

model.summary()
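A hedged continuation that trains with checkpointing; the filepath and monitored metric are assumptions:

from keras.callbacks import ModelCheckpoint

ckpt = ModelCheckpoint('indrnn_best.h5',      # hypothetical path
                       monitor='val_acc', save_best_only=True)
model.fit(x_train, y_train,
          batch_size=128, epochs=10,          # assumed settings
          validation_data=(x_val, y_val),
          callbacks=[ckpt])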
Example #7
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128, input_shape=(maxlen,)))
model.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
                 return_sequences=True))
model.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
                 return_sequences=False))
model.add(Dense(1, activation='sigmoid'))

# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.summary()

print('Train...')
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=15,
          validation_data=(x_test, y_test))
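A hedged evaluation step after training, mirroring the stock Keras IMDB example:

score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)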
Example #8
    print('x_val shape:', x_val.shape)

    # configuration matches 4.47 Million parameters with `units=600` and `64 embedding dim`
    print('Build model...')

    inputs = Input(shape=(maxlen, ))
    embed = Embedding(embed_size + 1, 128, input_shape=(maxlen, ))(inputs)
    groupNormal = GroupNormalization(groups=32, axis=-1)(embed)
    x_score = Convolution1D(filters=1,
                            kernel_size=3,
                            padding='same',
                            activation='sigmoid')(groupNormal)
    x_atten = Multiply()([x_score, embed])
    first_ind = IndRNN(FLAGS.units,
                       recurrent_clip_min=-1,
                       recurrent_clip_max=-1,
                       dropout=0.0,
                       recurrent_dropout=0.0,
                       return_sequences=True)(x_atten)
    second_ind = IndRNN(FLAGS.units,
                        recurrent_clip_min=-1,
                        recurrent_clip_max=-1,
                        dropout=0.0,
                        recurrent_dropout=0.0,
                        return_sequences=False)(first_ind)

    fc = Dense(128, kernel_initializer='he_normal')(second_ind)
    ac = Activation('relu')(fc)
    output = Dropout(FLAGS.dropout)(ac)
    output = Dense(num_classes, activation='softmax')(output)
    model = Model(inputs=inputs, outputs=output)
    # try using different optimizers and different optimizer configs
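The function is cut off at the compile comment; a hedged completion (the optimizer and return value are assumptions matching the sibling examples):

    model.compile(loss='categorical_crossentropy',   # assumed
                  optimizer='adam',
                  metrics=['accuracy'])
    return model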
Example #9
# fit network
history = model.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X, test_y), verbose=2, shuffle=False)

# design network TG-LSTM
print('Build Our model...')
model1 = Sequential()
model1.add(LSTM(128, input_shape=(train_X.shape[1], train_X.shape[2]),
                implementation=2))
model1.add(Dense(1))
model1.compile(loss='mae', optimizer='adam', metrics=['mae'])
# fit network
history1 = model1.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X1, test_y1), verbose=2, shuffle=False)

#IndRNN
print('Build IndRNN model...')
model2 = Sequential()
model2.add(IndRNN(128, input_shape=(train_X.shape[1], train_X.shape[2]),
                  recurrent_clip_min=-1, recurrent_clip_max=-1,
                  dropout=0.0, recurrent_dropout=0.0))
model2.add(Dense(1, activation='sigmoid'))
# try using different optimizers and different optimizer configs
model2.compile(loss='mae', optimizer='adam', metrics=['mae'])
history2 = model2.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X2, test_y2), verbose=2, shuffle=False)

# design network LSTM+zoneout
print('Build LSTM+Zoneout model...')
model3 = Sequential()
model3.add(LSTM_Custom(128, zoneout_c=0.5, zoneout_h=0.05, dropout=0.2,
                       input_shape=(train_X.shape[1], train_X.shape[2])))  # unit_size=128
model3.add(Dense(1))
model3.compile(loss='mae',optimizer='adam',metrics=['mae'])
# fit network
history3 = model3.fit(train_X, train_y, epochs=20, batch_size=130, validation_data=(test_X3, test_y3), verbose=2, shuffle=False)
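The four histories invite a side-by-side comparison; a hedged plotting sketch (matplotlib assumed available, and the label for the first, unnamed model is a guess):

import matplotlib.pyplot as plt

for h, label in [(history, 'baseline LSTM'), (history1, 'TG-LSTM'),
                 (history2, 'IndRNN'), (history3, 'LSTM+Zoneout')]:
    plt.plot(h.history['val_loss'], label=label)
plt.xlabel('epoch')
plt.ylabel('validation MAE')
plt.legend()
plt.show()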
Example #10
File: fit.py Project: kav128/SkyNet
from keras.layers import (Input, Dense, Dropout, Flatten, GaussianDropout,
                          LSTM, Bidirectional, BatchNormalization,
                          MaxPooling1D, MaxPooling2D, MaxPooling3D,
                          AveragePooling1D)
import keras_metrics as km
import datetime

from edf_preprocessor import EDF_Preprocessor
from ind_rnn import IndRNN
from ind_rnn import IndRNNCell, RNN

# Does not work on Windows. Haven't checked under Docker yet.
# import subprocess
# subprocess.call(["rm", "/rf", "logs/*"])

# BEGIN MODEL DESCRIPTION

ip = Input(shape=(256, 23))
x = IndRNN(512, return_sequences=True)(ip)
x = BatchNormalization()(x)
x = AveragePooling1D()(x)
x = IndRNN(512, return_sequences=True)(x)
x = BatchNormalization()(x)
x = AveragePooling1D()(x)
x = IndRNN(512, return_sequences=True)(x)
x = BatchNormalization()(x)
x = AveragePooling1D(2, 2)(x)
x = IndRNN(256, return_sequences=True)(x)
x = BatchNormalization()(x)
x = AveragePooling1D()(x)
x = IndRNN(256, return_sequences=True)(x)
x = BatchNormalization()(x)
x = AveragePooling1D()(x)
x = IndRNN(128, return_sequences=True)(x)
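The architecture is truncated after the last IndRNN block; a hedged completion of the head and compile step (the binary output and the keras_metrics calls are assumptions based on the imports above):

from keras.models import Model

x = BatchNormalization()(x)
x = Flatten()(x)
out = Dense(1, activation='sigmoid')(x)       # assumed binary target
model = Model(inputs=ip, outputs=out)
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy', km.binary_precision(), km.binary_recall()])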