def build_network(self, ilearning_rate=0.001):
        '''
        Build the bidirectional-LSTM network graph.

        :param ilearning_rate: learning rate passed to the Adam optimizer
        :return: the assembled tflearn network
        '''

        #tf.reset_default_graph()
        net = input_data(shape=[None, 200])
        net = embedding(net,
                        input_dim=51887,
                        output_dim=200,
                        trainable=False,
                        name="EmbeddingLayer")

        # Alternative: initialize from pretrained embedding weights W
        #net = embedding(net, input_dim=20000, output_dim=128, trainable=False,
        #                weights_init=W, name="EmbeddingLayer")
        net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
        net = dropout(net, 0.5)
        net = fully_connected(net, 2, activation='softmax')
        net = regression(net,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=ilearning_rate)

        return net
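A minimal usage sketch for the builder above, assuming `builder` is an instance of the surrounding class and that `trainX`/`trainY` are padded 200-length index sequences with one-hot labels (hypothetical names, not from the source):

import tflearn

net = builder.build_network(ilearning_rate=0.001)
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=64)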
Example #2
    def network(self):
        in_layer = input_data([None, 1, self.str_len * 2 + 2, 1])
        indices = in_layer[:, 0, :2, 0]

        if self.emb > 1:
            lstm1 = lstm(embedding(in_layer[:, 0, 2:, 0], 26, self.emb),
                         300,
                         return_seq=True)
        else:
            lstm1 = lstm(in_layer[:, 0, 2:, :], 300, return_seq=True)

        # lstm branch
        lstm2 = lstm(lstm1, 300, return_seq=True)
        lstm3 = lstm(lstm2, 300, return_seq=True)
        lstm4 = lstm(lstm3, 300)

        # cnn branch
        in_layer = bn(in_layer)
        conv1 = conv_2d(in_layer, 64, [1, 7], 1)
        norm1 = relu(bn(conv1))
        block1 = self.residual_block(norm1, 128, [1, 3], 2, stride=2)
        block2 = self.residual_block(block1, 256, [1, 3], 2, stride=2)
        block3 = self.residual_block(block2, 512, [1, 3], 2)
        block4 = self.residual_block(block3, 1024, [1, 3], 2)
        n_out_filters = block4.get_shape().as_list()[-1]
        gap = tf.reshape(global_avg_pool(block4), [-1, n_out_filters])

        # fully-connected branch
        fc_ind = fc(indices, 100, activation='tanh')
        fc_ind2 = fc(fc_ind, 100, activation='tanh')

        # merge lstm, conv, and fc layers
        merged = tf.concat([lstm4, gap, fc_ind2], 1)

        out = fc(merged, self.num_classes,
                 activation='softmax')  # output layer

        # describe optimization
        net = regression(out,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=self.lr)

        # build model
        model = tflearn.DNN(net, tensorboard_verbose=2, tensorboard_dir='.')

        return model
Example #3
def bi_LSTM():
    # Network building
    net = input_data(shape=[None, charvec_len])
    net = embedding(net, input_dim=in_dim, output_dim=nn_dim)
    net = dropout(net, drop1)

    net = bidirectional_rnn(net, BasicLSTMCell(nn_dim, forget_bias=1.),
                            BasicLSTMCell(nn_dim, forget_bias=1.))
    net = dropout(net, drop2)
    net = fully_connected(net, 2, activation='softmax')

    net = regression(net,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=lrate)

    return net
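The names `charvec_len`, `in_dim`, `nn_dim`, `drop1`, `drop2`, and `lrate` come from the surrounding module; one plausible configuration for sketching purposes (assumed values, not from the source):

charvec_len = 200         # padded sequence length fed to input_data
in_dim = 20000            # vocabulary size for the embedding
nn_dim = 128              # embedding width and LSTM units per direction
drop1, drop2 = 0.8, 0.5   # keep probabilities (tflearn dropout takes keep_prob)
lrate = 0.001             # Adam learning rate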
Example #4
def bi_LSTM():
    # Network building
    net = input_data(shape=[None, 440])
    net = embedding(net, input_dim=20000, output_dim=128)
    net = dropout(net, 0.9)

    net = bidirectional_rnn(net, BasicLSTMCell(128, forget_bias=1.),
                            BasicLSTMCell(128, forget_bias=1.))
    net = dropout(net, 0.7)
    net = fully_connected(net, 2, activation='softmax')

    net = regression(net,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

    return net
Example #5
def ProteinNet(str_len, emb, lr, num_classes):
    in_layer = input_data([None, 1, str_len * 2 + 2, 1])
    indices = in_layer[:, 0, :2, 0]

    if emb > 1:
        lstm1 = lstm(embedding(in_layer[:, 0, 2:, 0], 26, emb),
                     300,
                     return_seq=True)
    else:
        lstm1 = lstm(in_layer[:, 0, 2:, :], 300, return_seq=True)

    # lstm branch
    lstm2 = lstm(lstm1, 300, return_seq=True)
    lstm3 = lstm(lstm2, 300, return_seq=True)
    lstm4 = lstm(lstm3, 300)

    # cnn branch
    in_layer = bn(in_layer)
    conv1 = conv_2d(in_layer, 64, [1, 10], 1)
    norm1 = relu(bn(conv1))
    conv2 = conv_2d(norm1, 128, [1, 6], 2)
    norm2 = relu(bn(conv2))
    conv3 = conv_2d(norm2, 256, [1, 3], 2)
    norm3 = relu(bn(conv3))
    gap = tf.reshape(global_avg_pool(norm3), [-1, 256])

    # fully-connected branch
    fc_ind = fc(indices, 50, activation='tanh')
    fc_ind2 = fc(fc_ind, 50, activation='tanh')

    # merge lstm, conv, and fc layers
    merged = tf.concat([lstm4, gap, fc_ind2], 1)

    out = fc(merged, num_classes, activation='softmax')
    net = regression(out,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=lr)
    model = tflearn.DNN(net, tensorboard_verbose=2, tensorboard_dir='.')

    return model
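Each ProteinNet sample packs two integer "indices" plus the 2 * str_len encoded sequence into a single [1, str_len * 2 + 2, 1] plane, which the network slices back apart. A hypothetical sketch of assembling one such sample and running a prediction (assumed shapes and values, not from the source):

import numpy as np

str_len = 50
indices = np.array([3, 7])                        # read back as in_layer[:, 0, :2, 0]
sequence = np.random.randint(0, 26, 2 * str_len)  # letter codes, matching embedding(..., 26, emb)
sample = np.concatenate([indices, sequence]).reshape(1, 1, str_len * 2 + 2, 1)

model = ProteinNet(str_len=str_len, emb=10, lr=0.001, num_classes=2)
pred = model.predict(sample)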
Example #6
print("POS : ", len(POS_labels))
print("Max Len : ", maxLen)

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=maxLen, value=0)
# Pad the per-token label sequences to the same length
trainY = pad_sequences(trainY, maxlen=maxLen, value=0)
embeddings = concat_2Dvectors(embeddings, Flatten_3Dto2D(POS_vectors))

# Network building
print("Beginning neural network")
net = input_data(shape=[None, maxLen])
net = embedding(net,
                input_dim=len(embeddings),
                output_dim=len(embeddings[0]),
                trainable=False,
                name="EmbeddingLayer")
print("After embeddings : ", net.get_shape().as_list())
net = bidirectional_rnn(net,
                        BasicLSTMCell(1024),
                        BasicLSTMCell(1024),
                        return_seq=True)
#net = [dropout(net[i], 0.5) for i in range(len(net))]
net = [
    fully_connected(net[i], 1, activation='sigmoid') for i in range(len(net))
]
net = merge(net, mode='concat')
print("After RNN : ", net.get_shape().as_list())
print("After Dropout : ", net.get_shape().as_list())
net = regression(net,
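The snippet is cut off mid-call; a plausible completion of the regression head and training loop, assuming per-token sigmoid targets as set up above (a sketch, not the original code):

net = regression(net,
                 optimizer='adam',
                 loss='binary_crossentropy',
                 learning_rate=0.001)
model = tflearn.DNN(net, tensorboard_verbose=2)
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=64)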
Example #7
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.datasets import imdb
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.embedding_ops import embedding
from tflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell
from tflearn.layers.estimator import regression

# IMDB Dataset loading
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=200, value=0.)
testX = pad_sequences(testX, maxlen=200, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

# Network building
net = input_data(shape=[None, 200])
net = embedding(net, input_dim=20000, output_dim=128)  # input_dim must cover the vocabulary (n_words=10000 above)
net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
net = dropout(net, 0.5)
net = fully_connected(net, 2, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=64)
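testX/testY are prepared above but never used; a short evaluation sketch (an assumed addition, not part of the original example):

score = model.evaluate(testX, testY, batch_size=64)
print("Test accuracy:", score)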
Example #8
# IMDB Dataset loading
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test


# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=200, value=0.)
testX = pad_sequences(testX, maxlen=200, value=0.)


# Converting labels to binary vectors
trainY = to_categorical(trainY, 2)
testY = to_categorical(testY, 2)

# Network building
net = input_data(shape=[None, 200])
net = embedding(net, input_dim=20000, output_dim=128)
net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
net = dropout(net, 0.5)
net = fully_connected(net, 2, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
model.fit(trainX, trainY, n_epoch=100, validation_set=0.1, show_metric=True, batch_size=64)
model.save('sentiment_model.tfl')
Example #9
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)

# pickle.dump(X_train, open("xtrain.p", "wb"))
# pickle.dump(X_test, open("xtest.p", "wb"))

# X_train = pickle.load(open("xtrain.p", "rb"))
# X_test = pickle.load(open("xtest.p", "rb"))

### Models

print('Build model')

net = input_data([None, model_size])
net = embedding(net, input_dim=n_words, output_dim=embedding_size)
if cell_type == "lstm":
    for i in range(len(cell_size)):
        if i < len(cell_size) - 1:
            net = bidirectional_rnn(net,
                                    BasicLSTMCell(cell_size[i]),
                                    BasicLSTMCell(cell_size[i]),
                                    return_seq=True)
            net = dropout(net, dropout_ratio)
        else:
            net = bidirectional_rnn(net, BasicLSTMCell(cell_size[i]),
                                    BasicLSTMCell(cell_size[i]))
            net = dropout(net, dropout_ratio)
elif cell_type == "gru":
    for i in range(len(cell_size)):
        if i < len(cell_size) - 1:
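The GRU branch is cut off mid-loop; a plausible completion mirroring the LSTM branch above, assuming tflearn's GRUCell is imported from tflearn.layers.recurrent (a sketch, not the original code):

            net = bidirectional_rnn(net,
                                    GRUCell(cell_size[i]),
                                    GRUCell(cell_size[i]),
                                    return_seq=True)
            net = dropout(net, dropout_ratio)
        else:
            net = bidirectional_rnn(net, GRUCell(cell_size[i]),
                                    GRUCell(cell_size[i]))
            net = dropout(net, dropout_ratio)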
Example #10
trainX, trainY = train
testX, testY = test

# Sequence padding
trainX = pad_sequences(trainX, maxlen=maxlen, value=0.)
testX = pad_sequences(testX, maxlen=maxlen, value=0.)

# Converting labels to binary vectors
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

# Building network
network = input_data(shape=[None, maxlen], name='input')

network = embedding(network,
                    input_dim=vocab_size,
                    output_dim=embedding_dim,
                    trainable=True)

network = bidirectional_rnn(network,
                            BasicLSTMCell(rnn_hidden_size,
                                          activation='tanh',
                                          inner_activation='sigmoid'),
                            BasicLSTMCell(rnn_hidden_size,
                                          activation='tanh',
                                          inner_activation='sigmoid'),
                            return_seq=True,
                            dynamic=True)
network = tf.stack(network, axis=1)  # tf.pack was renamed tf.stack in TF 1.0

# Split the concatenated forward/backward outputs and sum the two directions
fw_outputs, bw_outputs = tf.split(network, num_or_size_splits=2, axis=2)
network = tf.add(fw_outputs, bw_outputs)
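The snippet ends after summing the two directions; one plausible way to finish the graph is to take the last time step and add the usual softmax head (a sketch, not the original code):

last_step = network[:, -1, :]   # [batch, rnn_hidden_size]
out = fully_connected(last_step, 2, activation='softmax')
net = regression(out, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=2)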
Example #11
#                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=200, value=0.)
testX = pad_sequences(testX, maxlen=200, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

# Network building
net = input_data(shape=[None, 200])  #same shape as the max length
net = embedding(
    net, input_dim=20000, output_dim=128
)  #creates embedding matrix. Not sure if I need this for project 4
net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(
    128))  #has two LSTMs with 128 units go in forward and backward directions.
net = dropout(net,
              0.5)  #dropout with keep probability of 0.5. All are finalized
net = fully_connected(
    net, 2, activation='softmax'
)  #makes softmax probabilities over 2 categories, true and false
net = regression(
    net, optimizer='adam', loss='categorical_crossentropy'
)  #runs the Adam optimizer on net, minimizing categorical cross-entropy loss

# Training
model = tflearn.DNN(
    net, clip_gradients=5, tensorboard_verbose=1
Example #12
print(embeddings[3][300:])
print(embeddings[4][300:])
print(embeddings[5][300:])
print(embeddings[6][300:])
print(embeddings[7][300:])
print(embeddings[8][300:])
print(embeddings[9][300:])
print(embeddings[10][300:])
#"""

# Network Description
print("Beginning neural network")
net = input_data(shape=[None, maxLen])
emb1 = embedding(net,
                 input_dim=len(embeddings),
                 output_dim=len(embeddings[0]),
                 trainable=True,
                 name="EmbeddingLayer")
emb2 = embedding(net,
                 input_dim=len(POS_vectors),
                 output_dim=len(POS_vectors[0]),
                 trainable=True,
                 name="POSLayer")
net = tf.concat([emb1, emb2], 2)
#net = merge([emb1, emb2], mode='concat', axis=2)
print("Shape after embeddings : ", net.get_shape().as_list())
branch1 = conv_1d(net,
                  150,
                  2,
                  padding='valid',
                  activation='relu',
Example #13
test_length = int(test_size * len(dataset))

# print('dataset length', len(dataset))
# print('test length', test_length)

train_data = x[:-test_length]
train_label = y[:-test_length]
test_data = x[-test_length:]
test_label = y[-test_length:]

# print(test_data)
# print(test_label)

# Defining the architecture of the LSTM network
net_input = tflearn.input_data(shape=[None, 100], name='input')  # renamed: avoid shadowing tflearn.input_data
embed_layer = embedding(net_input, input_dim=len_lexicon, output_dim=100)
lstm_layer_1 = tflearn.lstm(embed_layer, 512, return_seq=True)
dropout_layer_1 = tflearn.dropout(lstm_layer_1, 0.5)
lstm_layer_2 = tflearn.lstm(dropout_layer_1, 512, return_seq=True)
dropout_layer_2 = tflearn.dropout(lstm_layer_2, 0.5)
lstm_layer_3 = tflearn.lstm(dropout_layer_2, 512)
dropout_layer_3 = tflearn.dropout(lstm_layer_3, 0.5)
fc = fully_connected(dropout_layer_3, 2, activation='softmax')
lstm_net = tflearn.regression(fc,
                              optimizer='adam',
                              loss='categorical_crossentropy')

#training
model = tflearn.DNN(lstm_net, clip_gradients=0.5, tensorboard_verbose=2)
model.fit(train_data,
          train_label,
Example #14
    def create_network(self):
        print('function: create_network')
        convnet = input_data(shape=[None, 6], name='input')
        convnet = embedding(convnet, input_dim=10000, output_dim=128)
        convnet = lstm(convnet, 128, dropout=0.8)

        convnet = fully_connected(convnet, 1024, activation='relu')
        convnet = dropout(convnet, 0.8)
        convnet = fully_connected(convnet, 1024, activation='relu')
        convnet = dropout(convnet, 0.8)
        convnet = fully_connected(convnet, 1024, activation='relu')

        convnet = fully_connected(convnet, 3, activation='softmax')
        convnet = regression(convnet,
                             optimizer='adam',
                             learning_rate=0.0001,
                             loss='categorical_crossentropy',
                             name='targets')

        model = tflearn.DNN(convnet, tensorboard_dir='log')
        return model
Example #15
from tflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell
from tflearn.layers.estimator import regression

#Get data
trainX, trainY, maxLen, vocabSize, _ = data.get_Data_Vectors()

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=maxLen, value=0.)
# Pad the label sequences to the same length
trainY = pad_sequences(trainY, maxlen=maxLen, value=0.)

# Network building
print("Beginning neural network")
net = input_data(shape=[None, maxLen])
net = embedding(net, input_dim=vocabSize, output_dim=128)
#print(net.get_shape().as_list())
net = bidirectional_rnn(net, BasicLSTMCell(512), BasicLSTMCell(512))
net = dropout(net, 0.5)
net = fully_connected(net, maxLen, activation='softmax')
net = regression(net, optimizer='adam',
                 loss='categorical_crossentropy')  #, learning_rate=0.001)

#"""
# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
model.fit(trainX,
          trainY,
          validation_set=0.2,
          show_metric=True,
          batch_size=64,
Example #16
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=n_datas,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=input_length_each_seq, value=0.)
testX = pad_sequences(testX, maxlen=input_length_each_seq, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, n_class)
testY = to_categorical(testY, n_class)

# Network building
net = input_data(shape=[None, input_length_each_seq]) # [none,200]
net = embedding(net, input_dim=n_datas, output_dim=hiddle_layes) # [none,200,128]
net = bidirectional_rnn(net, BasicLSTMCell(hiddle_layes), BasicLSTMCell(hiddle_layes))

net = dropout(net, 0.5)
net = fully_connected(net, n_class, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=64)
2. TensorFlow

Reference: TensorFlow-Examples