Example 1
def build_deepvo(inputs,
                 is_training,
                 seq_length,
                 height,
                 width,
                 scope='deepvo'):
    # print (inputs)
    # inputs_list = tf.unstack(inputs)
    # for inputs in inputs_list:
    #     pred = build_flownet(inputs, is_training=is_training)
    #
    with tf.variable_scope('timedistributed_cnn', reuse=tf.AUTO_REUSE):
        print(inputs)
        inputs = tf.reshape(inputs, (1, seq_length, height, width, 2))
        net = core.input_data(shape=(None, seq_length, height, width, 2),
                              placeholder=inputs)
        # time_distributed calls fn(frame, *args), so pass is_training here,
        # not the full input tensor
        net = core.time_distributed(net, build_flownet, [is_training])
        # net = core.time_distributed(net, conv.conv_2d, [32, 7, 2, 'same', 'relu'])
        # net = core.time_distributed(net, conv.conv_2d, [64, 5, 2, 'same', 'relu'])
        # net = core.time_distributed(net, conv.conv_2d, [128, 3, 2, 'same', 'relu'])
        # net = core.time_distributed(net, conv.conv_2d, [256, 3, 2, 'same', 'relu'])
        # net = core.time_distributed(net, conv.conv_2d, [256, 3, 2, 'same', 'relu'])
        # net = core.time_distributed(net, conv.conv_2d, [256, 3, 2, 'same', 'relu'])
        net = core.time_distributed(net, conv.global_max_pool)
    with tf.variable_scope('rnn'):
        net = recurrent.lstm(net,
                             n_units=124,
                             activation=tf.nn.relu,
                             return_seq=True,
                             name='lstm1')
        net = recurrent.lstm(net,
                             n_units=124,
                             activation=tf.nn.relu,
                             return_seq=True,
                             name='lstm2')

        net = core.time_distributed(net, core.fully_connected, [128, 'relu'])
        net = core.time_distributed(net, core.fully_connected, [12])
        # split the 12 FC outputs into 6-DoF pose and 6 uncertainty values
        pose, uncertainty = tf.split(net, 2, axis=2)
        pose = tf.cast(pose, tf.float64)

        print("pose output shape")
        print(pose)

        #pose = core.fully_connected(net, activation=tf.nn.relu, n_units=128)
        #pose = core.fully_connected(net, n_units=6)
        #pose = tf.cast(pose, tf.float64)
        #print (pose)
        #print (tf.trainable_variables())

    return pose, uncertainty
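A minimal sketch of how build_deepvo might be wired up in a TF1 graph; the
placeholder name and the sequence and image dimensions below are assumptions,
not values from the original source.

import tensorflow as tf

seq_length, height, width = 10, 128, 416  # hypothetical dimensions
image_pairs = tf.placeholder(tf.float32,
                             shape=[1, seq_length, height, width, 2],
                             name='image_pairs')  # stacked image pairs
pose, uncertainty = build_deepvo(image_pairs,
                                 is_training=True,
                                 seq_length=seq_length,
                                 height=height,
                                 width=width)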
Example 2
    def network(self):
        in_layer = input_data([None, 1, self.str_len * 2 + 2, 1])
        indices = in_layer[:, 0, :2, 0]

        if self.emb > 1:
            lstm1 = lstm(embedding(in_layer[:, 0, 2:, 0], 26, self.emb),
                         300,
                         return_seq=True)
        else:
            lstm1 = lstm(in_layer[:, 0, 2:, :], 300, return_seq=True)

        # lstm branch
        lstm2 = lstm(lstm1, 300, return_seq=True)
        lstm3 = lstm(lstm2, 300, return_seq=True)
        lstm4 = lstm(lstm3, 300)

        # cnn branch
        in_layer = bn(in_layer)
        conv1 = conv_2d(in_layer, 64, [1, 7], 1)
        norm1 = relu(bn(conv1))
        block1 = self.residual_block(norm1, 128, [1, 3], 2, stride=2)
        block2 = self.residual_block(block1, 256, [1, 3], 2, stride=2)
        block3 = self.residual_block(block2, 512, [1, 3], 2)
        block4 = self.residual_block(block3, 1024, [1, 3], 2)
        n_out_filters = block4.get_shape().as_list()[-1]
        gap = tf.reshape(global_avg_pool(block4), [-1, n_out_filters])

        # fully-connected branch
        fc_ind = fc(indices, 100, activation='tanh')
        fc_ind2 = fc(fc_ind, 100, activation='tanh')

        # merge lstm, conv, and fc layers
        merged = tf.concat([lstm4, gap, fc_ind2], 1)

        out = fc(merged, self.num_classes,
                 activation='softmax')  # output layer

        # describe optimization
        net = regression(out,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=self.lr)

        # build model
        model = tflearn.DNN(net, tensorboard_verbose=2, tensorboard_dir='.')

        return model
Example 3
def get_network():
    network = input_data(shape=[None, LEN_HISTORY], name="inp")
    network = reshape(network, [tf.shape(network)[0], LEN_HISTORY, 1], name="added_inp_dim")
    network = lstm(network, LEN_HISTORY, name="hidden", return_seq=True)
    # note: softmax over a single unit is constant 1.0; a sigmoid is the
    # likely intent for a per-timestep score
    network = [fully_connected(ts, 1, activation='sigmoid') for ts in network]
    network = merge(network, "concat", axis=1, name="concat_merge")
    network = network / tf.expand_dims(tf.reduce_mean(network, axis=1), 1, name="normed")
    network = regression(network, optimizer="sgd", loss=revenue)
    return network
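A hedged usage sketch for get_network(); LEN_HISTORY and the custom revenue
loss are defined elsewhere in the original project, and the random training
data is illustrative only.

import numpy as np
import tflearn

model = tflearn.DNN(get_network(), tensorboard_verbose=0)
X = np.random.rand(64, LEN_HISTORY)  # hypothetical input histories
Y = np.random.rand(64, LEN_HISTORY)  # hypothetical per-step targets
model.fit(X, Y, n_epoch=2, batch_size=16)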
Example 4
def ProteinNet(str_len, emb, lr, num_classes):
    in_layer = input_data([None, 1, str_len * 2 + 2, 1])
    indices = in_layer[:, 0, :2, 0]

    if emb > 1:
        lstm1 = lstm(embedding(in_layer[:, 0, 2:, 0], 26, emb),
                     300,
                     return_seq=True)
    else:
        lstm1 = lstm(in_layer[:, 0, 2:, :], 300, return_seq=True)

    # lstm branch
    lstm2 = lstm(lstm1, 300, return_seq=True)
    lstm3 = lstm(lstm2, 300, return_seq=True)
    lstm4 = lstm(lstm3, 300)

    # cnn branch
    in_layer = bn(in_layer)
    conv1 = conv_2d(in_layer, 64, [1, 10], 1)
    norm1 = relu(bn(conv1))
    conv2 = conv_2d(norm1, 128, [1, 6], 2)
    norm2 = relu(bn(conv2))
    conv3 = conv_2d(norm2, 256, [1, 3], 2)
    norm3 = relu(bn(conv3))
    gap = tf.reshape(global_avg_pool(norm3), [-1, 256])

    # fully-connected branch
    fc_ind = fc(indices, 50, activation='tanh')
    fc_ind2 = fc(fc_ind, 50, activation='tanh')

    # merge lstm, conv, and fc layers
    merged = tf.concat([lstm4, gap, fc_ind2], 1)

    out = fc(merged, num_classes, activation='softmax')
    net = regression(out,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=lr)
    model = tflearn.DNN(net, tensorboard_verbose=2, tensorboard_dir='.')

    return model
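A hypothetical invocation of ProteinNet; the sequence length, embedding size,
learning rate, and class count are illustrative values only.

model = ProteinNet(str_len=100, emb=8, lr=1e-3, num_classes=5)
# model.fit(X, Y, n_epoch=10, batch_size=32, show_metric=True)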
Example 5
    def transform_embedded_sequences(self, embedded_sequences):
        net = embedded_sequences
        for i in range(1, self.params['layers'] + 1):
            # return full sequences while more LSTM layers follow downstream
            return_sequences = i < self.params['layers']
            net = lstm(net,
                       self.params['units'],
                       dropout=(self.params['dropout_rate'],
                                self.params['rec_dropout_rate']),
                       return_seq=return_sequences)

        preds = fully_connected(net, self.class_count, activation='softmax')
        return preds
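The loop above sets return_seq=True for every layer except the last, so each
stacked LSTM receives a full sequence while the classifier head gets a single
vector. A standalone sketch of the same pattern, with made-up layer sizes and
class count (embedded_sequences stands for the method's input tensor):

net = lstm(embedded_sequences, 128, return_seq=True)   # sequence out
net = lstm(net, 128, return_seq=False)                 # final state only
preds = fully_connected(net, 4, activation='softmax')  # hypothetical classes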
Example 6
def cnnLSTM_model():
    global model
    filter_size_conv1 = 11
    num_filters_conv1 = 5

    filter_size_conv2 = 6
    num_filters_conv2 = 10

    filter_size_conv3 = 5
    num_filters_conv3 = 5

    filter_size_conv4 = 2   # defined but unused: no fourth conv block below
    num_filters_conv4 = 2

    lstm_units = 500

    learning_rate = 1e-4
    y_true = tf.placeholder(tf.float32,
                            shape=[None, num_classes],
                            name='y_true')

    net = tflearn.input_data([None, 22, 64, 48, 1], name="input")
    net = time_distributed(
        net,
        conv_2d,
        args=[num_filters_conv1, filter_size_conv1, 1, 'same', 'tanh'])
    net = time_distributed(net, max_pool_2d, args=[2])
    net = time_distributed(
        net,
        conv_2d,
        args=[num_filters_conv2, filter_size_conv2, 1, 'same', 'tanh'])
    net = time_distributed(net, max_pool_2d, args=[2])
    net = time_distributed(
        net,
        conv_2d,
        args=[num_filters_conv3, filter_size_conv3, 1, 'same', 'tanh'])
    net = time_distributed(net, max_pool_2d, args=[2])
    net = time_distributed(net, flatten, args=['flat'])
    net = lstm(net, lstm_units)
    fc_layer = tflearn.fully_connected(net, num_classes, activation='softmax')
    # y_true and this manual loss are leftovers from a hand-rolled training
    # step; regression() below defines its own loss, so they are unused here
    loss = tflearn.objectives.categorical_crossentropy(fc_layer, y_true)
    network = regression(fc_layer,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=learning_rate)

    # Training
    model = tflearn.DNN(network,
                        tensorboard_verbose=0,
                        checkpoint_path='gestureCNNLSTM.tfl.ckpt')
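A sketch of how the global model built above might be trained; X is assumed
to follow the declared [None, 22, 64, 48, 1] input shape, y to be one-hot
labels, and the run_id is made up.

cnnLSTM_model()
model.fit(X, y, n_epoch=5, batch_size=8, show_metric=True,
          run_id='gesture_cnn_lstm')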
Example 7
def typeSubtypeNameGeneratorModel(maxLength, charIndex, checkpoint_path='./generator_checkpoints/'):
  '''
  Recurrent network model for generating card types, subtypes, and names
  Inputs:
    maxLength: the maximum length for a generated sequence
    charIndex: map from chars to the index they represent in a onehot encoding
    checkpoint_path: path to save model after every epoch ('./generator_checkpoints/')
  '''
  network = input_data(shape=[None, maxLength, len(charIndex)])
  network = lstm(network, 512, return_seq=True)
  network = dropout(network, 0.5)
  network = lstm(network, 512, return_seq=True)
  network = dropout(network, 0.5)
  network = lstm(network, 512)
  network = dropout(network, 0.5)
  network = fully_connected(network, len(charIndex), activation='softmax')
  network = regression(network, optimizer='adam', loss='categorical_crossentropy',
                        learning_rate=0.001)

  model = tflearn.SequenceGenerator(network, tensorboard_verbose=0, dictionary=charIndex,
                                      seq_maxlen=maxLength, clip_gradients=5.0,
                                      checkpoint_path=checkpoint_path)
  return model
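An illustrative training-and-sampling sequence for the returned generator;
X, Y, maxLength, and charIndex are assumed to come from the caller's one-hot
preprocessing, and the seed string is arbitrary.

model = typeSubtypeNameGeneratorModel(maxLength, charIndex)
model.fit(X, Y, n_epoch=1, batch_size=128, show_metric=True)
seed = 'A' * maxLength  # hypothetical seed of seq_maxlen characters
print(model.generate(30, temperature=1.0, seq_seed=seed))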
Example 8
def getLSTM(num_features, units, dropout):
    network = input_data([None, 1, num_features])

    for i, u in enumerate(units):
        network = lstm(network,
                       u,
                       dropout=dropout,
                       return_seq=i != len(units) - 1)
    network = fully_connected(network, 10, activation="tanh")
    # tflearn needs a string or callable activation; 'linear' is its default
    network = fully_connected(network, 1, activation='linear')
    network = regression(network,
                         optimizer='rmsprop',
                         learning_rate=0.01,
                         loss='mean_square')
    return network
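A hypothetical way to exercise getLSTM; the feature count, layer widths, and
random training data are illustrative only.

import numpy as np
import tflearn

net = getLSTM(num_features=16, units=[64, 32], dropout=0.8)
model = tflearn.DNN(net, tensorboard_verbose=0)
X = np.random.rand(100, 1, 16)  # (samples, timesteps, features)
y = np.random.rand(100, 1)      # scalar regression targets
model.fit(X, y, n_epoch=2, batch_size=16, show_metric=True)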
Example 9
cnnlstmmodel = max_pool_2d(cnnlstmmodel, 2)

cnnlstmmodel = conv_2d(cnnlstmmodel, 256, 2, activation='relu')
cnnlstmmodel = max_pool_2d(cnnlstmmodel, 2)

cnnlstmmodel = conv_2d(cnnlstmmodel, 128, 2, activation='relu')
cnnlstmmodel = max_pool_2d(cnnlstmmodel, 2)

cnnlstmmodel = conv_2d(cnnlstmmodel, 64, 2, activation='relu')
cnnlstmmodel = max_pool_2d(cnnlstmmodel, 2)

cnnlstmmodel = conv_2d(cnnlstmmodel, 32, 2, activation='relu')
cnnlstmmodel = max_pool_2d(cnnlstmmodel, 2)

cnnlstmmodel = time_distributed(cnnlstmmodel, flatten, args=['flat'])
cnnlstmmodel = lstm(cnnlstmmodel, 1024)

cnnlstmmodel = fully_connected(cnnlstmmodel, 1000, activation='relu')
cnnlstmmodel = dropout(cnnlstmmodel, 0.7)

cnnlstmmodel = fully_connected(cnnlstmmodel, 10, activation='softmax')

cnnlstmmodel = regression(cnnlstmmodel,
                          optimizer='adam',
                          learning_rate=0.001,
                          loss='categorical_crossentropy',
                          name='regression')
model = tflearn.DNN(cnnlstmmodel, tensorboard_verbose=0)

# Load Saved Model
model.load("TrainedModelCNNLSTM/CNNLSTMFIX.tfl")
Example 10
    def create_network(self):
        print('function: create_network')
        convnet = input_data(shape=[None, 6], name='input')
        convnet = embedding(convnet, input_dim=10000, output_dim=128)
        convnet = lstm(convnet, 128, dropout=0.8)

        convnet = fully_connected(convnet, 1024, activation='relu')
        convnet = dropout(convnet, 0.8)
        convnet = fully_connected(convnet, 1024, activation='relu')
        convnet = dropout(convnet, 0.8)
        convnet = fully_connected(convnet, 1024, activation='relu')
        # convnet = conv_2d(convnet, 32, 5, activation='relu')
        # convnet = max_pool_2d(convnet, 5)

        # convnet = fully_connected(convnet, 1024, activation='relu')
        # convnet = dropout(convnet, 0.5)
        # convnet = fully_connected(convnet, 1024, activation='relu')
        # convnet = dropout(convnet, 0.6)
        # convnet = fully_connected(convnet, 1024, activation='relu')

        # convnet = embedding(convnet, input_dim=256, output_dim=256)
        # convnet = lstm(convnet, 256, dropout=0.8)
        # convnet = embedding(convnet, input_dim=256, output_dim=255)
        # convnet = lstm(convnet, 256, dropout=0.8)
        # convnet = embedding(convnet, input_dim=256, output_dim=255)
        # convnet = lstm(convnet, 255, dropout=0.8)

        # convnet = simple_rnn(convnet, 6, dropout=0.8)

        # convnet = fully_connected(convnet, 1024, activation='relu')
        # convnet = dropout(convnet, 0.8)
        # convnet = fully_connected(convnet, 1024, activation='relu')

        convnet = fully_connected(convnet, 3, activation='softmax')
        convnet = regression(convnet,
                             optimizer='adam',
                             learning_rate=0.0001,
                             loss='categorical_crossentropy',
                             name='targets')

        model = tflearn.DNN(convnet, tensorboard_dir='log')
        return model
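A hedged training call for the model returned by create_network(); clf stands
for an instance of the enclosing class, and X and Y are assumed to be
6-feature rows with one-hot labels over the 3 output classes.

model = clf.create_network()
model.fit({'input': X}, {'targets': Y}, n_epoch=10,
          show_metric=True, run_id='lstm_classifier')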
Example 11
                  regularizer="L2")
# branch6 = conv_1d(network, 128, 6, padding='valid', activation='relu', regularizer="L2")
# branch7 = conv_1d(network, 128, 7, padding='valid', activation='relu', regularizer="L2")
# branch8 = conv_1d(network, 128, 8, padding='valid', activation='relu', regularizer="L2")
network = merge([branch1, branch2, branch3, branch4, branch5],
                mode='concat',
                axis=1)
network = tf.expand_dims(network, 2)
network = global_max_pool(network)

# network = fully_connected(network, 64, activation='tanh')
# network = dropout(network, 0.8)
network = fully_connected(network, 128, activation='relu')
network = dropout(network, 0.8)
network = tf.expand_dims(network, 2)
network = lstm(network, 128, return_seq=True)
network = lstm(network, 128)
network = fully_connected(network, 64, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 2, activation='tanh')
network = regression(network,
                     optimizer='adam',
                     learning_rate=0.001,
                     loss=concordance_cc,
                     name='target')
# Training
model = tflearn.DNN(network, tensorboard_verbose=0)
model.load('./saved_model/cnn.tfl')
# model.fit(trainX, trainY, n_epoch=1, shuffle=True, validation_set=(evalX, evalY), show_metric=True, batch_size=128)
# model.save('./saved_model/cnn.tfl')
Example 12
    cv2.putText(textImage, "Pedicted Class : " + className, (30, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)

    cv2.putText(textImage, "Confidence : " + str(confidence * 100) + '%',
                (30, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)

    cv2.imshow("Statistics", textImage)


tf.reset_default_graph()
lstmmodel = input_data(shape=[None, 89, 100, 1], name='input')

lstmmodel = time_distributed(lstmmodel, dropout, args=[0.7])
lstmmodel = time_distributed(lstmmodel, flatten, args=['flat'])

lstmmodel = lstm(lstmmodel, 512)

lstmmodel = fully_connected(lstmmodel, 1000, activation='relu')
lstmmodel = dropout(lstmmodel, 0.7)

lstmmodel = fully_connected(lstmmodel, 10, activation='softmax')

lstmmodel = regression(lstmmodel,
                       optimizer='adam',
                       learning_rate=0.001,
                       loss='categorical_crossentropy',
                       name='regression')

model = tflearn.DNN(lstmmodel, tensorboard_verbose=0)

# Load Saved Model
Example 13
import tflearn
from tflearn.layers.recurrent import simple_rnn, lstm
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tflearn.datasets.mnist as mnist

X, Y, test_x, test_y = mnist.load_data(one_hot=True)

X = X.reshape([-1, 28, 28])

test_x = test_x.reshape([-1, 28, 28])

# X = X.transpose(X,[1,0,2])
# X = X.reshape(X,[-1,28])
# X = X.split(X,28,0)

net = input_data(shape=[None, 28, 28], name='input')
net = lstm(net, 128, dropout=0.8)
net = fully_connected(net, 10, activation='softmax')
net = regression(net,
                 optimizer='adam',
                 learning_rate=0.001,
                 loss='categorical_crossentropy')

model = tflearn.DNN(net)
model.fit(X,
          Y,
          validation_set=(test_x, test_y),
          show_metric=True,
          batch_size=128,
          n_epoch=3)
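An illustrative follow-up, scoring the trained model and predicting a single
digit from the test split:

import numpy as np

print('accuracy:', model.evaluate(test_x, test_y))
probs = model.predict(test_x[:1])
print('predicted digit:', np.argmax(probs[0]))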
Example 14
import tflearn
from tflearn.layers.recurrent import lstm
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.estimator import regression
import tflearn.datasets.mnist as mnist

trainX, trainY, testX, testY = mnist.load_data(data_dir='/path/to/MNIST_data',
                                               one_hot=True)
trainX = trainX.reshape([-1, 784, 1])
testX = testX.reshape([-1, 784, 1])

net = input_data(shape=[None, 784, 1], name='input')
net = lstm(net, 128, activation='relu')
output = fully_connected(net, 10, activation='softmax')  # probabilities for the cross-entropy loss

output = regression(output,
                    optimizer='Adam',
                    learning_rate=0.03,
                    loss='categorical_crossentropy')
model = tflearn.DNN(output, tensorboard_verbose=0)  # build on the regression layer, not the raw LSTM
model.fit(trainX,
          trainY,
          n_epoch=20,
          validation_set=(testX, testY),
          batch_size=128,
          show_metric=True)

score = model.evaluate(testX, testY)
print('Test accuracy: ', score[0])  # evaluate() returns mean accuracy, not loss