Example #1
    def create_critic_network(self, state_dim, action_dim):
        inputs = input_data(shape=state_dim)
        action = input_data(shape=[None, action_dim])

        net = conv_1d(inputs, 128, 2, 2)
        net = max_pool_1d(net, 2, 2, 'same')
        net = batch_normalization(net)

        net = conv_1d(net, 256, 2, 2)
        net = max_pool_1d(net, 2, 2, 'same')
        net = batch_normalization(net)

        print(net.get_shape().as_list())  # debug: inspect the conv output shape

        net = fully_connected(net, 1024, activation='relu')
        net = dropout(net, 0.8)
        net = fully_connected(net, 1024, activation='relu')

        # Add the action tensor in the 2nd hidden layer
        # Use two temp layers to get the corresponding weights and biases
        t1 = fully_connected(net, 2048)
        t2 = fully_connected(action, 2048)

        net = activation(tf.matmul(net, t1.W) + tf.matmul(action, t2.W) + t2.b,
                         activation='relu')

        # linear layer connected to 1 output representing Q(s,a)
        # Weights are init to Uniform[-3e-3, 3e-3]
        w_init = initializations.uniform(minval=-0.003, maxval=0.003)
        out = fully_connected(net, 1, weights_init=w_init)
        return inputs, action, out
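Both this critic and the matching actor in Example #5 call the TFLearn layer functions unqualified, so the enclosing module presumably imports them directly. A sketch of those imports (module paths as in TFLearn's public API; not shown in the snippets):

import tensorflow as tf
from tflearn import initializations
from tflearn.layers.core import input_data, fully_connected, dropout, activation
from tflearn.layers.conv import conv_1d, max_pool_1d
from tflearn.layers.normalization import batch_normalization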
Example #2
    def create_network(self):
        # TODO try convolutional RNN and LSTM shapes
        net = tflearn.input_data(shape=[None, len(self.string_to_number)])

        # self.net = tflearn.fully_connected(self.net, 32)
        # self.net = tflearn.fully_connected(self.net, 64)

        # conv_1d expects a 3-D tensor, so add a channel dimension
        net = tflearn.reshape(net, (-1, len(self.string_to_number), 1))

        # extract local features with stacked conv/pool blocks
        net = tflearn.conv_1d(net, 64, 3, activation='relu', regularizer="L2")
        net = tflearn.max_pool_1d(net, 2)
        net = tflearn.batch_normalization(net)
        net = tflearn.conv_1d(net, 128, 3, activation='relu', regularizer="L2")
        net = tflearn.max_pool_1d(net, 2)
        net = tflearn.batch_normalization(net)

        # optional recurrent layers to capture longer context (left disabled)
        # net = tflearn.lstm(net, 64)
        # net = tflearn.dropout(net, 0.5)

        # net = tflearn.simple_rnn(net, len(self.string_to_number) * 2)

        # map the features to a softmax over the next value
        net = tflearn.fully_connected(net, 256, activation='tanh')
        net = tflearn.dropout(net, 0.5)
        net = tflearn.fully_connected(net,
                                      len(self.string_to_number),
                                      activation='softmax')
        self.net = tflearn.regression(net, optimizer='adam')
        self.model = tflearn.DNN(self.net)
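A minimal training sketch for this class, assuming the surrounding code feeds one-hot arrays X and Y of shape [n_samples, len(self.string_to_number)] (the method name and data are assumptions, not from the source):

    def train(self, X, Y):
        # Hypothetical usage: fit the DNN built by create_network above.
        self.model.fit(X, Y, n_epoch=10, show_metric=True)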
Example #3
    def input_transform_net(point_cloud, K):
        num_point = point_cloud.get_shape()[1].value

        net = tflearn.conv_1d(point_cloud,
                              nb_filter=64,
                              filter_size=1,
                              padding="valid",
                              strides=1,
                              activation="relu")
        net = tflearn.conv_1d(net,
                              nb_filter=128,
                              filter_size=1,
                              padding="valid",
                              strides=1,
                              activation="relu")
        net = tflearn.conv_1d(net,
                              nb_filter=256,
                              filter_size=1,
                              padding="valid",
                              strides=1,
                              activation="relu")
        net = tflearn.max_pool_1d(net, kernel_size=num_point, padding="valid")
        net = tflearn.fully_connected(net, 256, activation="relu")
        net = tflearn.fully_connected(net, 64, activation="relu")

        weights = tf.Variable(tf.zeros(shape=[64, K * K], dtype=tf.float32))
        biases = tf.Variable(
            tf.reshape(tf.eye(K, dtype=tf.float32), shape=[-1]))
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)
        transform = tf.reshape(transform, [-1, K, K])

        return transform
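This is the PointNet-style input transform network (T-Net): it regresses a K x K matrix whose bias is initialized to the identity, and the caller multiplies that matrix onto the point cloud to align it, as Example #10 below does:

    # From Example #10: align the point coordinates with the learned transform.
    transform = input_transform_net(net_p, K=3)
    net = tf.matmul(net_p, transform)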
Example #4
def build_audio_cnn_model():
    # Note: here "tf" is TFLearn (import tflearn as tf), not TensorFlow.
    net = tf.input_data(shape=[None, 128, 128], name='Input')
    for l in [64, 64, 100]:
        net = tf.conv_1d(net, nb_filter=l, filter_size=3, activation='relu')
        net = tf.max_pool_1d(net, 2)

    net = tf.flatten(net)
    net = tf.fully_connected(net, 12, activation='relu')
    net = tf.fully_connected(net, 3, activation='softmax')
    net = tf.regression(net, learning_rate=0.001)
    model = tf.DNN(net)
    return model
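A hedged usage sketch (X and Y are assumptions about the surrounding pipeline: [n, 128, 128] inputs and one-hot labels over the 3 classes):

# Usage sketch; "tf" is TFLearn here, as in the snippet above.
model = build_audio_cnn_model()
model.fit(X, Y, n_epoch=20, show_metric=True)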
Example #5
    def create_actor_network(self, state_dim, action_dim):
        inputs = input_data(shape=state_dim)
        net = conv_1d(inputs, 128, 2, 2)
        net = max_pool_1d(net, 2, 2, 'same')
        net = batch_normalization(net)

        net = conv_1d(net, 256, 2, 2)
        net = max_pool_1d(net, 2, 2, 'same')
        net = batch_normalization(net)

        shape = net.get_shape().as_list()
        net = fully_connected(net, 1024, activation='relu', regularizer='L2')
        net = dropout(net, 0.8)
        net = fully_connected(net, 1024, activation='relu', regularizer='L2')

        # Final layer weights are init to Uniform[-3e-3, 3e-3]
        w_init = initializations.uniform(minval=-0.003, maxval=0.003)
        out = fully_connected(net,
                              action_dim,
                              activation='softmax',
                              weights_init=w_init)
        # Scale output to the range [-action_bound, action_bound]
        scaled_out = tf.multiply(out, self.action_bound)
        return inputs, out, scaled_out
Example #6
  def create_model(self, l, tN, N=100000, d=10, K=5, H=1000, m=0.05, reuse=False):
    '''
    N = 1000000 (paper)
    d = unknown
    '''
    with tf.variable_scope('TagSpace', reuse=reuse):
      lr = tf.placeholder('float32', shape=[1], name='lr')
      doc = tf.placeholder('float32', shape=[None, l], name='doc')
      tag_flag = tf.placeholder('float32', shape=[None, tN], name='tag_flag')

      doc_embed = tflearn.embedding(doc, input_dim=N, output_dim=d)
      self.lt_embed = lt_embed = tf.Variable(tf.random_normal([tN, d], stddev=0.1))

      net = tflearn.conv_1d(doc_embed, H, K, activation='tanh')  # H conv units with window size K
      net = tflearn.max_pool_1d(net, l)
      net = tflearn.tanh(net)
      self.logit = logit = tflearn.fully_connected(net, d, activation=None)

      zero_vector = tf.zeros(shape=(1,1), dtype=tf.float32)

      logit = tf.expand_dims(logit, 1)
      logit_set = tf.concat([logit for i in range(tN)], axis=1)

      tag_flag_ex = tf.expand_dims(tag_flag, 2)
      tg = tf.concat([tag_flag_ex for i in range(d)], axis=2)

      # Per-tag logits: the doc embedding scored against every tag embedding.
      self.tag_logit = tf.reduce_sum(
          tf.multiply(logit_set, tf.multiply(tf.ones_like(tg), lt_embed)), axis=2)

      self.positive_logit = positive_logit = tf.reduce_sum(
          tf.multiply(logit_set, tf.multiply(tg, lt_embed)), axis=2)
      with tf.device('/cpu:0'):
        self.f_positive = f_positive = tf.map_fn(
            lambda x: (tf.boolean_mask(x[0], x[1]), True),
            (positive_logit, tf.not_equal(positive_logit, zero_vector)))
      positive = tf.reduce_min(f_positive[0], axis=1)
      self.positive = positive

      tag_flag_ex = tf.expand_dims(1 - tag_flag, 2)
      tg = tf.concat([tag_flag_ex for i in range(d)], axis=2)
      negative_logit = tf.reduce_sum(
          tf.multiply(logit_set, tf.multiply(tg, lt_embed)), axis=2)

      with tf.device('/cpu:0'):
        self.f_negative = f_negative = tf.map_fn(
            lambda x: (tf.boolean_mask(x[0], x[1]), True),
            (negative_logit, tf.not_equal(negative_logit, zero_vector)))
      self.negative = negative = tf.reduce_max(f_negative[0], axis=1)

      # Margin (hinge) loss: mean(max(0, min(m - positive + negative, 10e7))).
      margin = tf.expand_dims(m - positive + negative, 1)
      cap = tf.expand_dims(tf.fill([tf.shape(doc)[0]], 10e7), 1)
      clipped = tf.reduce_min([margin, cap], axis=0)
      self.f_loss = f_loss = tf.reduce_mean(
          tf.reduce_max([clipped, tf.zeros([tf.shape(doc)[0], 1])], axis=0))

      opt = tf.train.AdamOptimizer(learning_rate=lr[0])
      self.op = opt.minimize(f_loss)
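Training this graph means feeding the three placeholders directly. A minimal sketch, assuming a TF1 session and pre-encoded batches doc_batch and tag_flag_batch (all names hypothetical); the placeholders were created under the 'TagSpace' scope:

g = tf.get_default_graph()
lr_t = g.get_tensor_by_name('TagSpace/lr:0')
doc_t = g.get_tensor_by_name('TagSpace/doc:0')
tag_t = g.get_tensor_by_name('TagSpace/tag_flag:0')
sess.run(tf.global_variables_initializer())
_, loss = sess.run([model.op, model.f_loss],
                   feed_dict={lr_t: [0.001], doc_t: doc_batch, tag_t: tag_flag_batch})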
Example #7
def simple_cnn():
    # Note: "tf" is TFLearn here (import tflearn as tf), not TensorFlow.
    input_layer = tf.input_data(shape=[None, 5, 19])
    model = tf.conv_1d(input_layer,
                       256,
                       4,
                       padding='valid',
                       activation='sigmoid',
                       regularizer='L2')
    model = tf.max_pool_1d(model, kernel_size=4)
    model = tf.dropout(model, 0.7)
    model = tf.fully_connected(model, 11, activation='sigmoid')

    sgd = tf.SGD(learning_rate=0.01, lr_decay=0.96, decay_step=32000)
    model = tf.regression(model,
                          optimizer=sgd,
                          loss='categorical_crossentropy')

    return tf.DNN(model)
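A hedged usage sketch ("tf" is TFLearn as in the snippet; X and Y are assumed [n, 5, 19] windows with 11-way one-hot targets):

model = simple_cnn()
model.fit(X, Y, n_epoch=10, show_metric=True)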
Example #8
	# Fragment: the enclosing function header was lost in extraction; it scans
	# the prediction probabilities and keeps the highest-scoring (prob, label)
	# pair. 'boop' is defined elsewhere, presumably a key on the probability.
	names = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'Nan']

	for i in mod.predict(X):
		h = []
		for x in zip(i, names):
			h.append(x)

		pp = sorted(h, key=boop)[-1]

	return pp

n_classes = 11

# Assumed imports for this snippet: import tflearn as tfl; import cv2
net = tfl.input_data([None, 34, 30])
net = tfl.conv_1d(net, 25, 5, activation='relu')
net = tfl.max_pool_1d(net, 2)
net = tfl.dropout(net, 0.8)
net = tfl.conv_1d(net, 35, 3, activation='relu')
net = tfl.max_pool_1d(net, 2)
net = tfl.dropout(net, 0.75)
net = tfl.fully_connected(net, 128, activation='relu')
net = tfl.fully_connected(net, 50, activation='relu')
net = tfl.fully_connected(net, n_classes, activation='softmax')

reg = tfl.regression(net,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.00003)

mod = tfl.DNN(reg, tensorboard_verbose=0)

mod.load('conv_nn8.1')

cum = cv2.VideoCapture(0)
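The capture handle suggests frames are classified live. A hedged sketch of one inference step; the grayscale/resize preprocessing down to the net's 34x30 input is an assumption, not shown in the source:

# Hypothetical inference step on one webcam frame.
ret, frame = cum.read()
if ret:
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    small = cv2.resize(gray, (30, 34))  # cv2.resize takes (width, height)
    print(mod.predict(small.reshape(1, 34, 30)))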
Example #9
from LoadData import loadData

X, Y = loadData('./Train', spectrogram_step=15)
X_test, Y_test = loadData('./Test', spectrogram_step=15)

import tflearn
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Building the convolutional network

# | Adam | epoch: 040 | loss: 0.03133 - acc: 0.9901 | val_loss: 0.06419 - val_acc: 0.9819 -- iter: 5206/5206
net = tflearn.input_data(shape=[None, 121, 35])

net = tflearn.max_pool_1d(net, 4)
net = tflearn.conv_1d(net, 128, 6, activation='relu')
net = tflearn.max_pool_1d(net, 2)
net = tflearn.conv_1d(net, 128, 3, activation='relu')
net = tflearn.avg_pool_1d(net, 2)

net = tflearn.fully_connected(net, 128, activation='relu')
net = tflearn.dropout(net, 0.7)

net = tflearn.fully_connected(net, 14, activation='softmax')

net = tflearn.regression(net,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.005)
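The snippet stops at the regression layer; the usual TFLearn pattern would follow. A sketch consistent with the logged run quoted above (40 epochs, validated on the Test split); the exact call is an assumption:

model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=40, validation_set=(X_test, Y_test), show_metric=True)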
Example #10
def create(bs):
    def input_transform_net(point_cloud, K):
        num_point = point_cloud.get_shape()[1].value

        net = tflearn.conv_1d(point_cloud,
                              nb_filter=64,
                              filter_size=1,
                              padding="valid",
                              strides=1,
                              activation="relu")
        net = tflearn.conv_1d(net,
                              nb_filter=128,
                              filter_size=1,
                              padding="valid",
                              strides=1,
                              activation="relu")
        net = tflearn.conv_1d(net,
                              nb_filter=256,
                              filter_size=1,
                              padding="valid",
                              strides=1,
                              activation="relu")
        net = tflearn.max_pool_1d(net, kernel_size=num_point, padding="valid")
        net = tflearn.fully_connected(net, 256, activation="relu")
        net = tflearn.fully_connected(net, 64, activation="relu")

        weights = tf.Variable(tf.zeros(shape=[64, K * K], dtype=tf.float32))
        biases = tf.Variable(
            tf.reshape(tf.eye(K, dtype=tf.float32), shape=[-1]))
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)
        transform = tf.reshape(transform, [-1, K, K])

        return transform

    net = tflearn.input_data(shape=[None, 57, 8])
    net_p = net[:, :, :3]  # xyz coordinates
    net_m = net[:, :, 3:]  # remaining per-point features
    transform = input_transform_net(net_p, K=3)
    net = tf.matmul(net_p, transform)

    net = tflearn.conv_1d(net,
                          nb_filter=32,
                          filter_size=1,
                          activation="relu",
                          strides=1,
                          padding="valid")
    net = tflearn.conv_1d(net,
                          nb_filter=64,
                          filter_size=1,
                          activation="relu",
                          strides=1,
                          padding="valid")
    transform = input_transform_net(net, K=64)
    net = tf.matmul(net, transform)
    net = tf.concat((net, net_m), axis=2)

    net = tflearn.conv_1d(net,
                          nb_filter=128,
                          filter_size=1,
                          activation="relu",
                          strides=1,
                          padding="valid")
    net = tflearn.conv_1d(net,
                          nb_filter=256,
                          filter_size=1,
                          activation="relu",
                          strides=1,
                          padding="valid")
    net = tflearn.max_pool_1d(net, kernel_size=56, padding="valid")
    net = tflearn.fully_connected(net, 256, activation="relu")
    net = tflearn.fully_connected(net, 64, activation="relu")
    net = tflearn.fully_connected(net, 2, activation="softmax")
    net = tflearn.regression(net,
                             optimizer='adam',
                             loss='categorical_crossentropy',
                             batch_size=bs)
    return net
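A hedged usage sketch: create returns the regression graph, so it plugs straight into tflearn.DNN (X and Y are assumptions: [n, 57, 8] point clouds, with the first 3 channels read as coordinates above, and 2-way one-hot labels):

net = create(bs=32)
model = tflearn.DNN(net)
model.fit(X, Y, n_epoch=50, show_metric=True)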
Example #11
def create_model(num_vocab,
                 train=False,
                 embedding_matrix=None,
                 model_path='./data/model_final/model_saved.ckpt'):
    '''
    Creates a model with two parallel branches: one with an LSTM and one with a CNN.
    INPUT:
        num_vocab: number of words in features
        train: if True, train the model from scratch; if False, load the
            pretrained model. DEFAULT: False
        embedding_matrix: embedding matrix to use with the neural net. DEFAULT: None
        model_path: if train is False, path to the final model.
            DEFAULT: './data/model_final/model_saved.ckpt'
    OUTPUT:
        model
    '''
    print('initializing the model, will take around 5 mins')

    #Initialize the model (assumes: import tensorflow; import tflearn as tf;
    #import numpy as np)
    tensorflow.reset_default_graph()

    #Input data with samples padded to length 100
    net = tf.input_data([None, 100], name='input_layer')

    #If not training, initialize the embeddings randomly
    if embedding_matrix is None:
        embedding_matrix = np.random.random((num_vocab, 300))

    ####### CNN #########
    #Initialize weights
    W1 = tensorflow.constant_initializer(embedding_matrix)

    #Embedding layer of size 300
    net1 = tf.embedding(net,
                        input_dim=num_vocab,
                        output_dim=300,
                        weights_init=W1,
                        name='embedded_layer')
    #Convolutional layer with window size = 3
    net1 = tf.conv_1d(net1, 2, 3, activation='relu', name='conv_layer')
    #Select the most representative of the 3 neighbors
    net1 = tf.max_pool_1d(net1, 3, strides=1, name='max_pool_layer')

    #Dropout with keep probability 0.8 against overfitting
    net1 = tf.dropout(net1, 0.8, name='conv_dropout_layer')
    net1 = tf.flatten(net1, name='flatter_conv_layer')

    #Fully connected layer with ReLu
    net1 = tf.fully_connected(net1, 150, activation='relu', name='first_fc')

    ####### LSTM #########
    #Initialize weights
    W2 = tensorflow.constant_initializer(embedding_matrix)
    #Embedding layer of size 300
    net2 = tf.embedding(net,
                        input_dim=num_vocab,
                        output_dim=300,
                        weights_init=W2,
                        name='embedded_layer2')
    #LSTM with 0.8 dropout and 256 hidden cells
    net2 = tf.lstm(net2, 256, dropout=0.8, name='LSTM_layer', return_seq=True)

    #If a list is returned (return_seq=True yields a list of step outputs), stack it into a 3-D tensor
    if isinstance(net2, list):
        net2 = tensorflow.stack(net2, axis=1, name='stack')

    #Fully connected layer with ReLu
    net2 = tf.fully_connected(net2, 200, activation='relu', name='second_fc')

    ###### MERGE LAYER ######
    #Concatenate results from both branches
    net_final = tensorflow.concat([net1, net2], 1, name="concat")

    #Fully connected layer with ReLu
    net_final = tf.fully_connected(net_final, 128, activation='relu')

    #Dropout of 0.8
    net_final = tf.dropout(net_final, 0.8, name='merge')

    ###### OUTPUT  LAYER ######
    #Final layer with softmax
    net_final = tf.fully_connected(net_final,
                                   2,
                                   activation='softmax',
                                   name='output')
    #Backpropagation with Adam and a categorical_crossentropy output: two
    #columns, each with the probability of smile or not
    net_final = tf.regression(net_final,
                              optimizer='adam',
                              learning_rate=0.001,
                              loss='categorical_crossentropy')

    model = tf.DNN(net_final,
                   tensorboard_verbose=0,
                   checkpoint_path=None,
                   tensorboard_dir='./data/')

    #If not training a new model, load the saved one.
    if not train:
        model.load(model_path)

    return model
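A hedged inference sketch (the integer-encoded, 100-padded input X_padded is an assumption about the surrounding pipeline):

#Usage sketch: load the saved model and predict.
model = create_model(num_vocab)    # train=False, so this loads model_path
probs = model.predict(X_padded)    # two columns: probability of smile or not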
Example #12
# word_pad_length, tag_size and batch_size (used below) are defined elsewhere
# in the source.
l = word_pad_length
tN = tag_size
N = 100000
d = 10
K = 5
H = 1000
m = 0.05
lr = tf.placeholder('float32', shape=[1], name='lr')
doc = tf.placeholder('float32', shape=[None, l], name='doc')
tag_flag = tf.placeholder('float32', shape=[None, tN], name='tag_flag')

doc_embed = tflearn.embedding(doc, input_dim=N, output_dim=d)
lt_embed = tf.Variable(tf.random_normal([tN, d], stddev=0.1))
net = tflearn.conv_1d(doc_embed, H, K, activation='tanh')
net = tflearn.max_pool_1d(net, K)
net = tflearn.tanh(net)
logit = tflearn.fully_connected(net, d, activation=None)

zero_vector = tf.zeros(shape=(1,1), dtype=tf.float32)

logit = tf.expand_dims(logit, 1)
logit_set = tf.concat([logit for i in range(tN)], axis=1)

tag_flag_ex = tf.expand_dims(tag_flag, 2)
tg = tf.concat([tag_flag_ex for i in range(d)], axis=2)

tag_logit = tf.reduce_sum(
    tf.multiply(logit_set, tf.multiply(tf.ones_like(tg), lt_embed)), axis=2)

positive_logit = tf.reduce_sum(
    tf.multiply(logit_set, tf.multiply(tg, lt_embed)), axis=2)
random_sample = tf.random_uniform([batch_size], minval=0, maxval=1, dtype=tf.float32)