def build_network():
    network = tflearn.input_data(shape=[None, 2])
    network = tflearn.fully_connected(network, 64, activation='relu', regularizer='L2', weight_decay=0.001)
    network = tflearn.fully_connected(network, 1, activation='sigmoid')
    network = tflearn.regression(network, optimizer='sgd', learning_rate=0.3,
                                 loss='mean_square')
    return network
Example #2
def use_tflearn():
    import tflearn

    # Data loading and preprocessing
    import tflearn.datasets.mnist as mnist
    X, Y, testX, testY = mnist.load_data(one_hot=True)

    # Building deep neural network
    input_layer = tflearn.input_data(shape=[None, 784])
    dense1 = tflearn.fully_connected(input_layer, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout1 = tflearn.dropout(dense1, 0.8)
    dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout2 = tflearn.dropout(dense2, 0.8)
    softmax = tflearn.fully_connected(dropout2, 10, activation='softmax')

    # Regression using SGD with learning rate decay and Top-3 accuracy
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    top_k = tflearn.metrics.Top_k(3)
    net = tflearn.regression(softmax, optimizer=sgd, metric=top_k,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=20, validation_set=(testX, testY),
              show_metric=True, run_id="dense_model")
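Note on the metric above: Top_k(3) counts a prediction as correct whenever the true class is among the model's three highest-scoring outputs, so the reported figure will run higher than plain top-1 accuracy.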
Example #3
    def __init__(self, s_date, n_frame):
        self.n_epoch = 20
        prev_bd = int(s_date[:6])-1    # previous begin month, as YYYYMM
        prev_ed = int(s_date[9:15])-1  # previous end month, as YYYYMM
        if prev_bd%100 == 0: prev_bd -= 88  # wrap month 00 back to December of the prior year (original used -98, which lands on February)
        if prev_ed%100 == 0: prev_ed -= 88
        pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
        prev_model = '../model/tflearn/reg_l3_bn/big/%s' % pred_s_date
        self.model_dir = '../model/tflearn/reg_l3_bn/big/%s' % s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.1)
        input_layer = tflearn.input_data(shape=[None, 23*n_frame], name='input')
        dense1 = tflearn.fully_connected(input_layer, 400, name='dense1', activation='relu')
        dense1n = tflearn.batch_normalization(dense1, name='BN1')
        dense2 = tflearn.fully_connected(dense1n, 100, name='dense2', activation='relu')
        dense2n = tflearn.batch_normalization(dense2, name='BN2')
        dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
        output = tflearn.single_unit(dense3)
        regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                metric='R2', learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        if os.path.exists('%s/model.tfl' % prev_model):
            self.estimators.load('%s/model.tfl' % prev_model)
            self.n_epoch = 10
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
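Note: the constructor warm-starts from the model trained on the preceding date range when one is found on disk, halving the epoch budget from 20 to 10 for that fine-tuning run.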
Example #4
    def build_cnn_network(self, network):
        """ Build CNN network.

        Args:
            network: base network.

        Returns:
            model: CNN model.

        """
        print('Building CNN network.')
        # Convolutional network building
        network = tflearn.conv_2d(network, 32,
                                  self.IMAGE_CHANNEL_NUM,
                                  activation='relu')
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(network, 64,
                                  self.IMAGE_CHANNEL_NUM,
                                  activation='relu')
        network = tflearn.conv_2d(network, 64,
                                  self.IMAGE_CHANNEL_NUM,
                                  activation='relu')
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.fully_connected(
            network, 32 * 32, activation='relu')
        network = tflearn.dropout(network, 0.5)
        # Two categories: positive or negative.
        network = tflearn.fully_connected(network, 2,
                                          activation='softmax')
        network = tflearn.regression(network, optimizer='adam',
                                     loss='categorical_crossentropy',
                                     learning_rate=0.001)
        print("CNN network built.")
        return network
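A minimal sketch of how this builder might be invoked (the input shape is an assumption, not taken from the original class):

    # hypothetical usage inside the owning class:
    # base = tflearn.input_data(shape=[None, 32, 32, self.IMAGE_CHANNEL_NUM])
    # net = self.build_cnn_network(base)
    # model = tflearn.DNN(net, tensorboard_verbose=0)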
Example #5
def create_nips_network(input_tensor, output_num):
    l_hid1 = tflearn.conv_2d(input_tensor, 16, 8, strides=4, activation='relu', scope='conv1', padding='valid')
    l_hid2 = tflearn.conv_2d(l_hid1, 32, 4, strides=2, activation='relu', scope='conv2', padding='valid')
    l_hid3 = tflearn.fully_connected(l_hid2, 256, activation='relu', scope='dense3')
    out = tflearn.fully_connected(l_hid3, output_num, scope='denseout')

    return out
def create_a3c_lstm_network(input_tensor, output_num):
    l_hid1 = tflearn.conv_2d(input_tensor, 16, 8, strides=4, activation='relu', scope='conv1', padding='valid')
    l_hid2 = tflearn.conv_2d(l_hid1, 32, 4, strides=2, activation='relu', scope='conv2', padding='valid')
    l_hid3 = tflearn.fully_connected(l_hid2, 256, activation='relu', scope='dense3')

    # reshape l_hid3 to lstm usable shape (1, batch_size, 256)
    l_hid3_reshape = tf.reshape(l_hid3, [1, -1, 256])

    # have to custom make the lstm output here to use tf.nn.dynamic_rnn
    l_lstm = tflearn.BasicLSTMCell(256)
    # BasicLSTMCell lists state size as tuple so we need to pass tuple into dynamic_rnn
    lstm_state_size = tuple([[1, x] for x in l_lstm.state_size])
    # has to specifically be the same type tf.python.ops.rnn_cell.LSTMStateTuple
    from tensorflow.python.ops import rnn_cell as _rnn_cell  # module path per the comment above; moved in later TF releases
    initial_lstm_state = _rnn_cell.LSTMStateTuple(tf.placeholder(tf.float32, shape=lstm_state_size[0], name='initial_lstm_state1'),
                                                  tf.placeholder(tf.float32, shape=lstm_state_size[1], name='initial_lstm_state2'))
    # dynamically get the sequence length
    sequence_length = tf.reshape(tf.shape(l_hid3)[0], [1])
    l_lstm4, new_lstm_state = tf.nn.dynamic_rnn(l_lstm, l_hid3_reshape,
                                                initial_state=initial_lstm_state, sequence_length=sequence_length,
                                                time_major=False, scope='lstm4')

    # reshape lstm back to (batch_size, 256)
    l_lstm4_reshape = tf.reshape(l_lstm4, [-1, 256])
    actor_out = tflearn.fully_connected(l_lstm4_reshape, output_num, activation='softmax', scope='actorout')
    critic_out = tflearn.fully_connected(l_lstm4_reshape, 1, activation='linear', scope='criticout')

    return actor_out, critic_out, initial_lstm_state, new_lstm_state
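Usage note (a sketch under assumptions, not from the original project): BasicLSTMCell(256) reports state_size as a (256, 256) tuple, so each of the two placeholders returned as initial_lstm_state must be fed a (1, 256) array, typically zeros at the start of an episode:

import numpy as np

zero_c = np.zeros((1, 256), dtype=np.float32)  # initial cell state
zero_h = np.zeros((1, 256), dtype=np.float32)  # initial hidden state
# feed_dict = {initial_lstm_state[0]: zero_c, initial_lstm_state[1]: zero_h, ...}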
    def deep_model(self, wide_inputs, n_inputs, n_nodes=[100, 50], use_dropout=False):
        '''
        Model - deep, i.e. two-layer fully connected network model
        '''
        cc_input_var = {}
        cc_embed_var = {}
        flat_vars = []
        if self.verbose:
            print ("--> deep model: %s categories, %d continuous" % (len(self.categorical_columns), n_inputs))
        for cc, cc_size in self.categorical_columns.items():
            cc_input_var[cc] = tflearn.input_data(shape=[None, 1], name="%s_in" % cc,  dtype=tf.int32)
            # embedding layers only work on CPU!  No GPU implementation in tensorflow, yet!
            cc_embed_var[cc] = tflearn.layers.embedding_ops.embedding(cc_input_var[cc],    cc_size,  8, name="deep_%s_embed" % cc)
            if self.verbose:
                print ("    %s_embed = %s" % (cc, cc_embed_var[cc]))
            flat_vars.append(tf.squeeze(cc_embed_var[cc], squeeze_dims=[1], name="%s_squeeze" % cc))

        network = tf.concat(1, [wide_inputs] + flat_vars, name="deep_concat")
        for k in range(len(n_nodes)):
            network = tflearn.fully_connected(network, n_nodes[k], activation="relu", name="deep_fc%d" % (k+1))
            if use_dropout:
                network = tflearn.dropout(network, 0.5, name="deep_dropout%d" % (k+1))
        if self.verbose:
            print ("Deep model network before output %s" % network)
        network = tflearn.fully_connected(network, 1, activation="linear", name="deep_fc_output", bias=False)
        network = tf.reshape(network, [-1, 1])	# so that accuracy is binary_accuracy
        if self.verbose:
            print ("Deep model network %s" % network)
        return network
Example #8
def yn_net():
    net = tflearn.input_data(shape=[None, img_rows, img_cols, 1])  # D = 256, 256
    net = tflearn.conv_2d(net, nb_filter=8, filter_size=3, activation='relu', name='conv0.1')
    net = tflearn.conv_2d(net, nb_filter=8, filter_size=3, activation='relu', name='conv0.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool0')  # D = 128, 128
    net = tflearn.dropout(net, 0.75, name='dropout0')
    net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, activation='relu', name='conv1.1')
    net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, activation='relu', name='conv1.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool1')  # D = 64, 64
    net = tflearn.dropout(net, 0.75, name='dropout1')  # was duplicated as 'dropout0'
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv2.1')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv2.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool2')  # D = 32, 32
    net = tflearn.dropout(net, 0.75, name='dropout2')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv3.1')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv3.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool3')  # D = 16, 16
    net = tflearn.dropout(net, 0.75, name='dropout3')
#    net = tflearn.conv_2d(net, nb_filter=64, filter_size=3, activation='relu', name='conv4.1')
#    net = tflearn.conv_2d(net, nb_filter=64, filter_size=3, activation='relu', name='conv4.2')
#    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool4')  # D = 8, 8
#    net = tflearn.dropout(net, 0.75, name='dropout4')
    net = tflearn.fully_connected(net, n_units=128, activation='relu', name='fc1')
    net = tflearn.fully_connected(net, 2, activation='sigmoid')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001)
    model = tflearn.DNN(net, tensorboard_verbose=1, tensorboard_dir='/tmp/tflearn_logs/')
    return model
def vgg16(input, num_class):

    x = tflearn.conv_2d(input, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')

    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')

    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')

    x = tflearn.fully_connected(x, num_class, activation='softmax', scope='fc8',
                                restore=False)

    return x
def model_for_type(neural_net_type, tile_size, on_band_count):
    """The neural_net_type can be: one_layer_relu,
                                   one_layer_relu_conv,
                                   two_layer_relu_conv."""
    network = tflearn.input_data(shape=[None, tile_size, tile_size, on_band_count])

    # NN architectures mirror ch. 3 of www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    if neural_net_type == "one_layer_relu":
        network = tflearn.fully_connected(network, 64, activation="relu")
    elif neural_net_type == "one_layer_relu_conv":
        network = conv_2d(network, 64, 12, strides=4, activation="relu")
        network = max_pool_2d(network, 3)
    elif neural_net_type == "two_layer_relu_conv":
        network = conv_2d(network, 64, 12, strides=4, activation="relu")
        network = max_pool_2d(network, 3)
        network = conv_2d(network, 128, 4, activation="relu")
    else:
        print("ERROR: exiting, unknown layer type for neural net")

    # classify as road or not road
    softmax = tflearn.fully_connected(network, 2, activation="softmax")

    # hyperparameters based on www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    momentum = tflearn.optimizers.Momentum(learning_rate=0.005, momentum=0.9, lr_decay=0.0002, name="Momentum")

    net = tflearn.regression(softmax, optimizer=momentum, loss="categorical_crossentropy")

    return tflearn.DNN(net, tensorboard_verbose=0)
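A hypothetical call (the tile size and band count here are assumptions, not from the source):

# model = model_for_type("two_layer_relu_conv", tile_size=64, on_band_count=3)
# model.fit(...) can then be driven exactly as train_with_data does further down.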
Example #11
def create_a3c_network(input_tensor, output_num):
    l_hid1 = tflearn.conv_2d(input_tensor, 16, 8, strides=4, activation='relu', padding='valid', scope='conv1')
    l_hid2 = tflearn.conv_2d(l_hid1, 32, 4, strides=2, activation='relu', padding='valid', scope='conv2')
    l_hid3 = tflearn.fully_connected(l_hid2, 256, activation='relu', scope='dense3')
    actor_out = tflearn.fully_connected(l_hid3, output_num, activation='softmax', scope='actorout')
    critic_out = tflearn.fully_connected(l_hid3, 1, activation='linear', scope='criticout')

    return actor_out, critic_out
Example #12
File: ddpg.py Project: ataitler/DQN
    def create_actor_network(self):
        inputs = tflearn.input_data(shape=[None, self.s_dim])
        net = tflearn.fully_connected(inputs, 400, activation='relu')
        net = tflearn.fully_connected(net, 300, activation='relu')
        # Final layer weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(net, self.a_dim, activation='tanh', weights_init=w_init)
        scaled_out = tf.mul(out, self.action_bound)  # Scale output to [-action_bound, action_bound]
        return inputs, out, scaled_out
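The tiny Uniform[-3e-3, 3e-3] initialization keeps the tanh output layer near zero at the start of training, so the initial policy is close to neutral and the unit does not begin saturated; this mirrors the initialization recommended in the original DDPG paper.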
Example #13
def define_dnn_topology(input_num, first_layer, second_layer):
    # Note: bare tf.Graph().as_default() / tf.Graph().finalize() calls are
    # no-ops (as_default() must be used as a context manager), so the layers
    # below are simply built in the current default graph.
    g = tflearn.input_data(shape=[None, input_num])
    g = tflearn.fully_connected(g, first_layer, activation='linear')
    g = tflearn.fully_connected(g, second_layer, activation='linear')
    g = tflearn.fully_connected(g, 1, activation='sigmoid')
    g = tflearn.regression(g, optimizer='sgd', learning_rate=2., loss='mean_square')
    return g
    def make_core_network(network):
        dense1 = tflearn.fully_connected(network, 64, activation='tanh',
                                         regularizer='L2', weight_decay=0.001, name="dense1")
        dropout1 = tflearn.dropout(dense1, 0.8)
        dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                         regularizer='L2', weight_decay=0.001, name="dense2")
        dropout2 = tflearn.dropout(dense2, 0.8)
        softmax = tflearn.fully_connected(dropout2, 10, activation='softmax', name="softmax")
        return softmax
Example #15
    def simple_learn(self):
        tflearn.init_graph()
        net = tflearn.input_data(shape=[None, 64, 64, 3])
        net = tflearn.fully_connected(net, 64)
        net = tflearn.dropout(net, .5)
        net = tflearn.fully_connected(net, 10, activation='softmax')
        net = tflearn.regression(net, optimizer='adam', loss='softmax_categorical_crossentropy')
        model = tflearn.DNN(net)
        model.fit(self.trainset, self.trainlabels)
Example #16
    def __init__(self,cluster,env,task_index,learning_rate=0.001):
        ''' Set-up network '''
        action_dim, discrete = check_action_space(env) # detect action space
        with tf.device(tf.train.replica_device_setter(worker_device="/job:worker/task:{}".format(task_index),cluster=cluster)):
            import tflearn # need to import within the tf.device statement for the tflearn.is_training variable to be shared !
            #tflearn.init_graph()
            #training = tf.get_variable(tflearn.get_training_mode().name,initializer=False)            
            #tf.get_variable(                  
            # Placeholders
            self.s = tf.placeholder("float32",np.array(np.append(None,env.observation_shape)))
            self.A = tf.placeholder("float32", (None,))
            self.V = tf.placeholder("float32", (None,))
            if discrete:
                self.a = tf.placeholder("int32", (None,)) # discrete action space
                self.a_one_hot = tf.one_hot(self.a,action_dim)
            else:
                self.a = tf.placeholder("float32", np.append(None,action_dim)) # continuous action space
            
            # Network
            ff = encoder_s(self.s,scope='encoder',reuse=False)
            self.p_out = tflearn.fully_connected(ff, n_units=action_dim, activation='softmax')
            self.v_out = tflearn.fully_connected(ff, n_units=1, activation='linear')
            
            ##### A3C #######        
            # Compute log_pi       
            log_probs = tf.log(tf.clip_by_value(self.p_out,1e-20,1.0)) # log pi
            if discrete:
                log_pi_given_a = tf.reduce_sum(log_probs * self.a_one_hot,reduction_indices=1)
            else:
                raise NotImplementedError  # continuous action spaces are not handled yet

            # Losses
            p_loss = -1*log_pi_given_a * self.A            
            entropy_loss = -1*tf.reduce_sum(self.p_out * log_probs,reduction_indices=1,name="entropy_loss") # policy entropy            
            v_loss = tf.nn.l2_loss(self.V - self.v_out, name="v_loss")
            loss1 = tf.add(p_loss,0.01*entropy_loss)
            self.loss = tf.add(loss1,v_loss)
    
            # Trainer
            optimizer = tf.train.AdamOptimizer(learning_rate)
            self.trainer = optimizer.minimize(self.loss)
                   
            # Global counter 
            #self.global_step = tf.get_variable('global_step', [], 
            #                    initializer = tf.constant_initializer(0), 
            #                    trainable = False)
            #self.global_step = tf.Variable(0)     
            #self.step_op = tf.Variable(0, trainable=False, name='step')
            #self.step_t = tf.placeholder("int32",(1,))
            #self.step_inc_op = self.step_op.assign_add(tf.squeeze(self.step_t), use_locking=True)
            
            # other stuff
            self.summary_placeholders, self.update_ops, self.summary_op = setup_summaries() # Summary operations
            self.saver = tf.train.Saver(max_to_keep=10)
            self.init_op = tf.initialize_all_variables()
            print('network initialized')
Example #17
def discriminator(x, reuse=False):
    with tf.variable_scope('Discriminator', reuse=reuse):
        x = tflearn.conv_2d(x, 64, 5, activation='tanh')
        x = tflearn.avg_pool_2d(x, 2)
        x = tflearn.conv_2d(x, 128, 5, activation='tanh')
        x = tflearn.avg_pool_2d(x, 2)
        x = tflearn.fully_connected(x, 1024, activation='tanh')
        x = tflearn.fully_connected(x, 2)
        x = tf.nn.softmax(x)
        return x
def build_network():
    network = tflearn.input_data(shape=[None, 2])
    network = tflearn.fully_connected(network, 64, activation='relu')
    network = tflearn.dropout(network, 0.9)
    network = tflearn.fully_connected(network, 128, activation='relu')
    network = tflearn.dropout(network, 0.9)
    network = tflearn.fully_connected(network, 2, activation='softmax')
    network = tflearn.regression(network, optimizer='sgd', learning_rate=0.1,
                                 loss='categorical_crossentropy')
    return network
    def _build_Q_network(self):
        trainable_params_start = len(tf.trainable_variables())
        inputs = tf.placeholder(tf.float32, [None, FLAGS.agent_history_length, self.dimensions["network_in_y"], self.dimensions["network_in_x"]])
        transposed_input = tf.transpose(inputs, [0, 2, 3, 1])
        conv1 = tflearn.conv_2d(transposed_input, 32, 8, strides=4, activation='relu')
        conv2 = tflearn.conv_2d(conv1, 64, 4, strides=2, activation='relu')
        conv3 = tflearn.conv_2d(conv2, 128, 3, strides=1, activation='relu')
        flatten = tflearn.fully_connected(conv3, 512, activation='relu')  # was fed conv2, leaving conv3 unused
        softmax = tflearn.fully_connected(flatten, self.num_actions)
        softmax = tf.div(softmax, tf.reduce_sum(softmax, reduction_indices=1, keep_dims=True))  # keep_dims so the division broadcasts
        argmax = tf.argmax(softmax, dimension=1)
        return inputs, softmax, tf.trainable_variables()[trainable_params_start:], argmax
Example #20
def run_mnist():
    X, Y, testX, testY = mnist.load_data(one_hot=True)
    g = tflearn.input_data(shape=[None, 784], name='input')
    g = tflearn.fully_connected(g, 128, name='dense1')
    g = tflearn.fully_connected(g, 256, name='dense2')
    g = tflearn.fully_connected(g, 10, activation='softmax', name='softmax')
    g = tflearn.regression(
        g, optimizer='adam',
        learning_rate=0.001,
        loss='categorical_crossentropy')

    if not os.path.isdir('models'):
        os.mkdir('models')
    m = tflearn.DNN(g, checkpoint_path='models/model.tfl.ckpt')
    m.fit(X, Y, n_epoch=1,
          validation_set=(testX, testY),
          show_metric=True,
          # Snapshot (save & evaluate) model every epoch.
          snapshot_epoch=True,
          # Snapshot (save & evalaute) model every 500 steps.
          snapshot_step=500,
          run_id='model_and_weights')
    m.save('models/mnist.tfl')

    # # load from file or ckpt and continue training
    # m.load('models/mnist.tfl')
    # # m.load('models/mnist.tfl.ckpt-500')
    # m.fit(X, Y, n_epoch=1,
    #       validation_set=(testX, testY),
    #       show_metric=True,
    #       # Snapshot (save & evaluate) model every epoch.
    #       snapshot_epoch=True,
    #       # Snapshot (save & evaluate) model every 500 steps.
    #       snapshot_step=500,
    #       run_id='model_and_weights')

    # retrieve layer by name, print weights
    dense1_vars = tflearn.variables.get_layer_variables_by_name('dense1')
    print('Dense1 layer weights:')
    print(m.get_weights(dense1_vars[0]))
    # or using generic tflearn function
    print('Dense1 layer biases:')
    with m.session.as_default():
        print(tflearn.variables.get_value(dense1_vars[1]))

    # or can even retrieve using attr `W` or `b`!
    print('Dense2 layer weights:')
    dense2 = tflearn.get_layer_by_name('dense2')
    print(dense2)
    print(m.get_weights(dense2.W))
    print('Dense2 layer biases:')
    with m.session.as_default():
        print(tflearn.variables.get_value(dense2.b))
Example #21
def run_XOR():
    X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
    Y = [[0.], [1.], [1.], [0.]]

    g = tflearn.input_data(shape=[None, 2])
    g = tflearn.fully_connected(g, 128, activation='linear')
    g = tflearn.fully_connected(g, 128, activation='linear')
    g = tflearn.fully_connected(g, 1, activation='sigmoid')
    g = tflearn.regression(
        g, optimizer='sgd', learning_rate=2., loss='mean_square')

    train_model(g, X, Y)
def build_dqn(num_actions, action_repeat):
    """
    Building a DQN.
    """
    inputs = tf.placeholder(tf.float32, [None, action_repeat, 84, 84])
    # Inputs shape: [batch, channel, height, width] need to be changed into
    # shape [batch, height, width, channel]
    net = tf.transpose(inputs, [0, 2, 3, 1])
    net = tflearn.conv_2d(net, 32, 8, strides=4, activation='relu')
    net = tflearn.conv_2d(net, 64, 4, strides=2, activation='relu')
    net = tflearn.fully_connected(net, 256, activation='relu')
    q_values = tflearn.fully_connected(net, num_actions)
    return inputs, q_values
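A sketch of querying this network (session handling and the shapes are assumptions):

# inputs_ph, q_values = build_dqn(num_actions=4, action_repeat=4)
# state = np.zeros((1, 4, 84, 84), dtype=np.float32)  # one stacked frame history
# q = sess.run(q_values, feed_dict={inputs_ph: state})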
Example #23
def run_NOT():
    X = [[0.], [1.]]
    Y = [[1.], [0.]]

    # this shape because the next layer expects a tensor of rank 2 or higher
    g = tflearn.input_data(shape=[None, 1])
    g = tflearn.fully_connected(g, 128, activation='linear')
    g = tflearn.fully_connected(g, 128, activation='linear')
    g = tflearn.fully_connected(g, 1, activation='sigmoid')
    g = tflearn.regression(
        g, optimizer='sgd', learning_rate=2., loss='mean_square')

    train_model(g, X, Y)
def train_with_data(onehot_training_labels, onehot_test_labels, test_images, training_images,
                    neural_net_type, band_list, tile_size, number_of_epochs, model):
    """Package data for tensorflow and analyze."""
    npy_training_labels = numpy.asarray(onehot_training_labels)
    npy_test_labels = numpy.asarray(onehot_test_labels)

    # normalize 0-255 values to 0-1
    norm_training_images = numpy.array([img_loc_tuple[0] for img_loc_tuple in training_images])
    norm_train_images = norm_training_images.astype(numpy.float32)
    norm_train_images = numpy.multiply(norm_train_images, 1.0 / 255.0)

    norm_test_images = numpy.array([img_loc_tuple[0] for img_loc_tuple in test_images])
    norm_test_images = norm_test_images.astype(numpy.float32)
    norm_test_images = numpy.multiply(norm_test_images, 1.0 / 255.0)

    if not model:
        on_band_count = 0
        for b in band_list:
            if b == 1:
                on_band_count += 1

        network = tflearn.input_data(shape=[None, tile_size, tile_size, on_band_count])
        if neural_net_type == 'one_layer_relu':
            network = tflearn.fully_connected(network, 32, activation='relu')
        elif neural_net_type == 'one_layer_relu_conv':
            network = conv_2d(network, 256, 16, activation='relu')
            network = max_pool_2d(network, 3)
        else:
            print("ERROR: exiting, unknown layer type for neural net")

        # classify as road or not road
        softmax = tflearn.fully_connected(network, 2, activation='softmax')

        # based on parameters from www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
        momentum = tflearn.optimizers.Momentum(
            learning_rate=.005, momentum=0.9,
            lr_decay=0.0002, name='Momentum')

        net = tflearn.regression(softmax, optimizer=momentum, loss='categorical_crossentropy')

        model = tflearn.DNN(net, tensorboard_verbose=0)

    model.fit(norm_train_images,
              npy_training_labels,
              n_epoch=number_of_epochs,
              shuffle=False,
              validation_set=(norm_test_images, npy_test_labels),
              show_metric=True,
              run_id='mlp')

    return model
Example #25
    def test_conv_layers(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2, activation='relu')
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)
            m.fit(X, Y, n_epoch=100, snapshot_epoch=False)
            # TODO: Fix test
            #self.assertGreater(m.predict([[1., 0., 0., 0.]])[0][0], 0.5)

        # Bulk Tests
        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.conv_2d_transpose(g, 4, 2, [2, 2])
            g = tflearn.max_pool_2d(g, 2)
def generate_nnet(feats):
    """Generate a neural network.

    Parameters
    ----------
    feats : list with at least one feature vector

    Returns
    -------
    Neural network object
    """
    # Load it here to prevent crash of --help when it's not present
    import tflearn

    tflearn.init_graph(num_cores=2, gpu_memory_fraction=0.6)

    input_shape = (None,
                   feats[0].shape[0],
                   feats[0].shape[1],
                   feats[0].shape[2])
    logging.info("input shape: %s", input_shape)
    net = tflearn.input_data(shape=input_shape)
    net = tflearn.conv_2d(net, 10, 3, activation='relu', regularizer="L2")
    net = tflearn.conv_2d(net, 10, 3, activation='relu', regularizer="L2")
    net = tflearn.fully_connected(net, 2, activation='sigmoid')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy', name='target')
    return tflearn.DNN(net)
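Hypothetical usage, assuming feats holds same-shaped H x W x C numpy arrays and labels is a matching one-hot array:

# nnet = generate_nnet(feats)
# nnet.fit(numpy.array(feats), labels, n_epoch=10, show_metric=True)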
Example #27
def run():
    # as in a CNN, the third dimension acts like the channel axis
    g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam',
                           loss='categorical_crossentropy',
                           learning_rate=0.001)

    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='models/model_us_cities')

    for i in range(40):
        seed = random_sequence_from_textfile(path, maxlen)
        m.fit(X, Y, validation_set=0.1, batch_size=128,
              n_epoch=1, run_id='us_cities')
        print("-- TESTING...")
        print("-- Test with temperature of 1.2 --")
        print(m.generate(30, temperature=1.2, seq_seed=seed))
        print("-- Test with temperature of 1.0 --")
        print(m.generate(30, temperature=1.0, seq_seed=seed))
        print("-- Test with temperature of 0.5 --")
        print(m.generate(30, temperature=0.5, seq_seed=seed))
Example #28
    def test_sequencegenerator(self):

        with tf.Graph().as_default():
            text = "123456789101234567891012345678910123456789101234567891012345678910"
            maxlen = 5

            X, Y, char_idx = \
                tflearn.data_utils.string_to_semi_redundant_sequences(text, seq_maxlen=maxlen, redun_step=3)

            g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
            g = tflearn.lstm(g, 32)
            g = tflearn.dropout(g, 0.5)
            g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
            g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                                   learning_rate=0.1)

            m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                          seq_maxlen=maxlen,
                                          clip_gradients=5.0)
            m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
            res = m.generate(10, temperature=1., seq_seed="12345")
            self.assertEqual(res, "123456789101234", "SequenceGenerator test failed! Generated sequence: " + res + " expected '123456789101234'")

            # Testing save method
            m.save("test_seqgen.tflearn")
            self.assertTrue(os.path.exists("test_seqgen.tflearn"))

            # Testing load method
            m.load("test_seqgen.tflearn")
            res = m.generate(10, temperature=1., seq_seed="12345")
            self.assertEqual(res, "123456789101234", "SequenceGenerator test failed after loading model! Generated sequence: " + res + " expected '123456789101234'")
Example #29
def do_rnn(x,y):
    global max_document_length
    print "RNN"
    trainX, testX, trainY, testY = train_test_split(x, y, test_size=0.4, random_state=0)
    y_test=testY

    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Network building
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(trainX, trainY, validation_set=0.1, show_metric=True,
              batch_size=10,run_id="webshell",n_epoch=5)

    y_predict_list=model.predict(testX)
    y_predict=[]
    for i in y_predict_list:
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)

    do_metrics(y_test, y_predict)
Example #30
    def test_regression_placeholder(self):
        '''
        Check that regression does not duplicate placeholders
        '''

        with tf.Graph().as_default():

            g = tflearn.input_data(shape=[None, 2])
            g_nand = tflearn.fully_connected(g, 1, activation='linear')
            with tf.name_scope("Y"):
                Y_in = tf.placeholder(shape=[None, 1], dtype=tf.float32, name="Y")
            tflearn.regression(g_nand, optimizer='sgd',
                               placeholder=Y_in,
                               learning_rate=2.,
                               loss='binary_crossentropy', 
                               op_name="regression1",
                               name="Y")
            # for this test, just use the same default trainable_vars
            # in practice, this should be different for the two regressions
            tflearn.regression(g_nand, optimizer='adam',
                               placeholder=Y_in,
                               learning_rate=2.,
                               loss='binary_crossentropy', 
                               op_name="regression2",
                               name="Y")

            self.assertEqual(len(tf.get_collection(tf.GraphKeys.TARGETS)), 1)
Example #31
        output_row = out_empty[:]  # make a copy of the out_empty list
        output_row[labels.index(docs_y[x])] = 1  # look up the tag in the labels
        # list and set a 1 at that position in the output row.
        training.append(bag)  # bags of words (lists of 0s and 1s)
        output.append(output_row)  # the matching one-hot rows

    # convert the training and output lists to numpy arrays so tflearn can consume them
    training = numpy.array(training)
    output = numpy.array(output)

    with open("data.pickle", "wb") as f:  # saving workspace
        pickle.dump((words, labels, training, output), f)

tensorflow.reset_default_graph()  # reset to get rid of previous settings
net = tflearn.input_data(shape=[None, len(training[0])])  # input length matches the bag-of-words size
net = tflearn.fully_connected(net, 8)  # first fully-connected hidden layer
net = tflearn.fully_connected(net, 8)  # second hidden layer
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")  # softmax gives a probability
net = tflearn.regression(net)  # for each output class.

model = tflearn.DNN(net)  # DNN wraps the network (net) we created so it can be trained and queried.

try:  # load the model if it has already been trained; otherwise train and save it below.
    model.load("model.tflearn")

except:
    model.fit(training, output, n_epoch=1500, batch_size=8,
              show_metric=True)  # n_epoch is the number of times the model
    # will see the same data.
    model.save("model.tflearn")
# train, test, _ = ,X
trainX, trainY = X, Y
testX, testY = X, Y #overfit for now

# Data preprocessing
# Sequence padding
# trainX = pad_sequences(trainX, maxlen=100, value=0.)
# testX = pad_sequences(testX, maxlen=100, value=0.)
# # Converting labels to binary vectors
# trainY = to_categorical(trainY, nb_classes=2)
# testY = to_categorical(testY, nb_classes=2)

# Network building
net = tflearn.input_data([None, width, height])
# net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, classes, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate, loss='categorical_crossentropy')
# Training
model = tflearn.DNN(net, tensorboard_verbose=0)
model.load("tflearn.lstm.model")
while 1: #training_iters
  model.fit(trainX, trainY, n_epoch=100, validation_set=(testX, testY), show_metric=True,
          batch_size=batch_size)
  _y = model.predict(X)
model.save("tflearn.lstm.model")
print(_y)
print(y)

Example #33
    textfile_to_semi_redundant_sequences(path, seq_maxlen=maxlen, redun_step=3,
                                         pre_defined_char_idx=char_idx)

# store char index
pickle.dump(char_idx, open(char_idx_file,'wb'))

# initialize neural net, forward seq to seq (LSTM)

g = tflearn.input_data([None, maxlen, len(char_idx)])
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512)
g = tflearn.dropout(g, 0.5) 
g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001)

model = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='model_tweets')

# train model
for i in range(90):
    seed = random_sequence_from_textfile(path, maxlen)
    model.fit(X, Y, validation_set=0.1, batch_size=128,
          n_epoch=1, show_metric=True, snapshot_epoch=True,
          snapshot_step=500, run_id='tweets')
Example #34
# Load CSV file, indicate that the first column represents labels
from tflearn.data_utils import load_csv
data, labelsA = load_csv('clus_ts_training_2.5_1.5.csv',
                         target_column=0,
                         categorical_labels=True,
                         n_classes=2)
input, labelsB = load_csv('clus_ts_test_2.5_1.5.csv',
                          target_column=0,
                          categorical_labels=True,
                          n_classes=2)

# Build neural network
# Data has 5 features
net = tflearn.input_data(shape=[None, 5])
net = tflearn.fully_connected(net, 32)
dropout1 = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(dropout1,
                              2,
                              activation='softmax',
                              bias=False,
                              weights_init='truncated_normal')

net = tflearn.regression(net)

# Define model
model = tflearn.DNN(net)
# Start training (apply gradient descent algorithm)
model.fit(data,
          labelsA,
          n_epoch=10,
    output_row[labels.index(
        docs_y[x]
    )] = 1  # find where that tag sits in the labels list and set that position to 1
    training.append(bag)
    output.append(output_row)
'''
Now we're going to convert the training and output lists to numpy arrays
'''
training = np.array(training)
output = np.array(output)

# In[ ]:

tf.reset_default_graph()
net = tflearn.input_data(shape=[None, len(training[0])])  # input layer
net = tflearn.fully_connected(net, 8)  # hidden layer
net = tflearn.fully_connected(net, 8)  # hidden layer
net = tflearn.fully_connected(net, len(
    output[0]), activation="softmax")  # activation function along wiht output
net = tflearn.regression(net)

model = tflearn.DNN(net)

model.fit(training, output, n_epoch=2000, batch_size=8, show_metric=True)
model.save("Model.tflearn")
# Starting Predictions
"""
Now its time to actually use the model! Ideally we want to generate a response 
to any sentence the user types in. To do this we need to remember that our model 
does not take string input, it takes a bag of words. We also need to realize that 
our model does not spit out sentences, it generates a list of probabilities for all 
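The docstring breaks off above, but the bag-of-words conversion it describes is small; a sketch under the usual chatbot-tutorial assumptions (the helper name, stemmer, and words list are not from the original text):

def bag_of_words(sentence, words):
    # mark each known stem that occurs in the sentence with a 1
    bag = [0] * len(words)
    s_words = [stemmer.stem(w.lower()) for w in nltk.word_tokenize(sentence)]
    for sw in s_words:
        for i, w in enumerate(words):
            if w == sw:
                bag[i] = 1
    return numpy.array(bag)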
Example #36
def train():
    global stemmer, data, words, labels, training, output
    stemmer = LancasterStemmer()

    with open("intents.json") as file:
        data = json.load(file)

    try:
        file = open('reporter.bin', 'rb')
        changes = pickle.load(file)
        if changes:
            print(1 / 0)  # deliberately raise an error so the except branch retrains
        else:
            with open('data.pickle', 'rb') as f:
                words, labels, training, output = pickle.load(f)

    except:

        words = []
        labels = []
        docs_x = []
        docs_y = []

        for intent in data['intents']:
            for pattern in intent["patterns"]:
                wrds = nltk.word_tokenize(pattern)
                words.extend(wrds)
                docs_x.append(wrds)
                docs_y.append(intent['tag'])

            if intent['tag'] not in labels:
                labels.append(intent['tag'])

        words = [stemmer.stem(w.lower()) for w in words]
        words = sorted(list(set(words)))

        training = []
        output = []

        out_empty = [0 for _ in range(len(labels))]

        for x, doc in enumerate(docs_x):
            bag = []
            wrds = [stemmer.stem(w) for w in doc if w != '?']

            for w in words:
                if w in wrds:
                    bag.append(1)
                else:
                    bag.append(0)

            output_row = out_empty[:]
            output_row[labels.index(docs_y[x])] = 1

            training.append(bag)
            output.append(output_row)

        training = numpy.array(training)
        output = numpy.array(output)

        with open('data.pickle', 'wb') as f:
            pickle.dump((words, labels, training, output), f)

    tensorflow.reset_default_graph()

    net = tflearn.input_data(shape=[None, len(training[0])])
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, len(output[0]), activation='softmax')
    net = tflearn.regression(net)

    global model
    model = tflearn.DNN(net)

    try:
        if not changes:
            model.load('model.tflearn')
        else:
            print(1 / 0)  # deliberately raise so the except branch retrains the model
    except:
        model.fit(training,
                  output,
                  n_epoch=1000,
                  batch_size=8,
                  show_metric=False)
        model.save('model.tflearn')
Example #37
                       name='conv_layer_2')
pool_layer_2 = max_pool_2d(conv_layer_2, 2, name='pool_layer_2')
conv_layer_3 = conv_2d(pool_layer_2,
                       nb_filter=80,
                       filter_size=2,
                       activation='relu',
                       name='conv_layer_3')
pool_layer_3 = max_pool_2d(conv_layer_3, 2, name='pool_layer_3')
conv_layer_4 = conv_2d(pool_layer_2,
                       nb_filter=160,
                       filter_size=2,
                       activation='relu',
                       name='conv_layer_4')
pool_layer_4 = max_pool_2d(conv_layer_4, 2, name='pool_layer_4')
fc_layer_1 = fully_connected(pool_layer_3,
                             100,
                             activation='relu',
                             name='fc_layer_1')
fc_layer_2 = fully_connected(fc_layer_1,
                             3,
                             activation='softmax',
                             name='fc_layer_2')
network = regression(fc_layer_2,
                     optimizer='sgd',
                     loss='categorical_crossentropy',
                     learning_rate=0.01)
model = tflearn.DNN(network)

#model.fit(beeX, beeY, validation_set = 0.2, n_epoch=100,shuffle=True,show_metric=True,run_id='ANN_BEE1_3Layer')

#model.save('/home/jer/Workspace/cs5600/project1/trained_nets/ANN_Bee_3Layer.tfl')
#Let's see if there's already a trained network with the right name in FOLDER
Example #38
num_stresses = 10
num_kinase = 29
num_transcription_factors = 200
num_genes = 6692

# Build neural network
# Input variables (10)
# Which Node to dropout (32)
stress = tflearn.input_data(shape=[None, num_stresses])
kinase_deletion = tflearn.input_data(shape=[None, num_kinase])

# This is the layer that I want to perform selective dropout on.
# I should be able to specify which of the num_kinase (29) nodes should output
# zero, based on a 1 x num_kinase vector of ones and zeros.
kinase = tflearn.fully_connected(stress, num_kinase, activation='relu')
kinase_dropout = tf.mul(kinase, kinase_deletion)
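# Hypothetical mask feed (an assumption, not from the original snippet): a
# float32 row of ones with zeros at the kinase nodes to silence, e.g.
#   mask = np.ones((1, num_kinase), dtype=np.float32)
#   mask[0, [3, 7]] = 0.0
# fed through the kinase_deletion input alongside each stress batch.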

transcription_factor = tflearn.fully_connected(kinase_dropout,
                                               num_transcription_factors,
                                               activation='relu')

gene = tflearn.fully_connected(transcription_factor,
                               num_genes,
                               activation='linear')

adam = tflearn.Adam(learning_rate=0.00001, beta1=0.99)

regression = tflearn.regression(gene,
                                optimizer=adam,
                                loss='mean_square',
        y = Y[d].astype(np.float32)
        y = y.reshape(-1, 1)
        y = to_categorical(y, nb_classes=2)  # Convert label to categorical to train with tflearn

        # Train and test data
        X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.1, random_state=0)

        # Standardize the data
        sc = StandardScaler()
        sc.fit(X_train)
        X_train_sd = sc.transform(X_train)
        X_test_sd = sc.transform(X_test)

        # Model
        input_layer = tflearn.input_data(shape=[None, 100], name='input')
        dense1 = tflearn.fully_connected(input_layer, 128, activation='linear', name='dense1')
        dropout1 = tflearn.dropout(dense1, 0.8)
        dense2 = tflearn.fully_connected(dropout1, 128, activation='linear', name='dense2')
        dropout2 = tflearn.dropout(dense2, 0.8)
        output = tflearn.fully_connected(dropout2, 2, activation='softmax', name='output')
        regression = tflearn.regression(output, optimizer='adam', loss='categorical_crossentropy', learning_rate=.001)

        # Define model with checkpoint (autosave)
        model = tflearn.DNN(regression, tensorboard_verbose=3)

        # load the previously trained model
        model.load('Saved_Models/Fully_Connected/dense_fully_connected_dropout_5645_{}.tfl'.format(d))

        '''# Train model with checkpoint every epoch and every 500 steps
        model.fit(X_train_sd, Y_train, n_epoch=n_epoch, show_metric=True, snapshot_epoch=True, snapshot_step=500,
                  run_id='model_and_weights_{}'.format(c + 1),
Example #40
    Xtest = Aux

except:
    print("Base corrompida ou inexistente, verifique")
    exit()

encoder = tflearn.input_data(shape=[None, 13, 216])
encoder = tflearn.dropout(encoder, 0.6)
encoder = tflearn.layers.recurrent.simple_rnn(
    encoder, 128, return_seq=True, activation='relu')  #,dynamic=True
encoder = tflearn.layers.recurrent.simple_rnn(
    encoder, 128, return_seq=False,
    activation='relu')  #,dynamic=True #,dropout=0.5
encoder = tflearn.dropout(encoder, 0.6)
encoder = tflearn.fully_connected(encoder, 200, activation='elu')

net = tflearn.dropout(encoder, 0.6)
net = tflearn.fully_connected(
    net, number_classes, activation='softmax'
)  # number_classes = the number of speakers for this layer; 'softmax' is the
# activation for this layer (default: 'linear').
# A regression layer (following the output) is required as part of the
# framework's training operations.
net = tflearn.regression(
    net,
    optimizer='adam',
    loss='categorical_crossentropy',
    learning_rate=0.00005
)  # 'adam' = default gradient descent optimizer; loss = the loss function used
# by this regression layer (default: 'categorical_crossentropy').

# build the network.
model = tflearn.DNN(net)
n_inputs = 6  # [sex code, age-group code, height, weight, waist circumference, smoking status]
n_hidden1 = 18
n_hidden2 = 20
n_hidden3 = 24
n_hidden4 = 9
n_outputs = 2  # the final result is diabetic / not diabetic, hence 2 outputs

n_epochs = 50
batch_size = 128

# network

inputs = tflearn.input_data(shape=[None, n_inputs])
hidden1 = tflearn.fully_connected(inputs,
                                  n_hidden1,
                                  activation='relu',
                                  name='hidden1')
hidden2 = tflearn.fully_connected(hidden1,
                                  n_hidden2,
                                  activation='relu',
                                  name='hidden2')
hidden3 = tflearn.fully_connected(hidden2,
                                  n_hidden3,
                                  activation='relu',
                                  name='hidden3')
hidden4 = tflearn.fully_connected(hidden3,
                                  n_hidden4,
                                  activation='relu',
                                  name='hidden4')
softmax = tflearn.fully_connected(hidden4,
                                  n_outputs,
Example #42
                        target_column=0,
                        columns_to_ignore=[2, 7],
                        categorical_labels=True,
                        n_classes=2)

for p in data:
    if p[1] == "female":
        p[1] = 1
    else:
        p[1] = 0

# for x in data:
#     print(x)

net = tflearn.input_data(shape=[None, 6])
net = tflearn.fully_connected(net, 32)
net = tflearn.fully_connected(net, 32)
net = tflearn.fully_connected(net, 32)
net = tflearn.fully_connected(net, 32)
net = tflearn.fully_connected(net, 2, activation='sigmoid')
net = tflearn.regression(net)

model = tflearn.DNN(net)
model.fit(data, labels, n_epoch=100, batch_size=30, show_metric=True)

print("Rohan's odds of survival: ",
      model.predict([[2, 0, 16, 2, 0, 80.00]])[0][1])
print("Aryaman's odds of survival: ",
      model.predict([[2, 0, 14, 2, 0, 80.00]])[0][1])
print("Michiko's odds of survival: ",
      model.predict([[2, 1, 17, 3, 0, 80.00]])[0][1])
Example #43
# preName = 'AS' #55
# preName = 'HXZT' #1
# preName = 'ZY' #6
preName = 'TL' #13
evalNum =13

model = SelfAttentive()
with tf.Session() as sess:
  # build graph
  model.build_graph(n=word_pad_length)
  # Downstream Application
  with tf.variable_scope('DownstreamApplication'):
    global_step = tf.Variable(0, trainable=False, name='global_step')
    learn_rate = tf.train.exponential_decay(lr, global_step, FLAGS.decay_step, 0.95, staircase=True)
    labels = tf.placeholder('float32', shape=[None, tag_size])
    net = tflearn.fully_connected(model.M, 50, activation='relu')
    logits = tflearn.fully_connected(net, tag_size, activation=None)
    loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits), axis=1)
    if FLAGS.penalization == True:
      p_coef = 0.004
      p_loss = p_coef * model.P
      loss = loss + p_loss
      p_loss = tf.reduce_mean(p_loss)
    loss = tf.reduce_mean(loss)
    params = tf.trainable_variables()
    #clipped_gradients = [tf.clip_by_value(x, -0.5, 0.5) for x in gradients]
    optimizer = tf.train.AdamOptimizer(learn_rate)
    grad_and_vars = tf.gradients(loss, params)
    clipped_gradients, _ = tf.clip_by_global_norm(grad_and_vars, 0.5)
    opt = optimizer.apply_gradients(zip(clipped_gradients, params), global_step=global_step)
Example #44
        return loss


relu_weights_init = tflearn.initializations.xavier(seed=20171011)
relu_bias_init = tf.contrib.keras.initializers.Constant(value=0.001)
relu_regularizer = 'L2'
softmax_regularizer = 'L2'
softmax_weights_init = tflearn.initializations.xavier(seed=20171013)
softmax_bias_init = 'zeros'

input = tflearn.input_data(shape=[None, 121], name='input')

shared_hl_1 = tflearn.fully_connected(input,
                                      128,
                                      activation='prelu',
                                      bias=True,
                                      weights_init=relu_weights_init,
                                      regularizer=None,
                                      bias_init=relu_bias_init,
                                      name='shared_hl_1')
shared_hl_2 = tflearn.fully_connected(shared_hl_1,
                                      128,
                                      activation='prelu',
                                      bias=True,
                                      weights_init=relu_weights_init,
                                      regularizer=None,
                                      bias_init=relu_bias_init,
                                      name='shared_hl_2')
shared_hl_3 = tflearn.fully_connected(shared_hl_2,
                                      128,
                                      activation='prelu',
                                      bias=True,
Example #45
def trainOrLoadModel():
    with open("intents.json") as file:
        data = json.load(file)

    try:
        with open("data.pickle", "rb") as f:
            words, labels, training, output = pickle.load(f)
    except:
        words = []
        labels = []
        docs_x = []
        docs_y = []

        for intent in data["intents"]:
            for pattern in intent["patterns"]:
                wrds = nltk.word_tokenize(pattern)
                wrds = [reformWords(w) for w in wrds]
                words.extend(wrds)
                docs_x.append(wrds)
                docs_y.append(intent["tag"])

            if intent["tag"] not in labels:
                labels.append(intent["tag"])

        words = [stemmer.stem(del_Punctutation(w.lower())) for w in words]
        words = list(filter(None, words))
        words = sorted(list(set(words)))

        labels = sorted(labels)

        training = []
        output = []

        out_empty = [0 for _ in range(len(labels))]

        for x, doc in enumerate(docs_x):
            bag = []

            wrds = [stemmer.stem(del_Punctutation(w)) for w in doc]
            wrds = list(filter(None, wrds))
            for w in words:
                if w in wrds:
                    bag.append(1)
                else:
                    bag.append(0)

            output_row = out_empty[:]
            output_row[labels.index(docs_y[x])] = 1

            training.append(bag)
            output.append(output_row)

        training = numpy.array(training)
        output = numpy.array(output)

        with open("data.pickle", "wb") as f:
            pickle.dump((words, labels, training, output), f)

    tensorflow.reset_default_graph()

    net = tflearn.input_data(shape=[None, len(training[0])])
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
    net = tflearn.regression(net)

    model = tflearn.DNN(net)

    IsModelExist = os.path.isfile('model.tflearn.index')
    if IsModelExist:
        model.load("model.tflearn")
    else:
        model.fit(training,
                  output,
                  n_epoch=1000,
                  batch_size=8,
                  show_metric=True)
        model.save("model.tflearn")

    return model, words, labels, data
Example #46
""" An example shoeing how to save/restore models and retrive Weights. """

import tflearn

import tflearn.datasets.mnist as mnist

# MNIST data
X, Y, testX, testY = mnist.load_data(one_hot=True)

# Model
input_layer = tflearn.input_data(shape=[None, 784], name='input')
dense1 = tflearn.fully_connected(input_layer, 128, name='dense1')
dense2 = tflearn.fully_connected(dense1, 256, name='dense2')
softmax = tflearn.fully_connected(dense2, 10, activation='softmax')
regression = tflearn.regression(softmax,
                                optimizer='adam',
                                learning_rate=0.001,
                                loss='categorical_crossentropy')

# Define classifier, with model checkpoint (autosave)
model = tflearn.DNN(regression, checkpoint_path='model.tf1.ckpt')

# Train model, with model checkpoint every epoch and every 500 training steps.
model.fit(
    X,
    Y,
    n_epoch=1,
    validation_set=(testX, testY),
    show_metric=True,
    snapshot_epoch=True,  # Snapshot (save & evaluate) model every epoch.
    snapshot_step=500,  # Snapshot (save & evaluate) model every 500 steps.
Example #47
from __future__ import division, print_function, absolute_import

import tflearn

# Data loading and preprocessing
import tflearn.datasets.mnist as mnist

X, Y, testX, testY = mnist.load_data(one_hot=True)

# Building deep neural network
input_layer = tflearn.input_data(shape=[None, 784])
dense1 = tflearn.fully_connected(input_layer,
                                 64,
                                 activation='tanh',
                                 regularizer='L2',
                                 weight_decay=0.001)
dropout1 = tflearn.dropout(dense1, 0.5)
dense2 = tflearn.fully_connected(dropout1,
                                 64,
                                 activation='tanh',
                                 regularizer='L2',
                                 weight_decay=0.001)
dropout2 = tflearn.dropout(dense2, 0.5)
softmax = tflearn.fully_connected(dropout2, 10, activation='softmax')

# Regression using SGD with learning rate decay and Top-3 accuracy
sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
top_k = tflearn.metrics.Top_k(3)
net = tflearn.regression(softmax,
                         optimizer=sgd,
                         metric=top_k,
                         loss='categorical_crossentropy')
Example #48
def classifier(url):
    global words
    lookup = None  # stays None if the page cannot be fetched or has no metadata
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) '
            'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36',
            'Connection': 'keep-alive'
        }
        request_result = requests.get(url,
                                      timeout=30,
                                      stream=True,
                                      headers=headers)

        if request_result.status_code == 200 and request_result.text is not None:
            soup = BeautifulSoup(request_result.text, "html.parser")
            tag = soup.find('meta',
                            attrs={'name': re.compile('keywords', re.I)})
            keywords = tag.get('content') if tag is not None and tag.get('content') is not None \
                                             and tag.get('content').strip() else None
            if keywords is None:
                tag = soup.find(
                    'meta', attrs={'name': re.compile('description', re.I)})
                description = tag.get('content') if tag is not None and tag.get('content') is not None \
                                                    and tag.get('content').strip() else None
                lookup = description
            else:
                lookup = keywords
        else:
            print('Page Not Open:\t' + url)
    except Exception as e:
        print("URL:" + url + str(e))

    tbl = dict.fromkeys(i for i in range(sys.maxunicode)
                        if unicodedata.category(chr(i)).startswith('P'))

    # Remove punctuation from sentences.
    def remove_punctuation(text):
        return text.translate(tbl)

    stemmer = LancasterStemmer()
    data = None

    # read the json file and load the training data
    with open('training.json') as json_data:
        data = json.load(json_data)
        # print(data)

    # get a list of all categories to train for
    categories = list(data.keys())
    words = []
    # a list of tuples with words in the sentence and category name
    docs = []

    for each_category in data.keys():
        for each_sentence in data[each_category]:

            each_sentence = remove_punctuation(each_sentence)
            # print(each_sentence)

            w = nltk.word_tokenize(each_sentence)
            # print("tokenized words: ", w)
            words.extend(w)
            docs.append((w, each_category))

    words = [stemmer.stem(w.lower()) for w in words]
    words = sorted(list(set(words)))

    # print(words)
    # print(docs)

    # create our training data
    training = []
    output = []
    # create an empty array for our output
    output_empty = [0] * len(categories)

    for doc in docs:
        bow = []
        token_words = doc[0]
        token_words = [stemmer.stem(word.lower()) for word in token_words]
        for w in words:
            bow.append(1) if w in token_words else bow.append(0)

        output_row = list(output_empty)
        output_row[categories.index(doc[1])] = 1

        training.append([bow, output_row])

    random.shuffle(training)
    training = np.array(training)

    # trainX contains the Bag of words and train_y contains the label/ category
    train_x = list(training[:, 0])
    train_y = list(training[:, 1])

    tf.reset_default_graph()
    # Build neural network
    net = tflearn.input_data(shape=[None, len(train_x[0])])
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
    net = tflearn.regression(net)
    # Define model and setup tensorboard
    model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')

    # model.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=True)
    # model.save('model.tflearn')
    model.load('model.tflearn')

    def get_tf_record(sentence):
        # global words
        # tokenize the pattern
        sentence_words = nltk.word_tokenize(sentence)
        # stem each word
        sentence_words = [
            stemmer.stem(word.lower()) for word in sentence_words
        ]
        # bag of words
        bow = [0] * len(words)
        for s in sentence_words:
            for i, w in enumerate(words):
                if w == s:
                    bow[i] = 1

        return (np.array(bow))

    if lookup is None:
        return None
    return categories[np.argmax(model.predict([get_tf_record(lookup)]))]


# classifier(url)
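# Hypothetical usage (the URL below is a placeholder, not from the original):
# print(classifier("https://www.python.org"))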
Example #49
                  tol=0.001,
                  verbose=False)
    elif classifier == "linearsvc":
        clf = LinearSVC()
    elif classifier == "knn":
        clf = KNeighborsClassifier(5)
    elif classifier == 'decisiontree':
        clf = DecisionTreeClassifier()
    elif classifier == 'randomforest':
        clf = RandomForestClassifier()
    elif classifier == 'mlp':
        labels_train = hot_enconding(labels_train)
        # Building deep neural network
        net = tflearn.input_data(shape=[None, X_train.shape[1]])
        net = tflearn.fully_connected(net,
                                      X_train.shape[1] // 2,
                                      activation='relu')
        net = tflearn.fully_connected(net,
                                      X_train.shape[1] // 3,
                                      activation='relu')
        net = tflearn.fully_connected(net, 2, activation='softmax')
        net = tflearn.regression(net)
        # Training
        clf = tflearn.DNN(net, tensorboard_verbose=0)

    clf.fit(X_train, labels_train)

    fprs = []
    fnrs = []
    thresholds = []
    if THRESHOLD:
Example #50
        training.append(bag)
        output.append(output_row)

    training = numpy.array(training)  # convert to a NumPy array for training
    output = numpy.array(output)

    with open("data.pickle", "wb") as f:
        pickle.dump((words, labels, training, output), f)

#Model
tensorflow.reset_default_graph()

net = tflearn.input_data(shape=[None, len(training[0])])  # input sized to the bag-of-words
net = tflearn.fully_connected(net, 8)  # hidden layer, 8 neurons
net = tflearn.fully_connected(net, 8)  # hidden layer, 8 neurons
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")  # one probability per label
net = tflearn.regression(net)

model = tflearn.DNN(net)

try:
    # If a saved model already exists, load it
    model.load("model.tflearn")
except:
    # Otherwise train from scratch and save
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model.tflearn")
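
# A minimal chat helper sketch (hypothetical; assumes the nltk tokenizer and
# stemmer used to build `words` during training are in scope):
def bag_of_words(sentence, words):
    s_words = [stemmer.stem(w.lower()) for w in nltk.word_tokenize(sentence)]
    return numpy.array([1 if w in s_words else 0 for w in words])

# tag = labels[numpy.argmax(model.predict([bag_of_words("hi there", words)]))]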
Example #51
import pandas as pd
import pickle
import numpy as np
import re
import tflearn

from nltk.stem.snowball import RussianStemmer
from collections import Counter
from nltk.tokenize import TweetTokenizer

tweets_col_number = 3
VOCAB_SIZE = 1000

print("Начинаем забирать модель")
net = tflearn.input_data([None, VOCAB_SIZE])
net = tflearn.fully_connected(net, 125, activation='ReLU')
net = tflearn.fully_connected(net, 25, activation='ReLU')
net = tflearn.fully_connected(net, 2, activation='softmax')
model = tflearn.DNN(net)
model.load('model/model.tfl')
print("Забрали модель")

with open('token/tokenizer.pickle', 'rb') as handle:
    vocab = pickle.load(handle)
print("Забрали токены")

stem_count = Counter()
tokenizer = TweetTokenizer()

stemer = RussianStemmer()
regex = re.compile('[^а-яА-Я ]')
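
# A minimal classification sketch (assumption: `vocab` maps stems to
# bag-of-words indices; the tweet_to_vector helper below is hypothetical):
def tweet_to_vector(text):
    vector = np.zeros(VOCAB_SIZE, dtype=np.float32)
    for token in tokenizer.tokenize(regex.sub('', text)):
        idx = vocab.get(stemer.stem(token))
        if idx is not None and idx < VOCAB_SIZE:
            vector[idx] = 1
    return vector

# positive_prob = model.predict([tweet_to_vector("отличный день")])[0][1]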
Example #52
import tflearn

data, labels = tflearn.data_utils.load_csv('f3.csv',
                                           target_column=60,
                                           categorical_labels=True,
                                           n_classes=2)

# Build neural network
net = tflearn.input_data(shape=[None, 60])
net = tflearn.fully_connected(net, 500)
net = tflearn.fully_connected(net, 500)
net = tflearn.fully_connected(net, 2, activation='tanh')
net = tflearn.regression(net, optimizer='rmsprop')

# Define model
model = tflearn.DNN(net)
# Start training (apply gradient descent algorithm)
model.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)

pred = model.predict(data)

print(pred)
Example #53

trainX, trainY = data[:training_size], labels[:training_size]
testX, testY = data[training_size:], labels[training_size:]

# Convert labels to one-hot vectors (11 classes)
trainY = to_categorical(trainY, nb_classes=11)
testY = to_categorical(testY, nb_classes=11)

# The Network

# as many inputs as there are feature columns
number_of_inputs = len(trainX[0])

net = tflearn.input_data([None, number_of_inputs])
net = tflearn.embedding(net, input_dim=number_of_inputs, output_dim=121)
net = tflearn.lstm(net, 121, dropout=0.8)
net = tflearn.fully_connected(net, 11, activation='softmax')
net = tflearn.regression(net,
                         optimizer='adam',
                         learning_rate=0.01,
                         loss='categorical_crossentropy')

model = tflearn.DNN(net)
model.fit(trainX,
          trainY,
          validation_set=(testX, testY),
          show_metric=True,
          batch_size=32)

# Save the trained model (TFLearn's CSV loader is documented at http://tflearn.org/data_utils/#load_csv)
model.save('study_ign.tflearn')
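
# A minimal inference sketch (rebuild the same graph, then restore):
# model.load('study_ign.tflearn')
# print(model.predict(testX[:5]))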
Example #54
    training.append([bag, output_row])

# shuffle our features and turn into np.array
random.shuffle(training)
training = np.array(training)

# create train and test lists
train_x = list(training[:, 0])
train_y = list(training[:, 1])

# reset underlying graph data
tf.reset_default_graph()
# Build neural network
net = tflearn.input_data(shape=[None, len(train_x[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
net = tflearn.regression(net)

# Define model and setup tensorboard
model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
# Start training (apply gradient descent algorithm)
model.fit(train_x, train_y, n_epoch=4000, batch_size=50, show_metric=True)
model.save('model.tflearn')


def clean_up_sentence(sentence):
    # tokenize the pattern
    sentence_words = nltk.word_tokenize(sentence)
    # stem and lowercase each word
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
    return sentence_words
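
# A minimal classification sketch building on clean_up_sentence (the bow
# helper is hypothetical, mirroring the bag-of-words encoding used above):
def bow(sentence, words):
    sentence_words = clean_up_sentence(sentence)
    return np.array([1 if w in sentence_words else 0 for w in words])

# probs = model.predict([bow("is your shop open today?", words)])
# best = np.argmax(probs)  # index of the predicted class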
Example #55
def vgg16(input, num_class):

    # In this model we set trainable=False so these parameters are not updated during training
    x = tflearn.conv_2d(input,
                        64,
                        3,
                        activation='relu',
                        scope='conv1_1',
                        trainable=False)
    x = tflearn.conv_2d(x,
                        64,
                        3,
                        activation='relu',
                        scope='conv1_2',
                        trainable=False)
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')

    x = tflearn.conv_2d(x,
                        128,
                        3,
                        activation='relu',
                        scope='conv2_1',
                        trainable=False)
    x = tflearn.conv_2d(x,
                        128,
                        3,
                        activation='relu',
                        scope='conv2_2',
                        trainable=False)
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')

    x = tflearn.conv_2d(x,
                        256,
                        3,
                        activation='relu',
                        scope='conv3_1',
                        trainable=False)
    x = tflearn.conv_2d(x,
                        256,
                        3,
                        activation='relu',
                        scope='conv3_2',
                        trainable=False)
    x = tflearn.conv_2d(x,
                        256,
                        3,
                        activation='relu',
                        scope='conv3_3',
                        trainable=False)
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')

    x = tflearn.conv_2d(x,
                        512,
                        3,
                        activation='relu',
                        scope='conv4_1',
                        trainable=False)
    x = tflearn.conv_2d(x,
                        512,
                        3,
                        activation='relu',
                        scope='conv4_2',
                        trainable=False)
    x = tflearn.conv_2d(x,
                        512,
                        3,
                        activation='relu',
                        scope='conv4_3',
                        trainable=False)
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')
    # We shrank this fully connected layer to 2048 units: fewer parameters, sufficient for our task
    x = tflearn.fully_connected(x,
                                2048,
                                activation='relu',
                                scope='fc7',
                                restore=False)
    x = tflearn.dropout(x, 0.5, name='dropout2')

    x = tflearn.fully_connected(x,
                                num_class,
                                activation='softmax',
                                scope='fc8',
                                restore=False)

    return x
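
# A minimal fine-tuning sketch (paths, input shape, and class count are
# placeholders, not from the original):
# inp = tflearn.input_data(shape=[None, 224, 224, 3], name='input')
# softmax = vgg16(inp, num_class=10)
# regression = tflearn.regression(softmax, optimizer='adam',
#                                 loss='categorical_crossentropy',
#                                 learning_rate=0.001, restore=False)
# model = tflearn.DNN(regression)
# model.load('vgg16.tflearn', weights_only=True)  # load pre-trained weights
# model.fit(X, Y, n_epoch=10, validation_set=0.1, show_metric=True)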
Example #56

""" An auto-encoder example using MNIST.
Links:
    [MNIST Dataset] http://yann.lecun.com/exdb/mnist/
"""
from __future__ import division, print_function, absolute_import

import numpy as np
import matplotlib.pyplot as plt
import tflearn

# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True)

# Building the encoder
encoder = tflearn.input_data(shape=[None, 784])
encoder = tflearn.fully_connected(encoder, 256)
encoder = tflearn.fully_connected(encoder, 64)

# Building the decoder
decoder = tflearn.fully_connected(encoder, 256)
decoder = tflearn.fully_connected(decoder, 784, activation='sigmoid')

# Regression, with mean square error
net = tflearn.regression(decoder, optimizer='adam', learning_rate=0.001,
                         loss='mean_square', metric=None)

# Training the auto encoder
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, X, n_epoch=20, validation_set=(testX, testX),
          run_id="auto_encoder", batch_size=1)
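
# Reuse the trained session to read out the 64-dimensional codes; this
# mirrors the encoder-extraction idiom from TFLearn's auto-encoder example:
encoding_model = tflearn.DNN(encoder, session=model.session)
print(encoding_model.predict([X[0]]))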
Example #57

import tensorflow as tf
import tflearn
import data  # spoken-digit helper module used below (assumption: local module)

speakers = data.get_speakers()
number_classes = len(speakers)
print("speakers", speakers)

batch = data.wave_batch_generator(batch_size=1000,
                                  source=data.Source.DIGIT_WAVES,
                                  target=data.Target.speaker)
X, Y = next(batch)

# Classification
tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)

net = tflearn.input_data(shape=[None, 8192])  #Two wave chunks
net = tflearn.fully_connected(net, 64)
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, number_classes, activation='softmax')
net = tflearn.regression(net,
                         optimizer='adam',
                         loss='categorical_crossentropy')

model = tflearn.DNN(net)
model.fit(X, Y, n_epoch=100, show_metric=True, snapshot_step=100)

# demo_file = "8_Vicki_260.wav"
demo_file = "8_Bruce_260.wav"
demo = data.load_wav_file(data.path + demo_file)
result = model.predict([demo])
result = data.one_hot_to_item(result, speakers)
print("predicted speaker for %s : result = %s " %
Example #58

# pickle.dump(X_train, open("xtrain.p", "wb"))
# pickle.dump(X_test, open("xtest.p", "wb"))

# X_train = pickle.load(open("xtrain.p", "rb"))
# X_test = pickle.load(open("xtest.p", "rb"))

### Models

print('Build model')

net = tflearn.input_data([None, model_size])
net = tflearn.embedding(net, input_dim=n_words, output_dim=lstm_size[0])
for i in range(len(lstm_size)):
  if i < len(lstm_size) - 1:
    # intermediate GRU layers must return the full sequence
    net = tflearn.gru(net, lstm_size[i], activation=activation_function, return_seq=True)
    net = tflearn.dropout(net, dropout_ratio)
  else:
    net = tflearn.gru(net, lstm_size[i], activation=activation_function)
    net = tflearn.dropout(net, dropout_ratio)
net = tflearn.fully_connected(net, len(qualities), activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy')

print('Train model')

model = tflearn.DNN(net, tensorboard_verbose=0, tensorboard_dir="logdir/gru")

print('Predict')
model.fit(X_train, Y_train, validation_set=(X_test, Y_test), show_metric=True,
          batch_size=32, n_epoch=nb_epochs)
Example #59

        output.append(output_row)

    # create numpy arrays for training
    training = np.array(training)
    output = np.array(output)

    with open("data.pickle", "wb") as f:
        pickle.dump((words, labels, training, output), f)

## Training: the neural network maps a bag-of-words input to an intent label and response group ##

tensorflow.reset_default_graph()
# input layer sized to the bag-of-words vector
net = tflearn.input_data(shape=[None, len(training[0])])
# neuron hidden layers
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
# output layer with softmax activation (probability)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)


model = tflearn.DNN(net)


# Load the existing model if TRAIN_MODEL is False; otherwise train below
if not TRAIN_MODEL:
    model.load("model.tflearn")

# otherwise construct the model
else:
Example #60
    X = tf.placeholder(shape=[None, 784], dtype=tf.float32)  # inferred from the reshape below
    Y = tf.placeholder(shape=[None, 10], dtype=tf.float32)

    net = tf.reshape(X, [-1, 28, 28, 1])

    # Using TFLearn wrappers for network building
    net = tflearn.conv_2d(net, 32, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.dropout(net, 0.8)

    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.dropout(net, 0.8)

    net = tflearn.fully_connected(net, 128, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 256, activation='tanh')
    net = tflearn.dropout(net, 0.8)

    net = tflearn.fully_connected(net, 10, activation='linear')

    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

    init = tf.global_variables_initializer()

    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)
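
        # A minimal training-loop sketch (batch source, batch size, and step
        # count are illustrative, not from the original):
        # for step in range(500):
        #     batch_x, batch_y = mnist.train.next_batch(128)
        #     sess.run(optimizer, feed_dict={X: batch_x, Y: batch_y})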