Example #1
def create_nips_network(input_tensor, output_num):
    l_hid1 = tflearn.conv_2d(input_tensor, 16, 8, strides=4, activation='relu', scope='conv1', padding='valid')
    l_hid2 = tflearn.conv_2d(l_hid1, 32, 4, strides=2, activation='relu', scope='conv2', padding='valid')
    l_hid3 = tflearn.fully_connected(l_hid2, 256, activation='relu', scope='dense3')
    out = tflearn.fully_connected(l_hid3, output_num, scope='denseout')

    return out
    def build_cnn_network(self, network):
        """ Build CNN network.

        Args:
            network: base network.

        Returns:
            model: CNN model.

        """
        print('Building CNN network.')
        # Convolutional network building
        network = tflearn.conv_2d(network, 32,
                                  self.IMAGE_CHANNEL_NUM,
                                  activation='relu')
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(network, 64,
                                  self.IMAGE_CHANNEL_NUM,
                                  activation='relu')
        network = tflearn.conv_2d(network, 64,
                                  self.IMAGE_CHANNEL_NUM,
                                  activation='relu')
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.fully_connected(
            network, 32 * 32, activation='relu')
        network = tflearn.dropout(network, 0.5)
        # Two categories: positive or negative.
        network = tflearn.fully_connected(network, 2,
                                          activation='softmax')
        network = tflearn.regression(network, optimizer='adam',
                                     loss='categorical_crossentropy',
                                     learning_rate=0.001)
        print("CNN network built.")
        return network
import logging

def generate_nnet(feats):
    """Generate a neural network.

    Parameters
    ----------
    feats : list with at least one feature vector

    Returns
    -------
    Neural network object
    """
    # Load it here to prevent crash of --help when it's not present
    import tflearn

    tflearn.init_graph(num_cores=2, gpu_memory_fraction=0.6)

    input_shape = (None,
                   feats[0].shape[0],
                   feats[0].shape[1],
                   feats[0].shape[2])
    logging.info("input shape: %s", input_shape)
    net = tflearn.input_data(shape=input_shape)
    net = tflearn.conv_2d(net, 10, 3, activation='relu', regularizer="L2")
    net = tflearn.conv_2d(net, 10, 3, activation='relu', regularizer="L2")
    net = tflearn.fully_connected(net, 2, activation='sigmoid')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy', name='target')
    return tflearn.DNN(net)
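
A minimal usage sketch for generate_nnet; the feature shape and one-hot labels below are illustrative assumptions, not part of the original snippet:

import numpy as np

feats = [np.zeros((32, 32, 3), dtype=np.float32)]  # hypothetical HxWxC feature arrays
model = generate_nnet(feats)
# model.fit(np.stack(feats), np.array([[1., 0.]]), n_epoch=1)  # labels match the 2-unit output
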
def create_a3c_lstm_network(input_tensor, output_num):
    l_hid1 = tflearn.conv_2d(input_tensor, 16, 8, strides=4, activation='relu', scope='conv1', padding='valid')
    l_hid2 = tflearn.conv_2d(l_hid1, 32, 4, strides=2, activation='relu', scope='conv2', padding='valid')
    l_hid3 = tflearn.fully_connected(l_hid2, 256, activation='relu', scope='dense3')

    # reshape l_hid3 to lstm usable shape (1, batch_size, 256)
    l_hid3_reshape = tf.reshape(l_hid3, [1, -1, 256])

    # have to custom make the lstm output here to use tf.nn.dynamic_rnn
    l_lstm = tflearn.BasicLSTMCell(256)
    # BasicLSTMCell lists state size as tuple so we need to pass tuple into dynamic_rnn
    lstm_state_size = tuple([[1, x] for x in l_lstm.state_size])
    # must specifically be an LSTMStateTuple (tf.nn.rnn_cell.LSTMStateTuple in TF 1.x)
    initial_lstm_state = tf.nn.rnn_cell.LSTMStateTuple(
        tf.placeholder(tf.float32, shape=lstm_state_size[0], name='initial_lstm_state1'),
        tf.placeholder(tf.float32, shape=lstm_state_size[1], name='initial_lstm_state2'))
    # dynamically get the sequence length
    sequence_length = tf.reshape(tf.shape(l_hid3)[0], [1])
    l_lstm4, new_lstm_state = tf.nn.dynamic_rnn(l_lstm, l_hid3_reshape,
                                                initial_state=initial_lstm_state, sequence_length=sequence_length,
                                                time_major=False, scope='lstm4')

    # reshape lstm back to (batch_size, 256)
    l_lstm4_reshape = tf.reshape(l_lstm4, [-1, 256])
    actor_out = tflearn.fully_connected(l_lstm4_reshape, output_num, activation='softmax', scope='actorout')
    critic_out = tflearn.fully_connected(l_lstm4_reshape, 1, activation='linear', scope='criticout')

    return actor_out, critic_out, initial_lstm_state, new_lstm_state
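
The two placeholders in the returned initial LSTM state must be fed on every run. A zero-state sketch, assuming the batch of 1 hard-coded in the reshape above; sess, input_tensor, and frames are assumptions:

import numpy as np

actor, critic, init_state, new_state = create_a3c_lstm_network(input_tensor, output_num)
zero_c = np.zeros((1, 256), dtype=np.float32)  # matches lstm_state_size ([1, 256], [1, 256])
zero_h = np.zeros((1, 256), dtype=np.float32)
# probs, value, lstm_state = sess.run(
#     [actor, critic, new_state],
#     feed_dict={input_tensor: frames, init_state[0]: zero_c, init_state[1]: zero_h})
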
Example #5
    def test_conv_layers(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2, activation='relu')
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)
            m.fit(X, Y, n_epoch=100, snapshot_epoch=False)
            # TODO: Fix test
            #self.assertGreater(m.predict([[1., 0., 0., 0.]])[0][0], 0.5)

        # Bulk Tests
        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.conv_2d_transpose(g, 4, 2, [2, 2])
            g = tflearn.max_pool_2d(g, 2)
Example #6
def create_a3c_network(input_tensor, output_num):
    l_hid1 = tflearn.conv_2d(input_tensor, 16, 8, strides=4, activation='relu', padding='valid', scope='conv1')
    l_hid2 = tflearn.conv_2d(l_hid1, 32, 4, strides=2, activation='relu', padding='valid', scope='conv2')
    l_hid3 = tflearn.fully_connected(l_hid2, 256, activation='relu', scope='dense3')
    actor_out = tflearn.fully_connected(l_hid3, output_num, activation='softmax', scope='actorout')
    critic_out = tflearn.fully_connected(l_hid3, 1, activation='linear', scope='criticout')

    return actor_out, critic_out
Example #7
def discriminator(x, reuse=False):
    with tf.variable_scope('Discriminator', reuse=reuse):
        x = tflearn.conv_2d(x, 64, 5, activation='tanh')
        x = tflearn.avg_pool_2d(x, 2)
        x = tflearn.conv_2d(x, 128, 5, activation='tanh')
        x = tflearn.avg_pool_2d(x, 2)
        x = tflearn.fully_connected(x, 1024, activation='tanh')
        x = tflearn.fully_connected(x, 2)
        x = tf.nn.softmax(x)
        return x
Example #8
def generator(x, reuse=False):
    with tf.variable_scope('Generator', reuse=reuse):
        x = tflearn.fully_connected(x, n_units=7 * 7 * 128)
        x = tflearn.batch_normalization(x)
        x = tf.nn.tanh(x)
        x = tf.reshape(x, shape=[-1, 7, 7, 128])
        x = tflearn.upsample_2d(x, 2)
        x = tflearn.conv_2d(x, 64, 5, activation='tanh')
        x = tflearn.upsample_2d(x, 2)
        x = tflearn.conv_2d(x, 1, 5, activation='sigmoid')
        return x
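
A wiring sketch for the generator/discriminator pair above, following the usual tflearn DCGAN layout; the noise dimension and the 28x28 grayscale image shape are assumptions:

import tflearn

z_dim = 100  # assumed noise dimension
gen_input = tflearn.input_data(shape=[None, z_dim], name='input_gen_noise')
disc_input = tflearn.input_data(shape=[None, 28, 28, 1], name='input_disc')

gen_sample = generator(gen_input)                  # fake images from noise
disc_real = discriminator(disc_input)              # discriminator on real images
disc_fake = discriminator(gen_sample, reuse=True)  # shared weights on fakes
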
    def create_actor_network(self):
        inputs = tflearn.input_data(shape=self.s_dim)
        net = tflearn.conv_2d(inputs, 8, 3, activation='relu', name='conv1')
        net = tflearn.conv_2d(net, 16, 3, activation='relu', name='conv2')
        net = tflearn.fully_connected(net, 256, activation='relu')
        # net = tflearn.fully_connected(net, 300, activation='relu')
        # Final layer weights are init to Uniform[-3e-3, 3e-3]
        # w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        # out = tflearn.fully_connected(net, self.a_dim, activation='softmax', weights_init=w_init)
        out = tflearn.fully_connected(net, self.a_dim, activation='softmax')

        # the softmax output doubles as the scaled action output, hence it is returned twice
        return inputs, out, out
 def sensor_network(self, input, feature_dim):
     # compress input state into feature dim
     net = tflearn.conv_2d(input, 128, 3, activation='relu', name='conv1')
     net = tflearn.layers.conv.max_pool_2d(net,
                                           2,
                                           strides=None,
                                           padding='same',
                                           name='MaxPool2D1')
     net = tflearn.conv_2d(net, 64, 2, activation='relu', name='conv2')
     net = tflearn.fully_connected(net, 128, activation='relu')
     out = tflearn.fully_connected(net, feature_dim, activation='relu')
     return out
 def _build_Q_network(self):
     trainable_params_start = len(tf.trainable_variables())
     inputs = tf.placeholder(tf.float32, [None, FLAGS.agent_history_length, self.dimensions["network_in_y"], self.dimensions["network_in_x"]])
     transposed_input = tf.transpose(inputs, [0, 2, 3, 1])
     conv1 = tflearn.conv_2d(transposed_input, 32, 8, strides=4, activation='relu')
     conv2 = tflearn.conv_2d(conv1, 64, 4, strides=2, activation='relu')
     conv3 = tflearn.conv_2d(conv2, 128, 3, strides=1, activation='relu')
     # flatten the last conv layer; keep_dims=True lets the row sums broadcast for normalization
     flatten = tflearn.fully_connected(conv3, 512, activation='relu')
     softmax = tflearn.fully_connected(flatten, self.num_actions)
     softmax = tf.div(softmax, tf.reduce_sum(softmax, reduction_indices=1, keep_dims=True))
     argmax = tf.argmax(softmax, dimension=1)
     return inputs, softmax, tf.trainable_variables()[trainable_params_start:], argmax
Example #12
def construct_dnn():
    tf.reset_default_graph()
    tflearn.init_graph(num_cores=4, gpu_memory_fraction=0.5)
    tflearn.config.init_training_mode()
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_90degrees_rotation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_flip_updown()
    input_layer = tflearn.input_data(shape=[None, 15, 15, 3], data_augmentation=img_aug)
    # block 1
    net = tflearn.conv_2d(input_layer, 256, 3, activation=None)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, activation='relu')
    # res block 1
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # res block 2
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # res block 3
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # res block 4
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # value head
    net = tflearn.conv_2d(net, 1, 1, activation=None)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, activation='relu')
    net = tflearn.fully_connected(net, 256, activation='relu')
    final = tflearn.fully_connected(net, 1, activation='tanh')
    # optimizer
    sgd = tflearn.optimizers.SGD(learning_rate=0.01, lr_decay=0.95, decay_step=200000)
    regression = tflearn.regression(final, optimizer=sgd, loss='mean_square',  metric='R2')
    model = tflearn.DNN(regression)
    return model
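
construct_dnn returns a compiled tflearn model; a short usage sketch, with the board tensors X and scalar values Y as assumed data:

model = construct_dnn()
# X: float array of shape [N, 15, 15, 3]; Y: values in [-1, 1] of shape [N, 1] (assumed)
# model.fit(X, Y, n_epoch=10, batch_size=128, show_metric=True)
# v = model.predict(X[:1])  # predicted value for a single position
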
Example #13
def net(input_image):
    ratios = [32, 16, 8, 4, 2, 1]
    conv_num = 8
    network = []
    for i in range(len(ratios)):
        network.append(avg_pool_2d(input_image, ratios[i]))

        # block_i_0, block_i_1, block_i_2
        for block in range(3):
            with tf.name_scope('block_%d_%d' % (i, block)):
                filter_size = 1 if (block + 1) % 3 == 0 else 3
                network[i] = tflearn.conv_2d(network[i],
                                             nb_filter=conv_num,
                                             filter_size=filter_size,
                                             weights_init='xavier',
                                             name='Conv_%d_%d' % (i, block))
                network[i] = tf.nn.batch_normalization(network[i], 0, 1.0, 0.0,
                                                       1.0, 1e-5, 'BatchNorm')
                network[i] = tflearn.activations.leaky_relu(network[i])

        if i == 0:
            network[i] = tflearn.upsample_2d(network[i], 2)
        else:
            upnet = tf.nn.batch_normalization(network[i - 1], 0, 1.0, 0.0, 1.0,
                                              1e-5, 'BatchNorm')
            downnet = tf.nn.batch_normalization(network[i], 0, 1.0, 0.0, 1.0,
                                                1e-5, 'BatchNorm')
            # join_i
            network[i] = tflearn.merge([upnet, downnet], 'concat', axis=3)
            # block_i_3, block_i_4, block_i_5
            for block in range(3, 6):
                with tf.name_scope('block_%d_%d' % (i, block)):
                    filter_size = 1 if (block + 1) % 3 == 0 else 3
                    network[i] = tflearn.conv_2d(network[i],
                                                 nb_filter=conv_num * i,
                                                 filter_size=filter_size,
                                                 weights_init='xavier',
                                                 name='Conv_%d_%d' %
                                                 (i, block))
                    network[i] = tf.nn.batch_normalization(
                        network[i], 0, 1.0, 0.0, 1.0, 1e-5, 'BatchNorm')
                    network[i] = tflearn.activations.leaky_relu(network[i])

            if i != len(ratios) - 1:
                network[i] = tflearn.upsample_2d(network[i], 2)

    network[len(ratios) - 1] = tflearn.conv_2d(network[len(ratios) - 1],
                                               nb_filter=3,
                                               filter_size=1,
                                               weights_init='xavier',
                                               name='Conv2d_out')
    return network[len(ratios) - 1]
    def create_actor_network(self):
        inputs = tflearn.input_data(
            shape=[None, self.s_dim[0], self.s_dim[1], 1])
        features = tflearn.conv_2d(inputs,
                                   16,
                                   3,
                                   activation='relu',
                                   name='conv1')
        features = tflearn.conv_2d(features,
                                   16,
                                   3,
                                   activation='relu',
                                   name='conv2')
        features = tflearn.conv_2d(features,
                                   16,
                                   3,
                                   activation='relu',
                                   name='conv3')

        # res
        features_res = tflearn.layers.core.flatten(features)

        net = tflearn.fully_connected(features_res, 128, activation='relu')
        for i in range(self.layers - 1):
            fc1 = tflearn.fully_connected(net, 64, activation='relu')
            fc2 = tflearn.fully_connected(fc1, 128)
            fc_res = tflearn.fully_connected(features_res, 128)
            net = tflearn.activation(tf.matmul(fc1, fc2.W) +
                                     tf.matmul(features_res, fc_res.W) +
                                     fc2.b + fc_res.b,
                                     activation='relu')
        net_res = tflearn.fully_connected(net, 64, activation='relu')

        # plain
        net_plain = tflearn.fully_connected(features, 128, activation='relu')
        net_plain = tflearn.fully_connected(net_plain, 64, activation='relu')

        # combine
        alpha = tf.Variable(np.random.randn(1) * 0.01, dtype=tf.float32)
        net = tf.add(tf.multiply(net_plain, alpha),
                     tf.multiply(net_res, (1 - alpha)))

        # Final layer weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        logits = tflearn.fully_connected(net,
                                         self.a_dim,
                                         activation='tanh',
                                         weights_init=w_init)
        actions_out = tf.reshape(tf.multinomial(logits, 1), [])
        log_prob = tf.log(tf.nn.softmax(logits))

        return inputs, actions_out, log_prob
Example #15
def _builder(input_layer):
    net = tflearn.conv_2d(input_layer, 64, 3, strides=1, activation='relu')
    net = tflearn.max_pool_2d(net, 2, strides=2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.conv_2d(net, 128, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2, strides=2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.conv_2d(net, 256, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2, strides=1)
    net = tflearn.local_response_normalization(net)

    return net
def build_dqn(num_actions, action_repeat):
    """
    Building a DQN.
    """
    inputs = tf.placeholder(tf.float32, [None, action_repeat, 84, 84])
    # Inputs shape: [batch, channel, height, width] need to be changed into
    # shape [batch, height, width, channel]
    net = tf.transpose(inputs, [0, 2, 3, 1])
    net = tflearn.conv_2d(net, 32, 8, strides=4, activation='relu')
    net = tflearn.conv_2d(net, 64, 4, strides=2, activation='relu')
    net = tflearn.fully_connected(net, 256, activation='relu')
    q_values = tflearn.fully_connected(net, num_actions)
    return inputs, q_values
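
A usage sketch for build_dqn; the action count and the greedy-action op are illustrative assumptions:

inputs, q_values = build_dqn(num_actions=6, action_repeat=4)
greedy_action = tf.argmax(q_values, axis=1)
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     a = sess.run(greedy_action, feed_dict={inputs: state_batch})  # state_batch assumed
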
Example #18
    def create_Q_network(self):
        inputs = tflearn.input_data(shape=self.s_dim)

        features = tflearn.conv_2d(inputs,
                                   8,
                                   1,
                                   activation='relu',
                                   name='conv1')
        features = tflearn.conv_2d(features,
                                   16,
                                   3,
                                   activation='relu',
                                   name='conv2')
        features = tflearn.layers.conv.max_pool_2d(features,
                                                   2,
                                                   strides=None,
                                                   padding='same',
                                                   name='MaxPool2D1')

        features = tflearn.conv_2d(features,
                                   8,
                                   3,
                                   activation='relu',
                                   name='conv3')

        # rnn
        features_rnn = tflearn.layers.core.flatten(features)
        fc1 = tflearn.fully_connected(features_rnn, 64)
        fc2 = tflearn.fully_connected(fc1, 32)
        fc_fb = tflearn.fully_connected(fc2, 64)

        net_rnn = tflearn.activation(tf.matmul(features_rnn, fc1.W) + fc1.b,
                                     activation='relu')
        for i in range(self.layers - 1):
            net_rnn = tflearn.activation(tf.matmul(net_rnn, fc2.W) + fc2.b,
                                         activation='relu')
            net_rnn = tflearn.activation(tf.matmul(net_rnn, fc_fb.W) +
                                         tf.matmul(features_rnn, fc1.W) +
                                         fc_fb.b + fc1.b,
                                         activation='relu')
        net_rnn = tflearn.activation(tf.matmul(net_rnn, fc2.W) + fc2.b,
                                     activation='relu')

        # plain
        net_plain = tflearn.fully_connected(net_rnn, 64, activation='relu')
        net_plain = tflearn.fully_connected(net_plain, 32, activation='relu')

        #  output
        net = tflearn.fully_connected(net_plain, 16, activation='relu')
        out = tflearn.fully_connected(net, self.a_dim, activation='tanh')
        return inputs, out
    def network(self, scope):
        # TODO: What is the best NN?
        if self.feature is None:
            indim = self.obs_dim
        else:
            indim = self.feature.num_features()

        obs = tf.placeholder(tf.float32, [None, indim],
                             name=self.name + "_obs")

        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            if self.conv:
                reshaped_obs = tf.reshape(obs, [-1, 105, 80, 3])
                net = tflearn.conv_2d(reshaped_obs,
                                      32,
                                      8,
                                      strides=4,
                                      activation='relu')
                net = tflearn.conv_2d(net, 64, 4, strides=2, activation='relu')
                out = tflearn.fully_connected(
                    net,
                    1,
                    weights_init=tflearn.initializations.uniform(minval=-0.003,
                                                                 maxval=0.003))
            else:
                net = tflearn.fully_connected(
                    obs,
                    self.n_units,
                    name='d1',
                    weights_init=tflearn.initializations.truncated_normal(
                        stddev=1.0 / float(indim)))
                net = tflearn.fully_connected(
                    net,
                    self.n_units,
                    name='d2',
                    weights_init=tflearn.initializations.truncated_normal(
                        stddev=1.0 / float(self.n_units)))
                net = tflearn.fully_connected(
                    net,
                    self.n_units,
                    name='d3',
                    weights_init=tflearn.initializations.truncated_normal(
                        stddev=1.0 / float(self.n_units)))
                # net = tflearn.layers.normalization.batch_normalization(net)
                # net = tf.contrib.layers.batch_norm(net)
                net = tflearn.activations.relu(net)

                w_init = tflearn.initializations.uniform(minval=-0.003,
                                                         maxval=0.003)
                out = tflearn.fully_connected(net, 1, weights_init=w_init)
        return obs, out
    def input_layer(self, INPUT):
        net_layer = tflearn.conv_2d(INPUT, 16 * self.conv_mult, 5, activation='relu')
        net_layer = tflearn.max_pool_2d(net_layer, 2)
        #net_layer = tflearn.dropout(net_layer, 0.8)

        net_layer = tflearn.conv_2d(net_layer, 32 * self.conv_mult, 3, activation='relu')
        net_layer = tflearn.max_pool_2d(net_layer, 2)
        #net_layer = tflearn.dropout(net_layer, 0.8)

        net_layer = tflearn.conv_2d(net_layer, 64 * self.conv_mult, 5, activation='relu')
        net_layer = tflearn.max_pool_2d(net_layer, 3)
        #net_layer = tflearn.dropout(net_layer, 0.8)

        return net_layer
def load_mnist_convnet_4(path):
    input = tflearn.input_data(shape=[None, 28, 28, 1])
    cov1 = tflearn.conv_2d(input, nb_filter=128, filter_size=5, activation='sigmoid', name='conv_layer_1')
    max_pool1 = tflearn.max_pool_2d(cov1, 2, name='pool1')
    lrn = tflearn.local_response_normalization(max_pool1)
    cov2 = tflearn.conv_2d(lrn, nb_filter=64, filter_size=5, activation='sigmoid', name='conv_layer_2')
    max_pool2 = tflearn.max_pool_2d(cov2, 2, name='pool2')
    lrn2 = tflearn.local_response_normalization(max_pool2)
    cov3 = tflearn.conv_2d(lrn2, nb_filter=32, filter_size=5, activation='sigmoid', name='conv_layer_3')
    max_pool3 = tflearn.max_pool_2d(cov3, 2, name='pool3')
    fc1 = tflearn.fully_connected(max_pool3, 100, activation='sigmoid')
    fc2 = tflearn.fully_connected(fc1, 10, activation='softmax')
    model = tflearn.DNN(fc2)
    # restore previously trained weights from `path` and return the model
    model.load(path)
    return model
Example #22
 def __spawn_network(self):
     input_state_placeholder = tf.placeholder(tf.uint8, [
         None, self.cfg.agent_history_length, self.cfg.input_imgy,
         self.cfg.input_imgx
     ])
     input_state = tf.transpose(input_state_placeholder, [0, 2, 3, 1])
     input_state = tf.cast(input_state, tf.float32)
     net = conv_2d(input_state, 32, 8, strides=4, activation='relu')
     net = conv_2d(net, 64, 4, strides=2, activation='relu')
     net = conv_2d(net, 64, 3, strides=1, activation='relu')
     net = fully_connected(net, 512, activation='relu')
     Q_values_for_actions = fully_connected(
         net, self.wrapped_env.action_space_size)
     return input_state_placeholder, Q_values_for_actions
Example #23
def Network_Conv(S_LEN, Input_LEN, length, class_num):

    data_len = Input_LEN * (S_LEN * 2 + 1)
    x = tf.placeholder(tf.float32, shape=[None, length, data_len], name='x')
    y_ = tf.placeholder(tf.int32, shape=[None], name='y_')
    x_reshape = tflearn.reshape(x, [-1, length, data_len, 1])

    print(x_reshape.shape)
    # tflearn looks up activation strings in tflearn.activations, where the name is 'leaky_relu'
    split_1 = tflearn.conv_2d(x_reshape[:, :, 0:Input_LEN * S_LEN, :],
                              64,
                              3,
                              activation='leaky_relu',
                              restore=False)
    split_2 = tflearn.conv_2d(x_reshape[:, :, Input_LEN * S_LEN:Input_LEN *
                                        S_LEN * 2, :],
                              64,
                              3,
                              activation='leaky_relu')
    split_3 = tflearn.conv_2d(x_reshape[:, :,
                                        Input_LEN * S_LEN * 2:data_len, :],
                              64,
                              3,
                              activation='leaky_relu')
    print(split_1.shape)
    print(split_2.shape)
    print(split_3.shape)

    # dense_concat = tf.concat([split_1[:,:,:,np.newaxis],split_2[:,:,:,np.newaxis],split_3[:,:,:,np.newaxis]],axis = 2)
    dense_concat = tflearn.merge([split_1, split_2, split_3], 'concat', axis=2)
    # print dense_concat.shape

    cov = tflearn.conv_2d(dense_concat, 128, 3, activation='relu')
    #print type(cov)
    cov = tflearn.flatten(cov)
    #print cov.shape
    logits = tf.layers.dense(
        inputs=cov,
        units=256,
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    logits = tf.layers.dense(
        inputs=logits,
        units=class_num,
        activation=None,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    return logits, x, y_
Example #24
def yn_net():
    net = tflearn.input_data(shape=[None, img_rows, img_cols, 1])  # D = 256, 256
    net = tflearn.conv_2d(net, nb_filter=8, filter_size=3, activation='relu', name='conv0.1')
    net = tflearn.conv_2d(net, nb_filter=8, filter_size=3, activation='relu', name='conv0.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool0')  # D = 128, 128
    net = tflearn.dropout(net, 0.75, name='dropout0')
    net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, activation='relu', name='conv1.1')
    net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, activation='relu', name='conv1.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool1')  # D = 64, 64
    net = tflearn.dropout(net, 0.75, name='dropout1')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv2.1')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv2.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool2')  # D = 32 by 32
    net = tflearn.dropout(net, 0.75, name='dropout2')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv3.1')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv3.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool3')  # D = 16 by 16
    net = tflearn.dropout(net, 0.75, name='dropout3')
#    net = tflearn.conv_2d(net, nb_filter=64, filter_size=3, activation='relu', name='conv4.1')
#    net = tflearn.conv_2d(net, nb_filter=64, filter_size=3, activation='relu', name='conv4.2')
#    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool4')  # D = 8 by 8
#    net = tflearn.dropout(net, 0.75, name='dropout4')
    net = tflearn.fully_connected(net, n_units=128, activation='relu', name='fc1')
    net = tflearn.fully_connected(net, 2, activation='sigmoid')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001)
    model = tflearn.DNN(net, tensorboard_verbose=1, tensorboard_dir='/tmp/tflearn_logs/')
    return model
Example #25
 def _build_dqn(self):
     s_transpose = tf.transpose(self.s_input, [0, 2, 3, 1])
     s_conv_1 = tflearn.conv_2d(s_transpose,
                                32,
                                8,
                                strides=4,
                                activation='relu')
     s_conv_2 = tflearn.conv_2d(s_conv_1,
                                64,
                                4,
                                strides=2,
                                activation='relu')
     s_fc = tflearn.fully_connected(s_conv_2, 256, activation='relu')
     q_values = tflearn.fully_connected(s_fc, self.action_dim)
     return s_transpose, q_values
Example #26
def model(size=32, color_channels=3):
    convnet = input_data((None, size, size, color_channels))
    convnet = conv_2d(convnet, 16, 5, activation='relu')
    convnet = conv_2d(convnet, 16, 5, activation='relu')
    convnet = max_pool_2d(convnet, 2)
    convnet = conv_2d(convnet, 20, 3, activation='relu')
    convnet = max_pool_2d(convnet, 2)
    convnet = conv_2d(convnet, 20, 3, activation='relu')
    convnet = max_pool_2d(convnet, 2)
    convnet = fully_connected(convnet, 1024, 'relu')
    convnet = dropout(convnet, .9)
    convnet = fully_connected(convnet, 10, 'softmax')
    convnet = regression(convnet)

    return DNN(convnet, tensorboard_verbose=3)
Example #27
    def build_dqn(self):
        '''Defines the value function model'''

        with tf.variable_scope(self.scope):
            self.inputs = tf.placeholder(tf.float32,
                                         [None] + list(INPUT_SHAPE))
            net = tf.transpose(self.inputs, [0, 2, 3, 1])
            net = tflearn.conv_2d(net, 32, 8, strides=4, activation='relu')
            net = tflearn.conv_2d(net, 64, 4, strides=2, activation='relu')
            net = tflearn.conv_2d(net, 64, 3, strides=1, activation='relu')
            net = tflearn.fully_connected(net, 512, activation='relu')
            self.qvals = tflearn.fully_connected(net,
                                                 self.num_actions,
                                                 activation='linear')
        return
Example #28
def get_cnn_layers(save_model):
    input_layer = input_data(shape=[None, 28, 28, 1])
    dropout_layer = dropout(input_layer, keep_prob=0.5)
    net = conv_2d(dropout_layer, nb_filter=32, filter_size=(3, 3))
    net = max_pool_2d(net, kernel_size=(2, 2))
    net = conv_2d(net, nb_filter=32, filter_size=(3, 3))
    net = max_pool_2d(net, kernel_size=(2, 2))

    net = fully_connected(net, n_units=256)

    net = dropout(net, keep_prob=0.5)

    net = fully_connected(net, n_units=10, activation='softmax')

    return create_model(net, 'cnn', save_model)
Example #29
def conv_net(input_shape, **kwargs):
    net = input_data(shape=input_shape)
    net = conv_2d(net, 32, 3, activation='relu',
                  regularizer="L2")  # nb_filter=32, filter_size=3
    net = max_pool_2d(net, 2)
    net = local_response_normalization(net)
    net = conv_2d(net, 64, 3, activation='relu', regularizer="L2")
    net = max_pool_2d(net, 2)
    net = local_response_normalization(net)
    net = fully_connected(net, 128, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 256, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 10, activation='softmax')
    return net
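
conv_net returns the topology without a training op attached; a sketch of wrapping it for training, assuming an MNIST-like input shape:

import tflearn

net = conv_net([None, 28, 28, 1])
net = regression(net, optimizer='adam', learning_rate=0.01,
                 loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=0)
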
Example #30
def DenseNet(network):

    # Growth rate (typical values: 12, 16, 32, ...)
    k = 3

    # Depth (typical values: 40, 100, ...)
    L = 28
    nb_layers = int((L - 4) / 3)

    # Building DenseNet Network

    network = tflearn.conv_2d(network,
                              10,
                              4,
                              regularizer='L2',
                              weight_decay=0.0001)
    # `denseblock` and `drop_prob` are assumed to be defined elsewhere in the source file
    network = denseblock(network, nb_layers, k, dropout=drop_prob)
    network = denseblock(network, nb_layers, k, dropout=drop_prob)
    network = denseblock(network, nb_layers, k, dropout=drop_prob)
    network = tflearn.global_avg_pool(network)

    # Regression
    network = tflearn.fully_connected(network, 4, activation='softmax')

    return network
Example #31
    def ConvNet2(tensorWidth, tensorHeight, tensorDepth, tb_verb=0):
        conv_net = input_data(
            shape=[None, tensorWidth, tensorHeight, tensorDepth])
        conv_net = conv_2d(conv_net,
                           nb_filter=32,
                           filter_size=5,
                           activation='relu',
                           bias=True)
        conv_net = batch_normalization(conv_net)
        conv_net = max_pool_2d(conv_net, 4)
        conv_net = dropout(conv_net, 0.5)
        conv_net = fully_connected(conv_net, 100, activation='relu')
        conv_net = dropout(conv_net, 0.5)

        conv_net = fully_connected(conv_net, 2, activation='softmax')
        conv_net = regression(
            conv_net,
            optimizer='sgd',
            learning_rate=0.01,
            loss='categorical_crossentropy',
        )

        model = tflearn.DNN(conv_net, tensorboard_verbose=tb_verb)

        return model
Example #32
    def create_critic_network(self):
        inputs = tflearn.input_data(
            shape=[None, self.s_dim[0], self.s_dim[1], self.s_dim[2]])
        action = tflearn.input_data(shape=[None, self.a_dim])
        net = tflearn.conv_2d(inputs,
                              8,
                              8,
                              activation='relu',
                              name='critic_conv1')
        # net = tflearn.conv_2d(net, 8, 8, activation='relu', name='critic_conv2')
        net = tflearn.layers.normalization.batch_normalization(
            net, name='critic_BatchNormalization1')
        net = tflearn.fully_connected(net, 100, activation='relu')
        # net = tflearn.layers.normalization.batch_normalization (net, name='critic_BatchNormalization1')

        # Add the action tensor in the 2nd hidden layer
        # Use two temp layers to get the corresponding weights and biases
        t1 = tflearn.fully_connected(net, 50)
        t2 = tflearn.fully_connected(action, 50)

        net = tflearn.activation(tf.matmul(net, t1.W) +
                                 tf.matmul(action, t2.W) + t2.b,
                                 activation='relu')
        net = tflearn.layers.normalization.batch_normalization(
            net, name='critic_BatchNormalization2')
        # linear layer connected to 1 output representing Q(s,a)
        # Weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(net, 1, weights_init=w_init)
        return inputs, action, out
Example #33
    def ConvNetGS1(tensorWidth, tensorHeight, tensorDepth, tb_verb):
        aug = tflearn.ImageAugmentation()

        conv_net = input_data(
            shape=[None, tensorWidth, tensorHeight, tensorDepth],
            data_augmentation=aug)

        # hidden layer 1
        conv_net = conv_2d(conv_net,
                           nb_filter=29,
                           filter_size=9,
                           strides=1,
                           activation='relu',
                           name='conv_layer_1')
        conv_net = max_pool_2d(conv_net, 2)

        conv_net = fully_connected(conv_net,
                                   2,
                                   activation='softmax',
                                   name='output')
        conv_net = regression(conv_net,
                              optimizer='adam',
                              learning_rate=0.001,
                              loss='categorical_crossentropy',
                              name='targets')

        model = tflearn.DNN(conv_net, tensorboard_verbose=tb_verb)

        return model
Example #34
def _model2():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    net = input_data(shape=[None, inputSize, inputSize, dim],
                 name='input',
                 data_preprocessing=img_prep,
                 data_augmentation=img_aug)
    n = 2
    j = 64
    '''
    net = tflearn.conv_2d(net, j, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    '''
    net = tflearn.conv_2d(net, j, 7, strides=2, regularizer='L2', weight_decay=0.0001)
    net = max_pool_2d(net, 2, strides=2)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, len(yTest[0]), activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom,
                     loss='categorical_crossentropy')
    model = tflearn.DNN(net, checkpoint_path='model2_resnet',
                max_checkpoints=10, tensorboard_verbose=3, clip_gradients=0.)
    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
Example #35
 def ResNet32(tensorWidth, tensorHeight, tensorDepth, tb_verb=0):
     n = 5
     net = tflearn.input_data(
         shape=[None, tensorWidth, tensorHeight, tensorDepth])
     net = tflearn.conv_2d(net,
                           16,
                           3,
                           regularizer='L2',
                           weight_decay=0.0001)
     net = tflearn.residual_block(net, n, 16)
     net = tflearn.residual_block(net, 1, 32, downsample=True)
     net = tflearn.residual_block(net, n - 1, 32)
     net = tflearn.residual_block(net, 1, 64, downsample=True)
     net = tflearn.residual_block(net, n - 1, 64)
     net = tflearn.batch_normalization(net)
     net = tflearn.activation(net, 'relu')
     net = tflearn.global_avg_pool(net)
     # Regression
     net = tflearn.fully_connected(net, 2, activation='softmax')
     mom = tflearn.Momentum(0.1,
                            lr_decay=0.1,
                            decay_step=32000,
                            staircase=True)
     net = tflearn.regression(net,
                              optimizer=mom,
                              loss='categorical_crossentropy')
     # Training
     model = tflearn.DNN(net,
                         checkpoint_path='model_resnet_bee1',
                         max_checkpoints=10,
                         tensorboard_verbose=tb_verb,
                         clip_gradients=0.)
     return model
Example #36
def res_graph(X):
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_crop([64, 64], padding=4)

    n = 5
    net = input_data(shape=[None, 64, 64, 3], data_preprocessing=img_prep, data_augmentation=img_aug)
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, 16)
    net = tflearn.residual_block(net, 1, 32, downsample=True)
    net = tflearn.residual_block(net, n - 1, 32)
    net = tflearn.residual_block(net, 1, 64, downsample=True)
    net = tflearn.residual_block(net, n - 1, 64)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom,
                             loss='categorical_crossentropy')
    model = tflearn.DNN(net)
    # rnn typo -> res
    model.load('model\\res\\jun_rnn_cat_dog.tflearn')
    res_result = model.predict(X)
    return res_result
	def Define():	# less deep
		img_aug = tflearn.data_augmentation.ImageAugmentation()
		img_aug.add_random_flip_leftright()
		img_aug.add_random_crop((48, 48),6)
		img_aug.add_random_rotation(max_angle=30.)		

		img_prep = tflearn.data_preprocessing.ImagePreprocessing()
		img_prep.add_featurewise_zero_center()
		img_prep.add_featurewise_stdnorm()

		n = 5

		network = tflearn.input_data(shape=[None, 48, 48, 1], data_augmentation=img_aug, data_preprocessing=img_prep) #48 x 48 grayscale
		network = tflearn.conv_2d(network, 16, 3, regularizer='L2', weight_decay=0.0001)
		network = tflearn.residual_block(network, n, 16)
		network = tflearn.residual_block(network, 1, 32, downsample=True)
		network = tflearn.residual_block(network, n-1, 32)
		network = tflearn.residual_block(network, 1, 64, downsample=True)
		network = tflearn.residual_block(network, n-1, 64)
		network = tflearn.batch_normalization(network)
		network = tflearn.activation(network, 'relu')
		network = tflearn.global_avg_pool(network)
		# Regression
		network = tflearn.fully_connected(network, 7, activation='softmax')

		return network	
Example #38
    def build_network(self):
        """
        Build the convnet.
        Input is 48x48
        3072 nodes in fully connected layer
        """
        # Real-time data preprocessing
        img_prep = tflearn.ImagePreprocessing()
        img_prep.add_featurewise_zero_center(
            per_channel=True, mean=[0.53990436, 0.4405486, 0.39328504])

        # Real-time data augmentation
        img_aug = tflearn.ImageAugmentation()
        img_aug.add_random_flip_leftright()
        img_aug.add_random_crop([49, 49], padding=4)

        # Building Residual Network
        self.network = tflearn.input_data(shape=[None, 49, 49, 3],
                                          data_preprocessing=img_prep,
                                          data_augmentation=img_aug)
        self.network = tflearn.conv_2d(self.network,
                                       16,
                                       3,
                                       regularizer='L2',
                                       weight_decay=0.0001)
        self.network = tflearn.resnext_block(self.network, 5, 16, 32)
        self.network = tflearn.resnext_block(self.network,
                                             1,
                                             32,
                                             32,
                                             downsample=True)
        self.network = tflearn.resnext_block(self.network, 4, 32, 32)
        self.network = tflearn.resnext_block(self.network,
                                             1,
                                             64,
                                             32,
                                             downsample=True)
        self.network = tflearn.resnext_block(self.network, 4, 64, 32)
        self.network = tflearn.batch_normalization(self.network)
        self.network = tflearn.activation(self.network, 'relu')
        self.network = tflearn.global_avg_pool(self.network)
        # Regression
        self.network = tflearn.fully_connected(self.network,
                                               11,
                                               activation='softmax')
        opt = tflearn.Momentum(0.1,
                               lr_decay=0.1,
                               decay_step=32000,
                               staircase=True)
        self.network = tflearn.regression(self.network,
                                          optimizer=opt,
                                          loss='categorical_crossentropy')
        # Training
        self.model = tflearn.DNN(self.network,
                                 checkpoint_path='Snapshots/model_resnext',
                                 max_checkpoints=10,
                                 tensorboard_verbose=0,
                                 tensorboard_dir='Logs/',
                                 clip_gradients=0.)
        self.load_model()
def create_Resnet(num_classes):
    # Residual blocks
    # 32 layers: n=5, 56 layers: n=9, 110 layers: n=18
    n = 9
    network = tflearn.input_data(
        shape=[None, 64, 64,
               3])  #, data_preprocessing=img_prep, data_augmentation=img_aug)
    network = tflearn.conv_2d(network,
                              16,
                              3,
                              regularizer='L2',
                              weight_decay=0.0001)
    network = tflearn.residual_block(network, n, 16)
    network = tflearn.residual_block(network, 1, 32, downsample=True)
    network = tflearn.residual_block(network, n - 1, 32)
    network = tflearn.residual_block(network, 1, 64, downsample=True)
    network = tflearn.residual_block(network, n - 1, 64)
    network = tflearn.batch_normalization(network)
    network = tflearn.activation(network, 'relu')
    network = tflearn.global_avg_pool(network)
    # Regression
    network = tflearn.fully_connected(network,
                                      num_classes,
                                      activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    network = tflearn.regression(network,
                                 optimizer=mom,
                                 loss='categorical_crossentropy')
    return network
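
create_Resnet already attaches the regression op, so it only needs wrapping in a DNN; the class count and training call are assumptions:

network = create_Resnet(num_classes=10)
model = tflearn.DNN(network, tensorboard_verbose=0)
# model.fit(X, Y, n_epoch=50, validation_set=0.1, show_metric=True)  # data assumed
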
Example #40
  def build_network(self):
    # 32 layers: n=5, 56 layers: n=9, 110 layers: n=18
    n = 5
    #https://github.com/tflearn/tflearn/blob/master/examples/images/residual_network_cifar10.py
    print('[+] Building RESIDUAL NETWORK')
    print('[-] COLOR: ' + str(COLOR))
    print('[-] BATCH_SIZE: ' + str(BATH_SIZE_CONSTANT))
    print('[-] EXPERIMENTAL_LABEL: ' + EXPERIMENTO_LABEL)

    self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, COLOR])
    self.network = tflearn.conv_2d(self.network, 16, 3, regularizer='L2', weight_decay=0.0001)
    self.network = tflearn.residual_block(self.network, n, 16)
    self.network = tflearn.residual_block(self.network, 1, 32, downsample=True)
    self.network = tflearn.residual_block(self.network, n-1, 32)
    self.network = tflearn.residual_block(self.network, 1, 64, downsample=True)
    self.network = tflearn.residual_block(self.network, n-1, 64)
    self.network = tflearn.batch_normalization(self.network)
    self.network = tflearn.activation(self.network, 'relu')
    self.network = tflearn.global_avg_pool(self.network)
    # Regression
    self.network = tflearn.fully_connected(self.network, len(EMOTIONS), activation='softmax')
    self.mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    self.network = tflearn.regression(self.network, optimizer=self.mom,
                                      loss='categorical_crossentropy')

    self.model = tflearn.DNN(
      self.network,
      checkpoint_path = CHECKPOINT_DIR,
      max_checkpoints = 1,
      tensorboard_dir = TENSORBOARD_DIR,
      #best_checkpoint_path = CHECKPOINT_DIR_BEST,
      tensorboard_verbose = 1
    )
    self.load_model()
Example #41
    def LoadModel(self):
        self.window_height = 16
        self.window_width = 8
        self.threshold = 0.03
        self.ground_height = 2
        symbol_count = 13

        network = input_data(
            shape=[None, self.window_height, self.window_width, symbol_count])
        network = conv_2d(network, 16, 2, activation='leaky_relu')
        network = max_pool_2d(network, 2)
        network = local_response_normalization(network)
        network = fully_connected(network,
                                  self.window_height * symbol_count,
                                  activation='relu')
        network = tf.reshape(network, [-1, self.window_height, symbol_count])
        network = regression(network,
                             optimizer='adagrad',
                             learning_rate=0.005,
                             loss='mean_square',
                             name='target',
                             batch_size=64)

        self.model = tflearn.DNN(network)
        self.model.load('./CNNmodel/model.tfl')
    def create_Q_network(self):
        # inputs = tf.placeholder(tf.float32, shape = ([None] + list(self.s_dim)))
        # net = layers.convolution2d(inputs, num_outputs=32, kernel_size=3, stride=1, padding = 'SAME', activation_fn=tf.nn.relu)
        # net = layers.max_pool2d(net, kernel_size=[2, 2], padding = 'SAME')
        # net = layers.convolution2d(net, num_outputs=64, kernel_size=2, stride=1, padding = 'SAME', activation_fn=tf.nn.relu)
        # net = layers.flatten(net)
        # net = layers.fully_connected(net, num_outputs=256, activation_fn=tf.nn.relu)
        # out = layers.fully_connected(net, num_outputs=self.a_dim, activation_fn=None)

        inputs = tflearn.input_data(shape=self.s_dim)
        net = tflearn.conv_2d(inputs, 32, 3, activation='relu', name='conv1')
        net = tflearn.layers.conv.max_pool_2d(net, 2, strides=None, padding='same', name='MaxPool2D1')
        # chain from the pooled features rather than the raw inputs
        net = tflearn.conv_2d(net, 64, 2, activation='relu', name='conv2')
        net = tflearn.fully_connected(net, 256, activation='relu')
        out = tflearn.fully_connected(net, self.a_dim)
        return inputs, out
    def run(self):

        # Real-time pre-processing of the image data
        img_prep = ImagePreprocessing()
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()

        # Real-time data augmentation
        img_aug = tflearn.ImageAugmentation()
        img_aug.add_random_flip_leftright()

        # Resnet model below:  Adapted from tflearn website
        self.n = 5 #32 layer resnet

        # Building Residual Network
        net = tflearn.input_data(shape=[None, 48, 48, 1], data_preprocessing=img_prep, data_augmentation=img_aug)
        net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, regularizer='L2', weight_decay=0.0001)
        net = tflearn.residual_block(net, self.n, 16)
        net = tflearn.residual_block(net, 1, 32, downsample=True)
        net = tflearn.residual_block(net, self.n - 1, 32)
        net = tflearn.residual_block(net, 1, 64, downsample=True)
        net = tflearn.residual_block(net, self.n - 1, 64)
        net = tflearn.batch_normalization(net)
        net = tflearn.activation(net, 'relu')
        net = tflearn.global_avg_pool(net)

        # Regression
        net = tflearn.fully_connected(net, 7, activation='softmax')
        mom = tflearn.Momentum(learning_rate=0.1, lr_decay=0.0001, decay_step=32000, staircase=True, momentum=0.9)
        net = tflearn.regression(net, optimizer=mom,
                                 loss='categorical_crossentropy')

        self.model = tflearn.DNN(net, checkpoint_path='models/model_resnet_emotion',
                            max_checkpoints=10, tensorboard_verbose=0,
                            clip_gradients=0.)

        self.model.load('model.tfl')

        face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        cap = cv2.VideoCapture(0)

        # Main loop: capture the live webcam feed, crop each detected face, and run
        # emotion recognition on it with the trained model
        while True:
            ret, img = cap.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = img[y:y + h, x:x + w]
                self.image_processing(roi_gray, img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()
Example #44
    def test_feed_dict_no_None(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4], name="X_in")
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)

            def do_fit():
                m.fit({"X_in": X, 'non_existent': X}, Y, n_epoch=30, snapshot_epoch=False)
            self.assertRaisesRegexp(Exception, "Feed dict asks for variable named 'non_existent' but no such variable is known to exist", do_fit)
def decoder(inputs):
    net = tflearn.fully_connected(inputs, 1200 * 32, name='DecFC1')
    net = tflearn.batch_normalization(net, name='DecBN1')
    net = tflearn.elu(net)
    print("========================")
    print("dec-L1", net.get_shape())
    print("========================")

    net = tflearn.reshape(net, (-1, side1 // 2**2, side2 // 2**2, 32))
    net = tflearn.conv_2d(net, 32, 3, name='DecConv1')
    net = tflearn.batch_normalization(net, name='DecBN2')
    net = tflearn.elu(net)
    print("========================")
    print("dec-L2", net.get_shape())
    print("========================")

    net = tflearn.conv_2d_transpose(net, 16, 3, [side1 // 2, side2 // 2],
                                    strides=2, padding='same', name='DecConvT1')
    net = tflearn.batch_normalization(net, name='DecBN3')
    net = tflearn.elu(net)
    print("========================")
    print("dec-L3", net.get_shape())
    print("========================")

    net = tflearn.conv_2d(net, 16, 3, name='DecConv2')
    net = tflearn.batch_normalization(net, name='DecBN4')
    net = tflearn.elu(net)
    print("========================")
    print("dec-L4", net.get_shape())
    print("========================")

    net = tflearn.conv_2d_transpose(net, channel, 3, [side1, side2],
                                    strides=2, padding='same', activation='sigmoid',
                                    name='DecConvT2')
    decode_layer = net
    print("========================")
    print("output layer", net.get_shape())
    print("========================")

    return [net, decode_layer]
def encoder(inputs, hidden_layer):
    net = tflearn.conv_2d(inputs, 16, 3, strides=2)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L1", net.get_shape())
    print("========================")

    net = tflearn.conv_2d(net, 16, 3, strides=1)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L2", net.get_shape())
    print("========================")

    net = tflearn.conv_2d(net, 32, 3, strides=2)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L3", net.get_shape())
    print("========================")

    net = tflearn.conv_2d(net, 32, 3, strides=1)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print("========================")
    print("enc-L4", net.get_shape())
    print("========================")

    net = tflearn.flatten(net)
    # net = tflearn.fully_connected(net, nb_feature, activation="sigmoid")
    net = tflearn.fully_connected(net, nb_feature)
    hidden_layer = net
    net = tflearn.batch_normalization(net)
    net = tflearn.sigmoid(net)
    print("========================")
    print("hidden", net.get_shape())
    print("========================")

    return [net, hidden_layer]
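Neither half is wired up in this fragment, and side1, side2, channel, and nb_feature are module-level globals defined elsewhere in the source. A minimal sketch of combining the two into a trainable autoencoder, with illustrative values chosen so the decoder's 1200 * 32 fully connected layer matches its reshape (160//4 * 120//4 = 40 * 30 = 1200):

side1, side2, channel, nb_feature = 160, 120, 3, 100  # illustrative, not from the original

inputs = tflearn.input_data(shape=[None, side1, side2, channel])
encoded, hidden_layer = encoder(inputs, None)
decoded, decode_layer = decoder(encoded)
net = tflearn.regression(decoded, optimizer='adam', learning_rate=0.001,
                         loss='mean_square')
model = tflearn.DNN(net)
# An autoencoder reconstructs its own input: model.fit(X, X, n_epoch=10)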
Example #47
def _model2():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    net = input_data(shape=[None, inputSize, inputSize, dim],
                     name='input',
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
    n = 3
    j = 64
    '''
    net = tflearn.conv_2d(net, j, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    '''
    net = tflearn.conv_2d(net, j, 7, strides=2, regularizer='L2', weight_decay=0.0001)
    net = max_pool_2d(net, 2, strides=2)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, len(Y[0]), activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom,
                     loss='categorical_crossentropy')
    model = tflearn.DNN(net, checkpoint_path='model_resnet_cifar10',
                max_checkpoints=10, tensorboard_verbose=3, clip_gradients=0.)
    model.fit(X, Y, n_epoch=epochNum, validation_set=(xTest, yTest), snapshot_epoch=False,
              snapshot_step=500, show_metric=True, batch_size=batchNum, shuffle=True,
              run_id=_id + 'artClassification')
    if modelStore: model.save(_id + '-model.tflearn')
Example #48
    def build_residual_network(self, network, res_n=5):
        #     data_augmentation=self.generate_image_augumentation())
        network = tflearn.conv_2d(network, 16, 3, regularizer='L2',
                                  weight_decay=0.0001)
        network = tflearn.residual_block(network, res_n, 16)
        network = tflearn.residual_block(network, 1, 32, downsample=True)
        network = tflearn.residual_block(network, res_n - 1, 32)
        network = tflearn.residual_block(network, 1, 64, downsample=True)
        network = tflearn.residual_block(network, res_n - 1, 64)
        network = tflearn.batch_normalization(network)
        network = tflearn.activation(network, 'relu')
        network = tflearn.global_avg_pool(network)
        # Regression
        network = tflearn.fully_connected(network, 2, activation='softmax')
        mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000,
                               staircase=True)
        network = tflearn.regression(network, optimizer=mom,
                                     loss='categorical_crossentropy')
        return network
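The builder only assembles the graph. A hypothetical way to consume it inside the same class, following the pattern of the neighboring examples (the input shape and training arguments are assumptions):

        net = tflearn.input_data(shape=[None, 32, 32, 3])  # input size is an assumption
        net = self.build_residual_network(net, res_n=5)
        self.model = tflearn.DNN(net, tensorboard_verbose=0)
        self.model.fit(X, Y, n_epoch=100, validation_set=(testX, testY), show_metric=True)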
Example #49
def resnext(width, height, frame_count, lr, output=9, model_name='sentnet_color.model'):
    n = 5  # blocks per stage; assumed, since `n` was undefined in the original snippet
    net = input_data(shape=[None, width, height, 3], name='input')
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 64, 32)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, output, activation='softmax')
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=opt,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net,
                        max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')

    return model
    # One hot encode the labels
    Y = tflearn.data_utils.to_categorical(Y, 7)
    Y_test = tflearn.data_utils.to_categorical(Y_test, 7)

    # Real-time preprocessing of the image data
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_flip_leftright()

    # Building Residual Network
    net = tflearn.input_data(shape=[None, 48, 48, 1], data_preprocessing=img_prep, data_augmentation=img_aug)
    net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, 16)
    net = tflearn.residual_block(net, 1, 32, downsample=True)
    net = tflearn.residual_block(net, n-1, 32)
    net = tflearn.residual_block(net, 1, 64, downsample=True)
    net = tflearn.residual_block(net, n-1, 64)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)

    # Regression
    net = tflearn.fully_connected(net, 7, activation='softmax')
    mom = tflearn.Momentum(learning_rate=0.1, lr_decay=0.0001, decay_step=32000, staircase=True, momentum=0.9)
    net = tflearn.regression(net, optimizer=mom,
                             loss='categorical_crossentropy')
    # Training
import tflearn
from tflearn.data_utils import image_preloader

# Residual blocks
# 32 layers: n=5, 56 layers: n=9, 110 layers: n=18
n = 5
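# Sanity check, using the standard ResNet depth formula depth = 6n + 2:
# 6*5 + 2 = 32, 6*9 + 2 = 56, 6*18 + 2 = 110 layers.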

# Data loading
X, Y = image_preloader('files_list', image_shape=(224, 224), mode='file',
                       categorical_labels=True, normalize=True,
                       files_extension=['.jpg', '.png'])


# Building Residual Network
net = tflearn.input_data(shape=[None, 224, 224, 3], name='input')

net = tflearn.conv_2d(net, 16, 7, strides=2, regularizer='L2', weight_decay=0.0001)  # 16 filters to match the first residual stage below
net = tflearn.residual_block(net, n, 16)
net = tflearn.residual_block(net, 1, 32, downsample=True)
net = tflearn.residual_block(net, n-1, 32)
net = tflearn.residual_block(net, 1, 64, downsample=True)
net = tflearn.residual_block(net, n-1, 64)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.global_avg_pool(net)
# Regression
net = tflearn.fully_connected(net, 10, activation='softmax')
mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
net = tflearn.regression(net, optimizer=mom,
                         loss='categorical_crossentropy')
# Training
model = tflearn.DNN(net, checkpoint_path='model_resnet_cifar10',
                    max_checkpoints=10, tensorboard_verbose=0)
Example #52
# --------------------------------------

# Using MNIST Dataset
import tflearn.datasets.mnist as mnist
mnist_data = mnist.read_data_sets(one_hot=True)

# User defined placeholders
with tf.Graph().as_default():
    # Placeholders for data and labels
    X = tf.placeholder(shape=(None, 784), dtype=tf.float32)
    Y = tf.placeholder(shape=(None, 10), dtype=tf.float32)

    net = tf.reshape(X, [-1, 28, 28, 1])

    # Using TFLearn wrappers for network building
    net = tflearn.conv_2d(net, 32, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.dropout(net, 0.8)
    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 128, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 256, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 10, activation='softmax')

    # Defining other ops using Tensorflow
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=Y))
Example #53
def run():
    net = tflearn.input_data(shape=[None, 224, 224, 3])

    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)

    net = tflearn.conv_2d(net, 128, 3, activation='relu')
    net = tflearn.conv_2d(net, 128, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)

    net = tflearn.conv_2d(net, 256, 3, activation='relu')
    net = tflearn.conv_2d(net, 256, 3, activation='relu')
    net = tflearn.conv_2d(net, 256, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)

    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)

    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)

    net = tflearn.fully_connected(net, 4096, activation='relu')
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 4096, activation='relu')
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 17, activation='softmax')

    net = tflearn.regression(net, optimizer='rmsprop',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)

    m = tflearn.DNN(net, checkpoint_path='models/vgg_net',
                    max_checkpoints=1, tensorboard_verbose=3)
    m.fit(X, Y, n_epoch=500, shuffle=True,
          show_metric=True, batch_size=32, snapshot_step=500,
          snapshot_epoch=False, run_id='vgg_net')
    m.save('models/vgg_net.tfl')
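run() assumes X and Y already exist at module scope. Given the 224x224 input and the 17-way softmax, a plausible loader is TFLearn's 17-category Oxford Flowers dataset; this is an assumption about the author's data, not something the snippet states:

import tflearn.datasets.oxflower17 as oxflower17
X, Y = oxflower17.load_data(one_hot=True, resize_pics=(224, 224))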
Example #54
from __future__ import division, print_function, absolute_import

import tflearn
import tflearn.data_utils as du

# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True)
X = X.reshape([-1, 28, 28, 1])
testX = testX.reshape([-1, 28, 28, 1])
X, mean = du.featurewise_zero_center(X)
testX = du.featurewise_zero_center(testX, mean)

# Building Residual Network
net = tflearn.input_data(shape=[None, 28, 28, 1])
net = tflearn.conv_2d(net, 64, 3, activation='relu', bias=False)
net = tflearn.batch_normalization(net)
# Residual blocks
net = tflearn.deep_residual_block(net, 3, 16, 64)
net = tflearn.deep_residual_block(net, 1, 32, 128, downsample=True)
net = tflearn.deep_residual_block(net, 3, 32, 128)
net = tflearn.deep_residual_block(net, 1, 64, 256, downsample=True)
net = tflearn.deep_residual_block(net, 3, 64, 256)
net = tflearn.global_avg_pool(net)
# Regression
net = tflearn.fully_connected(net, 10, activation='softmax')
sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=300)
# Pass the optimizer object, not the string 'sgd', so the decay schedule above is used.
net = tflearn.regression(net, optimizer=sgd,
                         loss='categorical_crossentropy')
# Training
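The fragment stops at the training comment; a plausible completion in the style of the neighboring examples (checkpoint path, epoch count, and batch size are assumptions):

model = tflearn.DNN(net, checkpoint_path='model_resnet_mnist',
                    max_checkpoints=10, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=100, validation_set=(testX, testY),
          show_metric=True, batch_size=128, run_id='resnet_mnist')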
import argparse

import tflearn as tl
import tflearn.data_utils as du

if __name__ == "__main__":
	parser = argparse.ArgumentParser()
	parser.add_argument('--t', action='store', dest='test_path', type=str, help='Test Data Path')
	config = parser.parse_args()

	#Load Test data
	image_count = (3,6)
	patch_count = 20
	X = generate_patches(img2numpy_arr(config.test_path), image_count, patch_count)

	# Building Residual Network
	net = tl.input_data(shape=[None, 42, 42, 3])
	net = tl.conv_2d(net, 32, 3)
	net = tl.batch_normalization(net)
	net = tl.activation(net, 'relu')
	net = tl.shallow_residual_block(net, 4, 32, regularizer='L2')
	net = tl.shallow_residual_block(net, 1, 32, downsample=True,
												 regularizer='L2')
	net = tl.shallow_residual_block(net, 4, 64, regularizer='L2')
	net = tl.shallow_residual_block(net, 1, 64, downsample=True,
												 regularizer='L2')
	net = tl.shallow_residual_block(net, 5, 64, regularizer='L2')
	net = tl.global_avg_pool(net)
	
	# Regression
	net = tl.fully_connected(net, 9, activation='softmax')
	mom = tl.Momentum(0.1, lr_decay=0.1, decay_step=16000, staircase=True)
	net = tl.regression(net, optimizer=mom,
	                    loss='categorical_crossentropy')
import tflearn.data_utils as du

# Data loading
from tflearn.datasets import cifar10
(X, Y), (testX, testY) = cifar10.load_data()
# Data pre-processing
X, mean = du.featurewise_zero_center(X)
X, std = du.featurewise_std_normalization(X)
testX = du.featurewise_zero_center(testX, mean)
testX = du.featurewise_std_normalization(testX, std)
Y = du.to_categorical(Y, 10)
testY = du.to_categorical(testY, 10)

# Building Residual Network
net = tflearn.input_data(shape=[None, 32, 32, 3])
net = tflearn.conv_2d(net, 32, 3)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.shallow_residual_block(net, 4, 32, regularizer='L2')
net = tflearn.shallow_residual_block(net, 1, 32, downsample=True,
                                     regularizer='L2')
net = tflearn.shallow_residual_block(net, 4, 64, regularizer='L2')
net = tflearn.shallow_residual_block(net, 1, 64, downsample=True,
                                     regularizer='L2')
net = tflearn.shallow_residual_block(net, 5, 128, regularizer='L2')
net = tflearn.global_avg_pool(net)
# Regression
net = tflearn.fully_connected(net, 10, activation='softmax')
mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=16000, staircase=True)
net = tflearn.regression(net, optimizer=mom,
                         loss='categorical_crossentropy')
Example #57
def get_cae():
	import tensorflow as tf  # used by tf.clip_by_value at the end
	from tflearn.layers.merge_ops import merge  # bare merge() is used for the skip connections below
	# with tf.device('/gpu:1'):
	encoder = tflearn.input_data(shape=[None, 256, 256, 20], name='input')
	# encoder = encoder/255.0
	num_filter = 10*20
	encoder = tflearn.conv_2d(encoder, 20, 7, activation='relu', regularizer='L1')
	# encoder = tflearn.conv_2d(encoder, 20, 3, activation='relu', regularizer='L1')
	# encoder = tflearn.conv_2d(encoder, 20, 3, activation='relu', regularizer='L1')
	

	encoder = tflearn.conv_2d(encoder, num_filter*1, 3, activation='relu', regularizer='L1')
	encoder = tflearn.residual_block(encoder, 2, num_filter*1, batch_norm=False, regularizer='L1')
	scale_0 = encoder
	# encoder = tflearn.layers.normalization.batch_normalization(encoder)
	encoder = tflearn.max_pool_2d(encoder, 2)
	# encoder = tflearn.dropout(encoder, 0.75)


	encoder = tflearn.conv_2d(encoder, num_filter*2, 3, activation='relu', regularizer='L1')
	encoder = tflearn.residual_block(encoder, 2, num_filter*2, batch_norm=False, regularizer='L1')
	scale_1 = encoder
	# encoder = tflearn.layers.normalization.batch_normalization(encoder)
	encoder = tflearn.max_pool_2d(encoder, 2)
	# encoder = tflearn.dropout(encoder, 0.75)


	encoder = tflearn.conv_2d(encoder, num_filter*4, 3, activation='relu', regularizer='L1')
	encoder = tflearn.residual_block(encoder, 2, num_filter*4, batch_norm=False, regularizer='L1')
	scale_2 = encoder
	# encoder = tflearn.layers.normalization.batch_normalization(encoder)
	encoder = tflearn.max_pool_2d(encoder, 2)
	# encoder = tflearn.dropout(encoder, 0.75)
	

	encoder = tflearn.conv_2d(encoder, num_filter*8, 3, activation='relu', regularizer='L1')
	encoder = tflearn.residual_block(encoder, 2, num_filter*8, batch_norm=False, regularizer='L1')
	scale_3 = encoder
	# encoder = tflearn.layers.normalization.batch_normalization(encoder)
	encoder = tflearn.max_pool_2d(encoder, 2)
	# encoder = tflearn.dropout(encoder, 0.75)


	
	encoder = tflearn.conv_2d(encoder, num_filter*12, 3, activation='relu', regularizer='L1')
	encoder = tflearn.residual_block(encoder, 2, num_filter*16, batch_norm=False, regularizer='L1')
	# encoder = tflearn.layers.normalization.batch_normalization(encoder)

	decoder = encoder
	# decoder = tflearn.conv_2d_transpose(decoder, 
	# 								 nb_filter=num_filter*12, 
	# 								 filter_size=3, 
	# 								 activation='relu',
	# 								 regularizer='L1',
	# 								 output_shape=[16, 16])
	



	# decoder = tflearn.upsample_2d(decoder, 2)
	decoder = tflearn.layers.conv.upscore_layer(decoder, 
						 num_classes=256, 
						 kernel_size=3, 
						 shape=[1, 32, 32, num_filter*8]
						 ) 
	decoder = tflearn.conv_2d(decoder, num_filter*8, 3, activation='relu', regularizer='L1')
	# decoder = tflearn.residual_block(decoder, 1, num_filter*8, batch_norm=False, regularizer='L1')
	# decoder = tflearn.conv_2d_transpose(decoder, 
	# 								 nb_filter=num_filter*8, 
	# 								 filter_size=3, 
	# 								 activation='relu', 
	# 								 regularizer='L1',
	# 								 output_shape=[32, 32])
	# decoder = tflearn.layers.normalization.batch_normalization(decoder)
	# decoder = decoder + scale_3
	decoder = merge([decoder, scale_3], mode='elemwise_sum', axis=3)

	
	# decoder = tflearn.dropout(decoder, 0.75)
	# decoder = tflearn.upsample_2d(decoder, 2)
	decoder = tflearn.layers.conv.upscore_layer(decoder, 
							 num_classes=256, 
							 kernel_size=3, 
							 shape=[1, 64, 64, num_filter*4]
							 ) 
	decoder = tflearn.conv_2d(decoder, num_filter*4, 3, activation='relu', regularizer='L1')
	# decoder = tflearn.residual_block(decoder, 1, num_filter*4, batch_norm=False, regularizer='L1')
	# decoder = tflearn.conv_2d_transpose(decoder, 
	# 								 nb_filter=num_filter*4, 
	# 								 filter_size=3, 
	# 								 activation='relu',
	# 								 regularizer='L1',
	# 								 output_shape=[64, 64])
	# decoder = tflearn.layers.normalization.batch_normalization(decoder)
	# decoder = decoder + scale_2
	decoder = merge([decoder, scale_2], mode='elemwise_sum', axis=3)
	# decoder = tflearn.dropout(decoder, 0.75)
	# decoder = tflearn.upsample_2d(decoder, 2)
	decoder = tflearn.layers.conv.upscore_layer(decoder, 
							 num_classes=256, 
							 kernel_size=3, 
							 shape=[1, 128, 128, num_filter*2]
							 ) 
	decoder = tflearn.conv_2d(decoder, num_filter*2, 3, activation='relu', regularizer='L1')
	# decoder = tflearn.residual_block(decoder, 1, num_filter*2, batch_norm=False, regularizer='L1')
	# decoder = tflearn.conv_2d_transpose(decoder, 
	# 								 nb_filter=num_filter*2, 
	# 								 filter_size=3, 
	# 								 activation='relu',
	# 								 regularizer='L1',
	# 								 output_shape=[128, 128])
	# decoder = tflearn.layers.normalization.batch_normalization(decoder)
	# decoder = decoder + scale_1
	decoder = merge([decoder, scale_1], mode='elemwise_sum', axis=3)
	# decoder = tflearn.dropout(decoder, 0.75)
	# decoder = tflearn.upsample_2d(decoder, 2)
	decoder = tflearn.layers.conv.upscore_layer(decoder, 
							 num_classes=256, 
							 kernel_size=3, 
							 shape=[1, 256, 256, num_filter*1]
							 ) 
	decoder = tflearn.conv_2d(decoder, num_filter*1, 3, activation='relu', regularizer='L1')
	# decoder = tflearn.residual_block(decoder, 1, num_filter*1, batch_norm=False, regularizer='L1')
	# decoder = tflearn.conv_2d_transpose(decoder, 
	# 								 nb_filter=num_filter*1, 
	# 								 filter_size=3, 
	# 								 activation='relu',
	# 								 regularizer='L1',
	# 								 output_shape=[256, 256])
	# decoder = tflearn.layers.normalization.batch_normalization(decoder)
	# decoder = decoder + scale_0
	decoder = merge([decoder, scale_0], mode='elemwise_sum', axis=3)
	# decoder = tflearn.dropout(decoder, 0.75) 
	
	decoder = tflearn.conv_2d(decoder, 20, 1, activation='relu', regularizer='L1')
	# decoder = tflearn.conv_2d_transpose(decoder, 
	# 								 nb_filter=20, 
	# 								 filter_size=3, 
	# 								 activation='relu',
	# 								 regularizer='L1',
	# 								 output_shape=[256, 256])
	
	# decoder = tf.round(decoder)
	decoder = tf.clip_by_value(decoder, 0, 255)
	
	return decoder
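get_cae() returns only the decoder tensor. A minimal sketch of training it as an autoencoder on the 256x256x20 volumes it expects; the optimizer, loss, and batch size are assumptions:

cae = tflearn.regression(get_cae(), optimizer='adam', learning_rate=0.001,
                         loss='mean_square')
model = tflearn.DNN(cae, tensorboard_verbose=0)
# The autoencoder's target is its own input, e.g. model.fit(X, X, n_epoch=10, batch_size=4)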
'''
Coding Just for Fun
Created by burness on 16/8/31.
'''
import tflearn
from tflearn.layers.estimator import regression

x = tflearn.input_data(shape=[None, 224, 224, 3], name='input',
                       placeholder=None)

x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_1')
x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')

x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')

x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')

x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')

x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
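The VGG-16 stack above stops after conv5_3. For completeness, a sketch of the canonical head that would normally follow, mirroring the run() example earlier in this collection (the scope and layer names are illustrative):

x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')

x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
x = tflearn.dropout(x, 0.5, name='dropout1')
x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
x = tflearn.dropout(x, 0.5, name='dropout2')
x = tflearn.fully_connected(x, 1000, activation='softmax', scope='fc8')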
def run():
    X = tf.placeholder(shape=(None, 784), dtype=tf.float32)
    Y = tf.placeholder(shape=(None, 10), dtype=tf.float32)

    net = tf.reshape(X, [-1, 28, 28, 1])  # batch, height, width, channel

    # 32 filters, each of size 3(x3)
    net = tflearn.conv_2d(net, 32, 3, activation='relu')
    # pool kernel size 2; the stride defaults to the kernel size
    net = tflearn.max_pool_2d(net, 2)
    # local response normalization encourages "some kind of inhibition and
    # boosts the neurons with relatively larger activations"
    net = tflearn.local_response_normalization(net)
    # Dropout prevents overfitting: at each training step a node is dropped
    # with probability 1-p or kept with probability p, leaving a reduced network.
    # keep_prob=0.8
    net = tflearn.dropout(net, 0.8)

    # 64 filters
    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.dropout(net, 0.8)

    # FC
    net = tflearn.fully_connected(net, 128, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 256, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 10, activation='softmax')

    # --------------------------------------
    # really manual tf way
    # # Defining other ops using Tensorflow
    # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(net, Y))
    # optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    # optimizer_minop = optimizer.minimize(loss)

    # # start
    # init = tf.initialize_all_variables()

    # with tf.Session() as sess:
    #     sess.run(init)
    #     batch_size = 128
    #     for epoch in range(2):
    #         avg_cost = 0.
    #         total_batch = int(mnist_data.train.num_examples/batch_size)
    #         for i in range(total_batch):
    #             batch_xs, batch_ys = mnist_data.train.next_batch(batch_size)
    #             sess.run(optimizer_minop, feed_dict={X: batch_xs, Y: batch_ys})
    #             cost = sess.run(loss, feed_dict={X: batch_xs, Y: batch_ys})
    #             avg_cost += cost/total_batch
    #             if i % 20 == 0:
    #                 print("Epoch:", '%03d' % (epoch+1), "Step:", '%03d' % i,
    #                       "Loss:", str(cost))

    # --------------------------------------
    # use trainer class
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    accuracy = tf.reduce_mean(tf.cast(
        tf.equal(tf.argmax(net, 1), tf.argmax(Y, 1)),
        tf.float32), name='acc')

    trainop = tflearn.TrainOp(loss=loss, optimizer=optimizer,
                              metric=accuracy, batch_size=128)

    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=0)
    trainer.fit({X: trainX, Y: trainY}, val_feed_dicts={
                X: testX, Y: testY}, n_epoch=2, show_metric=True)
    trainer.save('models/mnist_cnn.tfl')
Example #60
testY = tflearn.data_utils.to_categorical(testY, 10)

# Real-time data preprocessing
img_prep = tflearn.ImagePreprocessing()
img_prep.add_featurewise_zero_center(per_channel=True)

# Real-time data augmentation
img_aug = tflearn.ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_crop([32, 32], padding=4)

# Building Residual Network
net = tflearn.input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
n = 5  # assumed; `n` is defined in a part of the original script not shown here
net = tflearn.resnext_block(net, n, 16, 32)
net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
net = tflearn.resnext_block(net, n-1, 32, 32)
net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
net = tflearn.resnext_block(net, n-1, 64, 32)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.global_avg_pool(net)
# Regression
net = tflearn.fully_connected(net, 10, activation='softmax')
opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
net = tflearn.regression(net, optimizer=opt,
                         loss='categorical_crossentropy')
# Training
model = tflearn.DNN(net, checkpoint_path='model_resnet_cifar10',