Example #1
# Imports assumed throughout these examples (TF1-era APIs):
import os

import tensorflow as tf
import tensorflow.contrib.slim as slim
import tflearn

# `weight_variable` and `bias_variable` are the usual helpers
# (truncated-normal weights, constant biases), defined elsewhere.
def model_graph_asym(x, y_):
    with tf.name_scope('fc_1'):
        # First densely connected layer
        w_fc1 = weight_variable([x.get_shape().as_list()[1], 2000])
        b_fc1 = bias_variable([2000])
        h_fc1 = tflearn.relu(tf.matmul(x, w_fc1) + b_fc1)

    with tf.name_scope('fc_2'):
        # Second densely connected layer
        w_fc2 = weight_variable([h_fc1.get_shape().as_list()[1], 2000])
        b_fc2 = bias_variable([2000])
        h_fc2 = tflearn.relu(tf.matmul(h_fc1, w_fc2) + b_fc2)

    with tf.name_scope('softmax_layer'):
        # Asymmetric head: one logit reads the first 1500 hidden units,
        # the other reads the remaining 500 plus the raw input.
        w_s1 = weight_variable([1500, 1])
        w_s2 = weight_variable([502, 1])
        b_s = bias_variable([y_.get_shape().as_list()[1]])
        y_conv_logit1 = tf.matmul(h_fc2[:, :1500], w_s1)
        y_conv_logit2 = tf.matmul(tf.concat([h_fc2[:, 1500:], x], 1), w_s2)
        y_conv_logit = tf.concat([y_conv_logit1, y_conv_logit2], 1) + b_s
        y_conv = tf.nn.softmax(y_conv_logit)
    return y_conv_logit, y_conv
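A minimal usage sketch (the shapes are deductions, not from the source): `w_s2` is `[502, 1]` and multiplies `tf.concat([h_fc2[:, 1500:], x], 1)`, so the input must carry 2 features (500 + 2 = 502), and the two-column logit implies two classes.

x = tf.placeholder(tf.float32, [None, 2], name='x')
y_ = tf.placeholder(tf.float32, [None, 2], name='y_')
logits, probs = model_graph_asym(x, y_)  # each of shape [None, 2]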
Example #2
def AuxiliaryHeadCIFAR(x, class_num):
    # Auxiliary classifier head (NASNet/DARTS style): pool down to a
    # small map, bottleneck the channels, then classify.
    x = tflearn.relu(x)
    x = slim.avg_pool2d(x, [5, 5], stride=3, padding='SAME')
    x = slim.conv2d(x, 128, [1, 1])
    x = slim.batch_norm(x)
    x = tflearn.relu(x)
    x = slim.conv2d(x, 768, [2, 2])
    x = slim.batch_norm(x)
    x = tflearn.relu(x)
    x = slim.flatten(x)
    x = slim.fully_connected(x, class_num, activation_fn=None)
    return x
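A hypothetical call, assuming the head is attached to an 8x8 intermediate feature map as in DARTS-style auxiliary training (the channel count here is made up):

feat = tf.placeholder(tf.float32, [None, 8, 8, 256])
aux_logits = AuxiliaryHeadCIFAR(feat, class_num=10)  # -> [None, 10]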
Example #3
def FactorizedReduce(x, c_out):
    # Halve the spatial resolution without discarding pixels: two
    # stride-2 1x1 convs, the second on the input shifted by one pixel,
    # each producing half the output channels.
    x = tflearn.relu(x)
    conv1 = slim.conv2d(x, c_out // 2, [1, 1], stride=[2, 2])
    conv2 = slim.conv2d(x[:, 1:, 1:, :], c_out // 2, [1, 1], stride=[2, 2])
    x = tf.concat([conv1, conv2], -1)
    x = slim.batch_norm(x)
    return x
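The one-pixel slice makes the second stride-2 conv sample the grid positions the first one skips, so downsampling keeps information from both pixel grids. A shape sketch (input shape assumed):

x = tf.placeholder(tf.float32, [None, 32, 32, 64])
y = FactorizedReduce(x, c_out=128)  # -> [None, 16, 16, 128]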
Example #4
def DilConv(x, C_out, kernel_size, stride, rate):
    # Dilated depthwise-separable convolution (ReLU -> sep. conv -> BN).
    x = tflearn.relu(x)
    x = slim.separable_convolution2d(x,
                                     C_out,
                                     kernel_size,
                                     depth_multiplier=1,
                                     stride=stride,
                                     rate=rate)  # dilation rate
    x = slim.batch_norm(x)
    return x
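A hypothetical call (shapes assumed); note that TF's separable convolution only supports `rate > 1` together with `stride == 1`:

x = tf.placeholder(tf.float32, [None, 32, 32, 64])
y = DilConv(x, C_out=64, kernel_size=[3, 3], stride=1, rate=2)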
Example #5
def model_test(x, is_training, class_num=10):
    with tf.variable_scope("lw", reuse=tf.AUTO_REUSE):
        with slim.arg_scope([slim.conv2d],
                            normalizer_fn=None,
                            activation_fn=tflearn.relu,
                            padding="SAME"):
            with slim.arg_scope(
                [slim.conv2d, slim.fully_connected],
                    weights_initializer=tf.truncated_normal_initializer(
                        dtype=tf.float32, stddev=0.01),
                    weights_regularizer=None,
                    biases_initializer=tf.constant_initializer(0.0),
            ):
                with slim.arg_scope(
                    [slim.batch_norm],
                        decay=0.9,
                        scale=False,
                        epsilon=1e-3,
                        is_training=is_training,
                        zero_debias_moving_mean=True,
                ):

                    x = slim.conv2d(x, 24, [3, 3], normalizer_fn=tf.nn.lrn)
                    x = slim.max_pool2d(x, [2, 2])
                    x = slim.conv2d(x, 96, [3, 3], normalizer_fn=tf.nn.lrn)
                    x = slim.max_pool2d(x, [2, 2])
                    x = slim.conv2d(x, 192, [3, 3])
                    x = slim.conv2d(x, 192, [3, 3])
                    x = slim.conv2d(x, 96, [3, 3])
                    x = slim.max_pool2d(x, [2, 2])

                    # x = slim.dropout(x,is_training=is_training, keep_prob=0.9)

                    x = slim.flatten(x)
                    x = slim.fully_connected(x, 1024, activation_fn=None)
                    x = tflearn.relu(x)
                    x = slim.fully_connected(x, 1024, activation_fn=None)
                    x = tflearn.relu(x)

                    logits = slim.fully_connected(x,
                                                  class_num,
                                                  activation_fn=None)
                    return logits
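A minimal wiring sketch (CIFAR-10-like shapes assumed); `is_training` should be fed so batch norm can switch between batch statistics and moving averages:

images = tf.placeholder(tf.float32, [None, 32, 32, 3])
is_training = tf.placeholder(tf.bool, [])
logits = model_test(images, is_training, class_num=10)  # -> [None, 10]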
Example #6
def SepConv(x, C_out, kernel_size, stride):
    # DARTS-style separable-conv block: two ReLU -> sep. conv -> BN
    # passes; only the first carries the stride.
    x = tflearn.relu(x)
    C_in = x.get_shape()[-1].value

    x = slim.separable_convolution2d(x,
                                     C_in,
                                     kernel_size,
                                     depth_multiplier=1,
                                     stride=stride)
    x = slim.batch_norm(x)

    x = slim.separable_convolution2d(x, C_out, kernel_size, depth_multiplier=1)
    x = slim.batch_norm(x)
    return x
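A hypothetical call (shapes assumed):

x = tf.placeholder(tf.float32, [None, 32, 32, 32])
y = SepConv(x, C_out=64, kernel_size=[3, 3], stride=2)  # -> [None, 16, 16, 64]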
Example #7
    def build_network(self, num_classes, input_shape, model):
        network = tflearn.input_data(
            shape=[None, input_shape[0], input_shape[1], input_shape[2]])
        if model == 'DeepFace':
            conv_1 = tflearn.relu(
                tflearn.conv_2d(network,
                                32,
                                11,
                                strides=1,
                                padding='VALID',
                                name='Conv2d_1'))
            maxpool_1 = tflearn.max_pool_2d(conv_1,
                                            3,
                                            strides=2,
                                            padding='VALID',
                                            name='MaxPool_1')
            conv_2 = tflearn.relu(
                tflearn.conv_2d(maxpool_1,
                                32,
                                9,
                                strides=1,
                                padding='VALID',
                                name='Conv2d_2'))

            local_1 = tflearn.relu(self.local(conv_2, 16, 9, 1, 'Local_1'))
            local_2 = tflearn.relu(self.local(local_1, 16, 7, 1, 'Local_2'))
            local_3 = tflearn.relu(self.local(local_2, 16, 5, 1, 'Local_3'))

            flattened = tflearn.flatten(local_3)
            full_1 = tflearn.dropout(
                tflearn.relu(
                    tflearn.fully_connected(flattened,
                                            4096,
                                            name='Fully_Connected_1')), 0.5)
            output = tflearn.fully_connected(full_1,
                                             num_classes,
                                             activation='softmax',
                                             name='Output')

        elif model == 'Song':
            conv_1 = tflearn.relu(
                tflearn.conv_2d(network,
                                64,
                                5,
                                strides=1,
                                padding='VALID',
                                name='Conv_1'))
            maxpool_1 = tflearn.max_pool_2d(conv_1,
                                            3,
                                            strides=2,
                                            padding='VALID',
                                            name='MaxPool_1')
            conv_2 = tflearn.relu(
                tflearn.conv_2d(maxpool_1,
                                64,
                                5,
                                strides=1,
                                padding='VALID',
                                name='Conv_2'))
            maxpool_2 = tflearn.max_pool_2d(conv_2,
                                            3,
                                            strides=2,
                                            padding='VALID',
                                            name='MaxPool_2')

            # keep_prob=1 makes these dropout layers no-ops; lower it to
            # actually regularize.
            local_1 = tflearn.dropout(
                tflearn.relu(self.local(maxpool_2, 32, 3, 1, 'Local_1')), 1)
            local_2 = tflearn.dropout(
                tflearn.relu(self.local(local_1, 32, 3, 1, 'Local_2')), 1)
            flattened = tflearn.flatten(local_2)
            output = tflearn.fully_connected(flattened,
                                             num_classes,
                                             activation='softmax',
                                             name='Output')

        else:
            conv_1 = tflearn.relu(
                tflearn.conv_2d(network,
                                64,
                                7,
                                strides=2,
                                bias=True,
                                padding='VALID',
                                name='Conv2d_1'))
            maxpool_1 = tflearn.batch_normalization(
                tflearn.max_pool_2d(conv_1,
                                    3,
                                    strides=2,
                                    padding='VALID',
                                    name='MaxPool_1'))

            conv_2a = tflearn.relu(
                tflearn.conv_2d(maxpool_1,
                                96,
                                1,
                                strides=1,
                                padding='VALID',
                                name='Conv_2a_FX1'))
            maxpool_2a = tflearn.max_pool_2d(maxpool_1,
                                             3,
                                             strides=1,
                                             padding='VALID',
                                             name='MaxPool_2a_FX1')
            conv_2b = tflearn.relu(
                tflearn.conv_2d(conv_2a,
                                208,
                                3,
                                strides=1,
                                padding='VALID',
                                name='Conv_2b_FX1'))
            conv_2c = tflearn.relu(
                tflearn.conv_2d(maxpool_2a,
                                64,
                                1,
                                strides=1,
                                padding='VALID',
                                name='Conv_2c_FX1'))
            FX1_out = tflearn.merge([conv_2b, conv_2c],
                                    mode='concat',
                                    axis=3,
                                    name='FX1_out')

            conv_3a = tflearn.relu(
                tflearn.conv_2d(FX1_out,
                                96,
                                1,
                                strides=1,
                                padding='VALID',
                                name='Conv_3a_FX2'))
            maxpool_3a = tflearn.max_pool_2d(FX1_out,
                                             3,
                                             strides=1,
                                             padding='VALID',
                                             name='MaxPool_3a_FX2')
            conv_3b = tflearn.relu(
                tflearn.conv_2d(conv_3a,
                                208,
                                3,
                                strides=1,
                                padding='VALID',
                                name='Conv_3b_FX2'))
            conv_3c = tflearn.relu(
                tflearn.conv_2d(maxpool_3a,
                                64,
                                1,
                                strides=1,
                                padding='VALID',
                                name='Conv_3c_FX2'))
            FX2_out = tflearn.merge([conv_3b, conv_3c],
                                    mode='concat',
                                    axis=3,
                                    name='FX2_out')
            net = tflearn.flatten(FX2_out)
            output = tflearn.fully_connected(net,
                                             num_classes,
                                             activation='softmax',
                                             name='Output')

        return tflearn.regression(output,
                                  optimizer='Adam',
                                  loss='categorical_crossentropy',
                                  learning_rate=0.000001)
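A hypothetical call from inside the owning class, which must also provide the `local(...)` helper used above; the `input_shape` values are made up:

net = self.build_network(num_classes=7, input_shape=(48, 48, 1), model='Song')
model = tflearn.DNN(net)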
Example #8
    def build_network(self):
        padding = 'SAME'
        print(' ')
        print('----------------- Building CNN -----------------')
        print(' ')
        # SIZE_FACE is a module-level constant (inputs are
        # SIZE_FACE x SIZE_FACE grayscale images).
        self.network = tflearn.input_data(
            shape=[None, SIZE_FACE, SIZE_FACE, 1])

        conv_1 = tflearn.relu(
            tflearn.conv_2d(self.network,
                            96,
                            3,
                            strides=1,
                            bias=True,
                            padding=padding,
                            activation=None,
                            name='Conv_1'))
        maxpool_1 = tflearn.max_pool_2d(conv_1,
                                        3,
                                        strides=2,
                                        padding=padding,
                                        name='MaxPool_1')
        maxpool_1 = tflearn.batch_normalization(maxpool_1)

        conv_2 = tflearn.relu(
            tflearn.conv_2d(maxpool_1,
                            108,
                            2,
                            strides=1,
                            padding=padding,
                            name='Conv_2'))
        maxpool_2 = tflearn.max_pool_2d(conv_2,
                                        2,
                                        strides=1,
                                        padding=padding,
                                        name='MaxPool_2')
        maxpool_2 = tflearn.batch_normalization(maxpool_2)

        conv_3 = tflearn.relu(
            tflearn.conv_2d(maxpool_2,
                            208,
                            2,
                            strides=1,
                            padding=padding,
                            name='Conv_3'))
        conv_4 = tflearn.relu(
            tflearn.conv_2d(conv_3, 64, 2, strides=1, padding=padding,
                            name='Conv_4'))
        maxpool_3 = tflearn.max_pool_2d(conv_4,
                                        2,
                                        strides=1,
                                        padding=padding,
                                        name='MaxPool_3')
        maxpool_3 = tflearn.batch_normalization(maxpool_3)

        net = tflearn.flatten(maxpool_3, name='Net')
        net = tflearn.dropout(net, 0.1)

        final_1 = tflearn.fully_connected(net, 512, activation='relu')
        final_1 = tflearn.dropout(final_1, 0.5)

        final_2 = tflearn.fully_connected(final_1, 256, activation='relu')
        final_2 = tflearn.dropout(final_2, 0.5)

        output = tflearn.fully_connected(final_2,
                                         7,
                                         activation='softmax',
                                         name='Total_loss')

        self.network = tflearn.regression(output,
                                          optimizer='Adam',
                                          loss='categorical_crossentropy',
                                          learning_rate=0.0001)
        self.model = tflearn.DNN(self.network,
                                 tensorboard_verbose=0,
                                 tensorboard_dir=os.getcwd() + '/checkpoint',
                                 checkpoint_path='./data/emotion_recognition',
                                 max_checkpoints=None)
        #self.model = tflearn.DNN(self.network)
        self.load_model()
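A hypothetical training call once the network is built, assuming `images` and `labels` are NumPy arrays shaped `[N, SIZE_FACE, SIZE_FACE, 1]` and `[N, 7]`:

self.model.fit(images, labels, n_epoch=20, batch_size=64,
               validation_set=0.1, shuffle=True, show_metric=True)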
Example #9
# Excerpt: prop_statement_input_1..3 / prop_hyps_input_1..3, prop_net(),
# main_net, the time-step and dimension constants, and the dropout
# keep-prob `p` are defined earlier in the original file.
prop_statement_input_4 = tflearn.input_data([None, prop_statement_time_steps, input_dim], name='prop_statement_input_4')
prop_hyps_input_4 = tflearn.input_data([None, prop_hyps_time_steps, input_dim], name='prop_hyps_input_4')

prop_statement_input_5 = tflearn.input_data([None, prop_statement_time_steps, input_dim], name='prop_statement_input_5')
prop_hyps_input_5 = tflearn.input_data([None, prop_hyps_time_steps, input_dim], name='prop_hyps_input_5')

with tf.variable_scope('prop') as scope:
    prop_net_1 = prop_net(prop_statement_input_1, prop_hyps_input_1)
    scope.reuse_variables()
    prop_net_2 = prop_net(prop_statement_input_2, prop_hyps_input_2)
    prop_net_3 = prop_net(prop_statement_input_3, prop_hyps_input_3)
    prop_net_4 = prop_net(prop_statement_input_4, prop_hyps_input_4)
    prop_net_5 = prop_net(prop_statement_input_5, prop_hyps_input_5)

main_net = tflearn.fully_connected(main_net, output_dim, weights_init='xavier')
main_net = tflearn.relu(main_net)
main_net = tflearn.fully_connected(main_net, output_dim, weights_init='xavier')
main_net = tflearn.relu(main_net)
main_net = tflearn.dropout(main_net, p)

prop_net_1 = tflearn.fully_connected(prop_net_1, output_dim, weights_init='xavier')
prop_net_1 = tflearn.relu(prop_net_1)
prop_net_1 = tflearn.dropout(prop_net_1, p)
prop_net_1 = tflearn.fully_connected(prop_net_1, output_dim, weights_init='xavier')
prop_net_1 = tflearn.relu(prop_net_1)
prop_net_1 = tflearn.dropout(prop_net_1, p)

prop_net_2 = tflearn.fully_connected(prop_net_2, output_dim, weights_init='xavier')
prop_net_2 = tflearn.relu(prop_net_2)
prop_net_2 = tflearn.dropout(prop_net_2, p)
prop_net_2 = tflearn.fully_connected(prop_net_2, output_dim, weights_init='xavier')
Example #10
# Excerpt: assumes `data` (a NumPy feature matrix) and `number_hidden`
# are defined earlier, plus:
# from tflearn.data_preprocessing import DataPreprocessing
number_features = data.shape[1]
number_examples = data.shape[0]

data_prep = DataPreprocessing()
data_prep.add_featurewise_zero_center()
#data_prep.add_featurewise_stdnorm()


# Build neural network
net = tflearn.input_data(shape=[None, number_features], data_preprocessing=data_prep)

# 1 fully connected
net = tflearn.fully_connected(net, number_hidden)
#tflearn.add_weights_regularizer(net, loss='L2')
net = tflearn.batch_normalization(net)
net = tflearn.relu(net)

# 2

net = tflearn.fully_connected(net, number_hidden)
#tflearn.add_weights_regularizer(net, loss='L2')
net = tflearn.relu(net)

# 3

net = tflearn.fully_connected(net, number_hidden)
#tflearn.add_weights_regularizer(net, loss='L2')
net = tflearn.batch_normalization(net)
net = tflearn.relu(net)

# 4
Example #11
def ReLUConvBN(x, C_out):
    # 1x1 channel projection with ReLU -> conv -> BN ordering.
    x = tflearn.relu(x)
    x = slim.conv2d(x, C_out, [1, 1])
    x = slim.batch_norm(x)
    return x
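A shape sketch (input shape assumed):

x = tf.placeholder(tf.float32, [None, 16, 16, 32])
y = ReLUConvBN(x, C_out=64)  # -> [None, 16, 16, 64]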
Example #12
def my_model(X, y):
    y = tf.one_hot(y, 10)

    network = tflearn.conv_2d(X,
                              nb_filter=32,
                              filter_size=3,
                              strides=1,
                              activation="linear",
                              padding="valid",
                              name="conv1",
                              weight_decay=0.01)
    network = tflearn.batch_normalization(network, name="bn1")
    network = tflearn.relu(network)
    network = tflearn.max_pool_2d(network,
                                  kernel_size=2,
                                  strides=2,
                                  padding="same",
                                  name="pool1")

    network = tflearn.conv_2d(network,
                              nb_filter=64,
                              filter_size=3,
                              strides=1,
                              activation="linear",
                              padding="valid",
                              name="conv2",
                              weight_decay=0.01)
    network = tflearn.batch_normalization(network, name="bn2")
    network = tflearn.relu(network)
    network = tflearn.max_pool_2d(network,
                                  kernel_size=2,
                                  strides=2,
                                  padding="same",
                                  name="pool2")

    network = tflearn.flatten(network, name="flat1")

    network = tflearn.fully_connected(network,
                                      1024,
                                      activation="linear",
                                      name="fc1",
                                      weight_decay=0.01)
    network = tflearn.batch_normalization(network, name="bn2")
    network = tflearn.relu(network)

    network = tflearn.fully_connected(network,
                                      1024,
                                      activation="linear",
                                      name="fc2",
                                      weight_decay=0.01)
    network = tflearn.batch_normalization(network, name="bn3")
    network = tflearn.relu(network)

    logits = tflearn.fully_connected(network,
                                     10,
                                     activation="softmax",
                                     name="output",
                                     weight_decay=0.01)
    # The final layer applies softmax, so `logits` are already
    # probabilities, which is what tflearn.categorical_crossentropy
    # expects as y_pred.
    loss = tflearn.categorical_crossentropy(logits, y)
    train_op = tflearn.Adam(0.0001, 0.9)().minimize(loss)

    return logits, loss, train_op
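A minimal sketch of driving one training step (MNIST-like input assumed; the commented feed arrays stand in for your own batches):

X = tf.placeholder(tf.float32, [None, 28, 28, 1])
y = tf.placeholder(tf.int64, [None])
logits, loss, train_op = my_model(X, y)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # sess.run([train_op, loss], feed_dict={X: image_batch, y: label_batch})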