Example #1
File: test.py Project: cvikasreddy/VisReg
def conv_net(x):
    conv1 = conv_layer(x, [5, 5, 3, 64])
    conv1 = dropout_layer(conv1, keep_prob)

    pool1 = pool_layer(conv1, ksize=[1, 3, 3, 1])

    conv2 = conv_layer(pool1, [5, 5, 64, 64])
    conv2 = dropout_layer(conv2, keep_prob)

    pool2 = pool_layer(conv2, ksize=[1, 3, 3, 1])

    conv3 = conv_layer(pool2, [5, 5, 64, 64])
    conv3 = dropout_layer(conv3, keep_prob)

    pool3 = pool_layer(conv3, ksize=[1, 3, 3, 1])

    reshaped, reshaped_shape = change_to_fc(pool3)

    fc1 = fc_layer(reshaped, reshaped_shape, 384)
    fc1 = dropout_layer(fc1, keep_prob)

    fc2 = fc_layer(fc1, 384, 192)
    fc2 = dropout_layer(fc2, keep_prob)

    out = fc_layer(fc2, 192, 10, act='softmax', std=0.005)
    return out
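The helpers used here (conv_layer, pool_layer, dropout_layer, fc_layer, change_to_fc) and the global keep_prob are defined elsewhere in the VisReg project and do not appear on this page. What follows is a minimal TF 1.x sketch of plausible definitions, matching the call sites above; the actual signatures and initializers in cvikasreddy/VisReg may differ.

import numpy as np
import tensorflow as tf

def conv_layer(x, filter_shape, std=0.05):
    # filter_shape = [height, width, in_channels, out_channels]
    w = tf.Variable(tf.truncated_normal(filter_shape, stddev=std))
    b = tf.Variable(tf.zeros([filter_shape[-1]]))
    return tf.nn.relu(tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME') + b)

def pool_layer(x, ksize=[1, 2, 2, 1]):
    # Max pooling with stride equal to the window size.
    return tf.nn.max_pool(x, ksize=ksize, strides=ksize, padding='SAME')

def dropout_layer(x, keep_prob):
    return tf.nn.dropout(x, keep_prob)

def change_to_fc(x):
    # Flatten an NHWC tensor to [batch, features] and report the feature count.
    dim = int(np.prod(x.get_shape().as_list()[1:]))
    return tf.reshape(x, [-1, dim]), dim

def fc_layer(x, n_in, n_out, act='relu', std=0.05):
    w = tf.Variable(tf.truncated_normal([n_in, n_out], stddev=std))
    b = tf.Variable(tf.zeros([n_out]))
    z = tf.matmul(x, w) + b
    return tf.nn.softmax(z) if act == 'softmax' else tf.nn.relu(z)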
Example #2
File: test.py Project: cvikasreddy/VisReg
def conv_net(x):
    conv1 = conv_layer(x, [3, 3, 1, 64], 64)
    conv1 = dropout_layer(conv1, 1.0)

    conv2 = conv_layer(conv1, [3, 3, 64, 64], 64)
    conv2 = dropout_layer(conv2, 0.9)

    pool1 = pool_layer(conv2)

    reshaped, reshaped_shape = change_to_fc(pool1)
    fc1 = fc_layer(reshaped, reshaped_shape, 1024)
    fc1 = dropout_layer(fc1, 0.9)

    out = fc_layer(fc1, 1024, 10, act='softmax')
    return out
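Note that this variant hard-codes the dropout keep probabilities (1.0 and 0.9) instead of feeding them through a placeholder, so dropout cannot be switched off at evaluation time without rebuilding the graph. A hypothetical forward pass is sketched below, assuming the project's helpers are importable and a 28x28 grayscale input (the 1 in the [3, 3, 1, 64] filter shape implies a single input channel; the 28x28 size is an assumption).

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
probs = conv_net(x)  # the conv_net defined above

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(8, 28, 28, 1).astype(np.float32)
    out = sess.run(probs, feed_dict={x: batch})
    print(out.shape)  # expected: (8, 10)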
Example #3
    def __init__(self,
                 input_shape=[128, 96, 96, 1],
                 n_filter=[32, 64, 128],
                 n_hidden=[500, 500],
                 n_y=30,
                 receptive_field=[[3, 3], [2, 2], [2, 2]],
                 pool_size=[[2, 2], [2, 2], [2, 2]],
                 obj_fcn=mse):

        self._sanity_check(input_shape, n_filter, receptive_field, pool_size)

        x_shape = input_shape[:]
        x_shape[0] = None

        x = tf.placeholder(shape=x_shape, dtype=tf.float32)
        y = tf.placeholder(shape=(None, n_y), dtype=tf.float32)

        self.x, self.y = x, y

        # ========= CNN layers =========
        n_channel = [input_shape[-1]] + n_filter
        for i in range(len(n_channel) - 1):
            filter_shape = receptive_field[i] + n_channel[i:i + 2]  # e.g. [5, 5, 32, 64]
            pool_shape = [1] + pool_size[i] + [1]
            print('Filter shape (layer %d): %s' % (i, filter_shape))

            conv_and_filter = conv_layer(x,
                                         filter_shape,
                                         'conv%d' % i,
                                         padding='VALID')
            print('Shape after conv: %s' % conv_and_filter.get_shape().as_list())
            # norm1 = tf.nn.local_response_normalization(
            #    conv_and_filter, 4, bias=1.0, alpha=0.001 / 9.0,
            #    beta=0.75, name='norm%d'%i)
            pool1 = tf.nn.max_pool(
                #norm1,
                conv_and_filter,
                ksize=pool_shape,
                strides=pool_shape,
                padding='SAME',
                name='pool%d' % i)
            print('Shape after pooling: %s' % pool1.get_shape().as_list())
            x = pool1

        # ========= Fully-connected layers =========
        dim = np.prod(x.get_shape()[1:].as_list())
        x = tf.reshape(x, [-1, dim])
        print('Total dim after CNN: %d' % dim)
        for i, n in enumerate(n_hidden):
            x = full_layer(x, n,
                           layer_name='full%d' % i)  # nonlinear=tf.nn.relu
        yhat = full_layer(x, n_y, layer_name='output', nonlinear=tf.identity)

        self.yhat = yhat

        self.batch_size = input_shape[0]
        self.lr = tf.placeholder(dtype=tf.float32)

        self.objective = obj_fcn(y, yhat)  # objective supplied by the caller (defaults to mse)
        self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(
            self.objective)
        tf.summary.scalar(self.objective.op.name, self.objective)

        self.sess = tf.Session(config=config)
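Examples #3 through #5 lean on full_layer plus the loss helpers mse, sigmoidCE, and softmaxCE, none of which appear on this page; config is likewise assumed to be defined at module level (e.g. a tf.ConfigProto). A minimal sketch of what these helpers might look like, assuming yhat holds raw logits:

import tensorflow as tf

def full_layer(x, n_out, layer_name, nonlinear=tf.nn.relu):
    # Dense layer inside a variable scope, so its weights can later be
    # fetched with tf.get_collection(..., scope=layer_name) as example #4 does.
    with tf.variable_scope(layer_name):
        n_in = int(x.get_shape()[-1])
        w = tf.get_variable('w', [n_in, n_out],
                            initializer=tf.truncated_normal_initializer(stddev=0.05))
        b = tf.get_variable('b', [n_out], initializer=tf.zeros_initializer())
        return nonlinear(tf.matmul(x, w) + b)

def mse(y, yhat):
    return tf.reduce_mean(tf.square(y - yhat))

def sigmoidCE(y, yhat):
    # Independent binary labels per output unit.
    return tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=yhat))

def softmaxCE(y, yhat):
    # Mutually exclusive classes; y is one-hot.
    return tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))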
Example #4
    def __init__(self, input_shape, n_filter, n_hidden, n_y_landmark,
                 n_y_attribute, receptive_field, pool_size, apply_cross_stitch,
                 apply_weight_reg, attribute, logdir):

        print('Attribute: ' + attribute)
        print('Storing in ' + logdir)

        self._sanity_check(input_shape, n_filter, receptive_field, pool_size)

        x_shape = input_shape[:]
        x_shape[0] = None

        x = tf.placeholder(shape=x_shape, dtype=tf.float32)
        y_landmark = tf.placeholder(shape=(None, n_y_landmark),
                                    dtype=tf.float32)
        y_attribute = tf.placeholder(shape=(None, n_y_attribute),
                                     dtype=tf.float32)

        self.x, self.y_landmark, self.y_attribute = x, y_landmark, y_attribute

        # Loss
        self.objective = 0

        # ========= CNN layers =========
        x_1, x_2 = x, x
        n_channel = [input_shape[-1]] + n_filter
        for i in range(len(n_channel) - 1):
            filter_shape = receptive_field[i] + n_channel[i:i + 2]  # e.g. [5, 5, 32, 64]
            pool_shape = [1] + pool_size[i] + [1]
            print('Filter shape (layer %d): %s' % (i, filter_shape))

            # Convolutional layers
            conv_and_filter_1 = conv_layer(x_1,
                                           filter_shape,
                                           'conv%d_%d' % (i, 1),
                                           padding='VALID')
            conv_and_filter_2 = conv_layer(x_2,
                                           filter_shape,
                                           'conv%d_%d' % (i, 2),
                                           padding='VALID')
            print('Shape after conv: %s' % conv_and_filter_1.get_shape().as_list())

            # Batch normalization
            # norm1 = tf.nn.local_response_normalization(
            #    conv_and_filter_1, 4, bias=1.0, alpha=0.001 / 9.0,
            #    beta=0.75, name='norm%d_%d'%(i,1))
            # norm2 = tf.nn.local_response_normalization(
            #    conv_and_filter_2, 4, bias=1.0, alpha=0.001 / 9.0,
            #    beta=0.75, name='norm%d_%d'%(i,2))

            # Pooling layer
            pool_1 = tf.nn.max_pool(
                #norm1,
                conv_and_filter_1,
                ksize=pool_shape,
                strides=pool_shape,
                padding='SAME',
                name='pool%d_%d' % (i, 1))
            pool_2 = tf.nn.max_pool(
                #norm1,
                conv_and_filter_2,
                ksize=pool_shape,
                strides=pool_shape,
                padding='SAME',
                name='pool%d_%d' % (i, 2))
            print('Shape after pooling: %s' % pool_1.get_shape().as_list())

            # Cross stitch
            if apply_cross_stitch[i]:
                channels = n_channel[i + 1]
                alpha_11 = tf.Variable(0.9 * tf.ones([channels]), name='alpha%d_%d' % (i, 11))
                alpha_12 = tf.Variable(0.1 * tf.ones([channels]), name='alpha%d_%d' % (i, 12))
                alpha_21 = tf.Variable(0.9 * tf.ones([channels]), name='alpha%d_%d' % (i, 21))
                alpha_22 = tf.Variable(0.1 * tf.ones([channels]), name='alpha%d_%d' % (i, 22))
                x_1 = tf.add(tf.multiply(pool_1, alpha_11, name='mul%d_%d' % (i, 11)),
                             tf.multiply(pool_2, alpha_12, name='mul%d_%d' % (i, 12)),
                             name='add%d_%d' % (i, 1))
                x_2 = tf.add(tf.multiply(pool_2, alpha_21, name='mul%d_%d' % (i, 21)),
                             tf.multiply(pool_1, alpha_22, name='mul%d_%d' % (i, 22)),
                             name='add%d_%d' % (i, 2))
            else:
                x_1 = pool_1
                x_2 = pool_2

        # ========= Fully-connected layers =========
        dim_1 = np.prod(x_1.get_shape()[1:].as_list())
        x_1 = tf.reshape(x_1, [-1, dim_1])
        dim_2 = np.prod(x_2.get_shape()[1:].as_list())
        x_2 = tf.reshape(x_2, [-1, dim_2])
        print('Total dim after CNN: %d' % dim_1)
        for i, n in enumerate(n_hidden):
            i += len(n_channel) - 1

            fl_1 = full_layer(x_1, n, layer_name='full%d_%d' %
                              (i, 1))  # nonlinear=tf.nn.relu
            fl_2 = full_layer(x_2, n, layer_name='full%d_%d' %
                              (i, 2))  # nonlinear=tf.nn.relu

            # Cross stitch
            if apply_cross_stitch[i]:
                print(i)
                alpha_11 = tf.Variable(0.9 * tf.ones([1]), name='alpha%d_%d' % (i, 11))
                alpha_12 = tf.Variable(0.1 * tf.ones([1]), name='alpha%d_%d' % (i, 12))
                alpha_21 = tf.Variable(0.9 * tf.ones([1]), name='alpha%d_%d' % (i, 21))
                alpha_22 = tf.Variable(0.1 * tf.ones([1]), name='alpha%d_%d' % (i, 22))
                x_1 = tf.add(tf.multiply(fl_1, alpha_11, name='mul%d_%d' % (i, 11)),
                             tf.multiply(fl_2, alpha_12, name='mul%d_%d' % (i, 12)),
                             name='add%d_%d' % (i, 1))
                x_2 = tf.add(tf.multiply(fl_2, alpha_21, name='mul%d_%d' % (i, 21)),
                             tf.multiply(fl_1, alpha_22, name='mul%d_%d' % (i, 22)),
                             name='add%d_%d' % (i, 2))
            else:
                x_1 = fl_1
                x_2 = fl_2

        yhat_1 = full_layer(x_1,
                            n_y_landmark,
                            layer_name='output_1',
                            nonlinear=tf.identity)
        yhat_2 = full_layer(x_2,
                            n_y_attribute,
                            layer_name='output_2',
                            nonlinear=tf.identity)

        self.yhat_1 = yhat_1
        self.yhat_2 = yhat_2

        self.batch_size = input_shape[0]
        self.lr = tf.placeholder(dtype=tf.float32)

        self.objective_1 = mse(y_landmark, yhat_1)
        if attribute == 'all':
            self.objective_2 = sigmoidCE(y_attribute, yhat_2)
        else:
            self.objective_2 = softmaxCE(y_attribute, yhat_2)

        self.obj1_lambda = 1000.0
        self.objective = self.obj1_lambda * self.objective_1 + self.objective_2

        # Weight regularization
        self.weight_lambda = 1.0
        for i in range(len(n_filter)):
            if apply_weight_reg[i]:
                w1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                       scope='conv%d_%d' % (i, 1))[0]
                w2 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                       scope='conv%d_%d' % (i, 2))[0]
                a = tf.Variable(tf.multiply(1.0, tf.ones([1])),
                                name='a_w%d' % i)
                b = tf.Variable(tf.add(0.0, tf.zeros([1])), name='b_w%d' % i)
                w1 = tf.add(tf.multiply(a, w1, name='mul%d' % i),
                            b,
                            name='add%d' % i)
                loss = tf.nn.l2_loss(tf.subtract(w1, w2,
                                                 name='sub_conv%d' % i),
                                     name='l2_conv%d' % i)
                self.objective = tf.add(self.objective,
                                        tf.multiply(self.weight_lambda,
                                                    loss,
                                                    name='mul_w%d' % i),
                                        name='add_w%d' % i)

        for i in range(len(n_filter), len(n_filter) + len(n_hidden)):
            if apply_weight_reg[i]:
                w1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                       scope='full%d_%d' % (i, 1))[0]
                w2 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                       scope='full%d_%d' % (i, 2))[0]
                a = tf.Variable(tf.multiply(1.0, tf.ones([1])),
                                name='a_w%d' % i)
                b = tf.Variable(tf.add(0.0, tf.zeros([1])), name='b_w%d' % i)
                w1 = tf.add(tf.multiply(a, w1, name='mul%d' % i),
                            b,
                            name='add%d' % i)
                loss = tf.nn.l2_loss(tf.subtract(w1, w2, name='sub_fc%d' % i),
                                     name='l2_fc%d' % i)
                self.objective = tf.add(self.objective,
                                        tf.multiply(self.weight_lambda,
                                                    loss,
                                                    name='mul_w%d' % i),
                                        name='add_w%d' % i)

        self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(
            self.objective)

        tf.summary.scalar(self.objective.op.name, self.objective)

        self.sess = tf.Session(config=config)

        if attribute == 'all':
            correct_pred = tf.equal(tf.greater(y_attribute, 0),
                                    tf.greater(yhat_2, 0))
        else:
            correct_pred = tf.equal(tf.argmax(y_attribute, 1),
                                    tf.argmax(yhat_2, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

        self.logdir = logdir
        self.attribute = attribute
        self.layer_num = len(n_filter) + len(n_hidden)
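The tf.add/tf.multiply pairs above implement cross-stitch units (Misra et al., "Cross-stitch Networks for Multi-task Learning", CVPR 2016): after each shared stage, every task stream is replaced by a learned linear combination of both streams, with mixing weights initialized near the identity (0.9 for the own stream, 0.1 for the other). The same pattern could be factored into a hypothetical helper; the initial values below mirror the example, while the function and variable names are illustrative.

import tensorflow as tf

def cross_stitch(own, other, units, name):
    # Per-unit mixing weights, trained jointly with the rest of the network.
    alpha_own = tf.Variable(0.9 * tf.ones([units]), name=name + '_own')
    alpha_other = tf.Variable(0.1 * tf.ones([units]), name=name + '_other')
    return alpha_own * own + alpha_other * other

# e.g. inside the convolutional loop above:
#   x_1 = cross_stitch(pool_1, pool_2, n_channel[i + 1], 'cs%d_1' % i)
#   x_2 = cross_stitch(pool_2, pool_1, n_channel[i + 1], 'cs%d_2' % i)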
Example #5
    def __init__(self,
        input_shape,
        n_filter, 
        n_hidden,
        n_y_landmark,
        n_y_attribute,
        receptive_field,
        pool_size,
        attribute,
        logdir):

        print('Attribute: ' + attribute)
        print('Storing in ' + logdir)
        
        self._sanity_check(input_shape, n_filter, receptive_field, pool_size)

        x_shape = input_shape[:]
        x_shape[0] = None
        
        x = tf.placeholder(shape=x_shape, dtype=tf.float32)
        y_landmark = tf.placeholder(shape=(None, n_y_landmark), dtype=tf.float32)
        y_attribute = tf.placeholder(shape=(None, n_y_attribute), dtype=tf.float32)

        self.x, self.y_landmark, self.y_attribute = x, y_landmark, y_attribute

        # Loss
        self.objective = 0

        # ========= CNN layers =========
        n_channel = [input_shape[-1]] + n_filter
        for i in range(len(n_channel) - 1):
            filter_shape = receptive_field[i] + n_channel[i:i + 2]  # e.g. [5, 5, 32, 64]
            pool_shape = [1] + pool_size[i] + [1]
            print('Filter shape (layer %d): %s' % (i, filter_shape))

            # Convolutional layers
            conv_and_filter = conv_layer(x, filter_shape, 'conv%d' % i, padding='VALID')
            print('Shape after conv: %s' % conv_and_filter.get_shape().as_list())

            # Batch normalization
            # norm1 = tf.nn.local_response_normalization(
            #    conv_and_filter, 4, bias=1.0, alpha=0.001 / 9.0,
            #    beta=0.75, name='norm%d_%d'%(i,1))

            # Pooling layer
            pool = tf.nn.max_pool(
                #norm1,
                conv_and_filter,
                ksize=pool_shape,
                strides=pool_shape,
                padding='SAME',
                name='pool%d' % i)
            print('Shape after pooling: %s' % pool.get_shape().as_list())

            x = pool

        # ========= Fully-connected layers =========
        dim = np.prod(x.get_shape()[1:].as_list())
        x = tf.reshape(x, [-1, dim])
        print('Total dim after CNN: %d' % dim)
        for i, n in enumerate(n_hidden):
            x = full_layer(x, n, layer_name='full%d' % i) # nonlinear=tf.nn.relu
        
        yhat_1 = full_layer(x, n_y_landmark, layer_name='output_1', nonlinear=tf.identity)
        yhat_2 = full_layer(x, n_y_attribute, layer_name='output_2', nonlinear=tf.identity)

        self.yhat_1 = yhat_1
        self.yhat_2 = yhat_2

        self.batch_size = input_shape[0]
        self.lr = tf.placeholder(dtype=tf.float32)

        self.objective_1 = mse(y_landmark, yhat_1)
        if attribute == "all":
            self.objective_2 = sigmoidCE(y_attribute, yhat_2)
        else:
            self.objective_2 = softmaxCE(y_attribute, yhat_2)

        self.obj1_lambda = 200
        self.objective = self.obj1_lambda * self.objective_1 + self.objective_2
        self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(self.objective)

        tf.summary.scalar(self.objective.op.name, self.objective)

        self.sess = tf.Session(config=config)
        
        correct_pred = tf.equal(tf.argmax(y_attribute, 1), tf.argmax(yhat_2, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

        self.logdir = logdir
        self.attribute = attribute
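None of the examples show how the graph is driven. A hypothetical mini-batch training loop for the model above might look like the following sketch, where X, Y_lm, and Y_attr are illustrative array names and the batching scheme is an assumption, not part of the original project.

import numpy as np
import tensorflow as tf

def train(model, X, Y_lm, Y_attr, epochs=10, lr=1e-4):
    batch = model.batch_size
    model.sess.run(tf.global_variables_initializer())
    for epoch in range(epochs):
        for start in range(0, len(X) - batch + 1, batch):
            sl = slice(start, start + batch)
            _, loss = model.sess.run(
                [model.optimizer, model.objective],
                feed_dict={model.x: X[sl],
                           model.y_landmark: Y_lm[sl],
                           model.y_attribute: Y_attr[sl],
                           model.lr: lr})
        print('epoch %d: loss %.4f' % (epoch, loss))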