Example #1
    def __init__(self, input_shape, n_filter, n_hidden, n_y_landmark,
                 n_y_attribute, receptive_field, pool_size, apply_cross_stitch,
                 apply_weight_reg, attribute, logdir):

        print('Attribute: ' + attribute)
        print('Storing in ' + logdir)

        self._sanity_check(input_shape, n_filter, receptive_field, pool_size)

        x_shape = input_shape[:]
        x_shape[0] = None

        x = tf.placeholder(shape=x_shape, dtype=tf.float32)
        y_landmark = tf.placeholder(shape=(None, n_y_landmark),
                                    dtype=tf.float32)
        y_attribute = tf.placeholder(shape=(None, n_y_attribute),
                                     dtype=tf.float32)

        self.x, self.y_landmark, self.y_attribute = x, y_landmark, y_attribute

        # Loss
        self.objective = 0

        # ========= CNN layers =========
        x_1, x_2 = x, x
        n_channel = [input_shape[-1]] + n_filter
        for i in range(len(n_channel) - 1):
            filter_shape = receptive_field[i] + n_channel[i:i + 2]  # e.g. [5, 5, 32, 64]
            pool_shape = [1] + pool_size[i] + [1]
            print('Filter shape (layer %d): %s' % (i, filter_shape))

            # Convolutional layers
            conv_and_filter_1 = conv_layer(x_1,
                                           filter_shape,
                                           'conv%d_%d' % (i, 1),
                                           padding='VALID')
            conv_and_filter_2 = conv_layer(x_2,
                                           filter_shape,
                                           'conv%d_%d' % (i, 2),
                                           padding='VALID')
            print('Shape after conv: %s' % conv_and_filter_1.get_shape().as_list())

            # Local response normalization (disabled)
            # norm1 = tf.nn.local_response_normalization(
            #    conv_and_filter_1, 4, bias=1.0, alpha=0.001 / 9.0,
            #    beta=0.75, name='norm%d_%d'%(i,1))
            # norm2 = tf.nn.local_response_normalization(
            #    conv_and_filter_2, 4, bias=1.0, alpha=0.001 / 9.0,
            #    beta=0.75, name='norm%d_%d'%(i,2))

            # Pooling layer
            pool_1 = tf.nn.max_pool(
                # norm1,
                conv_and_filter_1,
                ksize=pool_shape,
                strides=pool_shape,
                padding='SAME',
                name='pool%d_%d' % (i, 1))
            pool_2 = tf.nn.max_pool(
                # norm2,
                conv_and_filter_2,
                ksize=pool_shape,
                strides=pool_shape,
                padding='SAME',
                name='pool%d_%d' % (i, 2))
            print('Shape after pooling: %s' % pool_1.get_shape().as_list())

            # Cross stitch
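            # A cross-stitch unit (Misra et al., 2016): each tower receives a
            # learned linear combination of both towers' activations,
            #   x_1' = alpha_11 * pool_1 + alpha_12 * pool_2
            #   x_2' = alpha_21 * pool_2 + alpha_22 * pool_1
            # with one alpha per channel, initialized to favor the own task.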
            if apply_cross_stitch[i]:
                channels = n_channel[i + 1]
                x_1 = tf.add(tf.multiply(pool_1, tf.Variable(0.9 * tf.ones([channels]), name='alpha%d_%d' % (i, 11)), name='mul%d_%d' % (i, 11)),
                             tf.multiply(pool_2, tf.Variable(0.1 * tf.ones([channels]), name='alpha%d_%d' % (i, 12)), name='mul%d_%d' % (i, 12)),
                             name='add%d_%d' % (i, 1))
                x_2 = tf.add(tf.multiply(pool_2, tf.Variable(0.9 * tf.ones([channels]), name='alpha%d_%d' % (i, 21)), name='mul%d_%d' % (i, 21)),
                             tf.multiply(pool_1, tf.Variable(0.1 * tf.ones([channels]), name='alpha%d_%d' % (i, 22)), name='mul%d_%d' % (i, 22)),
                             name='add%d_%d' % (i, 2))
            else:
                x_1 = pool_1
                x_2 = pool_2

        # ========= Fully-connected layers =========
        dim_1 = np.prod(x_1.get_shape()[1:].as_list())
        x_1 = tf.reshape(x_1, [-1, dim_1])
        dim_2 = np.prod(x_2.get_shape()[1:].as_list())
        x_2 = tf.reshape(x_2, [-1, dim_2])
        print('Total dim after CNN: %d' % dim_1)
        for i, n in enumerate(n_hidden):
            i += len(n_channel) - 1  # continue layer numbering after the conv stack

            fl_1 = full_layer(x_1, n, layer_name='full%d_%d' % (i, 1))  # nonlinear=tf.nn.relu
            fl_2 = full_layer(x_2, n, layer_name='full%d_%d' % (i, 2))  # nonlinear=tf.nn.relu

            # Cross stitch
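            # Same cross-stitch as in the conv stack, but with a single scalar
            # alpha per layer (tf.ones([1])), broadcast across all hidden units.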
            if apply_cross_stitch[i]:
                print('Cross stitch applied at layer %d' % i)
                x_1 = tf.add(tf.multiply(fl_1, tf.Variable(0.9 * tf.ones([1]), name='alpha%d_%d' % (i, 11)), name='mul%d_%d' % (i, 11)),
                             tf.multiply(fl_2, tf.Variable(0.1 * tf.ones([1]), name='alpha%d_%d' % (i, 12)), name='mul%d_%d' % (i, 12)),
                             name='add%d_%d' % (i, 1))
                x_2 = tf.add(tf.multiply(fl_2, tf.Variable(0.9 * tf.ones([1]), name='alpha%d_%d' % (i, 21)), name='mul%d_%d' % (i, 21)),
                             tf.multiply(fl_1, tf.Variable(0.1 * tf.ones([1]), name='alpha%d_%d' % (i, 22)), name='mul%d_%d' % (i, 22)),
                             name='add%d_%d' % (i, 2))
            else:
                x_1 = fl_1
                x_2 = fl_2

        yhat_1 = full_layer(x_1,
                            n_y_landmark,
                            layer_name='output_1',
                            nonlinear=tf.identity)
        yhat_2 = full_layer(x_2,
                            n_y_attribute,
                            layer_name='output_2',
                            nonlinear=tf.identity)

        self.yhat_1 = yhat_1
        self.yhat_2 = yhat_2

        self.batch_size = input_shape[0]
        self.lr = tf.placeholder(dtype=tf.float32)

        self.objective_1 = mse(y_landmark, yhat_1)
        # Multi-label sigmoid CE over all attributes, or softmax CE for a
        # single attribute.
        if attribute == 'all':
            self.objective_2 = sigmoidCE(y_attribute, yhat_2)
        else:
            self.objective_2 = softmaxCE(y_attribute, yhat_2)

        self.obj1_lambda = 1000.0
        self.objective = self.obj1_lambda * self.objective_1 + self.objective_2

        # Weight regularization
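        # Soft weight sharing: for each selected layer, penalize the L2
        # distance between the two towers' weights after mapping w1 through a
        # learned affine transform a * w1 + b, so corresponding layers are
        # encouraged to stay similar up to scale and shift.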
        self.weight_lambda = 1.0
        for i in range(len(n_filter)):
            if apply_weight_reg[i]:
                w1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                       scope='conv%d_%d' % (i, 1))[0]
                w2 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                       scope='conv%d_%d' % (i, 2))[0]
                a = tf.Variable(tf.ones([1]), name='a_w%d' % i)
                b = tf.Variable(tf.zeros([1]), name='b_w%d' % i)
                w1 = tf.add(tf.multiply(a, w1, name='mul%d' % i),
                            b,
                            name='add%d' % i)
                loss = tf.nn.l2_loss(tf.subtract(w1, w2,
                                                 name='sub_conv%d' % i),
                                     name='l2_conv%d' % i)
                self.objective = tf.add(self.objective,
                                        tf.multiply(self.weight_lambda,
                                                    loss,
                                                    name='mul_w%d' % i),
                                        name='add_w%d' % i)

        for i in range(len(n_filter), len(n_filter) + len(n_hidden)):
            if apply_weight_reg[i]:
                w1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                       scope='full%d_%d' % (i, 1))[0]
                w2 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                       scope='full%d_%d' % (i, 2))[0]
                a = tf.Variable(tf.ones([1]), name='a_w%d' % i)
                b = tf.Variable(tf.zeros([1]), name='b_w%d' % i)
                w1 = tf.add(tf.multiply(a, w1, name='mul%d' % i),
                            b,
                            name='add%d' % i)
                loss = tf.nn.l2_loss(tf.subtract(w1, w2, name='sub_fc%d' % i),
                                     name='l2_fc%d' % i)
                self.objective = tf.add(self.objective,
                                        tf.multiply(self.weight_lambda,
                                                    loss,
                                                    name='mul_w%d' % i),
                                        name='add_w%d' % i)

        self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(
            self.objective)

        tf.summary.scalar(self.objective.op.name, self.objective)

        # `config` is assumed to be a tf.ConfigProto defined at module level.
        self.sess = tf.Session(config=config)

        # Accuracy: for multi-label 'all', a prediction is correct when the
        # logit's sign agrees with a positive label; otherwise use argmax match.
        if attribute == 'all':
            correct_pred = tf.equal(tf.greater(y_attribute, 0),
                                    tf.greater(yhat_2, 0))
        else:
            correct_pred = tf.equal(tf.argmax(y_attribute, 1),
                                    tf.argmax(yhat_2, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

        self.logdir = logdir
        self.attribute = attribute
        self.layer_num = len(n_filter) + len(n_hidden)
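
A minimal usage sketch for Example #1. The class name MultiTaskCNN, the
hyper-parameter values, and the arrays images, landmarks, and attrs below are
all hypothetical assumptions for illustration; only the attributes (model.sess,
model.x, model.optimizer, ...) come from the snippet above.

    import tensorflow as tf

    # Hypothetical class name wrapping the __init__ above.
    model = MultiTaskCNN(input_shape=[128, 96, 96, 1],
                         n_filter=[32, 64, 128],
                         n_hidden=[500, 500],
                         n_y_landmark=10,
                         n_y_attribute=2,
                         receptive_field=[[3, 3], [2, 2], [2, 2]],
                         pool_size=[[2, 2], [2, 2], [2, 2]],
                         apply_cross_stitch=[False, True, True, False, False],
                         apply_weight_reg=[True, True, False, False, False],
                         attribute='smiling',
                         logdir='/tmp/logs')
    model.sess.run(tf.global_variables_initializer())

    # One training step: feed a batch and the learning rate.
    # images: [batch, 96, 96, 1], landmarks: [batch, 10], attrs: [batch, 2]
    _, loss = model.sess.run(
        [model.optimizer, model.objective],
        feed_dict={model.x: images,
                   model.y_landmark: landmarks,
                   model.y_attribute: attrs,
                   model.lr: 1e-4})
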
Example #2
    def __init__(self,
                 input_shape=[128, 96, 96, 1],
                 n_filter=[32, 64, 128],
                 n_hidden=[500, 500],
                 n_y=30,
                 receptive_field=[[3, 3], [2, 2], [2, 2]],
                 pool_size=[[2, 2], [2, 2], [2, 2]],
                 obj_fcn=mse):
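        # Single-task variant: one conv/FC tower and one output head; the
        # training objective is selectable via obj_fcn.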

        self._sanity_check(input_shape, n_filter, receptive_field, pool_size)

        x_shape = input_shape[:]
        x_shape[0] = None

        x = tf.placeholder(shape=x_shape, dtype=tf.float32)
        y = tf.placeholder(shape=(None, n_y), dtype=tf.float32)

        self.x, self.y = x, y

        # ========= CNN layers =========
        n_channel = [input_shape[-1]] + n_filter
        for i in range(len(n_channel) - 1):
            filter_shape = receptive_field[i] + n_channel[i:i + 2]  # e.g. [5, 5, 32, 64]
            pool_shape = [1] + pool_size[i] + [1]
            print('Filter shape (layer %d): %s' % (i, filter_shape))

            conv_and_filter = conv_layer(x,
                                         filter_shape,
                                         'conv%d' % i,
                                         padding='VALID')
            print('Shape after conv: %s' % conv_and_filter.get_shape().as_list())
            # norm1 = tf.nn.local_response_normalization(
            #    conv_and_filter, 4, bias=1.0, alpha=0.001 / 9.0,
            #    beta=0.75, name='norm%d'%i)
            pool1 = tf.nn.max_pool(
                #norm1,
                conv_and_filter,
                ksize=pool_shape,
                strides=pool_shape,
                padding='SAME',
                name='pool%d' % i)
            print('Shape after pooling: %s' % pool1.get_shape().as_list())
            x = pool1

        # ========= Fully-connected layers =========
        dim = np.prod(x.get_shape()[1:].as_list())
        x = tf.reshape(x, [-1, dim])
        print('Total dim after CNN: %d' % dim)
        for i, n in enumerate(n_hidden):
            x = full_layer(x, n,
                           layer_name='full%d' % i)  # nonlinear=tf.nn.relu
        yhat = full_layer(x, n_y, layer_name='output', nonlinear=tf.identity)

        self.yhat = yhat

        self.batch_size = input_shape[0]
        self.lr = tf.placeholder(dtype=tf.float32)

        self.objective = obj_fcn(y, yhat)  # objective supplied by caller (default: mse)
        self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(
            self.objective)
        tf.summary.scalar(self.objective.op.name, self.objective)

        self.sess = tf.Session(config=config)
Example #3
    def __init__(self, input_shape, n_filter, n_hidden, n_y_landmark,
                 n_y_attribute, receptive_field, pool_size, attribute, logdir):

        print('Attribute: ' + attribute)
        print('Storing in ' + logdir)
        
        self._sanity_check(input_shape, n_filter, receptive_field, pool_size)

        x_shape = input_shape[:]
        x_shape[0] = None
        
        x = tf.placeholder(shape=x_shape, dtype=tf.float32)
        y_landmark = tf.placeholder(shape=(None, n_y_landmark), dtype=tf.float32)
        y_attribute = tf.placeholder(shape=(None, n_y_attribute), dtype=tf.float32)

        self.x, self.y_landmark, self.y_attribute = x, y_landmark, y_attribute

        # Loss
        self.objective = 0

        # ========= CNN layers =========
        n_channel = [input_shape[-1]] + n_filter
        for i in range(len(n_channel) - 1):
            filter_shape = receptive_field[i] + n_channel[i:i + 2]  # e.g. [5, 5, 32, 64]
            pool_shape = [1] + pool_size[i] + [1]
            print('Filter shape (layer %d): %s' % (i, filter_shape))

            # Convolutional layers
            conv_and_filter = conv_layer(x, filter_shape, 'conv%d' % i, padding='VALID')
            print('Shape after conv: %s' % conv_and_filter.get_shape().as_list())

            # Local response normalization (disabled)
            # norm1 = tf.nn.local_response_normalization(
            #    conv_and_filter, 4, bias=1.0, alpha=0.001 / 9.0,
            #    beta=0.75, name='norm%d' % i)

            # Pooling layer
            pool = tf.nn.max_pool(
                #norm1,
                conv_and_filter,
                ksize=pool_shape,
                strides=pool_shape,
                padding='SAME',
                name='pool%d' % i)
            print('Shape after pooling: %s' % pool.get_shape().as_list())

            x = pool

        # ========= Fully-connected layers =========
        dim = np.prod(x.get_shape()[1:].as_list())
        x = tf.reshape(x, [-1, dim])
        print('Total dim after CNN: %d' % dim)
        for i, n in enumerate(n_hidden):
            x = full_layer(x, n, layer_name='full%d' % i) # nonlinear=tf.nn.relu
        
        yhat_1 = full_layer(x, n_y_landmark, layer_name='output_1', nonlinear=tf.identity)
        yhat_2 = full_layer(x, n_y_attribute, layer_name='output_2', nonlinear=tf.identity)
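        # Both heads read the same shared trunk (hard parameter sharing),
        # unlike the two cross-stitched towers of Example #1.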

        self.yhat_1 = yhat_1
        self.yhat_2 = yhat_2

        self.batch_size = input_shape[0]
        self.lr = tf.placeholder(dtype=tf.float32)

        self.objective_1 = mse(y_landmark, yhat_1)
        if attribute == "all":
            self.objective_2 = sigmoidCE(y_attribute, yhat_2)
        else:
            self.objective_2 = softmaxCE(y_attribute, yhat_2)

        self.obj1_lambda = 200
        self.objective = self.obj1_lambda * self.objective_1 + self.objective_2
        self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(self.objective)

        tf.summary.scalar(self.objective.op.name, self.objective)

        self.sess = tf.Session(config=config)
        
        # Argmax accuracy assumes one-hot, single-attribute labels (compare
        # the multi-label branch in Example #1).
        correct_pred = tf.equal(tf.argmax(y_attribute, 1), tf.argmax(yhat_2, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

        self.logdir = logdir
        self.attribute = attribute
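
The cross-stitch block in Example #1 repeats the same four-alpha pattern at
every stitched layer. Below is a minimal sketch of that unit factored into a
standalone helper, under the same 0.9/0.1 initialization; the function name and
its arguments are hypothetical, not part of the original code.

    import tensorflow as tf

    def cross_stitch(x_a, x_b, n_units, layer_id):
        """Recombine two towers' activations with learned alpha weights."""
        def alpha(init, tag):
            # 0.9 on the own-task path and 0.1 on the cross-task path, as in
            # Example #1, so each tower initially keeps mostly its own features.
            return tf.Variable(init * tf.ones([n_units]),
                               name='alpha%d_%s' % (layer_id, tag))
        out_a = alpha(0.9, '11') * x_a + alpha(0.1, '12') * x_b
        out_b = alpha(0.9, '21') * x_b + alpha(0.1, '22') * x_a
        return out_a, out_b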