Example 1
    def fully_connected_forward(self):
        representation = self.representation
        dim = representation.get_shape()[1].value
        fc_outputs = []  # activation of each fully connected layer

        for i, units in enumerate(self.fc_layers):
            with tf.variable_scope('fc{}'.format(i + 1)):
                if i == 0:
                    inp_dim = dim
                else:
                    inp_dim = self.fc_layers[i - 1]

                weights = variable_with_weight_decay('weights',
                                                     shape=[inp_dim, units],
                                                     stddev=0.04,
                                                     wd=0.001,
                                                     dtype=self.dtype,
                                                     trainable=True)
                biases = variable_on_cpu('biases', [units],
                                         tf.constant_initializer(0.1),
                                         dtype=self.dtype,
                                         trainable=True)
                if i == 0:
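                    # note: dropout (keep_prob=0.5) is applied unconditionally
                    # here, i.e. also at inference time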
                    rep_drop = tf.nn.dropout(representation, 0.5)
                    local = tf.nn.relu(tf.matmul(rep_drop, weights) + biases,
                                       name='local')
                else:
                    local = tf.nn.relu(tf.matmul(fc_outputs[i - 1], weights) +
                                       biases,
                                       name='local')

                fc_outputs.append(local)

        with tf.variable_scope('softmax_linear'):
            non_linear = fc_outputs[-1]
            weights = variable_with_weight_decay(
                'weights',
                shape=[self.fc_layers[-1], self.num_classes],
                stddev=1 / 192.0,
                wd=0.0,
                dtype=self.dtype,
                trainable=True)
            biases = variable_on_cpu('biases', [self.num_classes],
                                     tf.constant_initializer(0.0),
                                     dtype=self.dtype,
                                     trainable=True)

            output = tf.add(tf.matmul(non_linear, weights),
                            biases,
                            name='output')

        return output
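These examples depend on the helpers variable_with_weight_decay and variable_on_cpu, which are defined elsewhere in the repository. Below is a minimal sketch in the spirit of the TensorFlow 1.x CIFAR-10 tutorial, covering both calling conventions used above (stddev= vs. initializer=); the repo's actual helpers may differ.

import tensorflow as tf

def variable_on_cpu(name, shape, initializer, dtype=tf.float32, trainable=True):
    # Pin the variable to host memory, as in the CIFAR-10 multi-GPU tutorial.
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer,
                               dtype=dtype, trainable=trainable)

def variable_with_weight_decay(name, shape, stddev=None, wd=None,
                               initializer=None, dtype=tf.float32,
                               trainable=True):
    # Either an explicit initializer or a truncated-normal stddev is given.
    if initializer is None:
        initializer = tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)
    var = variable_on_cpu(name, shape, initializer, dtype=dtype,
                          trainable=trainable)
    if wd is not None and wd != 0.0:
        # L2 weight decay, accumulated in a collection and added to the loss.
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var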
Example 2
def region_ranking_3d_fix(tf_input,
                          weight_shape,
                          output_size,
                          batch_size=None):
    """RegionRanking layer works similar to max-pooling

    Args:
        tf_input [batch_size, d, h, w, c]
        weight_shape [d, h, w, c, 1]  convolve with input to create weights for each component
        strids [h, w, c] shrink factor on each of the dimensions
        batch_size

    Returns:
        Output of the layer

    Rrainable variables:
        tf_ranking_w
    """
    batch_size = batch_size or FLAGS.batch_size
    # total number of kept elements (requires `from functools import reduce` on Python 3)
    n_outputs = reduce(lambda x, y: x * y, output_size)
    tf_input_shape = tf.shape(tf_input, out_type=tf.int32)

    tf_ranking_w = variable_with_weight_decay(
        'w',
        shape=weight_shape,
        initializer=tf.contrib.layers.xavier_initializer(),
        wd=None)

    tf_weights = tf.nn.conv3d(tf_input,
                              tf_ranking_w,
                              strides=[1, 1, 1, 1, 1],
                              padding='SAME')
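    # tf_weights: one ranking score per spatial position, shape [batch_size, d, h, w, 1]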

    tf_input_line = tf.reshape(tf_input,
                               tf.stack([
                                   tf_input_shape[0], -1, tf_input_shape[-1]
                               ]))  # flatten d, h, w; keep batch_size and channels
    # keep the batch dimension separate so that top_k ranks within each example
    tf_weights_line = tf.reshape(tf_weights, tf.stack([tf_input_shape[0], -1]))

    # sorted=False keeps the selected elements roughly in their original spatial order
    _, tf_indices = tf.nn.top_k(tf_weights_line, k=n_outputs, sorted=False)

    tf_indices_line = tf.reshape(
        tf_indices, [-1])  # flatten [batch_size, k] into a 1-D vector
    tf_indices_helper = tf.expand_dims(tf.range(batch_size),
                                       1)  # one batch index per example
    tf_indices_helper = tf.tile(tf_indices_helper, multiples=[1, n_outputs])
    tf_indices_helper = tf.reshape(tf_indices_helper, [-1])

    # (batch index, flattened position) pairs, as expected by tf.gather_nd
    tf_indices_2d = tf.stack([tf_indices_helper, tf_indices_line], axis=1)

    tf_input_shrunk = tf.gather_nd(tf_input_line, indices=tf_indices_2d)
    tf_output = tf.reshape(
        tf_input_shrunk,
        tf.stack([
            tf_input_shape[0], output_size[0], output_size[1], output_size[2],
            tf_input_shape[4]
        ]))
    return tf_output
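A quick shape check (hypothetical values, not from the repo): with a [2, 16, 64, 64, 8] input, a [3, 3, 3, 8, 1] ranking kernel, and output_size=[8, 32, 32], the layer keeps the 8*32*32 highest-ranked positions per example.

# hypothetical usage of region_ranking_3d_fix
x = tf.placeholder(tf.float32, [2, 16, 64, 64, 8])
y = region_ranking_3d_fix(x, weight_shape=[3, 3, 3, 8, 1],
                          output_size=[8, 32, 32], batch_size=2)
# at runtime y has shape [2, 8, 32, 32, 8]: channels are kept, and the
# 8*32*32 = 8192 positions with the largest ranking weights survive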
Example 3
def inference_c3d(inputs, isTraining=True):
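    """C3D-style network with a RegionRanking layer as the first pooling stage.

    Returns per-class scores of shape [batch_size, NUM_CLASSES]; despite the
    variable name, no softmax is applied inside this function.
    """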

    with tf.variable_scope('conv1') as scope:
        k1 = variable_with_weight_decay(
            'w',
            shape=[3, 3, 3, 3, 64],
            initializer=tf.contrib.layers.xavier_initializer(),
            wd=FLAGS.weight_decay_conv)
        conv1 = tf.nn.conv3d(inputs,
                             k1,
                             strides=[1, 1, 1, 1, 1],
                             padding='SAME',
                             name='conv1')
        conv_bn1 = bn(conv1, isTraining=isTraining)
        conv_bn1 = tf.nn.relu(conv_bn1)
        print_tensor_shape(conv_bn1)

    with tf.variable_scope('pool1') as scope:
        pool1 = region_ranking_3d_fix(conv_bn1, [7, 7, 7, 64, 1], [16, 64, 64])
        print_tensor_shape(pool1)

    with tf.variable_scope('conv2') as scope:
        k2 = variable_with_weight_decay(
            'w',
            shape=[3, 3, 3, 64, 128],
            initializer=tf.contrib.layers.xavier_initializer(),
            wd=FLAGS.weight_decay_conv)
        conv2 = tf.nn.conv3d(pool1,
                             k2,
                             strides=[1, 1, 1, 1, 1],
                             padding='SAME',
                             name='conv2')
        conv_bn2 = bn(conv2, isTraining=isTraining)
        conv_bn2 = tf.nn.relu(conv_bn2)
        print_tensor_shape(conv_bn2)

    pool2 = tf.nn.max_pool3d(conv_bn2,
                             ksize=[1, 2, 2, 2, 1],
                             strides=[1, 2, 2, 2, 1],
                             padding='SAME',
                             name='pool2')
    print_tensor_shape(pool2)

    with tf.variable_scope('conv3') as scope:
        k3 = variable_with_weight_decay(
            'w',
            shape=[3, 3, 3, 128, 256],
            initializer=tf.contrib.layers.xavier_initializer(),
            wd=FLAGS.weight_decay_conv)
        conv3 = tf.nn.conv3d(pool2,
                             k3,
                             strides=[1, 1, 1, 1, 1],
                             padding='SAME',
                             name='conv3')
        conv_bn3 = bn(conv3, isTraining=isTraining)
        conv_bn3 = tf.nn.relu(conv_bn3)
        print_tensor_shape(conv_bn3)

    pool3 = tf.nn.max_pool3d(conv_bn3,
                             ksize=[1, 2, 2, 2, 1],
                             strides=[1, 2, 2, 2, 1],
                             padding='SAME',
                             name='pool3')
    print_tensor_shape(pool3)

    with tf.variable_scope('conv4') as scope:
        k4 = variable_with_weight_decay(
            'w',
            shape=[3, 3, 3, 256, 256],
            initializer=tf.contrib.layers.xavier_initializer(),
            wd=FLAGS.weight_decay_conv)
        conv4 = tf.nn.conv3d(pool3,
                             k4,
                             strides=[1, 1, 1, 1, 1],
                             padding='SAME',
                             name='conv4')
        conv_bn4 = bn(conv4, isTraining=isTraining)
        conv_bn4 = tf.nn.relu(conv_bn4)
        print_tensor_shape(conv_bn4)

    pool4 = tf.nn.max_pool3d(conv_bn4,
                             ksize=[1, 2, 2, 2, 1],
                             strides=[1, 2, 2, 2, 1],
                             padding='SAME',
                             name='pool4')
    print_tensor_shape(pool4)

    with tf.variable_scope('conv5') as scope:
        k5 = variable_with_weight_decay(
            'w',
            shape=[3, 3, 3, 256, 256],
            initializer=tf.contrib.layers.xavier_initializer(),
            wd=FLAGS.weight_decay_conv)
        conv5 = tf.nn.conv3d(pool4,
                             k5,
                             strides=[1, 1, 1, 1, 1],
                             padding='SAME',
                             name='conv5')
        conv_bn5 = bn(conv5, isTraining=isTraining)
        conv_bn5 = tf.nn.relu(conv_bn5)
        print_tensor_shape(conv_bn5)

    pool5 = tf.nn.max_pool3d(conv_bn5,
                             ksize=[1, 2, 2, 2, 1],
                             strides=[1, 2, 2, 2, 1],
                             padding='SAME',
                             name='pool5')
    print_tensor_shape(pool5)

    with tf.variable_scope('fc1') as scope:
        kfc1 = variable_with_weight_decay(
            'w',
            shape=[1, 4, 4, 256, 2048],
            initializer=tf.contrib.layers.xavier_initializer(),
            wd=FLAGS.weight_decay_fc)
        conv_fc1 = tf.nn.conv3d(pool5,
                                kfc1,
                                strides=[1, 1, 1, 1, 1],
                                padding='VALID',
                                name='fc1')
        conv_bn_fc1 = bn(conv_fc1, isTraining=isTraining)
        print_tensor_shape(conv_bn_fc1)

    if isTraining:
        conv_bn_fc1 = tf.nn.dropout(conv_bn_fc1, FLAGS.dropout)

    with tf.variable_scope('fc2') as scope:
        kfc2 = variable_with_weight_decay(
            'w',
            shape=[1, 1, 1, 2048, 2048],
            initializer=tf.contrib.layers.xavier_initializer(),
            wd=FLAGS.weight_decay_fc)
        conv_fc2 = tf.nn.conv3d(conv_bn_fc1,
                                kfc2,
                                strides=[1, 1, 1, 1, 1],
                                padding='VALID',
                                name='fc2')
        conv_bn_fc2 = bn(conv_fc2, isTraining=isTraining)
        print_tensor_shape(conv_bn_fc2)

    if isTraining:
        conv_bn_fc2 = tf.nn.dropout(conv_bn_fc2, FLAGS.dropout)

    with tf.variable_scope('classification') as scope:
        weights = variable_with_weight_decay(
            'w', [1, 1, 1, 2048, NUM_CLASSES],
            initializer=tf.contrib.layers.xavier_initializer(),
            wd=FLAGS.weight_decay_fc)
        biases = variable_on_cpu('b', [NUM_CLASSES],
                                 tf.constant_initializer(0.0))
        conv = tf.nn.conv3d(conv_bn_fc2,
                            weights,
                            strides=[1, 1, 1, 1, 1],
                            padding='VALID')
        softmax = tf.nn.bias_add(conv, biases)
        print_tensor_shape(softmax, 'softmax-before')

        # average over the remaining spatial dimensions so that inputs of
        # different sizes still yield a single score per class
        softmax = tf.reduce_mean(softmax, axis=[1, 2, 3])
        print_tensor_shape(softmax, 'softmax-after')

    # Output: per-class scores; despite the variable name, softmax has not been applied yet
    return softmax
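Since inference_c3d returns unnormalized scores, the softmax belongs in the loss. A training sketch (clips, labels, and the optimizer choice are assumptions; the 'losses' collection is the one variable_with_weight_decay populates):

# 'clips' and 'labels' are hypothetical placeholder names
logits = inference_c3d(clips, isTraining=True)  # [batch_size, NUM_CLASSES]
xent = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                   logits=logits))
tf.add_to_collection('losses', xent)
total_loss = tf.add_n(tf.get_collection('losses'))  # cross-entropy + weight decay
train_op = tf.train.AdamOptimizer(1e-4).minimize(total_loss)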