Example #1
    def initial_block(self, x, scope):
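        # (ENet-style initial block: a strided 3x3 convolution producing 13
        # feature maps runs in parallel with 2x2 max pooling of the 3-channel
        # input; concatenating the two branches gives 16 channels at half the
        # spatial resolution)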
        # convolution branch:
        W_conv = self.get_variable_weight_decay(
            scope + "/W",
            shape=[3, 3, 3, 13],
            # ([filter_height, filter_width, in_depth, out_depth])
            initializer=tf.contrib.layers.xavier_initializer(),
            loss_category="encoder_wd_losses")
        b_conv = self.get_variable_weight_decay(
            scope + "/b",
            shape=[13],  # ([out_depth])
            initializer=tf.constant_initializer(0),
            loss_category="encoder_wd_losses")
        conv_branch = tf.nn.conv2d(
            x, W_conv, strides=[1, 2, 2, 1], padding="SAME") + b_conv

        # max pooling branch:
        pool_branch = tf.nn.max_pool(x,
                                     ksize=[1, 2, 2, 1],
                                     strides=[1, 2, 2, 1],
                                     padding="VALID")

        # concatenate the branches:
        concat = tf.concat([conv_branch, pool_branch],
                           axis=3)  # (3: the depth axis)

        # apply batch normalization and PReLU:
        output = tf.contrib.slim.batch_norm(concat)
        output = PReLU(output, scope=scope)

        return output
Example #2
    def encoder_bottleneck_asymmetric(self,
                                      x,
                                      output_depth,
                                      keep_prob,
                                      scope,
                                      proj_ratio=4):
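        # (ENet-style asymmetric bottleneck: 1x1 projection, a 5x5 convolution
        # factorized into 5x1 and 1x5 convolutions, and a 1x1 expansion whose
        # weights are regularized with drop_connect; the result is added to
        # the identity branch and passed through PReLU)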
        input_shape = x.get_shape().as_list()
        input_depth = input_shape[3]

        internal_depth = int(output_depth / proj_ratio)

        # convolution branch:
        conv_branch = x

        # # 1x1 projection:
        W_proj = self.get_variable_weight_decay(
            scope + "/W_proj",
            shape=[1, 1, input_depth, internal_depth],
            # ([filter_height, filter_width, in_depth, out_depth])
            initializer=tf.contrib.layers.xavier_initializer(),
            loss_category="encoder_wd_losses")
        conv_branch = tf.nn.conv2d(conv_branch,
                                   W_proj,
                                   strides=[1, 1, 1, 1],
                                   padding="VALID")  # NOTE! no bias terms
        # # # batch norm and PReLU:
        conv_branch = tf.contrib.slim.batch_norm(conv_branch)
        conv_branch = PReLU(conv_branch, scope=scope + "/proj")

        # # asymmetric conv:
        # # # asymmetric conv 1:
        W_conv1 = self.get_variable_weight_decay(
            scope + "/W_conv1",
            shape=[5, 1, internal_depth, internal_depth],
            # ([filter_height, filter_width, in_depth, out_depth])
            initializer=tf.contrib.layers.xavier_initializer(),
            loss_category="encoder_wd_losses")
        conv_branch = tf.nn.conv2d(conv_branch,
                                   W_conv1,
                                   strides=[1, 1, 1, 1],
                                   padding="SAME")  # NOTE! no bias terms
        # # # asymmetric conv 2:
        W_conv2 = self.get_variable_weight_decay(
            scope + "/W_conv2",
            shape=[1, 5, internal_depth, internal_depth],
            # ([filter_height, filter_width, in_depth, out_depth])
            initializer=tf.contrib.layers.xavier_initializer(),
            loss_category="encoder_wd_losses")
        b_conv2 = self.get_variable_weight_decay(
            scope + "/b_conv2",
            shape=[internal_depth],  # ([out_depth])
            initializer=tf.constant_initializer(0),
            loss_category="encoder_wd_losses")
        conv_branch = tf.nn.conv2d(conv_branch,
                                   W_conv2,
                                   strides=[1, 1, 1, 1],
                                   padding="SAME") + b_conv2
        # # # batch norm and PReLU:
        conv_branch = tf.contrib.slim.batch_norm(conv_branch)
        conv_branch = PReLU(conv_branch, scope=scope + "/conv")

        # # 1x1 expansion:
        shape = [1, 1, internal_depth, output_depth]
        W_exp = self.get_variable_weight_decay(
            scope + "/W_exp",
            shape=shape,
            # ([filter_height, filter_width, in_depth, out_depth])
            initializer=tf.contrib.layers.xavier_initializer(),
            loss_category="encoder_wd_losses")
        W_exp = tf.reshape(drop_connect(W_exp, keep_prob), shape=shape)
        conv_branch = tf.nn.conv2d(conv_branch,
                                   W_exp,
                                   strides=[1, 1, 1, 1],
                                   padding="VALID")  # NOTE! no bias terms
        # # # batch norm:
        conv_branch = tf.contrib.slim.batch_norm(conv_branch)
        # NOTE! no PReLU here

        # # regularizer:
        # conv_branch = dropout(conv_branch, self.keep_prob_pl)

        # main branch:
        main_branch = x

        # add the branches:
        merged = conv_branch + main_branch

        # apply PReLU:
        output = PReLU(merged, scope=scope + "/output")

        return output
Example #3
    def encoder_bottleneck_regular(self,
                                   x,
                                   output_depth,
                                   scope,
                                   keep_prob,
                                   proj_ratio=4,
                                   downsampling=False):
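        # (ENet-style regular/downsampling bottleneck: 1x1 projection (a 2x2
        # convolution with stride 2 when downsampling), 3x3 convolution, and
        # 1x1 expansion with drop_connect; the main branch is the identity,
        # or max pooling with argmax plus zero padding of the depth dimension
        # when downsampling, and the merged result is passed through PReLU)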
        input_shape = x.get_shape().as_list()
        input_depth = input_shape[3]

        internal_depth = int(output_depth / proj_ratio)

        # convolution branch:
        conv_branch = x

        # # 1x1 projection:
        if downsampling:
            W_conv = self.get_variable_weight_decay(
                scope + "/W_proj",
                shape=[2, 2, input_depth, internal_depth],
                # ([filter_height, filter_width, in_depth, out_depth])
                initializer=tf.contrib.layers.xavier_initializer(),
                loss_category="encoder_wd_losses")
            conv_branch = tf.nn.conv2d(conv_branch,
                                       W_conv,
                                       strides=[1, 2, 2, 1],
                                       padding="VALID")  # NOTE! no bias terms
        else:
            W_proj = self.get_variable_weight_decay(
                scope + "/W_proj",
                shape=[1, 1, input_depth, internal_depth],
                # ([filter_height, filter_width, in_depth, out_depth])
                initializer=tf.contrib.layers.xavier_initializer(),
                loss_category="encoder_wd_losses")
            conv_branch = tf.nn.conv2d(conv_branch,
                                       W_proj,
                                       strides=[1, 1, 1, 1],
                                       padding="VALID")  # NOTE! no bias terms
        # # # batch norm and PReLU:
        conv_branch = tf.contrib.slim.batch_norm(conv_branch)
        conv_branch = PReLU(conv_branch, scope=scope + "/proj")

        # # conv:
        W_conv = self.get_variable_weight_decay(
            scope + "/W_conv",
            shape=[3, 3, internal_depth, internal_depth],
            # ([filter_height, filter_width, in_depth, out_depth])
            initializer=tf.contrib.layers.xavier_initializer(),
            loss_category="encoder_wd_losses")
        b_conv = self.get_variable_weight_decay(
            scope + "/b_conv",
            shape=[internal_depth],  # ([out_depth])
            initializer=tf.constant_initializer(0),
            loss_category="encoder_wd_losses")
        conv_branch = tf.nn.conv2d(
            conv_branch, W_conv, strides=[1, 1, 1, 1], padding="SAME") + b_conv
        # # # batch norm and PReLU:
        conv_branch = tf.contrib.slim.batch_norm(conv_branch)
        conv_branch = PReLU(conv_branch, scope=scope + "/conv")

        # # 1x1 expansion:
        shape = [1, 1, internal_depth, output_depth]
        W_exp = self.get_variable_weight_decay(
            scope + "/W_exp",
            shape=shape,
            # ([filter_height, filter_width, in_depth, out_depth])
            initializer=tf.contrib.layers.xavier_initializer(),
            loss_category="encoder_wd_losses")
        W_exp = tf.reshape(drop_connect(W_exp, keep_prob), shape=shape)

        conv_branch = tf.nn.conv2d(conv_branch,
                                   W_exp,
                                   strides=[1, 1, 1, 1],
                                   padding="VALID")  # NOTE! no bias terms
        # # # batch norm:
        conv_branch = tf.contrib.slim.batch_norm(conv_branch)
        # NOTE! no PReLU here

        # # regularizer:
        # conv_branch = dropout(conv_branch, self.keep_prob_pl)

        # main branch:
        main_branch = x

        if downsampling:
            # max pooling with argmax (for use in max_unpool in the decoder):
            main_branch, pooling_indices = tf.nn.max_pool_with_argmax(
                main_branch,
                ksize=[1, 2, 2, 1],
                strides=[1, 2, 2, 1],
                padding="SAME")
            # (every time we downsample, we also increase the feature block depth)

            # pad with zeros so that the feature block depth matches:
            depth_to_pad = output_depth - input_depth
            paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 0],
                                             [0, depth_to_pad]])
            # (paddings is an integer tensor of shape [4, 2], where 4 is the
            # rank of main_branch. For each dimension D (D = 0, 1, 2, 3) of
            # main_branch, paddings[D, 0] is the number of values to add
            # before the contents of main_branch in that dimension, and
            # paddings[D, 1] is the number of values to add after the
            # contents of main_branch in that dimension)
            main_branch = tf.pad(main_branch,
                                 paddings=paddings,
                                 mode="CONSTANT")

        # add the branches:
        merged = conv_branch + main_branch

        # apply PReLU:
        output = PReLU(merged, scope=scope + "/output")

        if downsampling:
            return output, pooling_indices
        else:
            return output
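
For orientation, here is a minimal usage sketch (not taken from the source) showing how the three blocks above can be chained into the first encoder stages of an ENet-style network. It assumes the methods live on the same model class, that self.keep_prob_pl is the keep-probability placeholder referenced in the comments above, and that the scope names and the exact sequence of bottlenecks are illustrative rather than the full ENet stage layout.

    def encoder_sketch(self, x):
        # initial block: H/2 spatial resolution, 16 channels
        net = self.initial_block(x, scope="init")

        # stage 1: downsample to H/4 and widen to 64 channels
        net, pooling_indices_1 = self.encoder_bottleneck_regular(
            net, output_depth=64, scope="bottleneck_1_0",
            keep_prob=self.keep_prob_pl, downsampling=True)
        for i in range(1, 5):
            net = self.encoder_bottleneck_regular(
                net, output_depth=64, scope="bottleneck_1_%d" % i,
                keep_prob=self.keep_prob_pl)

        # stage 2: downsample to H/8, widen to 128 channels, and include an
        # asymmetric (5x1 + 1x5) block
        net, pooling_indices_2 = self.encoder_bottleneck_regular(
            net, output_depth=128, scope="bottleneck_2_0",
            keep_prob=self.keep_prob_pl, downsampling=True)
        net = self.encoder_bottleneck_asymmetric(
            net, output_depth=128, keep_prob=self.keep_prob_pl,
            scope="bottleneck_2_3")

        # (the pooling indices are kept for max unpooling in the decoder)
        return net, pooling_indices_1, pooling_indices_2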