Exemplo n.º 1
0
    def _deconv(self, name, x, out_channels, kernel_size=(3, 3), stride=2):
        """Transposed-convolution (upsampling) layer.

        Upsamples the spatial dimensions of `x` by `stride` using
        `tf.nn.conv2d_transpose` with SAME padding.

        Args:
            name: variable-scope name for the layer's variables.
            x: input tensor; assumed NHWC layout — TODO confirm with callers.
            out_channels: number of output feature maps.
            kernel_size: (height, width) of the deconvolution filter.
            stride: integer spatial upsampling factor.

        Returns:
            The upsampled output tensor of shape
            [batch_size, h * stride, w * stride, out_channels].
        """
        with tf.variable_scope(name):
            in_h, in_w = x.shape.as_list()[1:3]
            out_h, out_w = in_h * stride, in_w * stride
            output_shape = [self.args.batch_size, out_h, out_w, out_channels]
            strides = [1, stride, stride, 1]
            # conv2d_transpose filters are [h, w, out_channels, in_channels].
            kernel_shape = [
                kernel_size[0], kernel_size[1], out_channels,
                x.shape.as_list()[-1]
            ]
            w = get_deconv_filter(kernel_shape, self.args.weight_decay)

            variable_summaries(w)
            out = tf.nn.conv2d_transpose(x,
                                         w,
                                         tf.stack(output_shape),
                                         strides=strides,
                                         padding="SAME")

            if self.args.use_bias:
                bias = tf.get_variable('biases', [output_shape[-1]],
                                       initializer=tf.constant_initializer(
                                           self.args.bias))
                variable_summaries(bias)
                out = tf.nn.bias_add(out, bias)
                # BUG FIX: `bias` only exists inside this branch. The original
                # added it to the collection unconditionally, which raised
                # NameError whenever self.args.use_bias was False.
                tf.add_to_collection('decoding_trainable_vars', bias)
            tf.add_to_collection('debug_layers', out)
            tf.add_to_collection('decoding_trainable_vars', w)
        return out
Exemplo n.º 2
0
    def _conv(name,
              x,
              num_filters=16,
              kernel_size=(3, 3),
              padding='SAME',
              stride=(1, 1),
              initializer=tf.contrib.layers.xavier_initializer(),
              l2_strength=0.0,
              dilation=1.0,
              bias=-1):
        """2-D convolution layer with optional dilation and bias.

        Args:
            name: variable-scope name for the layer's variables.
            x: input tensor; assumed NHWC layout — TODO confirm with callers.
            num_filters: number of output feature maps.
            kernel_size: (height, width) of the convolution filter.
            padding: either a padding mode string ('SAME'/'VALID') passed to
                tf.nn.conv2d, or an explicit paddings spec for tf.pad (in
                which case the convolution itself uses 'VALID').
            stride: (stride_h, stride_w); ignored when dilation > 1 because
                tf.nn.atrous_conv2d does not support striding.
            initializer: weight initializer.
            l2_strength: L2 weight-decay coefficient.
            dilation: dilation rate; values > 1 select atrous convolution.
            bias: constant bias initial value; the sentinel -1 disables the
                bias term entirely.

        Returns:
            The convolved (and optionally biased) output tensor.
        """
        with tf.variable_scope(name):
            strides = [1, stride[0], stride[1], 1]
            kernel_shape = [
                kernel_size[0], kernel_size[1], x.shape[-1], num_filters
            ]

            w = variable_with_weight_decay(kernel_shape, initializer,
                                           l2_strength)

            variable_summaries(w)
            if dilation > 1:
                conv = tf.nn.atrous_conv2d(x, w, dilation, padding)
            else:
                # Idiom fix: was `type(padding) == type('')`.
                if isinstance(padding, str):
                    conv = tf.nn.conv2d(x, w, strides, padding)
                else:
                    # `padding` holds explicit per-dimension paddings.
                    conv = tf.pad(x, padding, "CONSTANT")
                    conv = tf.nn.conv2d(conv, w, strides, padding='VALID')

            if bias != -1:
                bias = tf.get_variable(
                    'biases', [num_filters],
                    initializer=tf.constant_initializer(bias))

                variable_summaries(bias)
                conv = tf.nn.bias_add(conv, bias)

            tf.add_to_collection('debug_layers', conv)

            return conv
Exemplo n.º 3
0
    def _fc(name,
            x,
            output_dim=128,
            initializer=tf.contrib.layers.xavier_initializer(),
            l2_strength=0.0,
            bias=0.0):
        """Fully-connected (dense) layer: output = x @ W + b.

        Args:
            name: variable-scope name for the layer's variables.
            x: 2-D input tensor of shape [batch, n_in] — TODO confirm rank.
            output_dim: number of output units.
            initializer: weight initializer.
            l2_strength: L2 weight-decay coefficient.
            bias: constant bias initial value (a Python number creates a new
                bias variable; a tensor is used as the bias directly).

        Returns:
            The output tensor of shape [batch, output_dim].
        """
        with tf.variable_scope(name):
            n_in = x.get_shape()[-1].value

            w = variable_with_weight_decay([n_in, output_dim], initializer,
                                           l2_strength)

            variable_summaries(w)

            # Accept int as well as float initial values; the original only
            # matched float, so `bias=0` fell through and crashed in bias_add
            # with a raw Python int.
            if isinstance(bias, (float, int)):
                bias = tf.get_variable("biases", [output_dim], tf.float32,
                                       tf.constant_initializer(bias))

            variable_summaries(bias)

            output = tf.nn.bias_add(tf.matmul(x, w), bias)

            return output