    # Requires TensorFlow 1.x (`import tensorflow as tf`) and TF-Slim
    # (`import tensorflow.contrib.slim as slim`); `alexnet_argscope`,
    # `conv_group`, and `conv_group_nobn` are project-local helpers assumed
    # to be importable from the surrounding codebase.
    def discriminate(self, net, reuse=None, training=True, with_fc=True):
        """Builds a discriminator network on top of inputs.

        Args:
            net: Input tensor to the discriminator.
            reuse: Whether to reuse already defined variables.
            training: Whether in train or eval mode.
            with_fc: Whether to include the fully connected layers (used during
                unsupervised training).

        Returns:
            Tuple of (logits, drop-prediction logits, encoded feature map).
            When with_fc is False, the logits entry is just the encoded
            feature map and the drop-prediction logits are None.
        """
        with tf.variable_scope('discriminator', reuse=reuse):
            with slim.arg_scope(alexnet_argscope(activation=self.fc_activation, padding='SAME', training=training,
                                                 fix_bn=self.fix_bn)):
                net = slim.conv2d(net, 96, kernel_size=[11, 11], stride=4, scope='conv_1', padding=self.pad,
                                  normalizer_fn=None)
                net = slim.max_pool2d(net, kernel_size=[3, 3], stride=2, scope='pool_1', padding=self.pad)
                # net = tf.nn.lrn(net, depth_radius=2, alpha=0.00002, beta=0.75)
                net = conv_group(net, 256, kernel_size=[5, 5], scope='conv_2')
                net = slim.max_pool2d(net, kernel_size=[3, 3], stride=2, scope='pool_2', padding=self.pad)
                # net = tf.nn.lrn(net, depth_radius=2, alpha=0.00002, beta=0.75)
                net = slim.conv2d(net, 384, kernel_size=[3, 3], scope='conv_3')
                net = conv_group(net, 384, kernel_size=[3, 3], scope='conv_4')
                net = conv_group(net, 256, kernel_size=[3, 3], scope='conv_5')
                if self.use_pool5:
                    net = slim.max_pool2d(net, kernel_size=[3, 3], stride=2, scope='pool_5', padding=self.pad)
                encoded = net
                drop_pred = None

                if with_fc:
                    # Per-location head predicting one drop logit per spatial
                    # position of the encoded feature map.
                    drop_pred = slim.conv2d(net, 1, kernel_size=[1, 1], activation_fn=None, normalizer_fn=None,
                                            scope='drop_pred')
                    drop_pred = slim.flatten(drop_pred)

                    net = slim.flatten(net)
                    net = slim.fully_connected(net, 4096, scope='fc1')
                    net = slim.dropout(net, 0.5, is_training=training)
                    net = slim.fully_connected(net, 4096, scope='fc2')
                    net = slim.dropout(net, 0.5, is_training=training)
                    net = slim.fully_connected(net, 2, scope='fc3',
                                               activation_fn=None,
                                               normalizer_fn=None,
                                               biases_initializer=tf.zeros_initializer())

                return net, drop_pred, encoded
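
A minimal usage sketch, assuming a hypothetical class `Discriminator` that
defines this method together with the attributes it reads (`pad`,
`fc_activation`, `fix_bn`, `use_pool5`); the class name and the 227x227 input
size are illustrative, not taken from the original code:

import tensorflow as tf

disc = Discriminator()                                  # hypothetical class
real = tf.placeholder(tf.float32, [None, 227, 227, 3])  # AlexNet-sized input
fake = tf.placeholder(tf.float32, [None, 227, 227, 3])

# The first call creates the variables; the second call reuses them so both
# inputs are scored by the same discriminator.
real_logits, real_drop, real_enc = disc.discriminate(real, reuse=None, training=True)
fake_logits, fake_drop, fake_enc = disc.discriminate(fake, reuse=True, training=True)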
Example #2
    def discriminate(self, net, reuse=None, training=True):
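        """Builds a discriminator network on top of inputs, storing every
        intermediate layer in self.layers.

        Args:
            net: Input tensor to the discriminator.
            reuse: Whether to reuse already defined variables.
            training: Whether in train or eval mode.

        Returns:
            Tuple of (logits, drop-prediction logits, encoded feature map).
        """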
        with tf.variable_scope('discriminator', reuse=reuse):
            with slim.arg_scope(
                    alexnet_argscope(activation=self.fc_activation,
                                     padding='SAME',
                                     training=training,
                                     fix_bn=self.fix_bn)):
                self.layers['input'] = net
                net = slim.conv2d(net,
                                  96,
                                  kernel_size=[11, 11],
                                  stride=4,
                                  scope='conv_1',
                                  padding=self.pad,
                                  normalizer_fn=None)
                self.layers['conv_1'] = net
                net = slim.max_pool2d(net,
                                      kernel_size=[3, 3],
                                      stride=2,
                                      scope='pool_1',
                                      padding=self.pad)
                net = tf.nn.lrn(net, depth_radius=2, alpha=0.00002, beta=0.75)
                net = conv_group_nobn(net,
                                      256,
                                      kernel_size=[5, 5],
                                      scope='conv_2')
                self.layers['conv_2'] = net
                net = slim.max_pool2d(net,
                                      kernel_size=[3, 3],
                                      stride=2,
                                      scope='pool_2',
                                      padding=self.pad)
                net = tf.nn.lrn(net, depth_radius=2, alpha=0.00002, beta=0.75)
                net = slim.conv2d(net,
                                  384,
                                  kernel_size=[3, 3],
                                  scope='conv_3',
                                  normalizer_fn=None)
                self.layers['conv_3'] = net
                net = conv_group_nobn(net,
                                      384,
                                      kernel_size=[3, 3],
                                      scope='conv_4')
                self.layers['conv_4'] = net
                net = conv_group(net, 256, kernel_size=[3, 3], scope='conv_5')
                self.layers['conv_5'] = net
                if self.use_pool5:
                    net = slim.max_pool2d(net,
                                          kernel_size=[3, 3],
                                          stride=2,
                                          scope='pool_5',
                                          padding=self.pad)
                encoded = net

                drop_pred = slim.conv2d(net,
                                        1,
                                        kernel_size=[3, 3],
                                        padding='SAME',
                                        activation_fn=None,
                                        normalizer_fn=None,
                                        scope='drop_pred')
                self.layers['drop_pred'] = drop_pred
                drop_pred = slim.flatten(drop_pred)

                # Dense layer implemented as a VALID conv whose kernel spans
                # the full spatial extent of the encoded features.
                enc_shape = net.get_shape().as_list()
                net = slim.conv2d(net,
                                  4096,
                                  kernel_size=enc_shape[1:3],
                                  padding='VALID',
                                  scope='fc_1')
                self.layers['fc_1'] = net
                net = slim.dropout(net, 0.5, is_training=training)
                net = slim.conv2d(net,
                                  2,
                                  kernel_size=[1, 1],
                                  padding='VALID',
                                  activation_fn=None,
                                  normalizer_fn=None,
                                  scope='fc_2')
                self.layers['fc_2'] = net
                net = slim.flatten(net)

                return net, drop_pred, encoded
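
Example #2 replaces the flatten + `slim.fully_connected` head of Example #1
with VALID convolutions whose kernels span the whole feature map, keeping the
head fully convolutional. A small sketch of why the two forms compute the same
affine map for a fixed input size; the 6x6x256 feature shape is illustrative,
not taken from the original code:

import tensorflow as tf
import tensorflow.contrib.slim as slim

feat = tf.placeholder(tf.float32, [None, 6, 6, 256])  # e.g. a pool_5 output

# Dense head (Example #1 style): flatten, then a 4096-unit fully connected layer.
dense = slim.fully_connected(slim.flatten(feat), 4096, scope='fc_dense')

# Fully convolutional head (Example #2 style): a VALID conv whose kernel covers
# the full 6x6 spatial extent applies the same weights, yielding a
# [None, 1, 1, 4096] tensor that flattens to [None, 4096].
conv = slim.flatten(slim.conv2d(feat, 4096, kernel_size=[6, 6],
                                padding='VALID', scope='fc_conv'))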