Example #1
    def __init__(self,
                 ch=512,
                 ch_in=512,
                 w_ch=512,
                 upsample=True,
                 enable_blur=False):
        super().__init__()
        self.upsample = upsample
        self.ch = ch
        self.ch_in = ch_in
        with self.init_scope():
            if not upsample:
                self.W = chainer.Parameter(shape=(ch_in, 4, 4))
                self.W.data[:] = 1  # learned constant 4x4 input, initialized to ones

            self.b0 = L.Bias(axis=1, shape=(ch, ))
            self.b1 = L.Bias(axis=1, shape=(ch, ))
            self.n0 = NoiseBlock(ch)
            self.n1 = NoiseBlock(ch)

            self.s0 = StyleBlock(w_ch, ch)
            self.s1 = StyleBlock(w_ch, ch)

            self.c0 = EqualizedConv2d(ch_in, ch, 3, 1, 1, nobias=True)
            self.c1 = EqualizedConv2d(ch, ch, 3, 1, 1, nobias=True)

        self.blur_k = None
        self.enable_blur = enable_blur
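Note: EqualizedConv2d itself is not shown in these examples. As a point of reference, below is a minimal sketch of an equalized-learning-rate convolution in Chainer, in the spirit of the Progressive GAN / StyleGAN technique the name suggests; the class name, default gain, and exact scaling here are assumptions and may differ from the repository's implementation.

import math

import chainer
import chainer.links as L


class EqualizedConv2dSketch(chainer.Chain):
    """Hypothetical equalized-LR conv: unit-variance init, He scaling at runtime."""

    def __init__(self, in_ch, out_ch, ksize, stride, pad,
                 nobias=False, gain=math.sqrt(2)):
        super().__init__()
        # Runtime He constant; applying it to the input is equivalent to
        # scaling the (unit-variance) convolution weights.
        self.scale = gain / math.sqrt(in_ch * ksize * ksize)
        with self.init_scope():
            self.c = L.Convolution2D(in_ch, out_ch, ksize, stride, pad,
                                     nobias=nobias,
                                     initialW=chainer.initializers.Normal(1.0))

    def __call__(self, x):
        return self.c(self.scale * x)

Calls such as EqualizedConv2d(ch_in, ch, 3, 1, 1, nobias=True) in Example #1 appear to map onto this positional signature (in_ch, out_ch, ksize, stride, pad).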
Example #2
 def __init__(self, ch, dim_z=256):
     super(EncoderBlockBase, self).__init__()
     with self.init_scope():
         self.c0 = EqualizedConv2d(ch, ch, 3, 1, 1)
         self.c1 = EqualizedConv2d(ch, ch, 4, 1, 0)
         self.l2 = EqualizedLinear(ch, dim_z, gain=1)
         self.bn0 = L.BatchNormalization(ch)
         self.bn1 = L.BatchNormalization(ch)
Example #3
 def __init__(self, in_ch, out_ch, enable_blur=False):
     super(DiscriminatorBlock, self).__init__()
     self.in_ch = in_ch
     self.out_ch = out_ch
     with self.init_scope():
         self.c0 = EqualizedConv2d(in_ch, out_ch, 3, 1, 1)
         self.c1 = EqualizedConv2d(out_ch, out_ch, 3, 1, 1)
     self.blur_k = None
     self.enable_blur = enable_blur
Example #4
    def __init__(self, ch=512, enable_blur=False):
        super(StyleGenerator, self).__init__()
        self.max_stage = 17
        with self.init_scope():
            self.blocks = chainer.ChainList(
                SynthesisBlock(ch, ch, upsample=False),  # 4
                SynthesisBlock(ch, ch, upsample=True, enable_blur=enable_blur),  # 8
                SynthesisBlock(ch, ch, upsample=True, enable_blur=enable_blur),  # 16
                SynthesisBlock(ch, ch, upsample=True, enable_blur=enable_blur),  # 32
                SynthesisBlock(ch // 2, ch, upsample=True, enable_blur=enable_blur),  # 64
                SynthesisBlock(ch // 4, ch // 2, upsample=True, enable_blur=enable_blur),  # 128
                SynthesisBlock(ch // 8, ch // 4, upsample=True, enable_blur=enable_blur),  # 256
                SynthesisBlock(ch // 16, ch // 8, upsample=True, enable_blur=enable_blur),  # 512
                SynthesisBlock(ch // 32, ch // 16, upsample=True, enable_blur=enable_blur)  # 1024
            )
            self.outs = chainer.ChainList(
                EqualizedConv2d(ch, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch // 2, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch // 4, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch // 8, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch // 16, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch // 32, 3, 1, 1, 0, gain=1)
            )

        self.n_blocks = len(self.blocks)
        self.image_size = 1024
        self.enable_blur = enable_blur
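The nine SynthesisBlock / to-RGB pairs above cover resolutions 4x4 through 1024x1024, with the channel width held at ch up to 32x32 and halved at every doubling after that. A small standalone helper (illustrative only, not part of the repository) that reproduces this schedule:

def synthesis_channels(ch=512, n_blocks=9):
    """Per-block output channels for resolutions 4, 8, ..., 1024."""
    return [ch // 2 ** max(0, i - 3) for i in range(n_blocks)]


# [512, 512, 512, 512, 256, 128, 64, 32, 16] -- matching the first arguments
# of the SynthesisBlock and EqualizedConv2d calls above.
print(synthesis_channels())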
Example #5
    def __init__(self, ch=512, enable_blur=False):
        super(Discriminator, self).__init__()
        self.max_stage = 17

        with self.init_scope():
            # NOTE: called in reversed order.
            self.blocks = chainer.ChainList(
                DiscriminatorBlockBase(ch),
                DiscriminatorBlock(ch, ch, enable_blur=enable_blur),
                DiscriminatorBlock(ch, ch, enable_blur=enable_blur),
                DiscriminatorBlock(ch, ch, enable_blur=enable_blur),
                DiscriminatorBlock(ch // 2, ch, enable_blur=enable_blur),
                DiscriminatorBlock(ch // 4, ch // 2, enable_blur=enable_blur),
                DiscriminatorBlock(ch // 8, ch // 4, enable_blur=enable_blur),
                DiscriminatorBlock(ch // 16, ch // 8, enable_blur=enable_blur),
                DiscriminatorBlock(ch // 32, ch // 16, enable_blur=enable_blur),)
            self.ins = chainer.ChainList(
                EqualizedConv2d(3, ch, 1, 1, 0),
                EqualizedConv2d(3, ch, 1, 1, 0),
                EqualizedConv2d(3, ch, 1, 1, 0),
                EqualizedConv2d(3, ch, 1, 1, 0),
                EqualizedConv2d(3, ch // 2, 1, 1, 0),
                EqualizedConv2d(3, ch // 4, 1, 1, 0),
                EqualizedConv2d(3, ch // 8, 1, 1, 0),
                EqualizedConv2d(3, ch // 16, 1, 1, 0),
                EqualizedConv2d(3, ch // 32, 1, 1, 0),)
            self.enable_blur = enable_blur
Example #6
    def __init__(self,
                 in_ch=128,
                 ch=512,
                 enable_blur=False,
                 rgbd=False,
                 use_encoder=False,
                 use_occupancy_net=False,
                 initial_depth=None):
        super(DCGANGenerator, self).__init__()
        self.in_ch = in_ch
        self.ch = ch
        self.max_stage = 17
        self.rgbd = rgbd
        self.use_occupancy_net = use_occupancy_net
        out_ch = 4 if rgbd else 3
        if initial_depth is None:
            initial_depth = 1.0

        with self.init_scope():
            if self.rgbd:
                self.linear = EqualizedLinear(in_ch + 9, ch * 4 * 4)
            else:
                self.linear = EqualizedLinear(in_ch, ch * 4 * 4)
            self.blocks = chainer.ChainList(
                DCGANBlock(ch, ch, enable_blur=enable_blur),  # 8
                DCGANBlock(ch, ch, enable_blur=enable_blur),  # 16
                DCGANBlock(ch, ch, enable_blur=enable_blur),  # 32
                DCGANBlock(ch // 2, ch, enable_blur=enable_blur),  # 64
                DCGANBlock(ch // 4, ch // 2, enable_blur=enable_blur),  # 128
            )
            self.outs = chainer.ChainList(
                EqualizedConv2d(ch, out_ch, 1, 1, 0, gain=1),
                EqualizedConv2d(ch, out_ch, 1, 1, 0, gain=1),
                EqualizedConv2d(ch, out_ch, 1, 1, 0, gain=1),
                EqualizedConv2d(ch // 2, out_ch, 1, 1, 0, gain=1),
                EqualizedConv2d(ch // 4, out_ch, 1, 1, 0, gain=1),
            )
            if use_encoder:
                self.enc = Encoder(ch, in_ch, enable_blur=enable_blur)
            if use_occupancy_net:
                self.occupancy = OccupancyNet(in_ch=in_ch + 3, hidden_ch=32)

        # last output channel (depth when rgbd=True): zero its weights and set its
        # bias to the inverse softplus of initial_depth, i.e. log(e**d - 1)
        for out in self.outs:
            out.c.W.array[-1] = 0
            out.c.b.array[-1] = math.log(math.e**initial_depth - 1)

        self.n_blocks = len(self.blocks)
        self.image_size = 128
        self.enable_blur = enable_blur
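The bias expression above, math.log(math.e**initial_depth - 1), is the inverse of the softplus function, which suggests the raw depth channel is passed through a softplus elsewhere in the model so that it starts out at initial_depth. A quick standalone check of that identity (plain Python, no project code assumed):

import math


def softplus(x):
    return math.log(1.0 + math.exp(x))


def inverse_softplus(d):
    # Same expression as out.c.b.array[-1] = math.log(math.e**initial_depth - 1)
    return math.log(math.e ** d - 1.0)


for d in (0.5, 1.0, 2.0):
    assert abs(softplus(inverse_softplus(d)) - d) < 1e-9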
Example #7
    def __init__(self, ch=512, ch_in=512, upsample=True, enable_blur=False):
        super().__init__()
        self.upsample = upsample
        self.ch = ch
        self.ch_in = ch_in
        with self.init_scope():
            self.b0 = L.Bias(axis=1, shape=(ch, ))
            self.b1 = L.Bias(axis=1, shape=(ch, ))
            self.n0 = NoiseBlock(ch)
            self.n1 = NoiseBlock(ch)

            self.c0 = EqualizedConv2d(ch_in, ch, 3, 1, 1, nobias=True)
            self.c1 = EqualizedConv2d(ch, ch, 3, 1, 1, nobias=True)

        self.blur_k = None
        self.enable_blur = enable_blur
Example #8
    def __init__(self, ch=512, enable_blur=False, sn=False):
        super(Discriminator, self).__init__()
        self.max_stage = 17
        self.sn = sn

        with self.init_scope():
            # NOTE: called in reversed order.
            self.blocks = chainer.ChainList(
                DiscriminatorBlockBase(ch, sn=sn),
                DiscriminatorBlock(ch, ch, enable_blur=enable_blur, sn=sn),
                DiscriminatorBlock(ch, ch, enable_blur=enable_blur, sn=sn),
                DiscriminatorBlock(ch, ch, enable_blur=enable_blur, sn=sn),
                DiscriminatorBlock(ch // 2, ch, enable_blur=enable_blur,
                                   sn=sn),
            )
            if not sn:
                self.ins = chainer.ChainList(
                    EqualizedConv2d(3, ch // 2, 1, 1, 0), )
            else:
                w = chainer.initializers.GlorotUniform(math.sqrt(2))
                self.ins = chainer.ChainList(
                    L.Convolution2D(3, ch // 2, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()), )

            self.enable_blur = enable_blur
Example #9
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              bias=True,
              padding_layer=None):
     '''
     :param in_channels: Number of input channels
     :param out_channels: Number of output channels
     :param kernel_size: Scalar. Spatial dimensions of kernel (only quadratic kernels supported).
     :param bias: Whether or not to use bias.
     :param padding_layer: Which padding to use. Default is reflection padding.
     '''
     assert padding_layer is None, "this padding is not supported"
     super().__init__()
     ka = kernel_size // 2
     kb = ka - 1 if kernel_size % 2 == 0 else ka
     with self.init_scope():
         self.net = chainer.Sequential(
             ReflectionPad(pad_width=((ka, kb), (ka, kb))),
             EqualizedConv2d(in_channels,
                             out_channels,
                             kernel_size,
                             nobias=not bias,
                             stride=1))
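The ka/kb split above gives a total padding of kernel_size - 1, which keeps the spatial size of a stride-1 convolution unchanged for both odd and even kernels (for even kernels the padding is simply asymmetric). A standalone check of that arithmetic:

def same_pad(kernel_size):
    """Left/right reflection padding used above; ka + kb == kernel_size - 1."""
    ka = kernel_size // 2
    kb = ka - 1 if kernel_size % 2 == 0 else ka
    return ka, kb


for k in range(1, 8):
    ka, kb = same_pad(k)
    width = 32
    out = (width + ka + kb) - k + 1  # output width of a stride-1, pad-0 conv
    assert out == width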
Example #10
 def __init__(self, ch, out_dim=1, sn=False):
     super(DiscriminatorBlockBase, self).__init__()
     with self.init_scope():
         if not sn:
             self.c0 = EqualizedConv2d(ch, ch, 3, 1, 1)
             self.c1 = EqualizedConv2d(ch, ch, 4, 1, 0)
             self.l2 = EqualizedLinear(ch, out_dim, gain=1)
         else:
             w = chainer.initializers.Uniform(1)
             self.c0 = L.Convolution2D(ch, ch, 3, 1, 1,
                                       initialW=w).add_hook(
                                           SpectralNormalization())
             self.c1 = L.Convolution2D(ch, ch, 4, 1, 0,
                                       initialW=w).add_hook(
                                           SpectralNormalization())
             self.l2 = L.Linear(ch, out_dim, initialW=w).add_hook(
                 SpectralNormalization())
Example #11
    def __init__(self, nf0, occnet_nf, frustrum_dims):
        super().__init__()

        self.occnet_nf = 32  # TODO: use the occnet_nf argument instead of this hard-coded value
        self.frustrum_dims = frustrum_dims
        self.frustrum_depth = frustrum_dims[-1]
        self.depth_coords = None

        with self.init_scope():
            self.mlp = chainer.Sequential(
                EqualizedConv2d(nf0 * self.frustrum_depth, self.occnet_nf, 1,
                                1, 0),
                L.BatchNormalization(self.occnet_nf),
                F.leaky_relu,
                EqualizedConv2d(self.occnet_nf, self.occnet_nf),
                L.BatchNormalization(self.occnet_nf),
                F.leaky_relu,
            )
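chainer.Sequential accepts parameter-free callables alongside links, which is why F.leaky_relu can be interleaved with EqualizedConv2d and BatchNormalization above. A minimal self-contained illustration using only built-in Chainer links:

import chainer
import chainer.functions as F
import chainer.links as L

# Links hold parameters; F.leaky_relu is just a function, and Sequential
# applies each entry in order.
net = chainer.Sequential(
    L.Convolution2D(8, 16, ksize=1),
    L.BatchNormalization(16),
    F.leaky_relu,
)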
Example #12
 def __init__(self,
              in_ch,
              out_ch,
              enable_blur=False,
              sn=False,
              res=False,
              bn=False):
     super(DiscriminatorBlock, self).__init__()
     self.in_ch = in_ch
     self.out_ch = out_ch
     self.res = res
     with self.init_scope():
         if not sn:
             self.c0 = EqualizedConv2d(in_ch, out_ch, 3, 1, 1)
             self.c1 = EqualizedConv2d(out_ch, out_ch, 3, 1, 1)
             if res:
                 self.c_sc = EqualizedConv2d(in_ch, out_ch, 3, 1, 1)
         else:
             w = chainer.initializers.Uniform(1)
             self.c0 = L.Convolution2D(in_ch, out_ch, 3, 1, 1,
                                       initialW=w).add_hook(
                                           SpectralNormalization())
             self.c1 = L.Convolution2D(out_ch, out_ch, 3, 1, 1,
                                       initialW=w).add_hook(
                                           SpectralNormalization())
             if res:
                 self.c_sc = L.Convolution2D(in_ch,
                                             out_ch,
                                             3,
                                             1,
                                             1,
                                             initialW=w).add_hook(
                                                 SpectralNormalization())
         if bn:
             self.b0 = L.BatchNormalization(out_ch)
             self.b1 = L.BatchNormalization(out_ch)
         else:
             self.b0 = lambda x: x
             self.b1 = lambda x: x
     self.blur_k = None
     self.enable_blur = enable_blur
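The sn=True branches above attach a SpectralNormalization hook to each convolution. Chainer ships a link hook of the same name (chainer.link_hooks.SpectralNormalization); assuming the repository's hook behaves like the built-in one, the pattern reduces to the following sketch (channel sizes here are arbitrary):

import chainer
import chainer.links as L
from chainer.link_hooks import SpectralNormalization

w = chainer.initializers.Uniform(1)
# Link.add_hook returns the link itself, which is why the calls above can be
# chained; the hook renormalizes W by its largest singular value on every
# forward pass.
conv = L.Convolution2D(64, 128, 3, 1, 1, initialW=w).add_hook(
    SpectralNormalization())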
Example #13
    def __init__(self, ch=512, out_ch=64, enable_blur=False):
        super(BackgroundFeatureGenerator, self).__init__()
        self.img_size = 64
        self.background_depth = 4  # virtual background distance

        x_pos, y_pos = np.meshgrid(
            np.arange(self.img_size) - self.img_size // 2,
            np.arange(self.img_size) - self.img_size // 2)
        depth_map = self.background_depth * self.img_size * 2 / np.sqrt(
            (self.img_size * 2) ** 2 + x_pos ** 2 + y_pos ** 2)

        with self.init_scope():
            self.blocks = chainer.ChainList(
                SynthesisBlock(ch, ch, ch, upsample=False),  # 4
                SynthesisBlock(ch,
                               ch,
                               ch,
                               upsample=True,
                               enable_blur=enable_blur),  # 8
                SynthesisBlock(ch,
                               ch,
                               ch,
                               upsample=True,
                               enable_blur=enable_blur),  # 16
                SynthesisBlock(ch,
                               ch,
                               ch,
                               upsample=True,
                               enable_blur=enable_blur),  # 32
                SynthesisBlock(ch // 2,
                               ch,
                               ch,
                               upsample=True,
                               enable_blur=enable_blur),  # 64
            )
            self.conv = EqualizedConv2d(ch // 2, out_ch, 1, 1, 0, gain=1)
            self.l1 = EqualizedLinear(ch + 9, ch)
            self.l2 = EqualizedLinear(ch, ch)

        self.add_persistent("depth_map", depth_map)

        self.n_blocks = len(self.blocks)
        self.image_size = 64
        self.enable_blur = enable_blur
Example #14
    def __init__(self, ch=512, dim_z=256, enable_blur=False, res=True):
        super(Encoder, self).__init__()
        self.max_stage = 17

        with self.init_scope():
            # NOTE: called in reversed order.
            self.blocks = chainer.ChainList(
                EncoderBlockBase(ch, dim_z + 9),  # 4
                DiscriminatorBlock(ch,
                                   ch,
                                   enable_blur=enable_blur,
                                   sn=False,
                                   res=res,
                                   bn=True),  # 8
                DiscriminatorBlock(ch,
                                   ch,
                                   enable_blur=enable_blur,
                                   sn=False,
                                   res=res,
                                   bn=True),  # 16
                DiscriminatorBlock(ch,
                                   ch,
                                   enable_blur=enable_blur,
                                   sn=False,
                                   res=res,
                                   bn=True),  # 32
                DiscriminatorBlock(ch // 2,
                                   ch,
                                   enable_blur=enable_blur,
                                   sn=False,
                                   res=res,
                                   bn=True),  # 64
                DiscriminatorBlock(ch // 4,
                                   ch // 2,
                                   enable_blur=enable_blur,
                                   sn=False,
                                   res=res,
                                   bn=True),  # 128
            )

            self.ins = chainer.ChainList(
                EqualizedConv2d(3, ch, 1, 1, 0),
                EqualizedConv2d(3, ch, 1, 1, 0),
                EqualizedConv2d(3, ch, 1, 1, 0),
                EqualizedConv2d(3, ch, 1, 1, 0),
                EqualizedConv2d(3, ch // 2, 1, 1, 0),
                EqualizedConv2d(3, ch // 4, 1, 1, 0),
            )

            self.enable_blur = enable_blur
Example #15
    def __init__(self, w_ch, in_ch, hidden_ch=256):
        super(StyleGenerator, self).__init__()
        with self.init_scope():
            self.c0 = EqualizedConv2d(in_ch, hidden_ch * 2, 4, 2, 1)
            self.c1 = EqualizedConv2d(hidden_ch * 2, hidden_ch * 4, 4, 2, 1)
            self.c4 = EqualizedConv2d(hidden_ch * 4, hidden_ch * 4, 3, 1, 1)
            self.c5 = EqualizedConv2d(hidden_ch * 4, hidden_ch * 2, 3, 1, 1)
            self.c6 = EqualizedConv2d(hidden_ch * 2 * 2, hidden_ch, 3, 1, 1)
            self.c7 = EqualizedConv2d(hidden_ch + in_ch, 3, 3, 1, 1, gain=0.5)

            self.s0 = StyleBlock(w_ch, hidden_ch * 2)
            self.s1 = StyleBlock(w_ch, hidden_ch * 4)
            self.s4 = StyleBlock(w_ch, hidden_ch * 4)
            self.s5 = StyleBlock(w_ch, hidden_ch * 2)
            self.s6 = StyleBlock(w_ch, hidden_ch)
Example #16
 def __init__(self, ch):
     super(DiscriminatorBlockBase, self).__init__()
     with self.init_scope():
         self.c0 = EqualizedConv2d(ch, ch, 3, 1, 1)
         self.c1 = EqualizedConv2d(ch, ch, 4, 1, 0)
         self.l2 = EqualizedLinear(ch, 1, gain=1)
Example #17
    def __init__(self,
                 in_channels,
                 out_channels,
                 prep_conv=True,
                 middle_channels=None,
                 use_dropout=False,
                 dropout_prob=0.1,
                 norm=L.BatchNormalization):
        '''
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param prep_conv: Whether to have another convolutional layer before the downsampling layer.
        :param middle_channels: If prep_conv is true, this sets the number of channels between the prep and downsampling
                                convs.
        :param use_dropout: bool. Whether to use dropout or not.
        :param dropout_prob: Float. The dropout probability (if use_dropout is True)
        :param norm: Which norm to use. If None, no norm is used. Default is Batchnorm with affinity.
        '''
        super().__init__()

        if middle_channels is None:
            middle_channels = in_channels

        net = list()

        if prep_conv:
            net += [
                ReflectionPad(pad_width=1),
                EqualizedConv2d(in_channels,
                                middle_channels,
                                ksize=3,
                                pad=0,
                                stride=1,
                                nobias=False if norm is None else True)
            ]

            if norm is not None:
                net += [norm(middle_channels)]

            net += [F.leaky_relu]

            if use_dropout:
                net += [lambda x: F.dropout(x, dropout_prob)]

        net += [
            ReflectionPad(pad_width=1),
            EqualizedConv2d(middle_channels,
                            out_channels,
                            ksize=4,
                            pad=0,
                            stride=2,
                            nobias=False if norm is None else True)
        ]

        if norm is not None:
            net += [norm(out_channels)]

        net += [F.leaky_relu]

        if use_dropout:
            net += [lambda x: F.dropout(x, dropout_prob)]

        with self.init_scope():
            self.net = chainer.Sequential(*net)
Example #18
    def __init__(self,
                 ch=512,
                 enable_blur=False,
                 rgbd=False,
                 rotate_conv_input=False,
                 use_encoder=False,
                 use_occupancy_net=False,
                 initial_depth=1.0):
        super(StyleGenerator, self).__init__()
        self.max_stage = 17
        self.rgbd = rgbd
        self.rotate_conv_input = rotate_conv_input
        self.use_occupancy_net = use_occupancy_net
        out_ch = 4 if rgbd else 3
        with self.init_scope():
            self.blocks = chainer.ChainList(
                SynthesisBlock(ch, ch, ch, upsample=False),  # 4
                SynthesisBlock(ch,
                               ch,
                               ch,
                               upsample=True,
                               enable_blur=enable_blur),  # 8
                SynthesisBlock(ch,
                               ch,
                               ch,
                               upsample=True,
                               enable_blur=enable_blur),  # 16
                SynthesisBlock(ch,
                               ch,
                               ch,
                               upsample=True,
                               enable_blur=enable_blur),  # 32
                SynthesisBlock(ch // 2,
                               ch,
                               ch,
                               upsample=True,
                               enable_blur=enable_blur),  # 64
                SynthesisBlock(ch // 4,
                               ch // 2,
                               ch,
                               upsample=True,
                               enable_blur=enable_blur),  # 128
                # SynthesisBlock(ch // 8, ch // 4, ch, upsample=True, enable_blur=enable_blur),  # 256
                # SynthesisBlock(ch // 16, ch // 8, ch, upsample=True, enable_blur=enable_blur),  # 512
                # SynthesisBlock(ch // 32, ch // 16, ch, upsample=True, enable_blur=enable_blur)  # 1024
            )
            self.outs = chainer.ChainList(
                EqualizedConv2d(ch, out_ch, 1, 1, 0, gain=1),
                EqualizedConv2d(ch, out_ch, 1, 1, 0, gain=1),
                EqualizedConv2d(ch, out_ch, 1, 1, 0, gain=1),
                EqualizedConv2d(ch, out_ch, 1, 1, 0, gain=1),
                EqualizedConv2d(ch // 2, out_ch, 1, 1, 0, gain=1),
                EqualizedConv2d(ch // 4, out_ch, 1, 1, 0, gain=1),
                # EqualizedConv2d(ch // 8, out_ch, 1, 1, 0, gain=1),
                # EqualizedConv2d(ch // 16, out_ch, 1, 1, 0, gain=1),
                # EqualizedConv2d(ch // 32, out_ch, 1, 1, 0, gain=1)
            )
            if self.rgbd:
                if self.rotate_conv_input:
                    self.l1 = EqualizedLinear(9, ch)
                else:
                    self.l1 = EqualizedLinear(ch + 9, ch)
                self.l2 = EqualizedLinear(ch, ch)
            if rotate_conv_input:
                self.rotate = StyleBlock(ch, ch)

            if use_encoder:
                self.enc = Encoder(ch, ch * 2, enable_blur=enable_blur)

            if use_occupancy_net:
                self.occupancy = OccupancyNet(in_ch=ch * 2 + 3, hidden_ch=32)

        # last output channel (depth when rgbd=True): zero its weights and set its
        # bias to the inverse softplus of initial_depth, i.e. log(e**d - 1)
        for out in self.outs:
            out.c.W.array[-1] = 0
            out.c.b.array[-1] = math.log(math.e**initial_depth - 1)

        self.n_blocks = len(self.blocks)
        self.image_size = 128
        self.enable_blur = enable_blur
Example #19
    def __init__(self,
                 ch=512,
                 enable_blur=False,
                 sn=False,
                 res=False,
                 num_z=2):
        super(DisentangledDiscriminator, self).__init__()
        self.max_stage = 17
        self.sn = sn

        with self.init_scope():
            # NOTE: called in reversed order.
            self.shared_blocks = chainer.ChainList(
                DiscriminatorBlock(ch,
                                   ch,
                                   enable_blur=enable_blur,
                                   sn=sn,
                                   res=res),  # 16
                DiscriminatorBlock(ch,
                                   ch,
                                   enable_blur=enable_blur,
                                   sn=sn,
                                   res=res),  # 32
                DiscriminatorBlock(ch // 2,
                                   ch,
                                   enable_blur=enable_blur,
                                   sn=sn,
                                   res=res),  # 64
                DiscriminatorBlock(ch // 4,
                                   ch // 2,
                                   enable_blur=enable_blur,
                                   sn=sn,
                                   res=res),  # 128
                DiscriminatorBlock(ch // 8,
                                   ch // 4,
                                   enable_blur=enable_blur,
                                   sn=sn,
                                   res=res),  # 256
                DiscriminatorBlock(ch // 16,
                                   ch // 8,
                                   enable_blur=enable_blur,
                                   sn=sn,
                                   res=res),  # 512
                DiscriminatorBlock(ch // 32,
                                   ch // 16,
                                   enable_blur=enable_blur,
                                   sn=sn,
                                   res=res),
            )  # 1024

            self.camera_parameter_blocks = chainer.Sequential(  # camera parameter variant feature
                DiscriminatorBlock(ch,
                                   ch,
                                   enable_blur=enable_blur,
                                   sn=sn,
                                   res=res),  # 8
                DiscriminatorBlockBase(
                    ch, sn=sn,
                    out_dim=9),  # 4  # Euler angles, translation vector
            )

            self.z_regression_blocks = chainer.Sequential(  # camera parameter invariant feature
                DiscriminatorBlock(ch,
                                   ch,
                                   enable_blur=enable_blur,
                                   sn=sn,
                                   res=res),  # 8
                DiscriminatorBlockBase(ch, sn=sn, out_dim=ch * num_z),  # 4
            )

            self.discriminator_blocks = chainer.Sequential(  # camera parameter variant feature
                DiscriminatorBlock(ch,
                                   ch,
                                   enable_blur=enable_blur,
                                   sn=sn,
                                   res=res),  # 8
                DiscriminatorBlockBase(
                    ch, sn=sn),  # 4  # real/fake score (out_dim defaults to 1)
            )

            if not sn:
                self.ins = chainer.ChainList(
                    EqualizedConv2d(3, ch, 1, 1, 0),
                    EqualizedConv2d(3, ch, 1, 1, 0),
                    EqualizedConv2d(3, ch, 1, 1, 0),
                    EqualizedConv2d(3, ch, 1, 1, 0),
                    EqualizedConv2d(3, ch // 2, 1, 1, 0),
                    EqualizedConv2d(3, ch // 4, 1, 1, 0),
                    EqualizedConv2d(3, ch // 8, 1, 1, 0),
                    EqualizedConv2d(3, ch // 16, 1, 1, 0),
                    EqualizedConv2d(3, ch // 32, 1, 1, 0),
                )
            else:
                w = chainer.initializers.Uniform(1)
                self.ins = chainer.ChainList(
                    L.Convolution2D(3, ch, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()),
                    L.Convolution2D(3, ch, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()),
                    L.Convolution2D(3, ch, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()),
                    L.Convolution2D(3, ch, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()),
                    L.Convolution2D(3, ch // 2, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()),
                    L.Convolution2D(3, ch // 4, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()),
                    L.Convolution2D(3, ch // 8, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()),
                    L.Convolution2D(3, ch // 16, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()),
                    L.Convolution2D(3, ch // 32, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()),
                )
            self.camera_param_discriminator = CameraParamDiscriminator()
            self.enable_blur = enable_blur
Example #20
    def __init__(self,
                 ch=512,
                 out_dim=1,
                 enable_blur=False,
                 sn=False,
                 res=False):
        super(Discriminator, self).__init__()
        self.max_stage = 17
        self.sn = sn

        with self.init_scope():
            # NOTE: called in reversed order.
            self.blocks = chainer.ChainList(
                DiscriminatorBlockBase(ch, out_dim, sn=sn),  # 4
                DiscriminatorBlock(ch,
                                   ch,
                                   enable_blur=enable_blur,
                                   sn=sn,
                                   res=res),  # 8
                DiscriminatorBlock(ch,
                                   ch,
                                   enable_blur=enable_blur,
                                   sn=sn,
                                   res=res),  # 16
                DiscriminatorBlock(ch,
                                   ch,
                                   enable_blur=enable_blur,
                                   sn=sn,
                                   res=res),  # 32
                DiscriminatorBlock(ch // 2,
                                   ch,
                                   enable_blur=enable_blur,
                                   sn=sn,
                                   res=res),  # 64
                DiscriminatorBlock(ch // 4,
                                   ch // 2,
                                   enable_blur=enable_blur,
                                   sn=sn,
                                   res=res),  # 128
            )

            if not sn:
                self.ins = chainer.ChainList(
                    EqualizedConv2d(3, ch, 1, 1, 0),
                    EqualizedConv2d(3, ch, 1, 1, 0),
                    EqualizedConv2d(3, ch, 1, 1, 0),
                    EqualizedConv2d(3, ch, 1, 1, 0),
                    EqualizedConv2d(3, ch // 2, 1, 1, 0),
                    EqualizedConv2d(3, ch // 4, 1, 1, 0),
                )
            else:
                w = chainer.initializers.Uniform(1)
                self.ins = chainer.ChainList(
                    L.Convolution2D(3, ch, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()),
                    L.Convolution2D(3, ch, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()),
                    L.Convolution2D(3, ch, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()),
                    L.Convolution2D(3, ch, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()),
                    L.Convolution2D(3, ch // 2, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()),
                    L.Convolution2D(3, ch // 4, 1, 1, 0, initialW=w).add_hook(
                        SpectralNormalization()),
                )

            self.enable_blur = enable_blur