def __init__(self, num_layers, mode='ir', opts=None):
    """Build an IR/IR-SE backbone whose last feature map is mapped into W+.

    Args:
        num_layers: backbone depth; one of 50, 100 or 152.
        mode: 'ir' for plain IR bottlenecks, 'ir_se' for squeeze-excite ones.
        opts: options object; must provide `n_styles` and `input_nc`.

    Raises:
        ValueError: if `num_layers` or `mode` is not a supported value.
    """
    super(BackboneEncoderUsingLastLayerIntoWPlus, self).__init__()
    print('Using BackboneEncoderUsingLastLayerIntoWPlus')
    # Validate with explicit raises rather than `assert`, which is silently
    # stripped when Python runs with -O; also guarantees `unit_module` is
    # always bound before use.
    if num_layers not in (50, 100, 152):
        raise ValueError('num_layers should be 50,100, or 152')
    if mode == 'ir':
        unit_module = bottleneck_IR
    elif mode == 'ir_se':
        unit_module = bottleneck_IR_SE
    else:
        raise ValueError('mode should be ir or ir_se')
    blocks = get_blocks(num_layers)
    self.n_styles = opts.n_styles
    self.input_layer = Sequential(
        Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False),
        BatchNorm2d(64), PReLU(64))
    # 7x7 pooled features -> single 512-d code, later expanded to n_styles.
    self.output_layer_2 = Sequential(BatchNorm2d(512),
                                     torch.nn.AdaptiveAvgPool2d((7, 7)),
                                     Flatten(), Linear(512 * 7 * 7, 512))
    self.linear = EqualLinear(512, 512 * self.n_styles, lr_mul=1)
    # Flatten the per-stage bottleneck specs into one sequential trunk.
    modules = []
    for block in blocks:
        for bottleneck in block:
            modules.append(
                unit_module(bottleneck.in_channel, bottleneck.depth,
                            bottleneck.stride))
    self.body = Sequential(*modules)
# Beispiel #2
# 0
    def __init__(self, opts):
        """Mapper: PixelNorm front-end followed by four equalized FC layers."""
        super(Mapper, self).__init__()

        self.opts = opts
        # One normalization layer, then 4 identical 512->512 EqualLinear
        # blocks with fused leaky-ReLU activation.
        layers = [PixelNorm()]
        layers += [
            EqualLinear(512, 512, lr_mul=0.01, activation='fused_lrelu')
            for _ in range(4)
        ]
        self.mapping = nn.Sequential(*layers)
 def __init__(self, in_c, out_c, spatial):
     """Strided-conv pyramid shrinking a `spatial`-sized map, then EqualLinear."""
     super(GradualStyleBlock, self).__init__()
     self.out_c = out_c
     self.spatial = spatial
     # Each stride-2 conv halves the spatial extent, so log2(spatial)
     # of them bring the feature map down to 1x1.
     num_pools = int(np.log2(spatial))
     convs = [Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1),
              nn.LeakyReLU()]
     for _ in range(num_pools - 1):
         convs.append(Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1))
         convs.append(nn.LeakyReLU())
     self.convs = nn.Sequential(*convs)
     self.linear = EqualLinear(out_c, out_c, lr_mul=1)
# Beispiel #4
# 0
 def __init__(self, opts=None):
     """Encoder mapping a minutiae input to a single 512-d W vector."""
     super(MntToVecEncoderEncoderIntoW, self).__init__()
     print('Using MntToVecEncoderEncoderIntoW')
     self.input_layer = Sequential(
         Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False),
         BatchNorm2d(64), PReLU(64))
     self.output_pool = torch.nn.AdaptiveAvgPool2d((1, 1))
     self.linear = EqualLinear(512, 512, lr_mul=1)
     # Fixed 50-layer SE-bottleneck trunk, flattened into one Sequential.
     trunk = [
         bottleneck_SE(spec.in_channel, spec.depth, spec.stride)
         for block in get_blocks(num_layers=50)
         for spec in block
     ]
     self.body = Sequential(*trunk)
# Beispiel #5
# 0
 def __init__(self, num_layers, mode='ir', opts=None):
     """IR/IR-SE backbone whose pooled last layer maps into a single W vector.

     Args:
         num_layers: backbone depth; one of 50, 100 or 152.
         mode: 'ir' for plain IR bottlenecks, 'ir_se' for squeeze-excite ones.
         opts: options object; must provide `stylegan_size`.

     Raises:
         ValueError: if `num_layers` or `mode` is not a supported value.
     """
     super(BackboneEncoderUsingLastLayerIntoW, self).__init__()
     print('Using BackboneEncoderUsingLastLayerIntoW')
     # Validate with explicit raises rather than `assert`, which is silently
     # stripped when Python runs with -O; also guarantees `unit_module` is
     # always bound before use.
     if num_layers not in (50, 100, 152):
         raise ValueError('num_layers should be 50,100, or 152')
     if mode == 'ir':
         unit_module = bottleneck_IR
     elif mode == 'ir_se':
         unit_module = bottleneck_IR_SE
     else:
         raise ValueError('mode should be ir or ir_se')
     blocks = get_blocks(num_layers)
     self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                   BatchNorm2d(64), PReLU(64))
     self.output_pool = torch.nn.AdaptiveAvgPool2d((1, 1))
     self.linear = EqualLinear(512, 512, lr_mul=1)
     # Flatten the per-stage bottleneck specs into one sequential trunk.
     modules = []
     for block in blocks:
         for bottleneck in block:
             modules.append(
                 unit_module(bottleneck.in_channel, bottleneck.depth,
                             bottleneck.stride))
     self.body = Sequential(*modules)
     # math.log2 is exact for powers of two, unlike math.log(x, 2), whose
     # float rounding can make int() truncate one level too low.
     log_size = int(math.log2(opts.stylegan_size))
     self.style_count = 2 * log_size - 2