Exemplo n.º 1
0
    def __init__(self, code=512, depth=3):
        """Build a discriminator head: (depth - 1) Linear+LeakyReLU pairs
        followed by a final Linear(code, 1) scoring layer.

        Args:
            code: width of every hidden linear layer.
            depth: total number of linear stages (depth - 1 hidden + 1 output).
        """
        super().__init__()

        layers = []
        for _ in range(depth - 1):
            layers.append(lreq.Linear(code, code))
            layers.append(nn.LeakyReLU(0.2))
        layers.append(lreq.Linear(code, 1))
        self.disc = nn.Sequential(*layers)
Exemplo n.º 2
0
    def __init__(self, code=512, depth=4):
        """Build a mapping network: BallProjection, then (depth - 1)
        Linear+LeakyReLU pairs, ending with a plain Linear(code, code).

        Args:
            code: dimensionality of the latent code (kept constant throughout).
            depth: total number of linear stages.
        """
        super().__init__()
        self.code = code
        self.act = nn.LeakyReLU(0.2)

        modules = [BallProjection()]
        for _ in range(depth - 1):
            modules.append(lreq.Linear(code, code))
            modules.append(nn.LeakyReLU(0.2))
        modules.append(lreq.Linear(code, code))
        self.f = nn.Sequential(*modules)
Exemplo n.º 3
0
    def __init__(self, n_channels, code):
        """Instance-norm block with a learned affine map from a style code.

        Args:
            n_channels: number of feature-map channels to normalize.
            code: dimensionality of the style code; it is projected to
                2 * n_channels values (per-channel scale and bias).
        """
        super().__init__()

        # NOTE: 'insance_norm' is a historical typo kept for backward
        # compatibility with existing callers; a correctly spelled alias
        # pointing at the same module is provided below.
        self.insance_norm = nn.InstanceNorm2d(n_channels,
                                              affine=False,
                                              eps=1e-8)
        self.instance_norm = self.insance_norm  # preferred, correctly spelled name
        # Projects the style code to per-channel (scale, bias) pairs.
        self.A = lreq.Linear(code, n_channels * 2)
Exemplo n.º 4
0
    def __init__(self, scale=512):
        """scale matches input and does not change shape, only values.

        Accepts either an int (square: in == out features) or a 2-tuple
        of (in_features, out_features).
        """
        super().__init__()

        dims = (scale, scale) if isinstance(scale, int) else scale
        self.A = lreq.Linear(dims[0], dims[1], bias=True)
Exemplo n.º 5
0
    def build_layer(self, code):
        """Return a list of modules: Linear(code, code), then an optional
        normalization (when self.norm is truthy) and an optional activation
        (when self.act is truthy), both built through Factory.
        """
        modules = [lreq.Linear(code, code)]

        if self.norm:
            modules.append(Factory.get_normalization(Factory.make_norm_1d(self.norm)))
        if self.act:
            modules.append(Factory.get_activation(self.act))

        return modules
Exemplo n.º 6
0
    def __init__(self,
                 inp_c,
                 oup_c,
                 code,
                 final=False,
                 blur_downsample=False,
                 fused_scale=True,
                 learn_blur=False):
        """Discriminator block: a 3x3 conv with style extraction, followed by
        either a second conv (regular block) or a dense head (final block).

        Args:
            inp_c: input channel count.
            oup_c: output channel count.
            code: style-code dimensionality for the style_mapping layers.
            final: if True, use a Linear head over a flattened 4x4 map
                instead of a second convolution.
            blur_downsample: flag stored for use elsewhere (not read here).
            fused_scale: accepted for interface compatibility; not stored
                by this constructor.
            learn_blur: flag stored for use elsewhere (not read here).
        """
        super().__init__()

        # Flags consumed by the forward pass.
        self.final = final
        self.blur_downsample = blur_downsample
        self.learn_blur = learn_blur

        # Affine transform sized to the stage it is applied to.
        self.learned_affine = AffineTransform(learnable=True,
                                              scale=oup_c if final else inp_c)

        # Instance norms for the two stages.
        self.in1 = nn.InstanceNorm2d(inp_c, affine=False)
        self.in2 = nn.InstanceNorm2d(oup_c, affine=False)

        # First 3x3 conv preserves the channel count; its statistics feed
        # style_mapping1 (2 * inp_c -> code).
        self.conv1 = lreq.Conv2d(inp_c, inp_c, kernel_size=3, stride=1, padding=1)
        self.style_mapping1 = lreq.Linear(2 * inp_c, code)

        if final:
            # Final block: flatten a 4x4 feature map into oup_c features.
            self.fc = lreq.Linear(inp_c * 4 * 4, oup_c)
            self.style_mapping2 = lreq.Linear(oup_c, code)
        else:
            self.conv2 = lreq.Conv2d(oup_c if False else inp_c,
                                     oup_c,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1)
            self.style_mapping2 = lreq.Linear(2 * oup_c, code)

        self.act = nn.LeakyReLU(0.2)
        self.downsample = nn.AvgPool2d(2, 2)
        self.blur = Blur(inp_c)
Exemplo n.º 7
0
    def __init__(self, code=512, depth=3, norm='layer', act='mish', verbose=False):
        """Discriminator head built from (depth - 1) build_layer blocks plus
        a final Linear(code, 1) scoring layer.

        Args:
            code: width of each hidden layer.
            depth: total number of linear stages (depth - 1 hidden + output).
            norm: normalization name forwarded to build_layer via self.norm.
            act: activation name forwarded to build_layer via self.act.
            verbose: when True, print a description of each block as it is built.
        """
        super().__init__()

        self.norm = norm
        self.act = act

        self.disc = []
        for i in range(depth - 1):
            # BUG FIX: the original message interpolated an undefined
            # 'skip_delay' variable, raising NameError whenever verbose=True.
            if verbose:
                print(f"[Discriminator]\t Block {i} for {code}d code using norm {norm}, act {act}")
            self.disc.extend(self.build_layer(code))
        self.disc = self.disc + [lreq.Linear(code, 1)]
        self.disc = nn.Sequential(*self.disc)
Exemplo n.º 8
0
    def __init__(self, inputs, outputs, last=False, fused_scale=False, dense=False):
        """Encoder block: a 3x3 conv plus blur, then either a dense head,
        a fused stride-2 conv, or a plain 3x3 conv for the second stage.

        Args:
            inputs: input channel count.
            outputs: output channel/feature count.
            last: if True, conv_1 accepts one extra input channel.
            fused_scale: if True (and not dense), fold downsampling into
                conv_2 via stride 2 with transform_kernel=True.
            dense: if True, replace conv_2 with Linear(inputs * 4 * 4, outputs).
        """
        super().__init__()

        self.last = last
        self.dense_ = dense
        self.fused_scale = fused_scale

        # The last block receives one additional input channel
        # (presumably a minibatch-statistics channel — confirm against caller).
        extra = 1 if last else 0
        self.conv_1 = lreq.Conv2d(inputs + extra, inputs, 3, 1, 1, bias=False)
        self.bias_1 = nn.Parameter(torch.Tensor(1, inputs, 1, 1))
        self.blur = BlurSimple(inputs)

        if dense:
            self.dense = lreq.Linear(inputs * 4 * 4, outputs)
        elif fused_scale:
            # Downsampling fused into the convolution (stride 2).
            self.conv_2 = lreq.Conv2d(inputs, outputs, 3, 2, 1, bias=False,
                                      transform_kernel=True)
        else:
            self.conv_2 = lreq.Conv2d(inputs, outputs, 3, 1, 1, bias=False)

        self.bias_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))

        # Biases start at zero; zero_() runs outside autograd tracking.
        with torch.no_grad():
            self.bias_1.zero_()
            self.bias_2.zero_()