Code example #1
File: cgan_pd_32.py  Project: yqGANs/mimicry
    def __init__(self, num_classes, bottom_width=4, nz=128, ngf=256, **kwargs):
        super().__init__(nz=nz,
                         ngf=ngf,
                         bottom_width=bottom_width,
                         num_classes=num_classes,
                         **kwargs)

        # Build the layers
        self.l1 = nn.Linear(self.nz, (self.bottom_width**2) * self.ngf)
        self.block2 = GBlock(self.ngf,
                             self.ngf,
                             upsample=True,
                             num_classes=self.num_classes)
        self.block3 = GBlock(self.ngf,
                             self.ngf,
                             upsample=True,
                             num_classes=self.num_classes)
        self.block4 = GBlock(self.ngf,
                             self.ngf,
                             upsample=True,
                             num_classes=self.num_classes)
        self.b5 = nn.BatchNorm2d(self.ngf)
        self.c5 = nn.Conv2d(self.ngf, 3, 3, 1, padding=1)
        self.activation = nn.ReLU(True)

        # Initialise the weights
        nn.init.xavier_uniform_(self.l1.weight.data, 1.0)
        nn.init.xavier_uniform_(self.c5.weight.data, 1.0)
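
The snippet above shows only the constructor of this 32x32 conditional (cGAN-PD) generator. As a rough sketch, and not code taken from the project, the layers would compose in the usual mimicry pattern below, assuming GBlock accepts a feature map plus class labels for conditional batch norm and that the output is squashed with tanh:

import torch

# Plausible forward pass for the module above (a sketch under the stated
# assumptions, not taken from cgan_pd_32.py).
def cgan_pd32_forward(net, z, y):
    h = net.l1(z)                                                    # (N, 4*4*ngf)
    h = h.view(z.shape[0], -1, net.bottom_width, net.bottom_width)   # (N, ngf, 4, 4)
    h = net.block2(h, y)                                             # 4x4   -> 8x8
    h = net.block3(h, y)                                             # 8x8   -> 16x16
    h = net.block4(h, y)                                             # 16x16 -> 32x32
    h = net.activation(net.b5(h))
    return torch.tanh(net.c5(h))                                     # (N, 3, 32, 32)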
Code example #2
    def __init__(self,
                 num_classes,
                 nz=128,
                 ngf=1024,
                 bottom_width=4,
                 **kwargs):
        super().__init__(nz=nz,
                         ngf=ngf,
                         bottom_width=bottom_width,
                         num_classes=num_classes,
                         **kwargs)

        # Build the layers
        self.l1 = SNLinear(self.nz, (self.bottom_width**2) * self.ngf)
        self.block2 = GBlock(self.ngf,
                             self.ngf,
                             upsample=True,
                             num_classes=self.num_classes,
                             spectral_norm=True)
        self.block3 = GBlock(self.ngf,
                             self.ngf >> 1,
                             upsample=True,
                             num_classes=self.num_classes,
                             spectral_norm=True)
        self.block4 = GBlock(self.ngf >> 1,
                             self.ngf >> 2,
                             upsample=True,
                             num_classes=self.num_classes,
                             spectral_norm=True)
        self.block5 = GBlock(self.ngf >> 2,
                             self.ngf >> 3,
                             upsample=True,
                             num_classes=self.num_classes,
                             spectral_norm=True)
        self.block6 = GBlock(self.ngf >> 3,
                             self.ngf >> 4,
                             upsample=True,
                             num_classes=self.num_classes,
                             spectral_norm=True)
        self.b7 = nn.BatchNorm2d(self.ngf >> 4)
        self.c7 = SNConv2d(self.ngf >> 4, 3, 3, 1, padding=1)
        self.activation = nn.ReLU(True)

        # SA block
        self.attn_block = SelfAttention(self.ngf >> 2, spectral_norm=True)

        # Initialise the weights
        nn.init.xavier_uniform_(self.l1.weight.data, 1.0)
        nn.init.xavier_uniform_(self.c7.weight.data, 1.0)
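
Again only __init__ is visible. The channel sizes pin down where the self-attention block can sit: it is built with ngf >> 2 channels, which matches block4's output, so a plausible forward pass (a sketch, assuming GBlock takes (x, y) and SelfAttention takes a single feature map) inserts it between block4 and block5 on the way from the 4x4 seed to 4 * 2**5 = 128 pixels:

import torch

# Sketch of a SAGAN-style conditional forward pass consistent with the layer
# definitions above; not code from the project.
def sagan128_forward(net, z, y):
    h = net.l1(z).view(z.shape[0], -1, net.bottom_width, net.bottom_width)
    h = net.block2(h, y)          # 4   -> 8,   ngf      channels
    h = net.block3(h, y)          # 8   -> 16,  ngf >> 1 channels
    h = net.block4(h, y)          # 16  -> 32,  ngf >> 2 channels
    h = net.attn_block(h)         # self-attention at 32x32 (matches ngf >> 2)
    h = net.block5(h, y)          # 32  -> 64,  ngf >> 3 channels
    h = net.block6(h, y)          # 64  -> 128, ngf >> 4 channels
    h = net.activation(net.b7(h))
    return torch.tanh(net.c7(h))  # (N, 3, 128, 128)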
Code example #3
File: ssgan_64.py  Project: yqGANs/mimicry
    def __init__(self, nz=128, ngf=1024, bottom_width=4, **kwargs):
        super().__init__(nz=nz, ngf=ngf, bottom_width=bottom_width, **kwargs)

        # Build the layers
        self.l1 = nn.Linear(self.nz, (self.bottom_width**2) * self.ngf)
        self.block2 = GBlock(self.ngf, self.ngf >> 1, upsample=True)
        self.block3 = GBlock(self.ngf >> 1, self.ngf >> 2, upsample=True)
        self.block4 = GBlock(self.ngf >> 2, self.ngf >> 3, upsample=True)
        self.block5 = GBlock(self.ngf >> 3, self.ngf >> 4, upsample=True)
        self.b6 = nn.BatchNorm2d(self.ngf >> 4)
        self.c6 = nn.Conv2d(self.ngf >> 4, 3, 3, 1, padding=1)
        self.activation = nn.ReLU(True)

        # Initialise the weights
        nn.init.xavier_uniform_(self.l1.weight.data, 1.0)
        nn.init.xavier_uniform_(self.c6.weight.data, 1.0)
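
A hedged usage sketch for this unconditional 64x64 generator: the class name and module path below are assumed from the file name and the upstream torch_mimicry packaging (the snippet does not show them), as is the netG(z) -> image interface; the four upsampling GBlocks take the 4x4 seed to 4 * 2**4 = 64 pixels.

import torch
from torch_mimicry.nets import ssgan  # assumed packaging; not shown in the snippet

netG = ssgan.SSGANGenerator64()       # nz=128, ngf=1024, bottom_width=4, as above
z = torch.randn(8, 128)               # a batch of latent vectors
with torch.no_grad():
    fake = netG(z)                    # expected shape (8, 3, 64, 64)
print(fake.shape)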
Code example #4
    def __init__(self,
                 nz=128,
                 ngf=256,
                 bottom_width=4,
                 use_nfl=True,
                 double_layers=False,
                 **kwargs):
        super().__init__(nz=nz, ngf=ngf, bottom_width=bottom_width, **kwargs)

        self.use_nfl = use_nfl
        if use_nfl:
            print('using nfl!')
            #from networks.cosgrove.attentive_densenet import AttentiveDensenet
            self.ad = AttentiveDensenet(layer_channels=[
                self.ngf, self.ngf, self.ngf, self.ngf, self.ngf, self.ngf,
                self.ngf, self.ngf
            ],
                                        key_size=32,
                                        val_size=32,
                                        n_heads=4)

        # Build the layers
        self.l1 = nn.Linear(self.nz, (self.bottom_width**2) * self.ngf)
        self.block2 = GBlock(self.ngf, self.ngf, upsample=True)
        self.block3 = GBlock(self.ngf, self.ngf, upsample=True)
        self.block4 = GBlock(self.ngf, self.ngf, upsample=True)

        #self.b5 = nn.BatchNorm2d(self.ngf)
        #self.c5 = nn.Conv2d(self.ngf, 3, 3, 1, padding=1)

        self.l1_b = nn.Linear(self.nz, (self.bottom_width**2) * self.ngf)
        self.block2_b = GBlock(self.ngf, self.ngf, upsample=True)
        self.block3_b = GBlock(self.ngf, self.ngf, upsample=True)
        self.block4_b = GBlock(self.ngf, self.ngf, upsample=True)
        self.b5_b = nn.BatchNorm2d(self.ngf)
        self.c5_b = nn.Conv2d(self.ngf, 3, 3, 1, padding=1)

        self.double_layers = double_layers
        if double_layers:
            self.block2_bd = GBlock(self.ngf, self.ngf, upsample=False)
            self.block3_bd = GBlock(self.ngf, self.ngf, upsample=False)
            self.block4_bd = GBlock(self.ngf, self.ngf, upsample=False)

        self.activation = nn.ReLU(True)

        # Initialise the weights
        nn.init.xavier_uniform_(self.l1.weight.data, 1.0)
        nn.init.xavier_uniform_(self.l1_b.weight.data, 1.0)
        nn.init.xavier_uniform_(self.c5_b.weight.data, 1.0)
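
Only the constructor is shown here, and how the two branches exchange features through AttentiveDensenet is not visible, so the sketch below deliberately leaves that step out. What the definitions do imply: each branch has three upsampling GBlocks (so 4 * 2**3 = 32 pixels), and only the "_b" branch ends in an output head (b5_b/c5_b), whose stand-alone forward would follow the usual pattern (a sketch, assuming these unconditional GBlocks can be called on a feature map alone):

import torch

# Minimal sketch of the "_b" branch by itself; the attentive feature exchange
# between branches is omitted because it is not shown in the snippet.
def branch_b_forward(net, z):
    h = net.l1_b(z).view(z.shape[0], -1, net.bottom_width, net.bottom_width)
    h = net.block2_b(h)             # 4x4   -> 8x8
    h = net.block3_b(h)             # 8x8   -> 16x16
    h = net.block4_b(h)             # 16x16 -> 32x32
    h = net.activation(net.b5_b(h))
    return torch.tanh(net.c5_b(h))  # (N, 3, 32, 32)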