Example #1
    def __init__(self, num_features, concat_vector_dim, spectral_norm=False):
        super().__init__()
        self.num_features = num_features
        # Affine-free BatchNorm; scale and shift come from the condition vector.
        self.bn = nn.BatchNorm2d(num_features, momentum=0.001, affine=False)

        # Project the condition vector to per-channel gain and bias.
        if spectral_norm:
            self.gain = SNLinear(concat_vector_dim, num_features, bias=False)
            self.bias = SNLinear(concat_vector_dim, num_features, bias=False)
        else:
            self.gain = nn.Linear(concat_vector_dim, num_features, bias=False)
            self.bias = nn.Linear(concat_vector_dim, num_features, bias=False)
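
For context, this is a BigGAN-style conditional batch-norm module: `gain` and `bias` project the concatenated condition vector to a per-channel scale and shift. A minimal sketch of the matching forward pass, assuming the module is called with `y` of shape `[N, concat_vector_dim]` (the method below is an assumption, not part of the original snippet):

    def forward(self, x, y):
        # Normalize without affine params, then modulate with the
        # condition-dependent per-channel gain and bias.
        out = self.bn(x)
        gain = (1 + self.gain(y)).view(y.size(0), self.num_features, 1, 1)
        bias = self.bias(y).view(y.size(0), self.num_features, 1, 1)
        return out * gain + bias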
Example #2
    def __init__(self,
                 ndf=128,
                 loss_type="hinge",
                 is_amp=False,
                 is_transforms=False,
                 real_lambda=10,
                 fake_lambda=5,
                 input_shape=None,
                 **kwargs):
        super().__init__(ndf=ndf,
                         loss_type=loss_type,
                         **kwargs)

        self.is_amp = is_amp
        self.is_transforms = is_transforms
        self.real_lambda = real_lambda
        self.fake_lambda = fake_lambda
        self.input_shape = input_shape

        self.block1 = DBlockOptimized(3, self.ndf)
        self.block2 = DBlock(self.ndf, self.ndf, downsample=True)
        self.block3 = DBlock(self.ndf, self.ndf, downsample=False)
        self.block4 = DBlock(self.ndf, self.ndf, downsample=False)
        self.l5 = SNLinear(self.ndf, 1)
        self.activation = nn.ReLU(inplace=True)

        # initialize the weights
        nn.init.xavier_normal_(self.l5.weight.data, 1.0)

        # get transforms
        if self.is_transforms:
            self.apply_transforms = SimCLRAugmentation(self.input_shape)

        # projection MLP
        self.projection = nn.Linear(self.ndf, self.ndf)
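
The SimCLR augmentation and projection MLP suggest a contrastive feature head on top of the discriminator trunk. A hedged sketch of how such a head is typically used, assuming `torch` and `torch.nn.functional as F` are imported at module level (the method name `project` is hypothetical):

    def project(self, x):
        # Encode, globally sum-pool, then project; the normalized result
        # usually feeds a contrastive (e.g. NT-Xent) loss.
        h = self.block1(x)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        h = torch.sum(self.activation(h), dim=(2, 3))
        return F.normalize(self.projection(h), dim=1)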
Example #3
    def __init__(self,
                 ndf=128,
                 loss_type="ns",
                 is_amp=False,
                 cutmix=False,
                 consistency=False,
                 warmup=50000,
                 **kwargs):
        super().__init__(ndf=ndf, loss_type=loss_type, **kwargs)
        self.is_amp = is_amp
        self.cutmix = cutmix
        self.consistency = consistency
        self.warmup = warmup  # consistency-loss warmup steps

        # build encoder
        self.down1 = DBlockOptimized(3, self.ndf)
        self.down2 = DBlock(self.ndf, self.ndf, downsample=True)
        self.down3 = DBlock(self.ndf, self.ndf, downsample=False)
        self.down4 = DBlock(self.ndf, self.ndf, downsample=False)
        self.down_act = nn.ReLU(inplace=True)
        self.down_l5 = SNLinear(self.ndf, 1)
        # self-attention
        self.non_local_block = SelfAttention(self.ndf, spectral_norm=True)
        # initialize weights
        nn.init.xavier_uniform_(self.down_l5.weight.data, 1.0)

        # build decoder
        self.up1 = DBlockDecoder(self.ndf, self.ndf, upsample=False)
        self.up2 = DBlockDecoder(self.ndf * 2, self.ndf, upsample=False)
        self.up3 = DBlockDecoder(self.ndf * 2, self.ndf, upsample=True)
        self.up4 = DBlockDecoder(self.ndf * 2, self.ndf, upsample=True)
        self.up_act = nn.ReLU(inplace=True)
        self.up_c5 = SNConv2d(self.ndf, 1, kernel_size=3, stride=1, padding=1)
        # initialize weights
        nn.init.xavier_uniform_(self.up_c5.weight.data, 1.0)
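
The `ndf * 2` input channels of `up2`-`up4` indicate that encoder features are concatenated into the decoder, i.e. a U-Net-style discriminator that emits both an image-level logit and a per-pixel decision map. A hedged sketch of the wiring; the placement of the self-attention block and the exact skip pattern are assumptions:

    def forward(self, x):
        # Encoder; intermediate features are kept as skips for the decoder.
        h1 = self.down1(x)
        h2 = self.down2(h1)
        h3 = self.down3(h2)
        h4 = self.down4(self.non_local_block(h3))
        # Global (image-level) real/fake logit.
        out_global = self.down_l5(torch.sum(self.down_act(h4), dim=(2, 3)))
        # Decoder with concatenated encoder skips (hence the ndf * 2 inputs).
        u = self.up1(h4)
        u = self.up2(torch.cat([u, h3], dim=1))
        u = self.up3(torch.cat([u, h2], dim=1))
        u = self.up4(torch.cat([u, h1], dim=1))
        out_pixel = self.up_c5(self.up_act(u))  # per-pixel real/fake map
        return out_global, out_pixel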
Example #4
    def __init__(self, ndf=128, **kwargs):
        super().__init__(ndf=ndf, **kwargs)

        # Build layers
        self.block1 = DBlockOptimized(3, self.ndf)
        self.block2 = DBlock(self.ndf, self.ndf, downsample=True)
        self.block3 = DBlock(self.ndf, self.ndf, downsample=False)
        self.block4 = DBlock(self.ndf, self.ndf, downsample=False)
        self.l5 = SNLinear(self.ndf, 1)

        # Rotation class prediction layer
        self.l_y = SNLinear(self.ndf, self.num_classes)

        # Initialise the weights
        nn.init.xavier_uniform_(self.l5.weight.data, 1.0)
        nn.init.xavier_uniform_(self.l_y.weight.data, 1.0)

        self.activation = nn.ReLU(True)
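
Examples #4 and #5 are SSGAN-style discriminators with an auxiliary rotation-prediction head. A minimal sketch of the shared forward pass, with the wiring assumed from the constructor:

    def forward(self, x):
        h = self.block1(x)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        h = torch.sum(self.activation(h), dim=(2, 3))  # global sum pooling
        output = self.l5(h)           # real/fake logit
        output_classes = self.l_y(h)  # rotation logits
        return output, output_classes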
Example #5
    def __init__(self, ndf=128, loss_type='hinge', **kwargs):
        super().__init__(ndf=ndf, loss_type=loss_type, **kwargs)
        self.num_classes = 4  # four rotation classes: 0, 90, 180, 270 degrees
        self.ss_loss_scale = 1.0  # weight on the self-supervised loss

        # Build layers
        self.block1 = DBlockOptimized(3, self.ndf)
        self.block2 = DBlock(self.ndf, self.ndf, downsample=True)
        self.block3 = DBlock(self.ndf, self.ndf, downsample=False)
        self.block4 = DBlock(self.ndf, self.ndf, downsample=False)
        self.l5 = SNLinear(self.ndf, 1)
        self.activation = nn.ReLU(True)

        nn.init.xavier_uniform_(self.l5.weight.data, 1.0)

        # Rotation class prediction layer
        self.l_y = SNLinear(self.ndf, self.num_classes)
        nn.init.xavier_uniform_(self.l_y.weight.data, 1.0)
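
Here `num_classes = 4` corresponds to the four rotations of the self-supervised task, and `ss_loss_scale` weights its loss. A hedged helper showing how the rotated batch and its labels are typically built (the function below is hypothetical, not from the original):

def rotate_batch(x):
    # Stack the four 90-degree rotations of each image; the label is the
    # rotation index, which l_y predicts in the forward pass.
    rotated = torch.cat([torch.rot90(x, k, dims=(2, 3)) for k in range(4)], dim=0)
    labels = torch.arange(4, device=x.device).repeat_interleave(x.size(0))
    return rotated, labels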
Example #6
    def __init__(self,
                 num_classes,
                 ndf=128,
                 loss_type="hinge",
                 fq_type=None,
                 dict_size=10,
                 quant_layers=None,
                 fq_strength=10.0,
                 is_amp=False,
                 **kwargs):
        super().__init__(ndf=ndf,
                         loss_type=loss_type,
                         num_classes=num_classes,
                         **kwargs)

        self.fq_type = fq_type
        self.dict_size = dict_size
        self.quant_layers = quant_layers
        self.fq_strength = fq_strength
        self.is_amp = is_amp

        if isinstance(self.quant_layers, int):
            self.quant_layers = [self.quant_layers]

        if self.fq_type is not None:
            assert self.fq_type in ['Normal', 'EMA'], \
                "set fq_type within ['Normal', 'EMA']"

        # Build layers
        self.block1 = DBlockOptimized(3, self.ndf)
        self.block2 = DBlock(self.ndf, self.ndf, downsample=True)
        self.block3 = DBlock(self.ndf, self.ndf, downsample=False)
        self.block4 = DBlock(self.ndf, self.ndf, downsample=False)
        self.l5 = SNLinear(self.ndf, 1)
        self.activation = nn.ReLU(True)

        # Produce label vector from trained embedding
        self.l_y = SNEmbedding(num_embeddings=self.num_classes,
                               embedding_dim=self.ndf)

        # Initialise the weights
        nn.init.xavier_uniform_(self.l5.weight.data, 1.0)
        nn.init.xavier_uniform_(self.l_y.weight.data, 1.0)

        if self.fq_type:
            assert self.quant_layers is not None, "should set quant_layers like [3]"
            assert min(self.quant_layers) > 1 and max(self.quant_layers) < 5, \
                "quant_layers should be in the range [2, 4]"
            for layer in self.quant_layers:
                out_channels = getattr(self, f"block{layer}").out_channels
                if self.fq_type == "Normal":
                    setattr(self, f"fq{layer}",
                            FeatureQuantizer(out_channels, 2**self.dict_size))
                elif self.fq_type == "EMA":
                    setattr(
                        self, f"fq{layer}",
                        FeatureQuantizerEMA(out_channels, 2**self.dict_size))
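
A hedged sketch of how the quantizers might sit in the forward pass. The quantizer return signature (features plus a codebook loss scaled by `fq_strength`) and the projection-style conditioning are assumptions based on the constructor above:

    def forward(self, x, y=None):
        h = self.block1(x)
        fq_loss = 0.0
        for layer in (2, 3, 4):
            h = getattr(self, f"block{layer}")(h)
            if self.fq_type and layer in self.quant_layers:
                h, loss = getattr(self, f"fq{layer}")(h)  # assumed signature
                fq_loss = fq_loss + loss
        h = torch.sum(self.activation(h), dim=(2, 3))
        output = self.l5(h)
        if y is not None:
            # Condition via the label-embedding inner product.
            output += torch.sum(self.l_y(y) * h, dim=1, keepdim=True)
        return output, self.fq_strength * fq_loss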
Example #7
    def __init__(self, num_classes, ndf=128, **kwargs):
        super().__init__(ndf=ndf, num_classes=num_classes, **kwargs)

        # Build layers
        self.block1 = DBlockOptimized(3, self.ndf)
        self.block2 = DBlock(self.ndf, self.ndf, downsample=True)
        self.block3 = DBlock(self.ndf, self.ndf, downsample=False)
        self.block4 = DBlock(self.ndf, self.ndf, downsample=False)
        self.l5 = SNLinear(self.ndf, 1)

        # Produce label vector from trained embedding
        self.l_y = SNEmbedding(num_embeddings=self.num_classes,
                               embedding_dim=self.ndf)

        # Initialise the weights
        nn.init.xavier_uniform_(self.l5.weight.data, 1.0)
        nn.init.xavier_uniform_(self.l_y.weight.data, 1.0)

        self.activation = nn.ReLU(True)
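
This is the plain projection discriminator: the unconditional logit from `l5` is augmented by the inner product between the pooled features and the label embedding `l_y`. A minimal sketch of the assumed forward pass:

    def forward(self, x, y):
        h = self.block1(x)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        h = torch.sum(self.activation(h), dim=(2, 3))  # global sum pooling
        output = self.l5(h)
        return output + torch.sum(self.l_y(y) * h, dim=1, keepdim=True)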