Example #1
    def __init__(self, output_size, input_size, which_linear, eps=1e-5, momentum=0.1,
                 cross_replica=False, mybn=False, norm_style='bn'):
        super(ccbn, self).__init__()
        self.output_size, self.input_size = output_size, input_size
        # Prepare gain and bias layers
        self.gain = which_linear(input_size, output_size)
        self.bias = which_linear(input_size, output_size)
        # epsilon to avoid dividing by 0
        self.eps = eps
        # Momentum
        self.momentum = momentum
        # Use cross-replica batchnorm?
        self.cross_replica = cross_replica
        # Use my batchnorm?
        self.mybn = mybn
        # Norm style?
        self.norm_style = norm_style

        if self.cross_replica:
            self.bn = SyncBN2d(output_size, eps=self.eps, momentum=self.momentum, affine=False)
        elif self.mybn:
            self.bn = myBN(output_size, self.eps, self.momentum)
        elif self.norm_style in ['bn', 'in']:
            self.register_buffer('stored_mean', torch.zeros(output_size))
            self.register_buffer('stored_var', torch.ones(output_size))
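A minimal usage sketch for the snippet above, assuming the full BigGAN-PyTorch ccbn class (its forward pass is not shown here) and passing plain nn.Linear for which_linear, where BigGAN itself typically passes a spectral-norm-wrapped factory; the sizes are hypothetical:

import torch
import torch.nn as nn

cc = ccbn(output_size=256, input_size=128, which_linear=nn.Linear)
x = torch.randn(8, 256, 16, 16)  # feature map to normalize
y = torch.randn(8, 128)          # conditioning vector (e.g. a class embedding)
out = cc(x, y)                   # per-channel gain and bias are predicted from y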
Example #2
    def __init__(self,
                 output_size,
                 eps=1e-5,
                 momentum=0.1,
                 cross_replica=False,
                 mybn=False):
        super(bn, self).__init__()
        self.output_size = output_size
        # Prepare gain and bias layers (P is torch.nn.Parameter)
        self.gain = P(torch.ones(output_size), requires_grad=True)
        self.bias = P(torch.zeros(output_size), requires_grad=True)
        # epsilon to avoid dividing by 0
        self.eps = eps
        # Momentum
        self.momentum = momentum
        # Use cross-replica batchnorm?
        self.cross_replica = cross_replica
        # Use my batchnorm?
        self.mybn = mybn

        if self.cross_replica:
            self.bn = SyncBN2d(output_size,
                               eps=self.eps,
                               momentum=self.momentum,
                               affine=False)
        elif self.mybn:
            self.bn = myBN(output_size, self.eps, self.momentum)
        # Register buffers if neither of the above
        else:
            self.register_buffer('stored_mean', torch.zeros(output_size))
            self.register_buffer('stored_var', torch.ones(output_size))
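A similarly hedged sketch for this unconditional variant, again assuming the class's forward pass, which is not shown here:

import torch

layer = bn(output_size=64)      # gain/bias are plain learnable Parameters here
x = torch.randn(4, 64, 32, 32)
out = layer(x)                  # normalized with stored_mean/stored_var, then scaled and shifted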
Example #3
    def __init__(self, code_dim=100, n_class=100, nc=3, SN=True, Resolution=64, ch=64, SA_Resolution=256):
        super().__init__()

        self.ch = ch
        rate = 1.0
        if SN:
            self.lin_code = spectral_init(nn.Linear(code_dim, 4 * 4 * ch * 4), rate=rate)
            self.embedding = spectral_init(nn.Embedding(n_class, ch * 4), rate=rate)
        else:
            self.lin_code = nn.Linear(code_dim, 4 * 4 * ch * 4)
            self.embedding = nn.Embedding(n_class, ch * 4)

        layer_num = int(math.log(Resolution / 4, 2))
        # Index at which self-attention would be inserted (unused while the block below stays commented out)
        SA_layer = int(math.log(SA_Resolution / 4, 2))
        self.conv = []

        for i in range(layer_num):
            self.conv.append(ConvBlock(ch * 4, ch * 4, n_class=n_class, SN=SN, emb=self.embedding))
            # if i+1 == SA_layer:
            #     self.conv.append(SelfAttention(ch*4, SN=SN))
            #     print('apply sa G')

        self.conv = nn.ModuleList(self.conv)

        self.bn = SyncBN2d(ch * 4, eps=1e-5, momentum=0.5, affine=False)  # nn.BatchNorm2d(ch*4)
        if SN:
            self.colorize = spectral_init(nn.Conv2d(ch * 4, nc, [3, 3], padding=1), rate=rate)
        else:
            self.colorize = nn.Conv2d(ch * 4, nc, [3, 3], padding=1)

        self.optim = optim.Adam(params=self.parameters(), lr=1e-4,
                                betas=(0.0, 0.999), eps=1e-8)
        self.init_weights()
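For the defaults above, the block count works out as follows; each ConvBlock presumably upsamples by 2x from the 4x4 seed produced by lin_code. Note that with SA_Resolution=256, SA_layer lies past the last block index anyway, so the commented-out self-attention insertion would never trigger with these defaults:

import math

layer_num = int(math.log(64 / 4, 2))    # = 4 blocks: 4 -> 8 -> 16 -> 32 -> 64
SA_layer = int(math.log(256 / 4, 2))    # = 6, never reached by i + 1 in range(layer_num)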
Example #4
    def __init__(self, in_channel, n_class):
        super().__init__()

        self.bn = SyncBN2d(in_channel, eps=1e-5, momentum=0.5, affine=False)  # nn.BatchNorm2d(in_channel, affine=False)
        self.embed = nn.Embedding(n_class, in_channel * 2)
        # First half of each embedding row is the per-channel gain (init 1),
        # second half the per-channel bias (init 0)
        self.embed.weight.data[:, :in_channel] = 1
        self.embed.weight.data[:, in_channel:] = 0
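The 1/0 initialization matches the usual conditional-batchnorm pattern: each embedding row is split into a per-channel gain and bias. A hedged sketch of the forward pass this implies, which is not part of the snippet:

    def forward(self, x, class_id):
        out = self.bn(x)
        gamma, beta = self.embed(class_id).chunk(2, dim=1)  # (N, C) each
        return gamma.view(-1, out.size(1), 1, 1) * out + beta.view(-1, out.size(1), 1, 1)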
Example #5
    def __init__(self,
                 output_size,
                 input_size,
                 which_linear,
                 eps=1e-5,
                 momentum=0.1,
                 cross_replica=False,
                 mybn=False,
                 norm_style='bn',
                 style_linear=None,
                 dim_z=0,
                 no_conditional=False,
                 skip_z=False,
                 use_dog_cnt=False,
                 g_shared=False):
        super(ccbn, self).__init__()
        self.output_size, self.input_size = output_size, input_size
        self.use_dog_cnt = use_dog_cnt
        self.g_shared = g_shared
        # Prepare gain and bias layers
        self.gain = which_linear(input_size, output_size)
        self.bias = which_linear(input_size, output_size)

        if use_dog_cnt and not g_shared:
            self.gain_dog_cnt = which_linear(input_size, output_size)
            self.bias_dog_cnt = which_linear(input_size, output_size)

        # epsilon to avoid dividing by 0
        self.eps = eps
        # Momentum
        self.momentum = momentum
        # Use cross-replica batchnorm?
        self.cross_replica = cross_replica
        # Use my batchnorm?
        self.mybn = mybn
        # Norm style?
        self.norm_style = norm_style
        self.no_conditional = no_conditional

        if self.cross_replica:
            self.bn = SyncBN2d(output_size,
                               eps=self.eps,
                               momentum=self.momentum,
                               affine=False)
        elif self.mybn:
            self.bn = myBN(output_size, self.eps, self.momentum)
        elif self.norm_style in ['bn', 'in']:
            self.register_buffer('stored_mean', torch.zeros(output_size))
            self.register_buffer('stored_var', torch.ones(output_size))
        if style_linear is not None:
            if skip_z:
                self.style = style_linear(dim_z * 2, output_size * 2)
            else:
                self.style = style_linear(dim_z, output_size * 2)
            # Bias init: the gain half starts at 1, the bias half at 0
            self.style.bias.data[:output_size] = 1
            self.style.bias.data[output_size:] = 0
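One thing the skip_z branch implies: the style projection then expects an input of size 2 * dim_z, so the caller presumably concatenates two dim_z-sized vectors. A hypothetical call site (variable names invented):

z_style = torch.cat([z, z_skip], dim=1)  # both of shape (N, dim_z)
gain_bias = self.style(z_style)          # (N, 2 * output_size); gain half starts at 1, bias half at 0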
Example #6
    def __init__(self, in_channel, n_condition, cross_replica=False):
        super().__init__()
        if cross_replica:
            self.bn = SyncBN2d(in_channel, affine=False)
        else:
            self.bn = nn.BatchNorm2d(in_channel,
                                     affine=False)  # no learnable affine parameters

        self.embed = nn.Linear(n_condition, in_channel * 2)

        # Orthogonal init for the gain half of the weight; zero the bias half
        nn.init.orthogonal_(self.embed.weight.data[:, :in_channel], gain=1)
        self.embed.weight.data[:, in_channel:].zero_()
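Finally, a hedged sketch of how this variant is presumably consumed; unlike Example #4 it conditions on a continuous vector through nn.Linear rather than on a class index, but the gain/bias split is the same. The core of such a forward pass (not shown in the snippet) would look like:

        gamma, beta = self.embed(condition).chunk(2, dim=1)  # condition: (N, n_condition)
        out = gamma.view(-1, gamma.size(1), 1, 1) * self.bn(x) + beta.view(-1, beta.size(1), 1, 1)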