Example #1
0
 def __init__(self,
              dim,
              depth,
              hidden_dim=256,
              use_tanh=False,
              use_bn=False,
              out_dim=None,
              use_an=False):
     """Build a plain MLP: input Linear, `depth` hidden Linears, output Linear.

     Args:
         dim: input feature size.
         depth: number of hidden Linear layers (beyond the input layer).
         hidden_dim: width of every hidden layer.
         use_tanh: append a final Tanh when True.
         use_bn: insert BatchNorm1d after the input and every hidden Linear.
         out_dim: output size; falls back to `dim` when None.
         use_an: insert ActNorm after the input Linear only. Mutually
             exclusive with `use_bn`.

     NOTE(review): unlike BatchNorm, ActNorm is NOT repeated inside the
     hidden-layer loop — this asymmetry is preserved on purpose, as
     changing it would alter the module's layer indices.
     """
     super(BasicFullyConnectedNet, self).__init__()
     assert not (use_bn and use_an)
     modules = [nn.Linear(dim, hidden_dim)]
     if use_bn:
         modules.append(nn.BatchNorm1d(hidden_dim))
     if use_an:
         modules.append(ActNorm(hidden_dim))
     modules.append(nn.LeakyReLU())
     for _ in range(depth):
         modules.append(nn.Linear(hidden_dim, hidden_dim))
         if use_bn:
             modules.append(nn.BatchNorm1d(hidden_dim))
         modules.append(nn.LeakyReLU())
     final_dim = dim if out_dim is None else out_dim
     modules.append(nn.Linear(hidden_dim, final_dim))
     if use_tanh:
         modules.append(nn.Tanh())
     self.main = nn.Sequential(*modules)
Example #2
0
    def __init__(self, code_dim=140, n_class=1000, chn=96, debug=False, use_actnorm=False):
        """BigGAN-style generator setup: class embedding, input projection,
        a stack of residual GBlocks with self-attention, and an output head.

        Args:
            code_dim: latent code size.
            n_class: number of classes for the conditional embedding.
            chn: base channel width (overridden to 8 in debug mode).
            debug: shrink the network for fast debugging.
            use_actnorm: use ActNorm instead of cross-replica BatchNorm.

        NOTE(review): `code_dim` is unused here; the hard-coded 20 in
        G_linear presumably equals code_dim // num_split — confirm before
        changing either value.
        """
        super().__init__()
        self.linear = nn.Linear(n_class, 128, bias=False)

        if debug:
            chn = 8
        self.first_view = 16 * chn
        self.G_linear = SpectralNorm(nn.Linear(20, 4 * 4 * 16 * chn))

        # (in, out) channel multipliers for the residual generator blocks.
        width_pairs = [(16, 16), (16, 8), (8, 8), (8, 4), (4, 2), (2, 1)]
        self.GBlock = nn.ModuleList([
            GBlock(w_in * chn, w_out * chn, n_class=n_class, use_actnorm=use_actnorm)
            for w_in, w_out in width_pairs
        ])

        self.sa_id = 5
        self.num_split = len(self.GBlock) + 1
        self.attention = SelfAttention(2 * chn)
        # ActNorm variant avoids batch statistics; otherwise keep BN head.
        if use_actnorm:
            self.ScaledCrossReplicaBN = ActNorm(1 * chn)
        else:
            self.ScaledCrossReplicaBN = BatchNorm2d(1 * chn, eps=1e-4)
        self.colorize = SpectralNorm(nn.Conv2d(1 * chn, 3, [3, 3], padding=1))
Example #3
0
 def __init__(self, num_features, num_classes):
     """Class-conditional normalization: ActNorm plus learned scale/shift.

     Args:
         num_features: number of normalized feature channels.
         num_classes: size of the class-conditioning input.

     gamma/beta are produced from the class input by bias-free,
     spectrally normalized linear projections.
     """
     super().__init__()
     self.num_features = num_features
     self.bn = ActNorm(num_features)

     def _class_projection():
         # Bias-free map from class space to per-feature modulation.
         return SpectralNorm(nn.Linear(num_classes, num_features, bias=False))

     self.gamma_embed = _class_projection()
     self.beta_embed = _class_projection()
Example #4
0
 def __init__(self,
              in_channels,
              cond_channels,
              hidden_dim,
              hidden_depth,
              activation="lrelu"):
     """One conditional flow step: ActNorm -> coupling -> activation -> shuffle.

     Args:
         in_channels: channels of the transformed variable.
         cond_channels: channels of the conditioning input.
         hidden_dim: width of the coupling sub-networks.
         hidden_depth: depth of the coupling sub-networks.
         activation: "lrelu" (invertible leaky ReLU) or "none" (identity);
             any other key raises KeyError.
     """
     super().__init__()
     # Plain local name — a double-underscore prefix here would trigger
     # class-private name mangling for no benefit.
     activation_layers = {
         "lrelu": InvLeakyRelu,
         "none": IgnoreLeakyRelu,
     }
     self.norm_layer = ActNorm(in_channels, logdet=True)
     self.coupling = ConditionalDoubleVectorCouplingBlock(
         in_channels, cond_channels, hidden_dim, hidden_depth)
     self.activation = activation_layers[activation]()
     self.shuffle = Shuffle(in_channels)
Example #5
0
    def __init__(self,
                 code_dim=140,
                 n_class=1000,
                 chn=96,
                 debug=False,
                 use_actnorm=False):
        """BigGAN-style generator setup that warns when falling back to
        batch normalization.

        Args:
            code_dim: latent code size.
            n_class: number of classes for the conditional embedding.
            chn: base channel width (overridden to 8 in debug mode).
            debug: shrink the network for fast debugging.
            use_actnorm: use ActNorm instead of cross-replica BatchNorm;
                when False a BatchNormWarning is emitted.

        NOTE(review): `code_dim` is unused here; the hard-coded 20 in
        G_linear presumably equals code_dim // num_split — confirm before
        changing either value.
        """
        super().__init__()
        if not use_actnorm:
            import warnings

            # Dedicated category so callers can filter this warning.
            class BatchNormWarning(UserWarning):
                pass

            warnings.warn(
                "You are training with batch norm. It is highly recommended to switch to some "
                "other normalization method if a low batch size is used. Furthermore, Google may "
                "sue you for breaking the patent law!", BatchNormWarning)
        self.linear = nn.Linear(n_class, 128, bias=False)

        if debug:
            chn = 8
        self.first_view = 16 * chn
        self.G_linear = SpectralNorm(nn.Linear(20, 4 * 4 * 16 * chn))

        # (in, out) channel multipliers for the residual generator blocks.
        width_pairs = [(16, 16), (16, 8), (8, 8), (8, 4), (4, 2), (2, 1)]
        self.GBlock = nn.ModuleList([
            GBlock(w_in * chn,
                   w_out * chn,
                   n_class=n_class,
                   use_actnorm=use_actnorm) for w_in, w_out in width_pairs
        ])

        self.sa_id = 5
        self.num_split = len(self.GBlock) + 1
        self.attention = SelfAttention(2 * chn)
        # ActNorm variant avoids batch statistics; otherwise keep BN head.
        if use_actnorm:
            self.ScaledCrossReplicaBN = ActNorm(1 * chn)
        else:
            self.ScaledCrossReplicaBN = BatchNorm2d(1 * chn, eps=1e-4)
        self.colorize = SpectralNorm(nn.Conv2d(1 * chn, 3, [3, 3], padding=1))
Example #6
0
 def __init__(self, *args, **kwargs):
     """Thin adapter exposing ActNorm under a `bn` attribute.

     All positional and keyword arguments are forwarded unchanged; the
     attribute is named `bn` for interface compatibility with batch-norm
     based variants.
     """
     super().__init__()
     self.bn = ActNorm(*args, **kwargs)
Example #7
0
 def __init__(self, in_channels, hidden_dim, hidden_depth):
     """Unconditional flow step: ActNorm (with logdet) -> coupling -> shuffle.

     Args:
         in_channels: channels of the transformed variable.
         hidden_dim: width of the coupling sub-networks.
         hidden_depth: depth of the coupling sub-networks.
     """
     super().__init__()
     self.norm_layer = ActNorm(in_channels, logdet=True)
     self.coupling = DoubleVectorCouplingBlock(
         in_channels, hidden_dim, hidden_depth)
     self.shuffle = Shuffle(in_channels)