Example #1
    def __init__(self, block, layers):
        super(FPN, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])  # conv2  C2
        self.layer2 = self._make_layer(block, 128, layers[1],
                                       stride=2)  # conv3  C3
        self.layer3 = self._make_layer(block, 256, layers[2],
                                       stride=2)  # conv4  C4
        self.layer4 = self._make_layer(block, 512, layers[3],
                                       stride=2)  # conv5  C5

        self.conv6 = conv3x3(512 * block.expansion, 256, stride=2,
                             padding=1)  # P6
        self.conv7 = conv3x3(256, 256, stride=2, padding=1)  # P7

        self.lateral_layer1 = conv1x1(512 * block.expansion, 256)
        self.lateral_layer2 = conv1x1(256 * block.expansion, 256)
        self.lateral_layer3 = conv1x1(128 * block.expansion, 256)

        self.corr_layer1 = conv3x3(256, 256, stride=1, padding=1)  # P4
        self.corr_layer2 = conv3x3(256, 256, stride=1, padding=1)  # P3
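Example #1 is an FPN backbone initializer in the torchvision ResNet style. It relies on conv3x3 and conv1x1 helpers that are not part of the snippet; a minimal sketch of what they would need to look like, with signatures inferred from the call sites above (not the original project's definitions):

import torch.nn as nn

def conv3x3(in_planes, out_planes, stride=1, padding=1):
    # 3x3 convolution; padding=1 preserves spatial size at stride 1
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=padding, bias=False)

def conv1x1(in_planes, out_planes, stride=1):
    # 1x1 convolution, used here for the lateral channel-reduction layers
    return nn.Conv2d(in_planes, out_planes, kernel_size=1,
                     stride=stride, bias=False)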
Example #2
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
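This is the standard torchvision BasicBlock initializer; the matching forward pass is not shown. A sketch that mirrors torchvision's implementation, where the optional downsample module aligns the skip connection's shape:

    def forward(self, x):
        identity = x
        # residual branch: conv-bn-relu-conv-bn
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # project the skip path when stride or channel count changes
        if self.downsample is not None:
            identity = self.downsample(x)
        return self.relu(out + identity)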
Example #3
    def __init__(self, in_channel, out_channel, condition_dim,
                 cross_replica=False, upsample=True):
        super().__init__()
        self.cbn1 = ConditionalNorm(in_channel, condition_dim, cross_replica)
        self.upsample = nn.Sequential()
        if upsample:
            self.upsample.add_module('upsample',
                                     nn.Upsample(scale_factor=2,
                                                 mode='nearest'))
        self.conv3x3_1 = nn.utils.spectral_norm(
            conv3x3(in_channel, out_channel)).apply(init_weight)
        self.cbn2 = ConditionalNorm(out_channel, condition_dim, cross_replica)
        self.conv3x3_2 = nn.utils.spectral_norm(
            conv3x3(out_channel, out_channel)).apply(init_weight)
        self.conv1x1 = nn.utils.spectral_norm(
            conv1x1(in_channel, out_channel)).apply(init_weight)
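A BigGAN-style generator residual block: conditional batch norm, nearest-neighbor upsampling, and spectrally normalized convolutions. The forward pass is not shown; a sketch under the assumption that ConditionalNorm.forward takes (x, condition):

    def forward(self, x, condition):
        # requires: import torch.nn.functional as F
        # main branch: CBN -> ReLU -> upsample -> conv, applied twice
        out = F.relu(self.cbn1(x, condition))
        out = self.upsample(out)
        out = self.conv3x3_1(out)
        out = F.relu(self.cbn2(out, condition))
        out = self.conv3x3_2(out)
        # shortcut: upsample, then 1x1 conv to match the channel count
        return out + self.conv1x1(self.upsample(x))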
Example #4
    def __init__(self, in_channel, out_channel, downsample=True):
        super().__init__()
        self.layer = nn.Sequential(
            nn.LeakyReLU(0.2),
            nn.utils.spectral_norm(conv3x3(in_channel, out_channel)).apply(
                init_weight),
            nn.LeakyReLU(0.2),
            nn.utils.spectral_norm(conv3x3(out_channel, out_channel)).apply(
                init_weight),
        )
        self.shortcut = nn.Sequential(
            nn.utils.spectral_norm(conv1x1(in_channel, out_channel)).apply(
                init_weight),
        )
        if downsample:
            self.layer.add_module('avgpool',
                                  nn.AvgPool2d(kernel_size=2, stride=2))
            self.shortcut.add_module('avgpool',
                                     nn.AvgPool2d(kernel_size=2, stride=2))
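The discriminator counterpart of the previous block: both paths end in the same average pooling when downsample is set, so their outputs always match in shape. The forward pass this structure implies is just the sum of the two branches:

    def forward(self, x):
        # residual sum; shapes agree because both branches share the avg-pool
        return self.layer(x) + self.shortcut(x)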
Example #5
    def __init__(self, z_dim, conv_dim, num_classes):
        super(Generator, self).__init__()
        self.conv_dim = conv_dim

        self.linear = spectral_norm(
            linear(in_features=z_dim, out_features=conv_dim * 16 * 4 * 4))
        self.res_1 = ResidualBlock_G(conv_dim * 16, conv_dim * 16, num_classes)
        self.res_2 = ResidualBlock_G(conv_dim * 16, conv_dim * 8, num_classes)
        self.res_3 = ResidualBlock_G(conv_dim * 8, conv_dim * 4, num_classes)
        self.attn = SelfAttn(conv_dim * 4)
        self.res_4 = ResidualBlock_G(conv_dim * 4, conv_dim * 2, num_classes)
        self.res_5 = ResidualBlock_G(conv_dim * 2, conv_dim, num_classes)
        self.bn = batch_norm(conv_dim, eps=1e-5, momentum=0.0001)
        self.lrelu = lrelu(inplace=True)
        self.conv3x3 = spectral_norm(conv3x3(conv_dim, 3))
        self.tanh = tanh()

        self.apply(init_weights)
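Example #5 is a SAGAN-style conditional generator; the lowercase linear, batch_norm, lrelu, and tanh names are presumably thin aliases for the corresponding torch.nn modules. A forward-pass sketch, assuming each ResidualBlock_G doubles the spatial size and takes (features, labels):

    def forward(self, z, labels):
        # project the latent to a 4x4 map, then upsample through five blocks
        out = self.linear(z).view(-1, self.conv_dim * 16, 4, 4)
        out = self.res_1(out, labels)   # 8x8
        out = self.res_2(out, labels)   # 16x16
        out = self.res_3(out, labels)   # 32x32
        out = self.attn(out)            # self-attention at 32x32
        out = self.res_4(out, labels)   # 64x64
        out = self.res_5(out, labels)   # 128x128
        out = self.lrelu(self.bn(out))
        return self.tanh(self.conv3x3(out))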
Example #6
    def __init__(self, n_feat,
                 max_resolution,
                 codes_dim,
                 n_classes=0,
                 use_attention=False,
                 arch=None,
                 cross_replica=False,
                 rgb_bn=False,
                 use_embedding=True):
        super().__init__()
        self.codes_dim = codes_dim
        self.use_embedding = use_embedding
        self.n_classes = n_classes
        # construct residual blocks
        n_layers = int(np.log2(max_resolution) - 2)
        self.residual_blocks = nn.ModuleList([])
        last_block_dim = 0
        if arch is None:
            first_block_factor = 2 ** n_layers
        else:
            first_block_factor = arch[0]
        self.fc = nn.Sequential(
            nn.utils.spectral_norm(
                nn.Linear(codes_dim,
                          first_block_factor * n_feat * 4 * 4)).apply(
                init_weight)
        )
        # print("first_block", first_block_factor)
        # print("n_layers ", n_layers)
        for i in range(n_layers):
            if arch is None:
                prev_factor = 2 ** (n_layers - i)
                curr_factor = 2 ** (n_layers - i - 1)
            else:
                prev_factor = arch[i]
                curr_factor = arch[i + 1]
            # print(f"block ({i}): {prev_factor}, {curr_factor}")
            block = ResBlock_G(prev_factor * n_feat, curr_factor * n_feat,
                               codes_dim + codes_dim, cross_replica=cross_replica,  upsample=True)
            # add current block to the model class
            self.residual_blocks.add_module(f'res_block_{i}', block)
            if i == n_layers - 1:
                last_block_dim = curr_factor

        if use_attention:
            self.attn = Attention(2 * n_feat)

        # print("last_layer ", last_block_dim)
        _to_rgb = [
            # nn.BatchNorm2d(2 * n_feat).apply(init_weight),
            nn.LeakyReLU(),
            nn.utils.spectral_norm(conv3x3(last_block_dim * n_feat, 3)).apply(
                init_weight),
            nn.Tanh()
        ]

        bn_layer = SyncBN2d if cross_replica else nn.BatchNorm2d
        if rgb_bn:
            _to_rgb.insert(0, bn_layer(last_block_dim * n_feat))

        self.to_rgb = nn.Sequential(
            *_to_rgb
        )

        self.embedding = nn.Embedding(num_embeddings=n_classes,
                                      embedding_dim=self.codes_dim).apply(init_weight)
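The block-count arithmetic in Example #6 assumes a 4x4 starting grid that each residual block doubles, so max_resolution = 4 * 2**n_layers. A quick sanity check with illustrative values (not part of the original code):

import numpy as np

max_resolution = 128                          # illustrative target resolution
n_layers = int(np.log2(max_resolution) - 2)   # 4x4 doubled n_layers times
factors = [2 ** (n_layers - i) for i in range(n_layers + 1)]
print(n_layers, factors)                      # 5 [32, 16, 8, 4, 2, 1]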