Code example #1
    def build_regressions(self):
        classifiers = []
        box_regressions = []

        extras = [self.b0, self.b1]
        extras.extend(self.extras)

        # from extras
        for i, extra in enumerate(extras):
            in_channels = self.calc_in_channel_width(extra)
            n = self.default_box.get_num_ratios(i)

            channels = n * self.num_class
            regression = nn.SeparableConv2d(in_channels, channels, 3, 1, 1)
            classifiers.append(regression)

            channels = n * 4
            regression = nn.SeparableConv2d(in_channels, channels, 3, 1, 1)
            box_regressions.append(regression)

        in_channels = self.calc_in_channel_width(self.b0)
        l2_norm = nn.Norm2d(in_channels)

        self.l2_norm = l2_norm
        self.classifiers = nn.ModuleList(classifiers)
        self.box_regressions = nn.ModuleList(box_regressions)
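
The heads above are registered as two parallel ModuleLists, one classifier and one box regressor per feature scale. Below is a minimal sketch (not from the original code; the apply_heads name and the features argument are assumptions) of how such SSD-style heads are typically applied and their outputs concatenated across scales:

import torch

def apply_heads(self, features):
    # apply one classifier / box regressor per feature map and merge the scales
    cls_outs, box_outs = [], []
    for feat, cls_head, box_head in zip(features, self.classifiers,
                                        self.box_regressions):
        # (N, n * num_class, H, W) -> (N, H * W * n, num_class)
        cls = cls_head(feat).permute(0, 2, 3, 1).contiguous()
        cls_outs.append(cls.view(cls.size(0), -1, self.num_class))
        # (N, n * 4, H, W) -> (N, H * W * n, 4)
        box = box_head(feat).permute(0, 2, 3, 1).contiguous()
        box_outs.append(box.view(box.size(0), -1, 4))
    return torch.cat(cls_outs, dim=1), torch.cat(box_outs, dim=1)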
Code example #2
    def build_bottom_up(self, pretrained):
        backbone = self.params['backbone']

        if backbone == "resnet50":
            model = models.resnet50(pretrained=pretrained)
        elif backbone == "resnet101":
            model = models.resnet101(pretrained=pretrained)
        else:
            raise Exception("unimplemented backbone %s" % backbone)

        # p3 ~ p5 are extracted from backbone
        p3 = nn.Sequential(model.conv1, model.bn1, model.relu, model.maxpool,
                           model.layer1, model.layer2)

        p4 = model.layer3
        p5 = model.layer4

        # build remaining layers
        in_channels = self.calc_in_channel_width(p5)
        p6 = nn.Conv2d(in_channels, 256, 3, stride=2, padding=1)

        p7 = nn.Sequential(nn.ReLU(),
                           nn.Conv2d(256, 256, 3, stride=2, padding=1))

        # register bottom up layers
        self.bottom_up_layers = nn.ModuleList((p3, p4, p5, p6, p7))
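
Since each of p3 to p7 is computed from the output of the previous level, the registered bottom-up layers can be run sequentially to collect one feature map per level. A minimal sketch (the extract_bottom_up name is hypothetical, not from the original code):

def extract_bottom_up(self, x):
    features = []
    for layer in self.bottom_up_layers:
        x = layer(x)
        features.append(x)   # p3, p4, p5, p6, p7 in order
    return features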
Code example #3
    def build_extras(self):
        in_channels = self.calc_in_channel_width(self.b1)

        extras = []
        for layer in self.params['extras']:
            extra, in_channels = self.build_extra(in_channels, layer)

            extras.append(extra)

        self.extras = nn.ModuleList(extras)
Code example #4
    def __init__(self):
        super().__init__()
        self.alexnet = AlexNet()
        self.lpips_weights = nn.ModuleList()
        for channels in self.alexnet.channels:
            self.lpips_weights.append(Conv1x1(channels, 1))
        self._load_lpips_weights()
        # imagenet normalization for range [-1, 1]
        self.mu = torch.tensor([-0.03, -0.088, -0.188]).view(1, 3, 1, 1).cuda()
        self.sigma = torch.tensor([0.458, 0.448, 0.450]).view(1, 3, 1, 1).cuda()
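
The tensors and 1x1 weights built above are the usual ingredients of an LPIPS metric: both images are renormalized, passed through the AlexNet feature extractor, and the per-layer squared feature differences are averaged with the learned 1x1 weights. A hedged sketch of that computation (it assumes self.alexnet returns one feature map per entry in channels; the helper name is hypothetical):

import torch

def _unit_normalize(x, eps=1e-10):
    # channel-wise unit normalization, as commonly used in LPIPS
    norm = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True))
    return x / (norm + eps)

def forward(self, x, y):
    x = (x - self.mu) / self.sigma      # map [-1, 1] inputs to ImageNet statistics
    y = (y - self.mu) / self.sigma
    dist = 0
    for fx, fy, weight in zip(self.alexnet(x), self.alexnet(y), self.lpips_weights):
        fx, fy = _unit_normalize(fx), _unit_normalize(fy)
        dist = dist + torch.mean(weight((fx - fy) ** 2))
    return dist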
Code example #5
        def __init__(self,
                     img_size=256,
                     style_dim=64,
                     max_conv_dim=512,
                     w_hpf=1):
            super().__init__()
            dim_in = 2**14 // img_size
            self.img_size = img_size
            self.from_rgb = nn.Conv2d(3, dim_in, 3, 1, 1)
            self.encode = nn.ModuleList()
            self.decode = nn.ModuleList()
            self.to_rgb = nn.Sequential(nn.InstanceNorm2d(dim_in, affine=True),
                                        nn.LeakyReLU(0.2),
                                        nn.Conv2d(dim_in, 3, 1, 1, 0))

            # down/up-sampling blocks
            repeat_num = int(np.log2(img_size)) - 4
            if w_hpf > 0:
                repeat_num += 1
            for _ in range(repeat_num):
                dim_out = min(dim_in * 2, max_conv_dim)
                self.encode.append(
                    ResBlk(dim_in, dim_out, normalize=True, downsample=True))
                self.decode.insert(0,
                                   AdainResBlk(dim_out,
                                               dim_in,
                                               style_dim,
                                               w_hpf=w_hpf,
                                               upsample=True))  # stack-like
                dim_in = dim_out

            # bottleneck blocks
            for _ in range(2):
                self.encode.append(ResBlk(dim_out, dim_out, normalize=True))
                self.decode.insert(
                    0, AdainResBlk(dim_out, dim_out, style_dim, w_hpf=w_hpf))

            if w_hpf > 0:
                device = torch.device(
                    'cuda' if torch.cuda.is_available() else 'cpu')
                self.hpf = HighPass(w_hpf, device)
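
The decoder is filled with insert(0, ...) so that, when iterated front to back, it mirrors the encoder in reverse order. A simplified sketch of the resulting forward pass (assumed; the high-pass / skip-connection logic that self.hpf enables is omitted here):

def forward(self, x, s):
    x = self.from_rgb(x)
    for block in self.encode:
        x = block(x)
    for block in self.decode:
        x = block(x, s)       # each AdainResBlk is conditioned on the style code s
    return self.to_rgb(x)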
Code example #6
        def __init__(self, latent_dim=16, style_dim=64, num_domains=2):
            super().__init__()
            layers = []
            layers += [nn.Linear(latent_dim, 512)]
            layers += [nn.ReLU()]
            for _ in range(3):
                layers += [nn.Linear(512, 512)]
                layers += [nn.ReLU()]
            self.shared = nn.Sequential(*layers)

            self.unshared = nn.ModuleList()
            for _ in range(num_domains):
                self.unshared.append(
                    nn.Sequential(nn.Linear(512, 512), nn.ReLU(),
                                  nn.Linear(512, 512), nn.ReLU(),
                                  nn.Linear(512, 512), nn.ReLU(),
                                  nn.Linear(512, style_dim)))
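
With a shared MLP trunk and one head per domain, the usual pattern is to evaluate every head and then pick, for each sample, the row matching its domain label. A minimal sketch (assumed forward signature):

import torch

def forward(self, z, y):
    h = self.shared(z)
    out = torch.stack([head(h) for head in self.unshared], dim=1)  # (N, num_domains, style_dim)
    idx = torch.arange(y.size(0), device=y.device)
    return out[idx, y]                                             # (N, style_dim)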
Code example #7
    def build_top_down(self):
        top_down_layers = []

        # ignore size of p1, p2
        size = self.params['width']
        for i in range(2):
            size = int((size + 1) / 2)

        # size of p3, p4, p5, p6, p7
        sizes = []
        for i in range(len(self.bottom_up_layers)):
            sizes.append(size)
            size = int((size + 1) / 2)

        for i in range(len(self.bottom_up_layers), 1, -1):
            layer = self.bottom_up_layers[i - 2]

            in_channels = self.calc_in_channel_width(layer)
            top_down_layers.append(
                CrossScaleBlock(in_channels, 256, sizes[i - 1]))

        self.top_down_layers = nn.ModuleList(top_down_layers)
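
A worked example of the size bookkeeping above, assuming a hypothetical input width of 512: the first loop skips p1 and p2 (512 -> 256 -> 128), and the second loop records the ceil-halved sizes of p3 to p7.

size = 512
for _ in range(2):          # p1, p2: 512 -> 256 -> 128
    size = int((size + 1) / 2)

sizes = []
for _ in range(5):          # one entry per bottom-up layer (p3 .. p7)
    sizes.append(size)
    size = int((size + 1) / 2)

print(sizes)                # [128, 64, 32, 16, 8]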
Code example #8
        def __init__(self,
                     img_size=256,
                     style_dim=64,
                     num_domains=2,
                     max_conv_dim=512):
            super().__init__()
            dim_in = 2**14 // img_size
            blocks = []
            blocks += [nn.Conv2d(3, dim_in, 3, 1, 1)]

            repeat_num = int(np.log2(img_size)) - 2
            for _ in range(repeat_num):
                dim_out = min(dim_in * 2, max_conv_dim)
                blocks += [ResBlk(dim_in, dim_out, downsample=True)]
                dim_in = dim_out

            blocks += [nn.LeakyReLU(0.2)]
            blocks += [nn.Conv2d(dim_out, dim_out, 4, 1, 0)]
            blocks += [nn.LeakyReLU(0.2)]
            self.shared = nn.Sequential(*blocks)

            self.unshared = nn.ModuleList()
            for _ in range(num_domains):
                self.unshared.append(nn.Linear(dim_out, style_dim))
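
Analogously to the mapping network above, the shared conv trunk reduces the image to a single (N, dim_out) vector, each per-domain linear head produces one style code, and the row for the given domain label is selected. A minimal sketch (assumed forward signature):

import torch

def forward(self, x, y):
    h = self.shared(x)
    h = h.view(h.size(0), -1)
    out = torch.stack([head(h) for head in self.unshared], dim=1)
    idx = torch.arange(y.size(0), device=y.device)
    return out[idx, y]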