def forward(self, x):
    # Backbone: CBL blocks and strided convs; three feature scales are kept.
    x = self.cbl1(x)
    x = self.conv1(x)
    x = self.cblr1(x)
    x = self.conv2(x)
    x = self.cblr2(x)
    x = self.conv3(x)
    out1 = self.cblr3(x)
    x = self.conv4(out1)
    out2 = self.cblr4(x)
    x = self.conv5(out2)
    out3 = self.cblr5(x)
    # Head 1: predict from the deepest features.
    out3 = self.CBL_set1(out3)
    y1 = self.cbl2(out3)
    y1 = self.conv6(y1)
    # Head 2: upsample 2x and fuse with the mid-level skip (out2).
    out3 = self.conv7(out3)
    out3 = UpsamplingBilinear2d(scale_factor=2)(out3)
    out3 = torch.cat((out3, out2), 1)
    out3 = self.CBL_set2(out3)
    y2 = self.cbl3(out3)
    y2 = self.conv8(y2)
    # Head 3: upsample 2x again and fuse with the shallow skip (out1).
    out3 = self.conv9(out3)
    out3 = UpsamplingBilinear2d(scale_factor=2)(out3)
    out3 = torch.cat((out3, out1), 1)
    out3 = self.CBL_set3(out3)
    y3 = self.cbl4(out3)
    y3 = self.conv10(y3)
    return y1, y2, y3
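
Note the pattern UpsamplingBilinear2d(scale_factor=2)(out3): a fresh module is constructed on every forward pass. That works, since the layer holds no parameters, but the usual alternatives are to build the module once in __init__ or to use the functional API. A minimal sketch of both (the class and names here are illustrative, not taken from the example above):

import torch
import torch.nn as nn
import torch.nn.functional as F

class Up(nn.Module):
    def __init__(self):
        super().__init__()
        # Built once, reused on every call.
        self.up = nn.UpsamplingBilinear2d(scale_factor=2)

    def forward(self, x):
        a = self.up(x)
        # Equivalent functional form; align_corners=True matches
        # UpsamplingBilinear2d's behavior.
        b = F.interpolate(x, scale_factor=2, mode='bilinear',
                          align_corners=True)
        return a, b

a, b = Up()(torch.rand(1, 3, 8, 8))
assert torch.allclose(a, b) and a.shape == (1, 3, 16, 16)
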
Example #2
def __init__(self,
             size=None,
             scale_factor=None,
             return_quant_tensor: bool = True):
    # Initialize both bases explicitly: the upsampling layer itself and
    # the quantization mixin that controls the output type.
    UpsamplingBilinear2d.__init__(self,
                                  size=size,
                                  scale_factor=scale_factor)
    QuantLayerMixin.__init__(self, return_quant_tensor)
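
This wrapper (from a quantization library) inherits from both UpsamplingBilinear2d and a mixin, initializing each base explicitly rather than relying on super(). A generic sketch of that composition pattern; the mixin and its flag here are stand-ins, not a real library API:

import torch
from torch.nn import UpsamplingBilinear2d

class ReturnTaggedMixin:
    """Stand-in for a mixin that adds post-processing state to a layer."""
    def __init__(self, return_tagged: bool):
        self.return_tagged = return_tagged

class TaggedUpsample(UpsamplingBilinear2d, ReturnTaggedMixin):
    def __init__(self, size=None, scale_factor=None, return_tagged=True):
        # Each base is initialized explicitly, mirroring Example #2.
        UpsamplingBilinear2d.__init__(self, size=size,
                                      scale_factor=scale_factor)
        ReturnTaggedMixin.__init__(self, return_tagged)

layer = TaggedUpsample(scale_factor=2)
out = layer(torch.rand(1, 1, 4, 4))
assert out.shape == (1, 1, 8, 8) and layer.return_tagged
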
Example #3
def __init__(self):
    super(model, self).__init__()
    self.fc = Linear(1000, 10000 * 3)
    self.conv = nn.Sequential(
        UpsamplingBilinear2d(scale_factor=2),
        Conv2d(3, 16, 5, 1, 2),
        UpsamplingBilinear2d(scale_factor=2),
        Conv2d(16, 3, 5, 1, 2),
        Sigmoid()
    )
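
Example #3 omits the forward pass. Since 10000*3 = 3 x 100 x 100, the fc output is presumably reshaped into a 3-channel map before the upsampling stack; a sketch under that assumption (the class name and reshape target are guessed, not from the original):

import torch
from torch import nn

class Decoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(1000, 10000 * 3)
        self.conv = nn.Sequential(
            nn.UpsamplingBilinear2d(scale_factor=2),
            nn.Conv2d(3, 16, 5, 1, 2),
            nn.UpsamplingBilinear2d(scale_factor=2),
            nn.Conv2d(16, 3, 5, 1, 2),
            nn.Sigmoid(),
        )

    def forward(self, z):
        x = self.fc(z).view(-1, 3, 100, 100)  # assumed: 30000 = 3*100*100
        return self.conv(x)  # two 2x upsamplings -> 3 x 400 x 400

out = Decoder()(torch.rand(2, 1000))
assert out.shape == (2, 3, 400, 400)
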
Example #4

import torch
from torch.nn import UpsamplingBilinear2d

def loss_fn(pred, target, scale_factor=2, reduction='mean'):
    # scale_factor and reduction were free variables in the original
    # snippet; they are exposed here as parameters with assumed defaults.
    if isinstance(pred, list):
        # Multi-scale loss: each prediction is compared against a copy of
        # the target blurred to a matching scale and resized back, with
        # coarser levels weighted more heavily.
        loss = 0
        length = len(pred)
        ratio = scale_factor ** (1 / length)
        for i, p in enumerate(pred):
            scale = 1 / ratio ** (length - i - 1)
            t = UpsamplingBilinear2d(scale_factor=scale)(target)
            t = UpsamplingBilinear2d(size=target.shape[-1])(t)
            loss += (length - i) * torch.nn.MSELoss(reduction=reduction)(p, t)
    else:
        loss = torch.nn.MSELoss(reduction=reduction)(pred, target)
    return loss
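
With the assumed defaults above, loss_fn can be exercised on a list of same-size predictions (shapes here are illustrative):

import torch

target = torch.rand(2, 1, 64, 64)
preds = [torch.rand(2, 1, 64, 64) for _ in range(3)]
print(loss_fn(preds, target))      # multi-scale branch
print(loss_fn(preds[-1], target))  # plain MSE branch
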
Example #5

    def __init__(self,
                 root,
                 train_test_val='train',
                 top_k=5,
                 rand=False,
                 stat=True):
        super().__init__()
        self.dir = Path(root) / 'RED'
        self.list_dir = list(self.dir.iterdir())
        self.dir = Path(root) / 'NIR'
        self.list_dir += list(self.dir.iterdir())

        self.train_test_val = train_test_val

        prop = 0.1
        seed = 123
        random.seed(seed)
        random.shuffle(self.list_dir)
        if train_test_val == 'train':
            self.list_dir = self.list_dir[:-int(prop * len(self.list_dir))]
        elif train_test_val == 'val':
            self.list_dir = self.list_dir[-int(prop * len(self.list_dir)):]

        self.top_k = top_k
        self.rand = rand
        self.stat = stat
        self.upsample = UpsamplingBilinear2d(scale_factor=3)  # Baseline
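
The scale_factor=3 upsample stored at the end is the no-learning baseline for 3x super-resolution (the RED/NIR folders suggest PROBA-V-style satellite bands). Standalone, the baseline is just:

import torch
from torch.nn import UpsamplingBilinear2d

lr = torch.rand(1, 1, 128, 128)                # a low-resolution band
sr = UpsamplingBilinear2d(scale_factor=3)(lr)  # bilinear x3 baseline
assert sr.shape == (1, 1, 384, 384)
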
Example #6
def __init__(self, seg_classes, backbone_arch):
    super().__init__(seg_classes, backbone_arch)
    self.fpn = FPNSegmentation(
        inner_filters=128,
        filters=encoder_params[backbone_arch]["filters"])
    self.up = UpsamplingBilinear2d(scale_factor=4)
    self.final = Conv1x1(in_channels=128, out_channels=seg_classes)
Example #7

    def __init__(self, base_name, classes,
                 pretrained_base=None, down_ratio=2):
        super(DLASeg, self).__init__()
        assert down_ratio in [2, 4, 8, 16]
        self.first_level = int(np.log2(down_ratio))
        self.encoder_stages = dla.__dict__[base_name](pretrained=pretrained_base,
                                                      return_levels=True)
        channels = self.encoder_stages.channels
        scales = [2 ** i for i in range(len(channels[self.first_level:]))]
        self.dla_up = DLAUp(channels[self.first_level:], scales=scales)
        self.fc = nn.Sequential(
            nn.Conv2d(channels[self.first_level], classes, kernel_size=1,
                      stride=1, padding=0, bias=True)
        )
        up_factor = 2 ** self.first_level  # equals down_ratio
        if up_factor > 1:
            up = UpsamplingBilinear2d(scale_factor=up_factor)
        else:
            up = Identity()
        self.up = up

        for m in self.fc.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, BatchNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
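
The init loop at the end draws conv weights from N(0, sqrt(2/n)) with n = kernel area times out_channels, i.e. He initialization with fan-out; the equivalent built-in call is:

import torch.nn as nn

conv = nn.Conv2d(64, 32, kernel_size=3, padding=1)
# Same distribution as m.weight.data.normal_(0, math.sqrt(2. / n)) above,
# where n = k_h * k_w * out_channels (fan-out).
nn.init.kaiming_normal_(conv.weight, mode='fan_out', nonlinearity='relu')
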
Example #8

def __init__(self, in_channels, out_channels, kernel_size, style_size,
             use_gpu):
     super().__init__()
     self.use_gpu = use_gpu
     self.upsample_layer = UpsamplingBilinear2d(scale_factor=2)
     self.out_channels = out_channels
     self.weight_scaling_1 = Weight_Scaling(
         in_channels * kernel_size * kernel_size, LEAKY_RELU_GAIN)
     self.conv_1 = Conv2d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=kernel_size,
                          stride=1,
                          padding=1)
     self.LeakyReLU_1 = LeakyReLU(0.2)
     self.weight_scaling_2 = Weight_Scaling(
         out_channels * kernel_size * kernel_size, LEAKY_RELU_GAIN)
     self.conv_2 = Conv2d(in_channels=out_channels,
                          out_channels=out_channels,
                          kernel_size=kernel_size,
                          stride=1,
                          padding=1)
     self.LeakyReLU_2 = LeakyReLU(0.2)
     self.style_scaling = Weight_Scaling(style_size, 1)
     self.style_affine_1 = Linear(style_size, in_channels)
     self.noise_scaler_1 = torch.nn.Parameter(
         torch.zeros(out_channels).view(1, out_channels, 1, 1))
     self.style_affine_2 = Linear(style_size, in_channels)
     self.noise_scaler_2 = torch.nn.Parameter(
         torch.zeros(out_channels).view(1, out_channels, 1, 1))
Example #9
def __init__(self, seg_classes, backbone_arch):
    super().__init__(seg_classes, backbone_arch)
    self.fpn = FPNSegmentation(inner_filters=256,
                               filters=encoder_params[backbone_arch]["filters"])
    self.up = UpsamplingBilinear2d(scale_factor=4)
    self.final = Conv1x1(in_channels=128, out_channels=seg_classes)
    self.dropout = nn.Dropout2d(p=0.15)
    _initialize_weights(self.fpn)
    _initialize_weights(self.final)
Example #10
from torch.nn import (Sequential, Conv2d, BatchNorm2d, LeakyReLU,
                      UpsamplingBilinear2d)

def genUpsample2(input_channels, output_channels, kernel_size,
                 negative_slope=0.2):
    # negative_slope was a free variable in the original snippet; it is
    # exposed here as a parameter with an assumed default of 0.2.
    return Sequential(
        Conv2d(input_channels, output_channels, kernel_size=kernel_size,
               stride=1, padding=(kernel_size - 1) // 2),
        BatchNorm2d(output_channels),
        LeakyReLU(negative_slope=negative_slope),
        Conv2d(output_channels, output_channels, kernel_size=kernel_size,
               stride=1, padding=(kernel_size - 1) // 2),
        BatchNorm2d(output_channels),
        LeakyReLU(negative_slope=negative_slope),
        UpsamplingBilinear2d(scale_factor=2))
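
Each genUpsample2 block convolves twice at the current resolution and then doubles it, so stacking two blocks yields a 4x upscale; a quick shape check using the signature above:

import torch

net = torch.nn.Sequential(
    genUpsample2(3, 32, kernel_size=3),
    genUpsample2(32, 16, kernel_size=3),
)
assert net(torch.rand(1, 3, 16, 16)).shape == (1, 16, 64, 64)
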
Example #11

    def __init__(self,
                 num_classes,
                 num_channels=3,
                 encoder_name='resnet34',
                 use_last_decoder=False):
        if not hasattr(self, 'first_layer_stride_two'):
            self.first_layer_stride_two = False
        if not hasattr(self, 'decoder_block'):
            self.decoder_block = UnetDecoderBlock
        if not hasattr(self, 'bottleneck_type'):
            self.bottleneck_type = ConvBottleneck

        self.filters = encoder_params[encoder_name]['filters']
        self.decoder_filters = encoder_params[encoder_name].get(
            'decoder_filters', self.filters[:-1])
        self.last_upsample_filters = encoder_params[encoder_name].get(
            'last_upsample', self.decoder_filters[0] // 2)

        super().__init__()

        self.num_channels = num_channels
        self.num_classes = num_classes
        self.bottlenecks = nn.ModuleList([
            self.bottleneck_type(self.filters[-i - 2] + f, f)
            for i, f in enumerate(reversed(self.decoder_filters[:]))
        ])

        self.decoder_stages = nn.ModuleList([
            self.get_decoder(idx)
            for idx in range(0, len(self.decoder_filters))
        ])

        if self.first_layer_stride_two:
            if use_last_decoder:
                self.last_upsample = self.decoder_block(
                    self.decoder_filters[0], self.last_upsample_filters,
                    self.last_upsample_filters)
            else:
                self.last_upsample = UpsamplingBilinear2d(scale_factor=2)
        self.final = self.make_final_classifier(
            self.last_upsample_filters if self.first_layer_stride_two else
            self.decoder_filters[0], num_classes)
        self._initialize_weights()
        self.dropout = Dropout2d(p=0.0)
        encoder = encoder_params[encoder_name]['init_op']()
        self.encoder_stages = nn.ModuleList([
            self.get_encoder(encoder, idx) for idx in range(len(self.filters))
        ])
        if encoder_params[encoder_name]['url'] is not None:
            self.initialize_encoder(encoder,
                                    encoder_params[encoder_name]['url'],
                                    num_channels != 3)
Example #12
import numpy as np
import torch
from rectpack import newPacker
from torch.nn import UpsamplingBilinear2d, UpsamplingNearest2d

def get_mnist_knapsack(images, labels, nb_classes=10, dim=128):

    bboxes = []

    canvas = -torch.ones((dim, dim))
    noise_canvas = torch.zeros((nb_classes, dim, dim))
    condition_canvas = torch.zeros((nb_classes, dim, dim))

    # Sample a width and height around 28 px for every digit.
    hs, ws = 28 + 5 * np.random.randn(2, images.shape[0])
    hs = np.clip(hs, 14, 48).astype('int')
    ws = np.clip(ws, 14, 48).astype('int')

    rectangles = list(zip(ws, hs))
    bins = [(dim, dim)]

    packer = newPacker()

    # Add the rectangles to the packing queue, tagging each with its input
    # index so placements can be matched back to images/labels.
    for i, r in enumerate(rectangles):
        packer.add_rect(*r, rid=i)

    # Add the bins where the rectangles will be placed
    for b in bins:
        packer.add_bin(*b)

    # Start packing
    packer.pack()

    for rect in packer.rect_list():
        _, x, y, w, h, rid = rect

        # Resize the digit to its packed cell and paste it onto the canvas.
        scaled_crop = UpsamplingBilinear2d(size=(h, w))(
            images[rid][None, None])[0, 0]
        canvas[y:y + h, x:x + w] = torch.max(canvas[y:y + h, x:x + w],
                                             scaled_crop)

        # Per-class noise and binary condition maps for the same cell.
        z = torch.randn(1, 1, 7, 7)
        z = UpsamplingNearest2d(size=(h, w))(z)[0, 0]
        noise_canvas[labels[rid], y:y + h, x:x + w] = z

        condition_canvas[labels[rid], y:y + h, x:x + w] = torch.ones((h, w))

        bboxes.append([x, y, x + w, y + h])

    return canvas, noise_canvas, condition_canvas, torch.Tensor(bboxes)
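
A quick way to exercise the function without downloading MNIST is to feed random 28x28 "digits" (newPacker comes from the rectpack package). Rectangles that do not fit on the canvas are silently dropped by the packer, so the bbox count may be less than the batch size:

import torch

images = torch.rand(16, 28, 28)          # stand-ins for MNIST digits
labels = torch.randint(0, 10, (16,))
canvas, noise, cond, bboxes = get_mnist_knapsack(images, labels)
print(canvas.shape, noise.shape, cond.shape, len(bboxes))
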