Example #1
    def __init__(self,
                 encoder: nn.Module,
                 n_classes: int,
                 blur: bool = False,
                 blur_final=True,
                 self_attention: bool = False,
                 y_range: Optional[Tuple[float, float]] = None,
                 last_cross: bool = True,
                 bottle: bool = False,
                 **kwargs):
        imsize = (args.size, args.size)  # args is a script-level config (e.g. an argparse namespace) defined outside this snippet
        sfs_szs = model_sizes(encoder, size=imsize)
        sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs)))
        self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
        x = dummy_eval(encoder, imsize).detach()

        ni = sfs_szs[-1][1]
        middle_conv = nn.Sequential(conv_layer(ni, ni * 2, **kwargs),
                                    conv_layer(ni * 2, ni, **kwargs)).eval()
        x = middle_conv(x)
        layers = [encoder, batchnorm_2d(ni), nn.ReLU(), middle_conv]

        self.hc_hooks = [Hook(layers[-1], _hook_inner, detach=False)]
        hc_c = [x.shape[1]]

        for i, idx in enumerate(sfs_idxs):
            not_final = i != len(sfs_idxs) - 1
            up_in_c, x_in_c = int(x.shape[1]), int(sfs_szs[idx][1])
            do_blur = blur and (not_final or blur_final)
            sa = self_attention and (i == len(sfs_idxs) - 3)
            unet_block = UnetBlock(up_in_c,
                                   x_in_c,
                                   self.sfs[i],
                                   final_div=not_final,
                                   blur=do_blur,
                                   self_attention=sa,
                                   **kwargs).eval()
            layers.append(unet_block)
            x = unet_block(x)
            self.hc_hooks.append(Hook(layers[-1], _hook_inner, detach=False))
            hc_c.append(x.shape[1])

        ni = x.shape[1]
        if imsize != sfs_szs[0][-2:]:
            layers.append(PixelShuffle_ICNR(ni, **kwargs))
        if last_cross:
            layers.append(MergeLayer(dense=True))
            ni += in_channels(encoder)
            layers.append(res_block(ni, bottle=bottle, **kwargs))
        hc_c.append(ni)
        layers.append(Hcolumns(self.hc_hooks, hc_c))
        layers += [
            conv_layer(ni * len(hc_c),
                       n_classes,
                       ks=1,
                       use_activ=False,
                       **kwargs)
        ]
        if y_range is not None: layers.append(SigmoidRange(*y_range))
        super().__init__(*layers)
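
A minimal usage sketch for the constructor above, under two assumptions: the class is the DynamicUnet_Hcolumns shown in Example #8, and the script-level args object it reads (imsize = (args.size, args.size)) is defined in the same module. All names below are illustrative, not part of the original snippet.

# hypothetical usage sketch (fastai v1); assumes the class above is DynamicUnet_Hcolumns
from types import SimpleNamespace
from fastai.vision import models
from fastai.vision.learner import create_body

args = SimpleNamespace(size=256)                          # stand-in for the script's argparse result
encoder = create_body(models.resnet34, pretrained=True)   # backbone without its classification head
model = DynamicUnet_Hcolumns(encoder, n_classes=2)        # e.g. binary segmentation
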
Example #2
 def __init__(self,
              encoder: nn.Module,
              n_classes,
              final_bias=0.,
              chs=256,
              n_anchors=9,
              flatten=True):
     super().__init__()
     self.n_classes, self.flatten = n_classes, flatten
     imsize = (256, 256)
     sfs_szs = model_sizes(encoder, size=imsize)
     sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs)))
     self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
     self.encoder = encoder
     self.c5top5 = conv2d(sfs_szs[-1][1], chs, ks=1, bias=True)
     self.c5top6 = conv2d(sfs_szs[-1][1], chs, stride=2, bias=True)
     self.p6top7 = nn.Sequential(nn.ReLU(),
                                 conv2d(chs, chs, stride=2, bias=True))
     self.merges = nn.ModuleList([
         LateralUpsampleMerge(chs, sfs_szs[idx][1], hook)
         for idx, hook in zip(sfs_idxs[-2:-4:-1], self.sfs[-2:-4:-1])
     ])
     self.smoothers = nn.ModuleList(
         [conv2d(chs, chs, 3, bias=True) for _ in range(3)])
     self.classifier = self._head_subnet(n_classes,
                                         n_anchors,
                                         final_bias,
                                         chs=chs)
     self.box_regressor = self._head_subnet(4, n_anchors, 0., chs=chs)
Example #3
    def __init__(self,
                 encoder: nn.Module,
                 n_classes,
                 final_bias: float = 0.,
                 n_conv: int = 4,
                 chs=256,
                 n_anchors=9,
                 anchors=None,
                 oversize=1.5,
                 flatten=True,
                 sizes=None,
                 patchClassifier: nn.Module = None):
        super().__init__()
        self.n_classes, self.flatten = n_classes, flatten
        imsize = (256, 256)
        self.sizes = sizes
        self.grid_target_size = (64, 64)
        self._sizes = torch.Tensor(sizes)
        self._oversize = oversize

        sfs_szs, x, hooks = self._model_sizes(encoder, size=imsize)
        sfs_idxs = _get_sfs_idxs(sfs_szs)
        self.encoder = encoder
        self.c5top5 = conv2d(sfs_szs[-1][1], chs, ks=1, bias=True)
        self.c5top6 = conv2d(sfs_szs[-1][1], chs, stride=2, bias=True)
        self.p6top7 = nn.Sequential(nn.ReLU(),
                                    conv2d(chs, chs, stride=2, bias=True))
        self.merges = nn.ModuleList([
            LateralUpsampleMerge(chs, szs[1], hook)
            for szs, hook in zip(sfs_szs[-2:-4:-1], hooks[-2:-4:-1])
        ])
        self.smoothers = nn.ModuleList(
            [conv2d(chs, chs, 3, bias=True) for _ in range(3)])
        self.classifier = self._head_subnet(n_classes,
                                            n_anchors,
                                            final_bias,
                                            chs=chs,
                                            n_conv=n_conv)
        self.box_regressor = self._head_subnet(3,
                                               n_anchors,
                                               0.,
                                               chs=chs,
                                               n_conv=n_conv)
        if patchClassifier is None:
            self.box_stn = create_cnn_model(models.resnet18,
                                            n_classes,
                                            cut=None,
                                            pretrained=True,
                                            lin_ftrs=None,
                                            ps=0.5,
                                            split_on=None,
                                            bn_final=False,
                                            concat_pool=True)

#            self._stn_subnet(n_classes, final_bias, chs=chs, n_conv=n_conv)
        else:
            self.box_stn = patchClassifier
        self.anchors = anchors
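
Examples #2 and #3 both delegate to a self._head_subnet helper that is not included. The sketch below shows what such a RetinaNet-style head commonly looks like in fastai-based implementations; it is an assumption about the missing method, not the original code.

    def _head_subnet(self, n_classes, n_anchors, final_bias=0., n_conv=4, chs=256):
        # n_conv conv+ReLU blocks at constant width, then a final conv emitting
        # n_classes (or 4 box offsets) per anchor at every spatial position
        layers = [conv_layer(chs, chs, bias=True, norm_type=None) for _ in range(n_conv)]
        layers += [conv2d(chs, n_classes * n_anchors, bias=True)]
        layers[-1].bias.data.zero_().add_(final_bias)  # RetinaNet-style bias init for the classifier
        layers[-1].weight.data.fill_(0)
        return nn.Sequential(*layers)
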
Example #4
    def __init__(self,
                 vol_size,
                 encoder: nn.Module,
                 n_classes: int,
                 last_cross: bool = True,
                 bottle: bool = False,
                 pixel_shuffle=False,
                 light_up_block=True,
                 self_attention: bool = False,
                 **kwargs):
        sfs_szs = model_sizes(encoder, size=vol_size)
        # sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs)))
        sfs_idxs = np.unique(_get_sfs_idxs(sfs_szs))[::-1].tolist()
        self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
        x = dummy_eval(encoder, vol_size).detach()

        ni = sfs_szs[-1][1]
        middle_conv = nn.Sequential(conv_layer3d(ni, ni * 2, **kwargs),
                                    conv_layer3d(ni * 2, ni, **kwargs)).eval()
        x = middle_conv(x)
        layers = [encoder, batchnorm_3d(ni), nn.ReLU(), middle_conv]

        for i, idx in enumerate(sfs_idxs):
            not_final = i != len(sfs_idxs) - 1
            # shuffle-upsample only when every spatial dim of x is smaller than the
            # matching skip-connection size, and only if pixel_shuffle is enabled
            ds = all(a < b for a, b in zip(x.shape[-3:], sfs_szs[idx][-3:]))
            ds = ds and pixel_shuffle
            up_in_c, x_in_c = int(x.shape[1]), int(sfs_szs[idx][1])
            sa = self_attention and (i == len(sfs_idxs) - 3)
            vnet_block = VnetBlock(up_in_c,
                                   x_in_c,
                                   self.sfs[i],
                                   final_div=not_final,
                                   do_shuffle=ds,
                                   double_out_conv=not light_up_block,
                                   self_attention=sa,
                                   **kwargs).eval()
            layers.append(vnet_block)
            x = vnet_block(x)

        ni = x.shape[1]
        if vol_size != sfs_szs[0][-3:]:
            #layers.append(PixelShuffle_ICNR(ni, **kwargs))
            layers.append(Upsample3d(vol_size))

        if last_cross:
            layers.append(MergeLayer(dense=True))
            ni += in_channels(encoder)
            layers.append(res_block3d(ni, bottle=bottle, **kwargs))

        layers += [
            conv_layer3d(ni, n_classes, ks=1, use_activ=False, **kwargs)
        ]
        super().__init__(*layers)
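
Example #4 depends on several 3D helpers that are not shown (conv_layer3d, batchnorm_3d, res_block3d, VnetBlock, Upsample3d). As one illustration, Upsample3d can be a thin wrapper around nn.Upsample with trilinear interpolation; this is a guess at its intent, not the original definition.

class Upsample3d(nn.Module):
    "Resize a (N, C, D, H, W) activation back to the full volume size."
    def __init__(self, vol_size):
        super().__init__()
        self.up = nn.Upsample(size=vol_size, mode='trilinear', align_corners=False)

    def forward(self, x):
        return self.up(x)
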
Example #5
 def __init__(self, encoder: nn.Module, n_classes, final_bias:float=0., n_conv:int=4,
              chs=512, n_anchors=9, flatten=True, sizes=None):
     super().__init__()
     self.n_classes, self.flatten = n_classes, flatten
     imsize = (512, 512)
     self.sizes = sizes
     sfs_szs, x, hooks = self._model_sizes(encoder, size=imsize)
     sfs_idxs = _get_sfs_idxs(sfs_szs)
     self.encoder = encoder
     self.c5top5 = conv2d(sfs_szs[-1][1], chs, ks=1, bias=True)
     self.c5top6 = conv2d(sfs_szs[-1][1], chs, stride=2, bias=True)
     self.p6top7 = nn.Sequential(nn.ReLU(), conv2d(chs, chs, stride=2, bias=True))
     self.merges = nn.ModuleList([LateralUpsampleMerge(chs, szs[1], hook)
                                  for szs, hook in zip(sfs_szs[-2:-4:-1], hooks[-2:-4:-1])])
     self.smoothers = nn.ModuleList([conv2d(chs, chs, 3, bias=True) for _ in range(3)])
     self.classifier = self._head_subnet(n_classes, n_anchors, final_bias, chs=chs, n_conv=n_conv)
     self.box_regressor = self._head_subnet(4, n_anchors, 0., chs=chs, n_conv=n_conv)
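
Examples #2, #3 and #5 all build their FPN top-down path from LateralUpsampleMerge modules, which are not defined here. A sketch of the usual implementation (assuming fastai's conv2d and torch.nn.functional as F are in scope); treat it as an assumption, not the original source.

class LateralUpsampleMerge(nn.Module):
    "Add a 1x1-projected lateral (skip) feature map to the upsampled top-down path."
    def __init__(self, ch, ch_lat, hook):
        super().__init__()
        self.hook = hook                                  # hook storing the encoder activation
        self.conv_lat = conv2d(ch_lat, ch, ks=1, bias=True)

    def forward(self, x):
        return self.conv_lat(self.hook.stored) + F.interpolate(x, scale_factor=2)
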
Example #6
    def __init__(self,
                 encoder=None,
                 n_classes=2,
                 last_filters=32,
                 imsize=(256, 256),
                 y_range=None,
                 **kwargs):

        self.n_classes = n_classes

        layers = nn.ModuleList()

        # Encoder
        sfs_szs = model_sizes(encoder, size=imsize)
        sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs)))
        self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
        layers.append(encoder)

        x = dummy_eval(encoder, imsize).detach()

        self.hc_hooks = []
        hc_c = []

        ni = sfs_szs[-1][1]
        middle_conv = nn.Sequential(conv_layer(ni, ni * 2),
                                    conv_layer(ni * 2, ni)).eval()
        x = middle_conv(x)
        layers.extend([batchnorm_2d(ni), nn.ReLU(), middle_conv])

        # self.hc_hooks = [Hook(layers[-1], _hook_inner, detach=False)]
        # hc_c = [x.shape[1]]

        # Decoder
        n_filters = [64, 128, 256, 512]
        n = len(n_filters)
        is_deconv = True

        for i, idx in enumerate(sfs_idxs[:-1]):
            in_c = int(n_filters[n - i - 1] + n_filters[n - i - 2]) // 2
            out_c = int(sfs_szs[idx][1])

            dec_bloc = DecoderBlock(in_c, out_c, self.sfs[i], is_deconv,
                                    True).eval()
            layers.append(dec_bloc)

            x = dec_bloc(x)

            self.hc_hooks.append(Hook(layers[-1], _hook_inner, detach=False))
            hc_c.append(x.shape[1])

        ni = x.shape[1]

        layers.append(PixelShuffle_ICNR(n_filters[0], scale=2))

        layers.append(Hcolumns(self.hc_hooks, hc_c))

        fin_block = FinalBlock(ni * (len(hc_c) + 1), last_filters, n_classes)
        layers.append(fin_block)

        if y_range is not None:
            layers.append(SigmoidRange(*y_range))
        super().__init__(*layers)
Example #7
    def __init__(self, encoder:nn.Module, n_classes:int,
                 y_range:Optional[Tuple[float,float]]=None, skip_connections=True,
                 **kwargs):

        # fold the stem (children 0-2) into a single module, then rebuild the
        # encoder starting from that fused stem
        encoder[2] = encoder[0:3]
        encoder = nn.Sequential(*list(encoder.children())[2:])

        attented_layers = []
        filter = []

        imsize = (256,256)
        sfs_szs = model_sizes(encoder, size=imsize)
        sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs)))
        self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
        sfs_idxs = sfs_idxs[:-1]

        attented_layers.extend([encoder[ind] for ind in sfs_idxs[::-1]])
        attented_layers.append(encoder[-1])

        filter.extend([sfs_szs[ind][1] for ind in sfs_idxs[::-1]])

        x = dummy_eval(encoder, imsize).detach()

        ni = sfs_szs[-1][1]
        filter.append(ni)

        middle_conv_enc = conv_layer(ni, ni*2, **kwargs).eval()
        middle_conv_dec = conv_layer(ni*2, ni, **kwargs).eval()

        x = middle_conv_enc(x)
        x = middle_conv_dec(x)

        layers = list(encoder)
        layers = layers + [batchnorm_2d(ni), nn.ReLU(), middle_conv_enc, middle_conv_dec]

        attented_layers.append(middle_conv_enc)
        attented_layers.append(middle_conv_dec)

        filter.extend([ni*2,ni*2,ni])

        # sfs_idxs = sfs_idxs[:-2]
        for i,idx in enumerate(sfs_idxs):
            up_in_c, x_in_c = int(x.shape[1]), int(sfs_szs[idx][1])
            if skip_connections:
                not_final = not (i!=len(sfs_idxs)-1)  # True only on the last block, despite the name
                unet_block = UnetBlock(up_in_c, x_in_c, self.sfs[i], final_div=not_final,
                                       **kwargs).eval()
            else:
                unet_block = UnetBlockWithoutSkipConnection(up_in_c, **kwargs).eval()


            layers.append(unet_block)
            x = unet_block(x)

            attented_layers.append(layers[-1])
            filter.append(x_in_c)   # in for first filter param for attention block

        filter = filter[:-1]

        ni = x.shape[1]

        unet_block_last = UnetBlockWithoutSkipConnection(10,
                                                         # final_div=not_final,
                                                         blur=False,
                                                         self_attention=False,
                                                         **kwargs)

        if imsize != sfs_szs[0][-2:]:
            unet_block_last.shuf = PixelShuffle_ICNR(ni, **kwargs)
        else:
            unet_block_last.shuf = nn.Identity()

        unet_block_last.conv1 = conv_layer(ni, n_classes, ks=1, use_activ=False, **kwargs)
        unet_block_last.conv2 = nn.Identity()
        unet_block_last.relu = nn.Identity()

        layers.append(unet_block_last)
        attented_layers.append(unet_block_last)
        # if skip_connections:
        #     ni = 32
        filter.extend([ni, n_classes])

        if y_range is not None: layers.append(SigmoidRange(*y_range))

        super().__init__(*layers)
        self.attended_layers = attented_layers
        self.filter = filter
Example #8
    def forward(self, x:Tensor):
        n = len(self.hooks)
        out = [F.interpolate(self.hooks[i].stored if self.factorization is None
            else self.factorization[i](self.hooks[i].stored), scale_factor=2**(self.n-i),
            mode='bilinear',align_corners=False) for i in range(self.n)] + [x]
        return torch.cat(out, dim=1)
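
The forward above reads self.hooks, self.n and self.factorization, which are set in an Hcolumns.__init__ that is not shown. A plausible reconstruction, inferred from how the constructors call Hcolumns(self.hc_hooks, hc_c): each hooked map is factorized down to hc_c[-1] channels, so the concatenated output has ni * len(hc_c) channels, matching the final conv_layer in Examples #1 and #9. This is an assumption, not the original code.

    # assumed __init__ for the Hcolumns module whose forward is shown above
    def __init__(self, hooks, nc=None):
        super().__init__()
        self.hooks, self.n = hooks, len(hooks)
        self.factorization = None
        if nc is not None:
            # reduce every hooked feature map to nc[-1] channels before concatenation
            self.factorization = nn.ModuleList([
                nn.Sequential(conv2d(nc[i], nc[-1], 3, padding=1, bias=True),
                              conv2d(nc[-1], nc[-1], 3, padding=1, bias=True))
                for i in range(self.n)])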

class DynamicUnet_Hcolumns(SequentialEx):
    "Create a U-Net from a given architecture."
    def __init__(self, encoder:nn.Module, n_classes:int, blur:bool=False, blur_final=True, 
                 self_attention:bool=False,
                 y_range:Optional[Tuple[float,float]]=None,
                 last_cross:bool=True, bottle:bool=False, **kwargs):
        imsize = (args.size, args.size)
        sfs_szs = model_sizes(encoder, size=imsize)
        sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs)))
        self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
        x = dummy_eval(encoder, imsize).detach()

        ni = sfs_szs[-1][1]
        middle_conv = nn.Sequential(conv_layer(ni, ni*2, **kwargs),
                                    conv_layer(ni*2, ni, **kwargs)).eval()
        x = middle_conv(x)
        layers = [encoder, batchnorm_2d(ni), nn.ReLU(), middle_conv]

        self.hc_hooks = [Hook(layers[-1], _hook_inner, detach=False)]
        hc_c = [x.shape[1]]
        
        for i,idx in enumerate(sfs_idxs):
            not_final = i!=len(sfs_idxs)-1
            up_in_c, x_in_c = int(x.shape[1]), int(sfs_szs[idx][1])
Example #9
    def __init__(
        self,
        encoder: nn.Module,
        n_classes: int,
        blur: bool = False,
        blur_final=True,
        self_attention: bool = False,
        y_range: Optional[Tuple[float, float]] = None,
        last_cross: bool = True,
        bottle: bool = False,
        small=True,
        **kwargs,
    ):
        imsize = (256, 256)
        # for resnet50 ... but there was not enough memory ...
        # sfs_szs = [(1, 64, 128, 128), (1, 64, 128, 128), (1, 64, 1...512, 32, 32), (1, 1024, 16, 16), (1, 2048, 8, 8)]
        # sfs_idxs = [6, 5, 4, 2]  #? 3?
        sfs_szs = model_sizes(encoder, size=imsize)
        # for resnext50_32x4d
        # [torch.Size([1, 64, 64, 64]), torch.Size([1, 64, 64, 64]), torch.Size([1, 64, 64, 64]), torch.Size([1, 64, 32, 32]), torch.Size([1, 256, 32, 32]), torch.Size([1, 512, 16, 16]), torch.Size([1, 1024, 8, 8]), torch.Size([1, 2048, 4, 4])]
        sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs)))
        # if small: sfs_idxs = sfs_idxs[-3:] (need to do double upscale)
        self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
        x = dummy_eval(encoder, imsize).detach()

        ni = sfs_szs[-1][1]
        if small:
            middle_conv_size_down_scale = 2
            middle_conv = conv_layer(ni, ni // middle_conv_size_down_scale,
                                     **kwargs).eval()
        else:
            middle_conv_size_scale = 2
            middle_conv = nn.Sequential(
                conv_layer(ni, ni * middle_conv_size_scale, **kwargs),
                conv_layer(ni * middle_conv_size_scale, ni, **kwargs),
            ).eval()
        x = middle_conv(x)
        layers = [encoder, batchnorm_2d(ni), nn.ReLU(), middle_conv]

        if small:
            self.hc_hooks = []
            hc_c = []
        else:
            self.hc_hooks = [Hook(layers[-1], _hook_inner, detach=False)]
            hc_c = [x.shape[1]]

        for i, idx in enumerate(sfs_idxs):
            final_unet_flag = i == len(sfs_idxs) - 1
            up_in_c, x_in_c = int(x.shape[1]), int(sfs_szs[idx][1])
            do_blur = blur and (final_unet_flag or blur_final)
            sa = self_attention and (i == len(sfs_idxs) - 3)
            unet_block_class = UnetBlockSmall if small else UnetBlock
            unet_block = unet_block_class(
                up_in_c,
                x_in_c,
                self.sfs[i],
                final_div=final_unet_flag,
                blur=do_blur,
                self_attention=sa,
                **kwargs,
            ).eval()
            print(unet_block)  # debug: inspect each decoder block as it is built
            layers.append(unet_block)
            x = unet_block(x)
            # added for hypercolumns, two line
            self.hc_hooks.append(Hook(layers[-1], _hook_inner, detach=False))
            hc_c.append(x.shape[1])

        ni = x.shape[1]
        if imsize != sfs_szs[0][-2:]:
            layers.append(PixelShuffle_ICNR(ni, **kwargs))
        if last_cross:
            layers.append(MergeLayer(dense=True))
            ni += in_channels(encoder)
            layers.append(res_block(ni, bottle=bottle, **kwargs))
        # added for hypercolumns, two line
        hc_c.append(ni)
        layers.append(Hcolumns(self.hc_hooks, hc_c))
        layers += [
            conv_layer(ni * len(hc_c),
                       n_classes,
                       ks=1,
                       use_activ=False,
                       **kwargs)
        ]
        if y_range is not None:
            layers.append(SigmoidRange(*y_range))
        super().__init__(*layers)