Example 1
    def __init__(self,
                 up_in_c: int,
                 x_in_c: int,
                 nf: int = None,
                 blur: bool = False,
                 self_attention: bool = False,
                 padding: int = 1,
                 **kwargs):
        super().__init__()
        # pixel-shuffle upsampling of the incoming low-resolution features
        self.shuf = PixelShuffle_ICNR(up_in_c,
                                      up_in_c // 2,
                                      blur=blur,
                                      **kwargs)
        # batch norm for the skip (cross) connection, which has x_in_c channels
        self.bn = nn.BatchNorm2d(x_in_c)
        ni = up_in_c // 2 + x_in_c
        nf = nf if nf is not None else max(up_in_c // 2, 32)
        self.conv1 = ConvLayer(ni,
                               nf,
                               norm_type=None,
                               padding=padding,
                               **kwargs)
        self.conv2 = ConvLayer(
            nf,
            nf,
            norm_type=None,
            padding=padding,
            xtra=SelfAttention(nf) if self_attention else None,
            **kwargs)
        self.relu = nn.ReLU(inplace=True)
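
The block above leans on fastai's PixelShuffle_ICNR for learnable upsampling. As a point of reference, here is a minimal sketch of the same idea in plain PyTorch, assuming a 1x1 convolution followed by nn.PixelShuffle with ICNR-style initialisation (every group of scale^2 output channels starts identical, which suppresses checkerboard artifacts). PixelShuffleUpsample and icnr_init are illustrative names, not fastai's implementation.

import torch
import torch.nn as nn

def icnr_init(shape, scale=2, init=nn.init.kaiming_normal_):
    # Build a (nf*scale^2, ni, kh, kw) weight where each group of scale^2
    # consecutive output channels is identical, so every pixel inside an
    # upsampled scale x scale block starts out equal after PixelShuffle.
    nf, ni, kh, kw = shape
    sub = init(torch.zeros(nf // scale ** 2, ni, kh, kw))
    return sub.repeat_interleave(scale ** 2, dim=0)

class PixelShuffleUpsample(nn.Module):
    # illustrative stand-in for PixelShuffle_ICNR: 1x1 conv -> pixel shuffle
    def __init__(self, ni, nf, scale=2):
        super().__init__()
        self.conv = nn.Conv2d(ni, nf * scale ** 2, kernel_size=1)
        with torch.no_grad():
            self.conv.weight.copy_(icnr_init(self.conv.weight.shape, scale))
        self.shuf = nn.PixelShuffle(scale)

    def forward(self, x):
        return self.shuf(self.conv(x))

print(PixelShuffleUpsample(64, 32)(torch.randn(1, 64, 8, 8)).shape)
# torch.Size([1, 32, 16, 16])
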
Example 2
    def _head_subnet(self, n_classes, n_anchors, final_bias=0., n_conv=4, chs=256):
        "Helper function to create one of the subnets for regression/classification."
        layers = [ConvLayer(chs, chs, bias=True, norm_type=None) for _ in range(n_conv)]
        layers += [conv2d(chs, n_classes * n_anchors, bias=True)]
        # start the head from zero: constant bias (final_bias) and zeroed weights
        layers[-1].bias.data.zero_().add_(final_bias)
        layers[-1].weight.data.fill_(0)
        return nn.Sequential(*layers)
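
The helper zeroes the final weights and writes final_bias into the last bias, which matches the RetinaNet recipe of starting the classification subnet from a fixed foreground prior. A small sketch of how such a bias is typically chosen (pi = 0.01 is an assumed prior, not a value taken from this code):

import math
import torch

pi = 0.01                                        # assumed foreground prior
final_bias = -math.log((1 - pi) / pi)            # sigmoid(final_bias) == pi
print(round(final_bias, 3))                      # -4.595
print(torch.sigmoid(torch.tensor(final_bias)))   # tensor(0.0100)
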
Example 3
    def __init__(self,
                 block,
                 expansion,
                 layers,
                 p=0.0,
                 c_in=3,
                 n_out=1000,
                 stem_szs=(32, 32, 64),
                 widen=1.0,
                 sa=False,
                 act_cls=defaults.activation,
                 ndim=2,
                 ks=3,
                 stride=2,
                 **kwargs):
        xresnet.store_attr('block,expansion,act_cls,ndim,ks')
        if ks % 2 == 0:
            raise Exception('kernel size has to be odd!')
        stem_szs = [c_in, *stem_szs]
        stem = [
            ConvLayer(stem_szs[i],
                      stem_szs[i + 1],
                      ks=ks,
                      stride=stride if i == 0 else 1,
                      act_cls=act_cls,
                      ndim=ndim) for i in range(3)
        ]

        block_szs = [
            int(o * widen)
            for o in [64, 128, 256, 512] + [256] * (len(layers) - 4)
        ]
        block_szs = block_szs[:len(layers)]
        block_szs = [64 // expansion] + block_szs
        blocks = self._make_blocks(layers, block_szs, sa, stride, **kwargs)

        super(xresnet.XResNet, self).__init__(
            *stem,
            # MaxPool(ks=ks, stride=stride, padding=ks//2, ndim=ndim),
            *blocks,
            ConvLayer(block_szs[-1] * expansion,
                      n_out,
                      stride=1,
                      act_cls=act_cls,
                      ndim=ndim),
        )
        xresnet.init_cnn(self)
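
For reference, a self-contained sketch of the three-layer stem built above, assuming plain Conv2d + BatchNorm2d + ReLU as a stand-in for fastai's ConvLayer and ndim=2: only the first convolution is strided, so the stem downsamples once while widening the channels through stem_szs.

import torch
import torch.nn as nn

def stem(c_in=3, stem_szs=(32, 32, 64), ks=3, stride=2):
    # Conv-BN-ReLU stand-in for ConvLayer; only layer 0 uses the stride.
    szs = [c_in, *stem_szs]
    layers = []
    for i in range(3):
        layers += [
            nn.Conv2d(szs[i], szs[i + 1], ks,
                      stride=stride if i == 0 else 1, padding=ks // 2),
            nn.BatchNorm2d(szs[i + 1]),
            nn.ReLU(inplace=True),
        ]
    return nn.Sequential(*layers)

print(stem()(torch.randn(1, 3, 64, 64)).shape)  # torch.Size([1, 64, 32, 32])
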
Example 4
    def __init__(self,
                 in_channels=1,
                 n_classes=2,
                 stride=1,
                 inplanes=64,
                 pre_ssl=True,
                 **kwargs):
        super().__init__()
        store_attr('in_channels, n_classes, inplanes, pre_ssl')
        #encoder
        if pre_ssl:
            m = torch.hub.load(
                'facebookresearch/semi-supervised-ImageNet1K-models',
                'resnext50_32x4d_ssl')
        else:
            m = ResNet(Bottleneck, [3, 4, 6, 3], groups=32, width_per_group=4)
        m.conv1.padding = (0, 0)

        if in_channels < 3:
            #print('Cutting input layer weights to', in_channels, 'channel(s).')
            with torch.no_grad():
                m.conv1.weight = nn.Parameter(m.conv1.weight[:, :in_channels,
                                                             ...])
        elif in_channels > 3:
            m.conv1 = nn.Conv2d(in_channels,
                                self.inplanes,
                                kernel_size=7,
                                stride=2,
                                bias=False)

        #self.bn1 =  m.bn1 if in_channels==3 else nn.BatchNorm2d(self.inplanes)
        self.enc0 = nn.Sequential(m.conv1, m.bn1, nn.ReLU(inplace=True))
        self.enc1 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1),
            m.layer1)  #256
        self.enc2 = m.layer2  #512
        self.enc3 = m.layer3  #1024
        self.enc4 = m.layer4  #2048
        #aspp with customized dilatations
        self.aspp = ASPP(
            2048,
            256,
            out_c=512,
            dilations=[stride * 1, stride * 2, stride * 3, stride * 4])
        self.drop_aspp = nn.Dropout2d(0.5)
        #decoder
        self.dec4 = UnetBlock(512, 1024, 256, padding=0)
        self.dec3 = UnetBlock(256, 512, 128, padding=0)
        self.dec2 = UnetBlock(128, 256, 64, padding=0)
        self.dec1 = UnetBlock(64, 64, 32, padding=0)
        self.fpn = FPN([512, 256, 128, 64], [16] * 4)
        self.drop = nn.Dropout2d(0.1)
        self.final_conv = ConvLayer(32 + 16 * 4,
                                    n_classes,
                                    ks=1,
                                    norm_type=None,
                                    act_cls=None)
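
When in_channels < 3, the constructor keeps the pretrained stem by slicing conv1's weight tensor instead of replacing the layer. A self-contained sketch of the same trick, using torchvision's resnet18 with random weights purely to stay offline (the SSL ResNeXt loaded above behaves the same way):

import torch
import torch.nn as nn
from torchvision.models import resnet18

m = resnet18()                      # stand-in backbone, random weights
in_channels = 1
with torch.no_grad():
    # keep only the first in_channels slices of the 7x7 input filters
    m.conv1.weight = nn.Parameter(m.conv1.weight[:, :in_channels, ...])
m.conv1.in_channels = in_channels   # keep the module metadata consistent

print(m.conv1(torch.randn(2, 1, 224, 224)).shape)  # torch.Size([2, 64, 112, 112])
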
Example 5
def get_learner(dls):
    model = torch.nn.Sequential(ConvLayer(3, 24, stride=2),
                                ConvLayer(24, 32, stride=2),
                                ConvLayer(32, 64, stride=2),
                                ConvLayer(64, 128, stride=2),
                                ConvLayer(128, 256, stride=2),
                                torch.nn.AdaptiveAvgPool2d(1), Flatten(),
                                torch.nn.Linear(256, 50), torch.nn.ReLU(),
                                torch.nn.Linear(50, dls.c), torch.nn.Tanh())
    #print(model)
    callbacks = ActivationStats(with_hist=True)
    learn = Learner(dls,
                    model,
                    loss_func=MSELossFlat(),
                    metrics=[rmse],
                    cbs=callbacks)
    #valley = learn.lr_find()
    return learn
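
The backbone above is a plain stack of stride-2 ConvLayers. Assuming ConvLayer's defaults (a 3x3 convolution followed by BatchNorm and ReLU), a rough pure-PyTorch equivalent looks like this; each block halves the spatial resolution before global pooling feeds the small regression head:

import torch
import torch.nn as nn

def conv_block(ni, nf):
    # assumed ConvLayer expansion: 3x3 conv (stride 2) -> BatchNorm -> ReLU
    return nn.Sequential(nn.Conv2d(ni, nf, 3, stride=2, padding=1),
                         nn.BatchNorm2d(nf), nn.ReLU())

backbone = nn.Sequential(conv_block(3, 24), conv_block(24, 32),
                         conv_block(32, 64), conv_block(64, 128),
                         conv_block(128, 256), nn.AdaptiveAvgPool2d(1),
                         nn.Flatten())
print(backbone(torch.randn(1, 3, 160, 160)).shape)  # torch.Size([1, 256])
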
Example 6
    def __init__(self,
                 encoder,
                 n_classes,
                 img_size,
                 blur=False,
                 blur_final=True,
                 self_attention=False,
                 y_range=None,
                 bottle=False,
                 act_cls=defaults.activation,
                 init=nn.init.kaiming_normal_,
                 norm_type=None,
                 include_encoder=True,
                 include_middle_conv=True,
                 **kwargs):
        imsize = img_size
        sizes = model_sizes(encoder, size=imsize)
        sz_chg_idxs = list(reversed(_get_sz_change_idxs(sizes)))
        # self.sfs = hook_outputs([encoder[i] for i in sz_chg_idxs], detach=False)
        x = dummy_eval(encoder, imsize).detach()

        layers = []
        if include_encoder:
            layers.append(encoder)

        if include_middle_conv:
            ni = sizes[-1][1]
            middle_conv = (nn.Sequential(
                ConvLayer(ni,
                          ni * 2,
                          act_cls=act_cls,
                          norm_type=norm_type,
                          **kwargs),
                ConvLayer(ni * 2,
                          ni,
                          act_cls=act_cls,
                          norm_type=norm_type,
                          **kwargs))).eval()
            x = middle_conv(x)
            layers += [BatchNorm(ni), nn.ReLU(), middle_conv]

        for i, idx in enumerate(sz_chg_idxs):
            not_final = (i != len(sz_chg_idxs) - 1)
            up_in_c = int(x.shape[1])
            do_blur = blur and (not_final or blur_final)
            sa = self_attention and (i == len(sz_chg_idxs) - 3)
            noskip_unet_block = NoSkipUnetBlock(up_in_c,
                                                final_div=not_final,
                                                blur=do_blur,
                                                self_attention=sa,
                                                act_cls=act_cls,
                                                init=init,
                                                norm_type=norm_type,
                                                **kwargs).eval()
            layers.append(noskip_unet_block)
            x = noskip_unet_block(x)

        ni = x.shape[1]
        if imsize != sizes[0][-2:]:
            layers.append(
                PixelShuffle_ICNR(ni, act_cls=act_cls, norm_type=norm_type))

        layers += [
            ConvLayer(ni,
                      n_classes,
                      ks=1,
                      act_cls=None,
                      norm_type=norm_type,
                      **kwargs)
        ]

        if include_middle_conv:
            apply_init(nn.Sequential(layers[3], layers[-2]), init)
            apply_init(nn.Sequential(layers[2]), init)

        if y_range is not None:
            layers.append(SigmoidRange(*y_range))
        super().__init__(*layers)
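
When y_range is given, the decoder ends with SigmoidRange. A minimal re-implementation for illustration, assuming the usual definition sigmoid(x) * (hi - lo) + lo, which squashes raw activations into the requested interval:

import torch
import torch.nn as nn

class SigmoidRange(nn.Module):
    # illustrative re-implementation; fastai ships its own SigmoidRange
    def __init__(self, lo, hi):
        super().__init__()
        self.lo, self.hi = lo, hi

    def forward(self, x):
        return torch.sigmoid(x) * (self.hi - self.lo) + self.lo

print(SigmoidRange(-1., 1.)(torch.tensor([-10., 0., 10.])))
# tensor([-0.9999,  0.0000,  0.9999])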