Example #1
def __init__(self, emb_szs, n_cont, out_sz, layers, ps=None, embed_p=0.,
             y_range=None, use_bn=True, bn_final=False, bn_cont=True, act_cls=nn.ReLU(inplace=True)):
    ps = ifnone(ps, [0] * len(layers))
    if not is_listy(ps): ps = [ps] * len(layers)
    # One embedding per categorical variable, plus shared embedding dropout.
    self.embeds = nn.ModuleList([Embedding(ni, nf) for ni, nf in emb_szs])
    self.emb_drop = nn.Dropout(embed_p)
    self.bn_cont = nn.BatchNorm1d(n_cont) if bn_cont else None
    n_emb = sum(e.embedding_dim for e in self.embeds)
    self.n_emb, self.n_cont = n_emb, n_cont
    # Fully connected stack: every layer but the last gets an activation;
    # batchnorm on the final layer only if bn_final is set.
    sizes = [n_emb + n_cont] + layers + [out_sz]
    actns = [act_cls for _ in range(len(sizes) - 2)] + [None]
    _layers = [LinBnDrop(sizes[i], sizes[i + 1], bn=use_bn and (i != len(actns) - 1 or bn_final), p=p, act=a, lin_first=True)
               for i, (p, a) in enumerate(zip(ps + [0.], actns))]
    # Squash the output into y_range with a rescaled sigmoid if requested.
    if y_range is not None: _layers.append(SigmoidRange(*y_range))
    self.layers = nn.Sequential(*_layers)
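
Every example on this page attaches SigmoidRange as the last layer when a y_range is given. Its definition is not shown in any of the snippets; in fastai it is essentially a sigmoid rescaled from (0, 1) to (low, high), as in this minimal stand-in:

import torch
from torch import nn

# Minimal stand-in for fastai's SigmoidRange: a sigmoid stretched from
# (0, 1) to (low, high), so the layer's output always lies inside y_range.
class SigmoidRange(nn.Module):
    def __init__(self, low, high):
        super().__init__()
        self.low, self.high = low, high

    def forward(self, x):
        return torch.sigmoid(x) * (self.high - self.low) + self.low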
Example #2
    def __init__(self,
                 encoder=None,
                 n_classes=2,
                 last_filters=32,
                 imsize=(256, 256),
                 y_range=None,
                 **kwargs):

        self.n_classes = n_classes

        layers = nn.ModuleList()

        # Encoder
        sfs_szs = model_sizes(encoder, size=imsize)
        sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs)))
        self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
        layers.append(encoder)

        x = dummy_eval(encoder, imsize).detach()

        self.hc_hooks = []  # hooks whose stored activations feed Hcolumns below
        hc_c = []           # channel count of each hooked activation

        # Bottleneck: expand then restore the channel count of the deepest
        # encoder activation before decoding starts.
        ni = sfs_szs[-1][1]
        middle_conv = nn.Sequential(conv_layer(ni, ni * 2),
                                    conv_layer(ni * 2, ni)).eval()
        x = middle_conv(x)
        layers.extend([batchnorm_2d(ni), nn.ReLU(), middle_conv])

        # self.hc_hooks = [Hook(layers[-1], _hook_inner, detach=False)]
        # hc_c = [x.shape[1]]

        # Decoder
        n_filters = [64, 128, 256, 512]
        n = len(n_filters)
        is_deconv = True

        for i, idx in enumerate(sfs_idxs[:-1]):
            # Input channels follow the filter schedule; output channels match
            # the hooked encoder activation this block is decoded towards.
            in_c = (n_filters[n - i - 1] + n_filters[n - i - 2]) // 2
            out_c = int(sfs_szs[idx][1])

            dec_bloc = DecoderBlock(in_c, out_c, self.sfs[i], is_deconv,
                                    True).eval()
            layers.append(dec_bloc)

            x = dec_bloc(x)

            self.hc_hooks.append(Hook(layers[-1], _hook_inner, detach=False))
            hc_c.append(x.shape[1])

        ni = x.shape[1]

        layers.append(PixelShuffle_ICNR(n_filters[0], scale=2))

        layers.append(Hcolumns(self.hc_hooks, hc_c))

        fin_block = FinalBlock(ni * (len(hc_c) + 1), last_filters, n_classes)
        layers.append(fin_block)

        if y_range is not None:
            layers.append(SigmoidRange(*y_range))
        super().__init__(*layers)
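
Examples #2, #6 and #7 all rely on the same shape-tracing idiom: run one dummy batch through the encoder in eval mode (dummy_eval / model_sizes in fastai) and size every later block from the activation shapes that come out, instead of hard-coding channel counts. A self-contained sketch of the idea, with a toy two-layer encoder standing in for the real backbone:

import torch
from torch import nn

# Toy encoder standing in for the real backbone (hypothetical sizes).
encoder = nn.Sequential(nn.Conv2d(3, 16, 3, stride=2, padding=1), nn.ReLU(),
                        nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.ReLU())

# Trace shapes with a fake batch instead of hard-coding channel counts.
with torch.no_grad():
    x = encoder.eval()(torch.zeros(1, 3, 256, 256))
print(x.shape)  # torch.Size([1, 32, 64, 64]) -> next block takes 32 channels in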
Example #3
def __init__(self, n_in, n_out, seq_len=10, range=(-1, 1), **kwargs):
    super().__init__()
    self.mod = nn.Sequential(TST(n_in, n_out, seq_len, **kwargs),
                             SigmoidRange(*range))
Example #4
def __init__(self, n_in, n_out, range=(-1, 1)):
    super().__init__()
    self.mod = nn.Sequential(InceptionTime(n_in, n_out),
                             SigmoidRange(*range))
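
Examples #3 and #4 wrap a time-series backbone (TST, InceptionTime) and bound its regression output with SigmoidRange. A quick check of the bounding behaviour, assuming fastai is installed (any backbone ending in a linear layer behaves the same way):

import torch
from torch import nn
from fastai.layers import SigmoidRange

# Logits of any magnitude end up inside the (-1, 1) interval that both
# examples pass as `range` (saturation can touch the endpoints in float32).
head = nn.Sequential(nn.Linear(8, 1), SigmoidRange(-1, 1))
out = head(torch.randn(4, 8) * 100)
assert -1 <= out.min() and out.max() <= 1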
Example #5
def create_head(self, nf, c_out, fc_dropout=0., y_range=None, **kwargs):
    layers = [nn.Dropout(fc_dropout)] if fc_dropout else []
    layers += [nn.Linear(nf, c_out)]
    if y_range: layers += [SigmoidRange(*y_range)]
    return nn.Sequential(*layers)
Example #6
    def __init__(self,
                 encoder,
                 n_classes,
                 img_size,
                 blur=False,
                 blur_final=True,
                 self_attention=False,
                 y_range=None,
                 bottle=False,
                 act_cls=defaults.activation,
                 init=nn.init.kaiming_normal_,
                 norm_type=None,
                 include_encoder=True,
                 include_middle_conv=True,
                 **kwargs):
        imsize = img_size
        sizes = model_sizes(encoder, size=imsize)
        sz_chg_idxs = list(reversed(_get_sz_change_idxs(sizes)))
        # self.sfs = hook_outputs([encoder[i] for i in sz_chg_idxs], detach=False)
        x = dummy_eval(encoder, imsize).detach()

        layers = []
        if include_encoder:
            layers.append(encoder)

        if include_middle_conv:
            ni = sizes[-1][1]
            middle_conv = (nn.Sequential(
                ConvLayer(ni,
                          ni * 2,
                          act_cls=act_cls,
                          norm_type=norm_type,
                          **kwargs),
                ConvLayer(ni * 2,
                          ni,
                          act_cls=act_cls,
                          norm_type=norm_type,
                          **kwargs))).eval()
            x = middle_conv(x)
            layers += [BatchNorm(ni), nn.ReLU(), middle_conv]

        for i, idx in enumerate(sz_chg_idxs):
            not_final = (i != len(sz_chg_idxs) - 1)
            up_in_c = int(x.shape[1])
            do_blur = blur and (not_final or blur_final)
            sa = self_attention and (i == len(sz_chg_idxs) - 3)
            noskip_unet_block = NoSkipUnetBlock(up_in_c,
                                                final_div=not_final,
                                                blur=do_blur,
                                                self_attention=sa,
                                                act_cls=act_cls,
                                                init=init,
                                                norm_type=norm_type,
                                                **kwargs).eval()
            layers.append(noskip_unet_block)
            x = noskip_unet_block(x)

        ni = x.shape[1]
        if imsize != sizes[0][-2:]:
            layers.append(
                PixelShuffle_ICNR(ni, act_cls=act_cls, norm_type=norm_type))

        layers += [
            ConvLayer(ni,
                      n_classes,
                      ks=1,
                      act_cls=None,
                      norm_type=norm_type,
                      **kwargs)
        ]

        if include_middle_conv:
            apply_init(nn.Sequential(layers[3], layers[-2]), init)
            apply_init(nn.Sequential(layers[2]), init)

        if y_range is not None:
            layers.append(SigmoidRange(*y_range))
        super().__init__(*layers)
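
Example #6 finishes by running apply_init over the middle conv and the head. fastai's apply_init walks the module tree and applies the given init function to the leaf layers; a rough sketch of the idea (simplified: the real version is more careful about norm layers and biases):

from torch import nn

# Rough, simplified sketch of apply_init (hypothetical helper, not fastai's):
# initialise every conv/linear weight with `init` and zero the biases.
def apply_init_sketch(m, init=nn.init.kaiming_normal_):
    for mod in m.modules():
        if isinstance(mod, (nn.Conv2d, nn.Linear)):
            init(mod.weight)
            if mod.bias is not None:
                nn.init.zeros_(mod.bias)

apply_init_sketch(nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Linear(8, 2)))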
Example #7
    def __init__(self, encoder=None, n_classes=2, last_filters=32, imsize=(256, 256), y_range=None, **kwargs):

        self.n_classes = n_classes

        layers = nn.ModuleList()

        # Encoder
        sfs_idxs = [4, 3, 2, 1, 0]
        self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
        layers.append(encoder)

        x = dummy_eval(encoder, imsize).detach()

        self.hc_hooks = []
        hc_c = []

        # ni = sfs_szs[-1][1]
        # middle_conv = nn.Sequential(conv_layer(ni, ni * 2),
        #                             conv_layer(ni * 2, ni)).eval()
        # x = middle_conv(x)
        # layers.extend([batchnorm_2d(ni), nn.ReLU(), middle_conv])

        # Decoder
        n_filters = [128, 256, 512, 1024, 2048]
        n = len(n_filters)
        is_deconv = True

        for i, idx in enumerate(sfs_idxs[:-1]):
            # The first block takes the raw bottleneck; later blocks also carry
            # the features concatenated in by the previous decoder stage.
            in_c = n_filters[n - i - 1] if i == 0 else 2 * n_filters[n - i - 1]
            out_c = n_filters[n - i - 2]
            scale = (i != 3)  # the last block in this loop keeps spatial size

            dec_bloc = DecoderBlock(in_c, out_c, self.sfs[i+1], is_deconv, scale).eval()
            layers.append(dec_bloc)

            x = dec_bloc(x)

            self.hc_hooks.append(Hook(layers[-1], _hook_inner, detach=False))
            hc_c.append(x.shape[1])

        # Last decoder block: no encoder hook left to attach, hence None.
        in_c, out_c = n_filters[0] + 64, n_filters[0]
        dec_bloc = DecoderBlock(in_c, out_c, None, is_deconv, True).eval()
        layers.append(dec_bloc)

        x = dec_bloc(x)

        self.hc_hooks.append(Hook(layers[-1], _hook_inner, detach=False))
        hc_c.append(x.shape[1])

        ni = x.shape[1]

        layers.append(PixelShuffle_ICNR(n_filters[0], scale=2))

        layers.append(Hcolumns(self.hc_hooks, hc_c))

        fin_block = FinalBlock(ni * (len(hc_c) + 1), last_filters, n_classes)
        layers.append(fin_block)

        if y_range is not None:
            layers.append(SigmoidRange(*y_range))
        super().__init__(*layers)
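
The Hcolumns layer in Examples #2 and #7 implements hypercolumns: the activations stored by self.hc_hooks are upsampled to the final feature map's resolution and concatenated along the channel axis, which is why FinalBlock takes ni * (len(hc_c) + 1) input channels. A minimal sketch of that concatenation (hypothetical helper, not the Hcolumns class itself, which may also bring each hooked map to a matching channel count first):

import torch
import torch.nn.functional as F

# Hypercolumns in a nutshell: upsample earlier feature maps to the final
# resolution and stack all of them along the channel dimension.
def hypercolumns(final, intermediates):
    size = final.shape[-2:]
    ups = [F.interpolate(t, size=size, mode='bilinear', align_corners=False)
           for t in intermediates]
    return torch.cat([final, *ups], dim=1)

feats = hypercolumns(torch.randn(1, 32, 64, 64),
                     [torch.randn(1, 32, 8, 8), torch.randn(1, 32, 16, 16)])
print(feats.shape)  # torch.Size([1, 96, 64, 64])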