Example #1
    # NOTE: the source snippet starts at __init__; the enclosing class declaration
    # is omitted, and split()/freeze() below come from its (unshown) fastai-style base.
    def __init__(self,
                 base_arch,
                 no_diseases,
                 dropout=0.5,
                 init=nn.init.kaiming_normal_):
        super(CNNPretrainedModel, self).__init__()

        self.body = create_body(base_arch, pretrained=True)
        nf = num_features_model(nn.Sequential(*self.body.children())) * 2  # *2: concat pooling doubles the features

        self.disease_head = create_head(nf,
                                        no_diseases,
                                        ps=dropout,  # honor the dropout argument (source hard-coded 0.5)
                                        concat_pool=True,
                                        bn_final=False)
        #self.age_head = create_head(nf, 1, ps=0.5, concat_pool=True, bn_final=False)
        self.gender_head = create_head(nf,
                                       2,
                                       ps=dropout,  # honor the dropout argument (source hard-coded 0.5)
                                       concat_pool=True,
                                       bn_final=False)
        #self.projection_head = create_head(nf, 3, ps=0.5, concat_pool=True, bn_final=False)

        self.disease_model = nn.Sequential(self.body, self.disease_head)

        self.meta = cnn_config(base_arch)
        self.split(self.meta['split'])
        self.freeze()

        apply_init(self.disease_head, init)
        #apply_init(self.age_head, init)
        apply_init(self.gender_head, init)
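The snippet shares one pretrained body between two task heads but omits the class's forward; a minimal sketch of how such a multi-head forward pass could look (hypothetical helper, not part of the source):

def multihead_forward(model, x):
    "Hypothetical: run the shared body once, then each task head."
    feats = model.body(x)  # shared convolutional features
    return model.disease_head(feats), model.gender_head(feats)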
Example #2
    # As in Example #1, the enclosing class declaration is omitted in the source snippet.
    def __init__(self,
                 base_arch,
                 no_classes,
                 dropout=0.5,
                 init=nn.init.kaiming_normal_):
        super(CNNPretrainedModel, self).__init__()

        self.model = create_cnn_model(base_arch, no_classes, ps=dropout)
        self.meta = cnn_config(base_arch)
        self.split(self.meta['split'])
        self.freeze()

        apply_init(self.model[1], init)  # re-initialize only the new head; the body keeps its pretrained weights
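Both examples build on the same fastai v1 blocks; a self-contained sketch of that pattern (resnet34 and the 14-class head are illustrative, and module paths may vary across fastai v1 releases):

import torch
import torch.nn as nn
from fastai.vision import models
from fastai.vision.learner import create_body, create_head, num_features_model

body = create_body(models.resnet34, pretrained=True)
nf = num_features_model(nn.Sequential(*body.children())) * 2  # *2: concat pooling
head = create_head(nf, 14, ps=0.5, concat_pool=True, bn_final=False)
model = nn.Sequential(body, head)
print(model(torch.randn(2, 3, 224, 224)).shape)  # torch.Size([2, 14])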
Example #3
def unet_learner_wide(
    data: DataBunch,
    arch: Callable,
    pretrained: bool = True,
    blur_final: bool = True,
    norm_type: Optional[NormType] = NormType,  # mirrors fastai v1's unet_learner default (the enum class itself)
    split_on: Optional[SplitFuncOrIdxList] = None,
    blur: bool = False,
    self_attention: bool = False,
    y_range: Optional[Tuple[float, float]] = None,
    last_cross: bool = True,
    bottle: bool = False,
    nf_factor: int = 1,
    **kwargs: Any
) -> Learner:
    "Build Unet learner from `data` and `arch`."
    meta = cnn_config(arch)
    body = create_body(arch, pretrained)
    model = to_device(
        DynamicUnetWide(
            body,
            n_classes=data.c,
            blur=blur,
            blur_final=blur_final,
            self_attention=self_attention,
            y_range=y_range,
            norm_type=norm_type,
            last_cross=last_cross,
            bottle=bottle,
            nf_factor=nf_factor,
        ),
        data.device,
    )
    learn = Learner(data, model, **kwargs)
    learn.split(ifnone(split_on, meta['split']))
    if pretrained:
        learn.freeze()
    apply_init(model[2], nn.init.kaiming_normal_)
    return learn
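A minimal usage sketch, assuming a fastai v1 DataBunch named `data` already exists; resnet34 and nf_factor=2 are illustrative choices, not from the source:

from fastai.vision import models

learn = unet_learner_wide(data, models.resnet34, nf_factor=2)
learn.fit_one_cycle(1, 1e-3)  # train the new decoder while the encoder stays frozen
learn.unfreeze()              # then fine-tune end to end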
Example #4
def unet_learner_without_skip_connections(n_classes, device, arch:Callable, pretrained:bool=True, blur_final:bool=True,
                 norm_type:Optional[NormType]=NormType, split_on:Optional[SplitFuncOrIdxList]=None, blur:bool=False,
                 y_range:Optional[Tuple[float,float]]=None, skip_connections=True,
                 cut:Union[int,Callable]=None, **learn_kwargs:Any):
    "Build Unet learner from `data` and `arch`."
    from fastai.vision import create_body
    from fastai.torch_core import to_device
    from fastai.torch_core import apply_init
    from torch import nn  # needed for nn.init.kaiming_normal_ below

    # meta = cnn_config(arch)

    body = create_body(arch, pretrained, cut)
    # noinspection PyTypeChecker
    model = to_device(DynamicUnetWithoutSkipConnections(body, n_classes=n_classes, y_range=y_range, norm_type=norm_type,
                                                        skip_connections=skip_connections),
                      device)

    # learn = Learner(data, model, **learn_kwargs)
    # learn.split(ifnone(split_on, meta['split']))
    # if pretrained: learn.freeze()

    apply_init(model[2], nn.init.kaiming_normal_)
    return model
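A hedged construction sketch for the helper above; torchvision's resnet18 stands in for `arch`, and n_classes=3 is illustrative:

import torch
from torchvision.models import resnet18

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = unet_learner_without_skip_connections(3, device, resnet18, pretrained=True)
out = model(torch.randn(1, 3, 256, 256).to(device))  # image-to-image output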
Example #5
# NOTE: the source omits this example's header and the enclosing class; the class
# name is inferred from Example #4, with a fastai SequentialEx-style base assumed.
class DynamicUnetWithoutSkipConnections(SequentialEx):
    def __init__(self,
                 encoder,
                 n_classes,
                 img_size,
                 blur=False,
                 blur_final=True,
                 self_attention=False,
                 y_range=None,
                 bottle=False,
                 act_cls=defaults.activation,
                 init=nn.init.kaiming_normal_,
                 norm_type=None,
                 include_encoder=True,
                 include_middle_conv=True,
                 **kwargs):
        imsize = img_size
        sizes = model_sizes(encoder, size=imsize)
        sz_chg_idxs = list(reversed(_get_sz_change_idxs(sizes)))
        # self.sfs = hook_outputs([encoder[i] for i in sz_chg_idxs], detach=False)
        x = dummy_eval(encoder, imsize).detach()

        layers = []
        if include_encoder:
            layers.append(encoder)

        if include_middle_conv:
            ni = sizes[-1][1]
            middle_conv = nn.Sequential(
                ConvLayer(ni,
                          ni * 2,
                          act_cls=act_cls,
                          norm_type=norm_type,
                          **kwargs),
                ConvLayer(ni * 2,
                          ni,
                          act_cls=act_cls,
                          norm_type=norm_type,
                          **kwargs)).eval()
            x = middle_conv(x)
            layers += [BatchNorm(ni), nn.ReLU(), middle_conv]

        # One upsampling block per encoder size change, deepest first; unlike
        # fastai's DynamicUnet, no skip connections are wired in.
        for i, idx in enumerate(sz_chg_idxs):
            not_final = (i != len(sz_chg_idxs) - 1)
            up_in_c = int(x.shape[1])
            do_blur = blur and (not_final or blur_final)
            sa = self_attention and (i == len(sz_chg_idxs) - 3)
            noskip_unet_block = NoSkipUnetBlock(up_in_c,
                                                final_div=not_final,
                                                blur=do_blur,
                                                self_attention=sa,
                                                act_cls=act_cls,
                                                init=init,
                                                norm_type=norm_type,
                                                **kwargs).eval()
            layers.append(noskip_unet_block)
            x = noskip_unet_block(x)

        ni = x.shape[1]
        if imsize != sizes[0][-2:]:
            layers.append(
                PixelShuffle_ICNR(ni, act_cls=act_cls, norm_type=norm_type))

        layers += [
            ConvLayer(ni,
                      n_classes,
                      ks=1,
                      act_cls=None,
                      norm_type=norm_type,
                      **kwargs)
        ]

        if include_middle_conv:
            # NOTE: these indices assume include_encoder=True (middle_conv sits at layers[3]).
            apply_init(nn.Sequential(layers[3], layers[-2]), init)
            apply_init(nn.Sequential(layers[2]), init)

        if y_range is not None:
            layers.append(SigmoidRange(*y_range))
        super().__init__(*layers)
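A hedged construction sketch for the class above (fastai v2-style imports; assumes NoSkipUnetBlock and the fastai layers used above are in scope; resnet18, two output classes, and the 224-pixel input are illustrative):

import torch
from fastai.vision.all import create_body, resnet18

encoder = create_body(resnet18, pretrained=False)
net = DynamicUnetWithoutSkipConnections(encoder, n_classes=2, img_size=(224, 224))
print(net(torch.randn(1, 3, 224, 224)).shape)  # upsampled toward the input size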