Example #1
def cs_learner(data: fv.DataBunch,
               arch: Callable,
               instructor,
               td_c=1,
               bu_c=0,
               embedding=fv.embedding,
               lateral=laterals.ConvAddLateral,
               td_out_lateral=None,
               ppm=False,
               pretrained: bool = True,
               **learn_kwargs: Any) -> fv.Learner:
    """Build Counter Stream learner from `data` and `arch`."""
    body = fv.create_body(arch, pretrained)
    size = next(iter(data.train_dl))[0].shape[-2:]
    model = fv.to_device(
        CounterStream(body,
                      instructor,
                      td_c=td_c,
                      bu_c=bu_c,
                      img_size=size,
                      embedding=embedding,
                      lateral=lateral,
                      td_out_lateral=td_out_lateral), data.device)
    learn = fv.Learner(data, model, **learn_kwargs)
    learn.split([learn.model.td[0]])
    if pretrained:
        learn.freeze()
    return learn
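A hedged usage sketch for `cs_learner`. `Instructor` and the DataBunch construction are project-specific assumptions shown only as placeholders; the fastai v1 calls (`ImageDataBunch`, `fit_one_cycle`, `unfreeze`) are standard. Because the learner is split at `model.td[0]` and then frozen, the first cycle trains only the layer group after the split while the pretrained body stays frozen.

import fastai.vision as fv
from torchvision.models import resnet34

data = fv.ImageDataBunch.from_folder('data/', ds_tfms=fv.get_transforms(), size=224)  # placeholder data
instructor = ...  # hypothetical: whatever Instructor object the project expects here

learn = cs_learner(data, resnet34, instructor, td_c=1, bu_c=0, pretrained=True)
learn.fit_one_cycle(5)                            # backbone frozen
learn.unfreeze()                                  # then fine-tune everything
learn.fit_one_cycle(5, max_lr=slice(1e-5, 1e-3))  # discriminative learning rates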
Example #2
def double_unet_learner(data: fv.DataBunch,
                        arch: Callable,
                        iterations=2,
                        td_c=16,
                        **learn_kwargs: Any) -> fv.Learner:
    """Build Counter Stream learner from `data` and `arch`."""
    body = fv.create_body(arch, pretrained=False)
    size = next(iter(data.train_dl))[0].shape[-2:]
    model = DoubleUnet(body, iterations=iterations, td_c=td_c, img_size=size)
    model = fv.to_device(model, data.device)
    learn = fv.Learner(data, model, **learn_kwargs)
    fv.apply_init(learn.model, nn.init.kaiming_normal_)
    return learn
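Because the body above is created with `pretrained=False`, `fv.apply_init` re-initializes the model with Kaiming-normal weights. A rough hand-written equivalent, for illustration only (the real fastai helper covers a few more cases):

import torch.nn as nn

def kaiming_init_sketch(model: nn.Module) -> None:
    """Roughly what fv.apply_init(model, nn.init.kaiming_normal_) does."""
    for m in model.modules():
        if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
            continue  # leave norm layers at their defaults
        if getattr(m, 'weight', None) is not None and m.weight.dim() > 1:
            nn.init.kaiming_normal_(m.weight)  # fan-in scaled normal init
        if getattr(m, 'bias', None) is not None:
            nn.init.zeros_(m.bias)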
Example #3
def _pspnet_learner(data, backbone, chip_size=224, pyramid_sizes=(1, 2, 3, 6), pretrained=True, **kwargs):
    "Build a `PSPNet` learner from `data` and `backbone`."
    model = to_device(PSPNet(data.c, backbone, chip_size, pyramid_sizes, pretrained), data.device)
    if not _isnotebook() and arcgis_os.name=='posix':
        distributed_prep = DummyDistributed()
        _set_ddp_multigpu(distributed_prep)
        if distributed_prep._multigpu_training:
            learn = Learner(data, model, **kwargs).to_distributed(distributed_prep._rank_distributed)
        else:
            learn = Learner(data, model, **kwargs)
    else:
        learn = Learner(data, model, **kwargs)
    return learn
Example #4
    def __call__(self, x):
        X_left = torch.stft(x[:, 0, :],
                            n_fft=self.n_fft,
                            hop_length=self.n_hop,
                            win_length=self.n_fft,
                            window=to_device(self.window, x.device),
                            onesided=True,
                            center=True,
                            pad_mode='constant',
                            normalized=True)
        # compute power from real and imag parts (magnitude^2)
        X_left.pow_(2.0)
        X_left = X_left[:, :, :, 0] + X_left[:, :, :, 1]
        X_left = X_left.unsqueeze(1)  # add channel dimension

        if x.size(1) > 1:
            X_right = torch.stft(x[:, 1, :],
                                 n_fft=self.n_fft,
                                 hop_length=self.n_hop,
                                 win_length=self.n_fft,
                                 window=to_device(self.window, x.device),
                                 onesided=True,
                                 center=True,
                                 pad_mode='constant',
                                 normalized=True)
            # compute power from real and imag parts (magnitude^2)
            X_right.pow_(2.0)
            X_right = X_right[:, :, :, 0] + X_right[:, :, :, 1]
            X_right = X_right.unsqueeze(1)  # add channel dimension
            res = torch.cat([X_left, X_right], dim=1)
            assert res.dim() == 4  # check dims (n samples * channels * h * w)
            return res
        else:
            assert X_left.dim() == 4  # check dims (n samples * channels * h * w)
            return X_left  # return only the mono channel
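The transform above relies on the pre-1.7 `torch.stft` output layout, where real and imaginary parts are stacked along a trailing dimension of size 2. A minimal sketch of the same power-spectrogram computation on newer PyTorch with `return_complex=True` (the parameter values simply mirror the attributes used above):

import torch

def power_spectrogram(wave, n_fft, n_hop, window):
    """wave: (batch, samples) -> power spectrogram (batch, 1, n_fft // 2 + 1, frames)."""
    spec = torch.stft(wave,
                      n_fft=n_fft,
                      hop_length=n_hop,
                      win_length=n_fft,
                      window=window.to(wave.device),
                      onesided=True,
                      center=True,
                      pad_mode='constant',
                      normalized=True,
                      return_complex=True)  # complex output on PyTorch >= 1.7
    power = spec.abs().pow(2.0)             # |real + i*imag|^2 == real^2 + imag^2
    return power.unsqueeze(1)               # add channel dimension

Calling it on `x[:, 0, :]` reproduces `X_left` above; concatenating the per-channel results along `dim=1` gives the stereo case.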
Example #5
def unet_learner(
    data: DataBunch,
    arch: Callable,
    pretrained: bool = True,
    blur_final: bool = True,
    norm_type: Optional[NormType] = NormType,
    split_on: Optional[SplitFuncOrIdxList] = None,
    blur: bool = False,
    self_attention: bool = False,
    y_range: Optional[Tuple[float, float]] = None,
    last_cross: bool = True,
    bottle: bool = False,
    cut: Union[int, Callable] = None,
    hypercolumns=True,
    **learn_kwargs: Any,
) -> Learner:
    "Build Unet learner from `data` and `arch`."
    meta = cnn_config(arch)
    body = create_body(arch, pretrained, cut)
    M = DynamicUnet_Hcolumns if hypercolumns else DynamicUnet
    model = to_device(
        M(
            body,
            n_classes=data.c,
            blur=blur,
            blur_final=blur_final,
            self_attention=self_attention,
            y_range=y_range,
            norm_type=norm_type,
            last_cross=last_cross,
            bottle=bottle,
        ),
        data.device,
    )
    learn = Learner(data, model, **learn_kwargs)
    learn.split(ifnone(split_on, meta["split"]))
    if pretrained:
        learn.freeze()
    apply_init(model[2], nn.init.kaiming_normal_)
    return learn
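A hedged usage sketch for this `unet_learner`. The dataset layout, mask-path function, and class codes are placeholders; the fastai v1 data-block calls (`SegmentationItemList`, `split_by_rand_pct`, `label_from_func`) are standard.

from fastai.vision import SegmentationItemList, get_transforms, imagenet_stats
from torchvision.models import resnet34

codes = ['background', 'foreground']                 # placeholder classes
get_y_fn = lambda p: f'data/masks/{p.stem}.png'      # placeholder image -> mask mapping

data = (SegmentationItemList.from_folder('data/images')
        .split_by_rand_pct(0.2)
        .label_from_func(get_y_fn, classes=codes)
        .transform(get_transforms(), size=256, tfm_y=True)
        .databunch(bs=8)
        .normalize(imagenet_stats))

learn = unet_learner(data, resnet34, pretrained=True, hypercolumns=True)
learn.fit_one_cycle(10, max_lr=1e-3)                 # decoder first, encoder frozen
learn.unfreeze()
learn.fit_one_cycle(10, max_lr=slice(1e-5, 1e-4))    # then fine-tune the whole model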
Example #6
def part_learner(data,
                 arch,
                 obj_tree: ObjectTree,
                 pretrained=False,
                 sample_one=False,
                 emb_op=torch.mul,
                 **learn_kwargs):
    body = fv.create_body(arch, pretrained)
    model = CsNet(body, obj_tree, sample_one=sample_one, emb_op=emb_op)
    model = fv.to_device(model, device=data.device)

    loss = Loss(obj_tree)
    learn = fv.Learner(data, model, loss_func=loss, **learn_kwargs)
    metrics = BrodenMetrics(learn,
                            obj_tree=obj_tree,
                            preds_func=obj_tree.cs_preds_func,
                            restrict=False)
    learn.callbacks.extend([metrics, utils.AddTargetClbk()])

    learn.split([learn.model.td[0]])
    if pretrained:
        learn.freeze()
    return learn
Example #7
    def __call__(self, spec_f):
        spec_m = to_device(torch.from_numpy(self.mel_fb), spec_f.device) @ spec_f
        assert spec_m.dim() == 4  # check dims (n samples * channels * h * w)
        return spec_m
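Here `self.mel_fb` is presumably a NumPy mel filterbank of shape (n_mels, n_fft // 2 + 1), so the matrix product maps the spectrogram's frequency axis onto mel bins. A hedged sketch of building and applying such a filterbank (all values are placeholders; `librosa.filters.mel` and the broadcasting matmul are standard):

import librosa
import numpy as np
import torch

sr, n_fft, n_mels = 22050, 1024, 64                              # placeholder values
mel_fb = librosa.filters.mel(sr=sr, n_fft=n_fft, n_mels=n_mels)  # (n_mels, n_fft // 2 + 1)

spec_f = torch.rand(4, 1, n_fft // 2 + 1, 128)                   # fake power spectrogram
spec_m = torch.from_numpy(mel_fb.astype(np.float32)) @ spec_f    # -> (4, 1, n_mels, 128)
assert spec_m.dim() == 4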
Example #8
def _pspnet_learner(data, backbone, chip_size=224, pyramid_sizes=(1, 2, 3, 6), pretrained=True, **kwargs):
    "Build a `PSPNet` learner from `data` and `backbone`."
    model = to_device(PSPNet(data.c, backbone, chip_size, pyramid_sizes, pretrained), data.device)
    learn = Learner(data, model, **kwargs)
    return learn