Example #1
    def __init__(self,
                 predictive_model: Type[torch.nn.Module],
                 ensemble: Dict[int, Dict[str, torch.Tensor]],
                 calculate_coordinates: bool = False, **kwargs: Dict) -> None:

        self.use_gpu = torch.cuda.is_available()

        self.ensemble = ensemble
        self.predictive_model = copy.deepcopy(predictive_model)

        # Infer the number of output classes when it is not passed explicitly:
        # hook every top-level module, run a dummy forward pass, and read the
        # channel dimension of the last layer's output
        self.num_classes = kwargs.get("num_classes")
        if self.num_classes is None:
            hookF = [Hook(layer[1]) for layer in list(predictive_model._modules.items())]
            mock_forward(predictive_model)
            self.num_classes = [hook.output.shape for hook in hookF][-1][1]
        # Infer the downsampling factor as the ratio of the largest to the
        # smallest spatial output size observed across the layers
        self.downsample_factor = kwargs.get("downsample_factor")
        if self.downsample_factor is None:
            hookF = [Hook(layer[1]) for layer in list(predictive_model._modules.items())]
            mock_forward(predictive_model)
            imsize = [hook.output.shape[-1] for hook in hookF]
            self.downsample_factor = max(imsize) / min(imsize)

        self.calculate_coordinates = calculate_coordinates
        if self.calculate_coordinates:
            self.eps = kwargs.get("eps", 0.5)
            self.thresh = kwargs.get("threshold", 0.5)
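The constructor above learns `num_classes` and `downsample_factor` by attaching atomai's `Hook` to every top-level module, pushing a dummy tensor through the network with `mock_forward`, and inspecting the recorded output shapes. The sketch below reproduces that probing pattern with plain PyTorch forward hooks; `ShapeHook` and the toy model are illustrative stand-ins, not atomai code.

import torch
import torch.nn as nn


class ShapeHook:
    """Minimal stand-in for atomai.utils.Hook: keeps the layer's latest output."""
    def __init__(self, module: nn.Module):
        self.handle = module.register_forward_hook(self._hook_fn)

    def _hook_fn(self, module, inputs, output):
        self.output = output

    def close(self):
        self.handle.remove()


# Hypothetical fully convolutional model: 3 output channels (~3 classes)
# and a single 2x downsampling step inside
model = nn.Sequential(
    nn.Conv2d(1, 8, 3, padding=1),
    nn.MaxPool2d(2),                 # 32 -> 16
    nn.Conv2d(8, 8, 3, padding=1),
    nn.Upsample(scale_factor=2),     # 16 -> 32
    nn.Conv2d(8, 3, 1),
)

hooks = [ShapeHook(layer) for _, layer in model._modules.items()]
with torch.no_grad():                # dummy forward pass, as mock_forward does
    model(torch.randn(1, 1, 32, 32))

num_classes = [h.output.shape for h in hooks][-1][1]        # -> 3
imsize = [h.output.shape[-1] for h in hooks]                # [32, 16, 16, 32, 32]
downsample_factor = max(imsize) / min(imsize)               # -> 2.0
for h in hooks:
    h.close()

The channel dimension of the final output gives the number of classes, and the largest-to-smallest ratio of the recorded spatial sizes gives the effective downsampling, exactly as in the constructor above.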
Example #2
def test_init_spec2im_dupsample(upsample, dim_):
    # `upsample` and `dim_` come from a @pytest.mark.parametrize decorator, and
    # gen_spectra/gen_image_data are data helpers, all defined in the original
    # test module; the extra imports below are assumed locations
    from numpy.testing import assert_equal
    from atomai.trainers import ImSpecTrainer
    from atomai.utils import Hook, mock_forward
    X_train, X_test = gen_spectra()
    y_train, y_test = gen_image_data()
    in_dim, out_dim = (16,), (8, 8)
    t = ImSpecTrainer(in_dim, out_dim, decoder_upsampling=upsample)
    t.compile_trainer((X_train, y_train, X_test, y_test),
                      batch_size=4, loss="mse", training_cycles=1)
    # Hook every decoder block, run a dummy forward pass, and compare the
    # spatial size of the intermediate outputs with the expected upsampling
    hookF = [Hook(layer[1]) for layer in list(t.net.decoder._modules.items())]
    mock_forward(t.net, dims=(1, 16))
    assert_equal([h.output.shape[-1] for h in hookF][1:4], dim_)
Example #3
def test_init_im2spec_edownsample(downsample_factor, output_dim):
    # `downsample_factor` and `output_dim` come from a @pytest.mark.parametrize
    # decorator, and gen_image_data/gen_spectra are data helpers defined in the
    # original test module; the extra imports below are assumed locations
    from numpy.testing import assert_equal
    from atomai.trainers import ImSpecTrainer
    from atomai.utils import Hook, mock_forward
    X_train, X_test = gen_image_data()
    y_train, y_test = gen_spectra()
    in_dim, out_dim = (8, 8), (16,)
    t = ImSpecTrainer(in_dim, out_dim, encoder_downsampling=downsample_factor)
    t.compile_trainer((X_train, y_train, X_test, y_test),
                      batch_size=4, loss="mse", training_cycles=1)
    # Hook every encoder block, run a dummy forward pass, and compare the
    # spatial size of the first block's output with the expected downsampling
    hookF = [Hook(layer[1]) for layer in list(t.net.encoder._modules.items())]
    mock_forward(t.net, dims=(1, 8, 8))
    assert_equal(tuple([h.output.shape[-2:] for h in hookF][0]), output_dim)
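Both tests above receive their arguments from `@pytest.mark.parametrize` decorators in the original test module and compare hooked layer shapes against expected values. Below is a self-contained sketch of the same testing pattern on a hypothetical toy decoder, using plain PyTorch forward hooks instead of `Hook`/`mock_forward`; the model, parameters, and expected sizes are illustrative only.

import pytest
import torch
import torch.nn as nn
from numpy.testing import assert_equal


def probe_shapes(net, dummy_input):
    """Record the output shape of every top-level module during one forward pass."""
    shapes = []
    handles = [m.register_forward_hook(lambda mod, inp, out: shapes.append(out.shape))
               for m in net.children()]
    with torch.no_grad():
        net(dummy_input)
    for h in handles:
        h.remove()
    return shapes


@pytest.mark.parametrize("upsample, expected_size", [(2, 16), (4, 32)])
def test_toy_decoder_upsampling(upsample, expected_size):
    # Hypothetical "decoder": an upsampling layer followed by a 1x1 convolution
    net = nn.Sequential(nn.Upsample(scale_factor=upsample), nn.Conv1d(1, 1, 1))
    shapes = probe_shapes(net, torch.randn(1, 1, 8))
    # both recorded outputs should carry the upsampled spectral length
    assert_equal([s[-1] for s in shapes], [expected_size, expected_size])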
Example #4
    def __init__(self,
                 trained_model: Type[torch.nn.Module],
                 refine: bool = False,
                 resize: Union[Tuple, List] = None,
                 use_gpu: bool = False,
                 logits: bool = True,
                 seed: int = 1,
                 **kwargs: Union[int, float, bool]) -> None:
        """
        Initializes predictive object
        """
        if seed:
            set_train_rng(seed)
        model = trained_model
        # Infer the number of output classes from the last layer's output
        # channels if `nb_classes` was not passed explicitly
        self.nb_classes = kwargs.get('nb_classes', None)
        if self.nb_classes is None:
            hookF = [Hook(layer[1]) for layer in list(model._modules.items())]
            mock_forward(model)
            self.nb_classes = [hook.output.shape for hook in hookF][-1][1]
        # Infer the downsampling factor (largest / smallest spatial output
        # size across the layers) if `downsampling` was not passed explicitly
        self.downsampling = kwargs.get('downsampling', None)
        if self.downsampling is None:
            hookF = [Hook(layer[1]) for layer in list(model._modules.items())]
            mock_forward(model)
            imsize = [hook.output.shape[-1] for hook in hookF]
            self.downsampling = max(imsize) / min(imsize)
        self.model = model
        if use_gpu and torch.cuda.is_available():
            self.model.cuda()
        else:
            self.model.cpu()

        self.resize = resize
        self.logits = logits
        self.refine = refine
        self.d = kwargs.get("d", None)
        self.thresh = kwargs.get("thresh", .5)
        self.use_gpu = use_gpu
        self.verbose = kwargs.get("verbose", True)
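Example #4 additionally seeds the random number generators via `set_train_rng(seed)` before anything else happens. A minimal sketch of what such a seeding helper typically does (a hypothetical stand-in, not atomai's implementation):

import random

import numpy as np
import torch


def set_rng_seed(seed: int = 1) -> None:
    """Hypothetical stand-in for a set_train_rng-style helper: seed all common RNGs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)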