Example 1
 def __init__(self,
              ds_factor=(1, 2, 2),
              crop_factor=None,
              crop_slice=None,
              order=None,
              **super_kwargs):
     """
     :param ds_factor: Downscaling factor per axis; a factor of 2 halves the resolution along that axis
     :param crop_factor: Cropping factor per axis; a factor of 2 keeps the central crop of half the size
     :param crop_slice: Alternative crop given as a slice string (parsed with parse_data_slice)
     :param order: Interpolation order used for downscaling
     """
     super(DownsampleAndCrop3D, self).__init__(**super_kwargs)
     self.order = order
     self.ds_factor = ds_factor
     if crop_factor is not None:
         assert isinstance(crop_factor, (tuple, list))
         # assert crop_slice is None
     if crop_slice is not None:
         # assert crop_slice is not None
         assert isinstance(crop_slice, str)
         crop_slice = parse_data_slice(crop_slice)
     self.crop_factor = crop_factor
     self.crop_slice = crop_slice
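
A minimal sketch of what the downscaling and central-crop steps presumably amount to (an assumption, since the actual apply method of DownsampleAndCrop3D is not shown in this excerpt):

import numpy as np
from scipy.ndimage import zoom

volume = np.random.rand(16, 256, 256)
ds_factor = (1, 2, 2)
crop_factor = (1, 2, 2)

# Downscale by ds_factor (order controls the interpolation order):
downscaled = zoom(volume, tuple(1. / f for f in ds_factor), order=3)  # shape (16, 128, 128)

# Central crop by crop_factor:
crop = tuple(slice((s - s // f) // 2, (s - s // f) // 2 + s // f)
             for s, f in zip(downscaled.shape, crop_factor))
cropped = downscaled[crop]  # shape (16, 64, 64)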
Example 2
    def get_kwargs_for_each_run(self):
        # TODO: add option to load GT and affs directly in run_agglomeration? (More memory efficient)
        nb_iterations = self.get("postproc_config/nb_iterations", 1)

        kwargs_collected = []

        postproc_config = self.get("postproc_config", ensure_exists=True)
        iterated_options = self.get("postproc_config/iterated_options", {})

        # Make sure at least one sample is given:
        if "sample" not in iterated_options:
            assert "sample" in postproc_config, "At least one sample-dataset should be given"
            iterated_options["sample"] = postproc_config["sample"]

        # Initialize default iterated options:
        iterated_options.setdefault("noise_factor",
                                    [postproc_config.get("noise_factor", 0.)])
        iterated_options.setdefault("edge_prob",
                                    [postproc_config.get("edge_prob", 0.)])
        iterated_options.setdefault("preset",
                                    [postproc_config.get("preset", None)])
        iterated_options.setdefault(
            "local_attraction",
            [postproc_config.get("local_attraction", False)])
        iterated_options.setdefault(
            "crop_slice", [postproc_config.get("crop_slice", ":,:,:,:")])
        iterated_options.setdefault(
            "sub_crop_slice", postproc_config.get("sub_crop_slice",
                                                  [":,:,:,:"]))

        # Make sure all iterated options are lists:
        for iter_key in iterated_options:
            if isinstance(iterated_options[iter_key], dict):
                for dict_key in iterated_options[iter_key]:
                    iterated_options[iter_key][dict_key] = iterated_options[iter_key][dict_key] \
                        if isinstance(iterated_options[iter_key][dict_key], list) \
                        else [iterated_options[iter_key][dict_key]]
            else:
                iterated_options[iter_key] = iterated_options[iter_key] if isinstance(iterated_options[iter_key], list) \
                    else [iterated_options[iter_key]]
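        # For example, a scalar entry like {"edge_prob": 0.1} becomes {"edge_prob": [0.1]},
        # and a per-sample dict like {"crop_slice": {"A": ":,:"}} becomes
        # {"crop_slice": {"A": [":,:"]}}, so that the loops below can always iterate.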

        for _ in range(nb_iterations):
            collected_data = {"affs": {}, "GT": {}, "affs_mask": {}}

            for sample in iterated_options['sample']:
                print("Loading...")
                # Create new dict entry if needed:
                for dt_type in collected_data:
                    collected_data[dt_type][sample] = {} if sample not in collected_data[dt_type] else \
                        collected_data[dt_type][sample]

                # Check if we have a dictionary with single values for each sample:
                all_crops = iterated_options['crop_slice'][sample] if isinstance(iterated_options['crop_slice'], dict) \
                    else iterated_options['crop_slice']
                all_subcrops = iterated_options['sub_crop_slice'][sample] if isinstance(iterated_options['sub_crop_slice'], dict) \
                    else iterated_options['sub_crop_slice']

                # ----------------------------------------------------------------------
                # Load data (and possibly add noise or select long range edges):
                # ----------------------------------------------------------------------
                for crop in all_crops:
                    # Create new dict entry if needed:
                    for dt_type in collected_data:
                        collected_data[dt_type][sample][crop] = {} if crop not in collected_data[dt_type][sample] else \
                            collected_data[dt_type][sample][crop]

                    for sub_crop in all_subcrops:
                        # Create new dict entry if needed:
                        for dt_type in collected_data:
                            collected_data[dt_type][sample][crop][sub_crop] = {} \
                                if sub_crop not in collected_data[dt_type][sample][crop] else \
                                collected_data[dt_type][sample][crop][sub_crop]

                        noise_seed = np.random.randint(-100000, 100000)

                        GT_vol_config = deepcopy(self.get('volume_config/GT'))
                        affs_vol_config = deepcopy(
                            self.get('volume_config/affinities'))

                        # FIXME: if I pass multiple crops, they get ignored and I get an error below when I create the runs...
                        if "crop_slice" in GT_vol_config:
                            gt_crop_slc = GT_vol_config.pop("crop_slice")
                        else:
                            gt_crop_slc = segm_utils.parse_data_slice(crop)[1:]

                        GT = segm_utils.readHDF5_from_volume_config(
                            sample,
                            crop_slice=gt_crop_slc,
                            run_connected_components=False,
                            **GT_vol_config
                        )
                        # print(GT.shape)
                        # print("NUmber of clusters before: ", np.unique(GT).shape)
                        # GT = vigra.analysis.labelVolumeWithBackground(GT.astype('uint32'))
                        # print("NUmber of clusters after: ", np.unique(GT).shape)
                        # raise ValueError

                        if self.get("volume_config/ignore_glia", False):
                            print("Ignoring glia during evaluation")
                            inner_path_glia = self.get(
                                "volume_config/glia_specs/inner_path",
                                ensure_exists=True)
                            glia_label = self.get(
                                "volume_config/glia_specs/glia_label",
                                ensure_exists=True)
                            ignore_label = self.get(
                                "volume_config/glia_specs/ignore_label",
                                ensure_exists=True)
                            GT_vol_config['inner_path'] = inner_path_glia
                            various_masks = segm_utils.readHDF5_from_volume_config(
                                sample,
                                crop_slice=gt_crop_slc,
                                run_connected_components=False,
                                **GT_vol_config
                            )
                            GT[various_masks == glia_label] = ignore_label

                        # Optionally, affinity paths are deduced dynamically:
                        if self.get("affinities_dir_path") is not None:
                            affs_vol_config['path'] = \
                                os.path.join(self.get("affinities_dir_path"), "predictions_sample_{}.h5".format(sample))

                        if "crop_slice" in affs_vol_config:
                            affs_crop_slc = affs_vol_config.pop("crop_slice")
                        else:
                            affs_crop_slc = segm_utils.parse_data_slice(crop)

                        affinities = segm_utils.readHDF5_from_volume_config(
                            sample,
                            crop_slice=affs_crop_slc,
                            run_connected_components=False,
                            **affs_vol_config
                        )

                        assert GT.shape == affinities.shape[
                            1:], "Loaded GT and affinities do not match: {} - {}".format(
                                GT.shape, affinities.shape[1:])
                        sub_crop_slc = segm_utils.parse_data_slice(sub_crop)
                        affinities = affinities[sub_crop_slc]
                        GT = GT[sub_crop_slc[1:]]
                        GT = vigra.analysis.labelVolumeWithBackground(
                            GT.astype('uint32'))

                        collected_data["GT"][sample][crop][sub_crop] = GT
                        collected_data["affs"][sample][crop][sub_crop] = {}
                        collected_data["affs_mask"][sample][crop][
                            sub_crop] = {}
                        for long_range_prob in iterated_options['edge_prob']:
                            for noise in iterated_options['noise_factor']:
                                if noise != 0.:
                                    raise DeprecationWarning(
                                        "Opensimplex noise no longer in nifty")
                                    noise_mod = postproc_config.get(
                                        "noise_mod", 'split-biased')
                                    collected_data["affs"][sample][crop][sub_crop][noise] = \
                                        postproc_utils.add_opensimplex_noise_to_affs(
                                        affinities, noise,
                                        mod=noise_mod,
                                        target_affs='all',
                                        seed=noise_seed
                                        )
                                else:
                                    collected_data["affs"][sample][crop][
                                        sub_crop][noise] = affinities

                                # Pre-sample a random mask deciding which long-range edges will be added to the graph:
                                if 0.0 < long_range_prob < 1.0:
                                    collected_data["affs_mask"][sample][crop][
                                        sub_crop][
                                            long_range_prob] = np.random.random(
                                                affinities.shape
                                            ) < long_range_prob

                # ----------------------------------------------------------------------
                # Create iterators:
                # ----------------------------------------------------------------------
                print("Creating pool instances...")
                for crop in all_crops:
                    for sub_crop in all_subcrops:
                        assert collected_data["affs"][sample][crop][
                            sub_crop] is not None
                        for local_attr in iterated_options['local_attraction']:
                            for preset in iterated_options['preset']:
                                if local_attr and preset in [
                                        'greedyFixation', 'GAEC'
                                ]:
                                    continue
                                for edge_prob in iterated_options['edge_prob']:
                                    for noise in iterated_options[
                                            'noise_factor']:
                                        # Build a new run dictionary with the iterated options:
                                        kwargs_collected.append({
                                            'affinities':
                                            collected_data['affs'][sample]
                                            [crop][sub_crop][noise],
                                            'GT':
                                            collected_data['GT'][sample][crop]
                                            [sub_crop],
                                            'sample':
                                            sample,
                                            'noise_factor':
                                            noise,
                                            'edge_prob':
                                            edge_prob,
                                            'preset':
                                            preset,
                                            'local_attraction':
                                            local_attr,
                                            'crop_slice':
                                            crop,
                                            'sub_crop_slice':
                                            sub_crop,
                                            'mask_used_edges':
                                            collected_data['affs_mask'][sample]
                                            [crop][sub_crop].get(
                                                edge_prob, None)
                                        })

        return kwargs_collected
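        # A hedged usage sketch (assumption, not shown in this excerpt): each collected
        # kwargs dict presumably parametrizes one post-processing run, e.g.
        #   for run_kwargs in self.get_kwargs_for_each_run():
        #       run_agglomeration(**run_kwargs)  # hypothetical runner function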
    def mergeEdges(self, aliveEdge, deadEdge):
        self.merge_rule.merge_edges(aliveEdge, deadEdge)
        if self.debug:
            print("Merge edges", aliveEdge, deadEdge)

    def mergeNodes(self, aliveNode, deadNode):
        if self.debug:
            print("Merge nodes", aliveNode, deadNode)

    def contractEdgeDone(self, contractedEdge):
        if self.debug:
            print("############")


for sample in ["A+", "B+", "C+"]:
    # for sample in ["C"]:
    crop_slice = parse_data_slice(
        config["volume_config"]["GT"]["crop_slice"][sample])

    # Load glia and GT:
    glia_prediction_path = os.path.join(
        project_dir, glia_mask_exp, "predictions_sample_{}.h5".format(sample))
    print("Loading glia for sample ", sample)
    glia_mask = readHDF5(glia_prediction_path, "glia_mask")[0]
    glia_mask = glia_mask[crop_slice]

    invalid_glia_mask = np.logical_or(glia_mask < 0., glia_mask > 1.)
    glia_mask[invalid_glia_mask] = 0
    print("Average glia mask (should be close to zero): ", glia_mask.mean())

    #  This is already cropped
    gt_segm = readHDF5_from_volume_config(sample,
                                          **config["volume_config"]["GT"])
Example 4
import os
import h5py
import segmfriends.vis as vis
from matplotlib import pyplot as plt
from segmfriends.utils.various import parse_data_slice
# Path helper module assumed, consistent with the other snippets in this collection:
from vaeAffs.utils.path_utils import get_abailoni_hci_home_path, get_trendytukan_drive_path

sample = "C"
# slices = {'A': ":,:,:,:", 'B': ":, :, 90:, 580: 1900", 'C': ":, :, 70:1450, 95:1425"}
slices = {
    'A': ":,:,:,:",
    'B': ":,:,:,:",
    'C': ":,70:-6,50:-50,50:-50",
    "0": ":,:,:,:",
    "1": ":,:,:,:",
    "2": ":,:,:,:",
    "3": ":,:,:,:",
}

parsed_slice = parse_data_slice(slices[sample])
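# parse_data_slice (from segmfriends) is assumed to turn the comma-separated string into
# a tuple of slice objects, roughly:
#   parse_data_slice(":,70:-6,50:-50,50:-50")
#   -> (slice(None), slice(70, -6), slice(50, -50), slice(50, -50))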
data_path = os.path.join(get_abailoni_hci_home_path(),
                         "datasets/new_cremi/sample{}.h5".format(sample))
# data_path = os.path.join(get_trendytukan_drive_path(), "datasets/new_cremi/fib25/sample{}.h5".format(sample))
with h5py.File(data_path, 'r') as f:
    print([atr for atr in f['volumes/labels']])
    #     glia = f['volumes/labels/glia'][:]
    raw = f['volumes/raw'][parsed_slice[1:]]
    GT = f['volumes/labels/neuron_ids_fixed'][parsed_slice[1:]]

# Load affs:
from segmfriends.utils.various import writeHDF5, readHDF5
affs_path = os.path.join(
    get_trendytukan_drive_path(),
    "projects/pixel_embeddings/{}/predictions_sample_{}.h5".format(
        "v4_addSparseAffs_eff", sample))
Example 5
    def forward(self, predictions, all_targets):
        predictions = [predictions] if not isinstance(predictions, (list, tuple)) else predictions
        all_targets = [all_targets] if not isinstance(all_targets, (list, tuple)) else all_targets

        loss = 0

        # # ----------------------------
        # # Predict glia mask:
        # # ----------------------------
        if self.train_glia_mask:
            assert not self.target_has_various_masks, "To be implemented"
            frg_kwargs = self.model.models[-1].foreground_prediction_kwargs
            if frg_kwargs is None:
                # Legacy:
                nb_glia_preds = 1
                nb_glia_targets = [0]
            else:
                nb_glia_preds = len(frg_kwargs)
                nb_glia_targets = [frg_kwargs[dpth]["nb_target"] for dpth in frg_kwargs]

            all_glia_preds = predictions[-nb_glia_preds:]
            predictions = predictions[:-nb_glia_preds]

            loss_glia = 0
            for counter, glia_pred, nb_tar in zip(range(len(all_glia_preds)), all_glia_preds, nb_glia_targets):
                glia_target = all_targets[nb_tar][:,[-1]]
                all_targets[nb_tar] = all_targets[nb_tar][:, :-1]
                assert self.target_has_label_segm
                gt_segm = all_targets[nb_tar][:,[0]]

                glia_target = auto_crop_tensor_to_shape(glia_target, glia_pred.shape)
                gt_segm = auto_crop_tensor_to_shape(gt_segm, glia_pred.shape)
                # TODO: generalize ignore label:
                valid_mask = (gt_segm != 0).float()
                glia_pred = glia_pred * valid_mask
                glia_target = glia_target * valid_mask
                with warnings.catch_warnings(record=True) as w:
                    loss_glia_new = data_parallel(self.loss, (glia_pred, glia_target), self.devices).mean()
                loss_glia = loss_glia + loss_glia_new
                log_image("glia_target_d{}".format(counter), glia_target)
                log_image("glia_pred_d{}".format(counter), glia_pred)
            loss = loss + loss_glia
            log_scalar("loss_glia", loss_glia)

        for counter, nb_pred in enumerate(self.predictions_specs):
            assert len(predictions) > nb_pred
            pred = predictions[nb_pred]
            # TODO: add precrop_pred?
            # if self.precrop_pred is not None:
            #     from segmfriends.utils.various import parse_data_slice
            #     crop_slc = (slice(None), slice(None)) + parse_data_slice(self.precrop_pred)
            #     predictions = predictions[crop_slc]
            pred_specs = self.predictions_specs[nb_pred]
            target = all_targets[pred_specs.get("target", 0)]

            target_dws_fact = pred_specs.get("target_dws_fact", None)
            if target_dws_fact is not None:
                assert isinstance(target_dws_fact, list) and len(target_dws_fact) == 3
                target = target[(slice(None), slice(None)) + tuple(slice(None,None,dws) for dws in target_dws_fact)]

            target = auto_crop_tensor_to_shape(target, pred.shape,
                                            ignore_channel_and_batch_dims=True)

            if self.target_has_label_segm:
                if self.target_has_various_masks:
                    target = target[:, 2:]
                else:
                    target = target[:,1:]
            assert target.shape[1] % 2 == 0, "Target should include both affinities and masks"

            # Get ignore-mask and affinities:
            nb_channels = int(target.shape[1] / 2)

            affs_channels = pred_specs.get("affs_channels", None)
            if affs_channels is not None:
                if isinstance(affs_channels, str):
                    affs_slice = parse_data_slice(affs_channels)[0]
                elif isinstance(affs_channels, list):
                    # TODO: make as a tuple???
                    affs_slice = affs_channels
                else:
                    raise ValueError("The passed affinities channels are not compatible")
            else:
                affs_slice = slice(None)

            gt_affs = target[:,:nb_channels][:, affs_slice]

            assert gt_affs.shape[1] == pred.shape[1], "Prediction has a wrong number of offset channels"

            valid_pixels = target[:,nb_channels:][:, affs_slice]

            # Invert affinities for Dice loss: (1 boundary, 0 otherwise)
            gt_affs = 1. - gt_affs

            pred = pred*valid_pixels
            gt_affs = gt_affs*valid_pixels

            with warnings.catch_warnings(record=True) as w:
                loss_new = data_parallel(self.loss, (pred, gt_affs), self.devices).mean()
            loss = loss + loss_new
            log_scalar("loss_sparse_d{}".format(counter), loss_new)

        # TODO: use Callback from Roman to run it every N iterations
        gc.collect()
        return loss
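
For reference, a minimal sketch of the soft Dice loss that self.loss is assumed to compute on the masked affinities (the actual loss class is not shown here and may differ in its reduction details):

import torch

def soft_dice_loss(pred, target, eps=1e-6):
    # Move channels first and flatten all remaining dimensions:
    nb_channels = pred.shape[1]
    pred = pred.transpose(0, 1).reshape(nb_channels, -1)
    target = target.transpose(0, 1).reshape(nb_channels, -1)
    numerator = (pred * target).sum(dim=-1)
    denominator = (pred * pred).sum(dim=-1) + (target * target).sum(dim=-1)
    # Channel-wise Dice score in [0, 1]; its negative is minimized:
    return -2. * (numerator / denominator.clamp(min=eps)).mean()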
Example 6
import os
import matplotlib
from segmfriends.utils.various import parse_data_slice
# Path helper module assumed, consistent with the other snippets in this collection:
from vaeAffs.utils.path_utils import get_trendytukan_drive_path

matplotlib.rcParams.update({'font.size': 25})

sample = "C"
# slices = {'A': ":,:,:,:", 'B': ":, :, 90:, 580: 1900", 'C': ":, :, 70:1450, 95:1425"}
slices = {
    'A': ":,:,:,:",
    'B': ":,:,:,:",
    'C': ":,70:-6,50:-50,50:-50",
    "0": ":,:,:,:",
    "1": ":,:,:,:",
    "2": ":,:,:,:",
    "3": ":,:,:,:",
}

parsed_slice = parse_data_slice(slices[sample])
conf_folder_path = os.path.join(
    get_trendytukan_drive_path(),
    "projects/pixel_embeddings/v4_addSparseAffs_avgDirectVar/scores")

prefix_math = [
    "C__MEAN___",
    # "C__MutexWatershed___",
    "C__MWS__stride10___"
]

labels = {
    "C__MEAN___": "Gasp Average",
    "C__MWS__stride10___": "Mutex Watershed"
}
Example 7
    affs[affs_valid_mask==0] = 1

    # Combine left and right affinities:
    segment_mask = np.logical_and(affs[0], affs[1])

    # Erode the binary segment mask (segments: 1, boundary: 0):
    eroded_segment_mask = segment_mask.copy()
    for z in range(eroded_segment_mask.shape[0]):
        eroded_segment_mask[z] = vigra.filters.multiBinaryErosion(segment_mask[z], radius=2.)
    boundary_mask = np.logical_not(eroded_segment_mask)

    BOUNDARY_LABEL = 2
    DEFECTED_LABEL = 3
    out_mask = glia.copy()
    out_mask[boundary_mask] = BOUNDARY_LABEL

    # Mask defected slices:
    for slc in defected_slices[sample]:
        out_mask[parse_data_slice(slc)] = DEFECTED_LABEL


    # Copy GT from the previous slice (to avoid weird connected-components problems):
    for z in copy_from_previous[sample]:
        GT[z] = GT[z-1]

    print("Now writing...")

    from segmfriends.utils.various import writeHDF5
    writeHDF5(out_mask, data_path, 'volumes/labels/various_masks')
    writeHDF5(GT, data_path, 'volumes/labels/neuron_ids_fixed')
Example 8
        active_nodes[tuple(self.starting_coordinate)] = 2
        return out_segm, active_nodes


import os
import h5py
import numpy as np
from vaeAffs.utils.path_utils import get_abailoni_hci_home_path
sample = "A"
data_path = os.path.join(
    get_abailoni_hci_home_path(),
    "../ialgpu1_local_home/datasets/cremi/SOA_affinities/sample{}_train.h5".
    format(sample))
crop_slice = ":,20:21,300:400,300:400"
from segmfriends.utils.various import parse_data_slice

crop_slice = parse_data_slice(crop_slice)
with h5py.File(data_path, 'r') as f:
    affs = f['predictions']['full_affs'][crop_slice]
    raw = f['raw'][crop_slice[1:]]

offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-2, 0, 0], [0, -3, 0],
           [0, 0, -3], [-3, 0, 0], [0, -9, 0], [0, 0, -9], [-4, 0, 0],
           [0, -27, 0], [0, 0, -27]]
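# The first three offsets are direct neighbors along z/y/x; the remaining ones are
# increasingly long-range neighbors along the same axes (negative = "looking back").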

# Fake duplicate affinities:
duplicate_affs = np.empty_like(affs)
for i, off in enumerate(offsets):
    duplicate_affs[i] = np.roll(affs[i], off, axis=(0, 1, 2))
affs = np.concatenate([affs, duplicate_affs], axis=0)
offsets = offsets + [[-off[0], -off[1], -off[2]] for off in offsets]
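# Note: np.roll wraps around at the volume borders, so these duplicated "mirrored"
# affinities are only valid away from the boundary; here they just act as fake
# placeholders for the opposite-direction offsets appended above.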
Example 9
    def forward(self, all_predictions, target):
        mdl_kwargs = self.model_kwargs
        ptch_kwargs = mdl_kwargs["patchNet_kwargs"]

        nb_inputs = mdl_kwargs.get("number_multiscale_inputs")

        # print([(pred.shape[-3], pred.shape[-2], pred.shape[-1]) for pred in all_predictions])
        # print([(targ.shape[-3], targ.shape[-2], targ.shape[-1]) for targ in target])

        # Plot some patches with the raw:
        if self.model.return_input:
            raw_inputs = all_predictions[-nb_inputs:]
            all_predictions = all_predictions[:-nb_inputs]

        loss = 0

        # # ----------------------------
        # # Predict glia mask:
        # # ----------------------------
        if self.train_glia_mask:
            assert self.glia_label is not None

            frg_kwargs = self.model.foreground_prediction_kwargs
            if frg_kwargs is None:
                # Legacy:
                nb_glia_preds = 1
                nb_glia_targets = [0]
            else:
                nb_glia_preds = len(frg_kwargs)
                nb_glia_targets = [
                    frg_kwargs[dpth]["nb_target"] for dpth in frg_kwargs
                ]

            all_glia_preds = all_predictions[-nb_glia_preds:]
            all_predictions = all_predictions[:-nb_glia_preds]
            loss_glia = 0
            for counter, glia_pred, nb_tar in zip(range(len(all_glia_preds)),
                                                  all_glia_preds,
                                                  nb_glia_targets):
                glia_target = (target[nb_tar][:,
                                              [1]] == self.glia_label).float()
                valid_mask = (target[nb_tar][:, [0]] !=
                              self.ignore_label).float()

                glia_target = auto_crop_tensor_to_shape(
                    glia_target, glia_pred.shape)
                valid_mask = auto_crop_tensor_to_shape(valid_mask,
                                                       glia_pred.shape)
                glia_pred = glia_pred * valid_mask
                glia_target = glia_target * valid_mask
                with warnings.catch_warnings(record=True) as w:
                    loss_glia_cur = data_parallel(self.loss,
                                                  (glia_pred, glia_target),
                                                  self.devices).mean()
                loss_glia = loss_glia + loss_glia_cur
                log_image("glia_target_d{}".format(counter), glia_target)
                log_image("glia_pred_d{}".format(counter), glia_pred)
            loss = loss + loss_glia
            log_scalar("loss_glia", loss_glia)
        else:
            glia_pred = all_predictions.pop(-1)

        if self.train_sparse_loss:
            loss = loss + self.sparse_multilevelDiceLoss(
                all_predictions, target)
            # Delete affinities from targets:
            target = [tar[:, :2].int() for tar in target]

        # IoU loss:
        if self.add_IoU_loss:
            assert self.boundary_label is None, "Not implemented"
            assert self.indx_trained_patchNets is None
            loss = loss + self.IoU_loss(all_predictions, target)

        if self.indx_trained_patchNets is None:
            nb_preds = len(all_predictions)
            assert len(ptch_kwargs) == nb_preds
            indx_trained_patchNets = zip(range(nb_preds), range(nb_preds))
        else:
            indx_trained_patchNets = self.indx_trained_patchNets

        # ----------------------------
        # Loss on patches:
        # ----------------------------
        for nb_patch_net, nb_pr in indx_trained_patchNets:
            # ----------------------------
            # Initializations:
            # ----------------------------
            pred = all_predictions[nb_pr]
            kwargs = ptch_kwargs[nb_patch_net]
            if isinstance(target, (list, tuple)):
                assert "nb_target" in kwargs, "Multiple targets passed. Target should be specified"
                gt_segm = target[kwargs["nb_target"]]
            else:
                gt_segm = target

            # Collect options from config:
            patch_shape_input = kwargs.get("patch_size")
            assert all(i % 2 == 1
                       for i in patch_shape_input), "Patch should be odd"
            patch_dws_fact = kwargs.get("patch_dws_fact", [1, 1, 1])
            stride = tuple(kwargs.get("patch_stride", [1, 1, 1]))
            pred_dws_fact = kwargs.get("pred_dws_fact", [1, 1, 1])
            # print(nb_patch_net, patch_dws_fact, pred_dws_fact)
            precrop_pred = kwargs.get("precrop_pred", None)
            limit_nb_patches = kwargs.get("limit_nb_patches", None)
            from segmfriends.utils.various import parse_data_slice
            if precrop_pred is not None:
                precrop_pred_slice = (
                    slice(None), slice(None)) + parse_data_slice(precrop_pred)
                pred = pred[precrop_pred_slice]

            central_shape = tuple(kwargs.get("central_shape", [1, 3, 3]))
            max_random_crop = tuple(kwargs.get("max_random_crop", [0, 5, 5]))
            if self.fix_bug_multiscale_patches:
                real_patch_shape = tuple(
                    pt * fc - fc + 1
                    for pt, fc in zip(patch_shape_input, patch_dws_fact))
            else:
                real_patch_shape = tuple(
                    pt * fc
                    for pt, fc in zip(patch_shape_input, patch_dws_fact))
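            # real_patch_shape is the patch extent in full-resolution voxels: a patch of
            # size patch_shape_input is predicted at downsampling factor patch_dws_fact
            # (the "fixed" variant subtracts fc - 1 so that patch centers stay aligned).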

            full_target_shape = gt_segm.shape[-3:]
            assert all([
                i <= j for i, j in zip(real_patch_shape, full_target_shape)
            ]), "Real-sized patch is too large!"

            # ----------------------------
            # Deduce crop size of the prediction and select target patches accordingly:
            # ----------------------------
            # print(pred.shape, full_target_shape, pred_dws_fact, real_patch_shape)
            crop_slice_targets, crop_slice_prediction = get_slicing_crops(
                pred.shape[2:], full_target_shape, pred_dws_fact,
                real_patch_shape)
            # print(crop_slice_prediction, crop_slice_targets, nb_patch_net)
            gt_segm = gt_segm[crop_slice_targets]
            pred = pred[crop_slice_prediction]
            full_target_shape = gt_segm.shape[-3:]

            # # ----------------------------
            # # Plot some random patches with associated raw patch:
            # # ----------------------------
            if self.model.return_input and nb_patch_net < 5:
                # raw = raw_inputs[kwargs["nb_target"]][crop_slice_targets]
                # FIXME: raw is not correct for deeper ones
                raw = raw_inputs[0][crop_slice_targets]
                raw_to_plot, gt_labels_to_plot, gt_masks_to_plot, pred_emb_to_plot = [], [], [], []
                for n in range(40):
                    # Select a random pixel and define sliding-window crop slices:
                    selected_coord = [
                        np.random.randint(shp) for shp in pred.shape[2:]
                    ]
                    # selected_coord[0] = 4 # For plots, get always 4
                    full_patch_slice = (slice(None), slice(0, 1)) + tuple(
                        slice(selected_coord[i], selected_coord[i] +
                              real_patch_shape[i])
                        for i in range(len(selected_coord)))
                    emb_slice = (slice(None), slice(0, 1)) + tuple(
                        slice(
                            selected_coord[i] +
                            int(real_patch_shape[i] / 2), selected_coord[i] +
                            int(real_patch_shape[i] / 2) + 1)
                        for i in range(len(selected_coord)))
                    pred_center_coord = [
                        int(selected_coord[i] / pred_dws_fact[i])
                        for i in range(len(selected_coord))
                    ]
                    emb_slice_pred = (slice(None), slice(None)) + tuple(
                        slice(pred_center_coord[i], pred_center_coord[i] + 1)
                        for i in range(len(selected_coord)))

                    # Collect data for current sliding window:
                    center_label = gt_segm[emb_slice]
                    center_label_repeated = center_label.repeat(
                        1, 1, *real_patch_shape)
                    gt_patch_labels = gt_segm[full_patch_slice]
                    gt_masks_to_plot.append(
                        gt_patch_labels != center_label_repeated)
                    gt_labels_to_plot.append(gt_patch_labels)
                    # ignore_mask_patch = (gt_patch_labels == 0)
                    pred_emb_to_plot.append(pred[emb_slice_pred])

                    raw_to_plot.append(raw[full_patch_slice])

                # Highlight center pixel:
                raw_to_plot = torch.cat(raw_to_plot, dim=0)
                center_pixel_coord = (slice(None), 0) + tuple(
                    int(shp / 2) for shp in real_patch_shape)
                raw_to_plot[center_pixel_coord] = raw_to_plot.min() - 1.

                gt_labels_to_plot = torch.cat(gt_labels_to_plot, dim=0)
                gt_masks_to_plot = torch.cat(gt_masks_to_plot, dim=0)
                pred_emb_to_plot = torch.cat(pred_emb_to_plot, dim=0)

                # Decode embeddings:
                ptch_num = kwargs["patchNet_number"]
                pred_patch_to_plot = data_parallel(
                    self.model.patch_models[ptch_num],
                    pred_emb_to_plot[:, :, 0, 0, 0], self.devices)

                # Downscale and rescale targets:
                down_sc_slice = (slice(None), slice(None)) + tuple(
                    slice(int(dws_fact / 2), None, dws_fact)
                    for dws_fact in patch_dws_fact)
                gt_masks_to_plot = torch.nn.functional.interpolate(
                    gt_masks_to_plot[down_sc_slice].float(),
                    scale_factor=tuple(patch_dws_fact))
                pred_patch_to_plot = torch.nn.functional.interpolate(
                    pred_patch_to_plot, scale_factor=tuple(patch_dws_fact))

                gt_masks_to_plot = 1. - gt_masks_to_plot
                if patch_dws_fact[1] <= 6:
                    pred_patch_to_plot = 1. - pred_patch_to_plot

                log_image("raw_patch_l{}".format(nb_patch_net), raw_to_plot)
                log_image("gt_label_patch_l{}".format(nb_patch_net),
                          gt_labels_to_plot)
                log_image("gt_mask_patch_l{}".format(nb_patch_net),
                          gt_masks_to_plot)
                log_image("pred_patch_l{}".format(nb_patch_net),
                          pred_patch_to_plot)

            # # ----------------------------
            # # Patch-Loss:
            # # ----------------------------
            if kwargs.get("skip_standard_patch_loss", False):
                continue

            # If multiple strides were given, process all of them:
            all_strides = stride if isinstance(stride[0], list) else [stride]
            if limit_nb_patches is not None:
                all_limit_nb_patches = limit_nb_patches if isinstance(
                    limit_nb_patches[0], list) else [limit_nb_patches]
            else:
                all_limit_nb_patches = [None for _ in all_strides]

            for nb_stride, stride, limit_nb_patches in zip(
                    range(len(all_strides)), all_strides,
                    all_limit_nb_patches):

                # ----------------------------
                # Get some random prediction embeddings:
                # ----------------------------
                pred_strides = get_prediction_strides(pred_dws_fact, stride)
                pred_patches, crop_slice_pred, nb_patches = extract_patches_torch_new(
                    pred, (1, 1, 1),
                    stride=pred_strides,
                    max_random_crop=max_random_crop)

                # Try to get some raw patches:
                # TODO: the factor is simply the level in the UNet
                # get_slicing_crops(pred.shape[2:], full_target_shape, [1,1,1], real_patch_shape)

                # ----------------------------
                # Collect gt_segm patches and corresponding center labels:
                # ----------------------------
                crop_slice_targets = tuple(
                    slice(sl.start, None) for sl in crop_slice_pred)
                gt_patches, _, _ = extract_patches_torch_new(
                    gt_segm,
                    real_patch_shape,
                    stride=stride,
                    crop_slice=crop_slice_targets,
                    limit_patches_to=nb_patches)
                gt_patches = gt_patches[:, [0]]

                # Make sure to crop some additional border and get the centers correctly:
                # TODO: this can be now easily done by cropping the gt_patches...
                crop_slice_center_labels = (slice(None), slice(None)) + tuple(
                    slice(slc.start + int(sh / 2), slc.stop) for slc, sh in
                    zip(crop_slice_targets[2:], real_patch_shape))
                target_at_patch_center, _, _ = extract_patches_torch_new(
                    gt_segm, (1, 1, 1),
                    stride=stride,
                    crop_slice=crop_slice_center_labels,
                    limit_patches_to=nb_patches)
                # Get GT and other masks separately:
                label_at_patch_center = target_at_patch_center[:, [0]]
                mask_at_patch_center = target_at_patch_center[:, [1]]

                # ----------------------------
                # Ignore patches on the boundary or involving ignore-label:
                # ----------------------------
                # Ignore pixels involving ignore-labels:
                ignore_masks = (gt_patches == self.ignore_label)
                valid_patches = (label_at_patch_center != self.ignore_label)

                assert self.boundary_label is not None, "Old boundary method is deprecated"
                # # Exclude a patch from training if the central region contains more than one gt label
                # # (i.e. it is really close to a boundary):
                # central_crop = (slice(None), slice(None)) + convert_central_shape_to_crop_slice(gt_patches.shape[-3:], central_shape)
                # mean_central_crop_labels = gt_patches[central_crop].mean(dim=-1, keepdim=True) \
                #     .mean(dim=-2, keepdim=True) \
                #     .mean(dim=-3, keepdim=True)
                #
                # valid_patches = valid_patches & (mean_central_crop_labels == center_labels)
                # is_on_boundary_mask = None
                patch_is_on_boundary = (
                    mask_at_patch_center == self.boundary_label).repeat(
                        1, 1, *real_patch_shape)

                # Ignore patches that represent a glia:
                if not self.train_patches_on_glia:
                    assert self.glia_label is not None
                    # print("Glia: ", (mask_at_patch_center != self.glia_label).min())
                    valid_patches = valid_patches & (mask_at_patch_center !=
                                                     self.glia_label)

                # Delete non-valid patches from the batch:
                valid_batch_indices = np.argwhere(
                    valid_patches[:, 0, 0, 0, 0].cpu().detach().numpy())[:, 0]
                if limit_nb_patches is not None:
                    limit = limit_nb_patches[0]
                    if limit_nb_patches[1] == 'number':
                        if valid_batch_indices.shape[0] > limit:
                            valid_batch_indices = np.random.choice(
                                valid_batch_indices, limit, replace=False)
                    elif limit_nb_patches[1] == 'factor':
                        assert limit <= 1. and limit >= 0.
                        valid_batch_indices = np.random.choice(
                            valid_batch_indices,
                            int(limit * valid_batch_indices.shape[0]),
                            replace=False)
                if valid_batch_indices.shape[0] == 0:
                    print(
                        "ZERO valid patches at level {}!".format(nb_patch_net))
                    # Avoid problems if all patches are invalid and torch complains that autograd cannot be performed:
                    loss += pred_patches.sum() * 0.
                    continue

                # ----------------------------
                # Compute the actual (inverted) MeMasks targets: (0 is me, 1 are the others)
                # best targets for Dice loss (usually more me than others)
                # ----------------------------
                center_labels_repeated = label_at_patch_center.repeat(
                    1, 1, *real_patch_shape)
                me_masks = gt_patches != center_labels_repeated

                if patch_is_on_boundary is not None:
                    # If on boundary, we make (inverted) me_masks completely 1 (split from everything)
                    me_masks = me_masks | patch_is_on_boundary

                # Downscale MeMasks using max-pooling (preserves narrow processes):
                # during max-pooling it is better to shrink the me-mask than to expand it (avoids merge predictions)
                if all(fctr == 1 for fctr in patch_dws_fact):
                    maxpool = Identity()
                else:
                    maxpool = nn.MaxPool3d(kernel_size=patch_dws_fact,
                                           stride=patch_dws_fact,
                                           padding=0)

                # Downscaling patch:
                down_sc_slice = (slice(None), slice(None)) + tuple(
                    slice(int(dws_fact / 2), None, dws_fact)
                    for dws_fact in patch_dws_fact)

                # Final targets:
                patch_targets = me_masks[valid_batch_indices].float(
                )[down_sc_slice]
                patch_ignore_masks = ignore_masks[valid_batch_indices][
                    down_sc_slice].byte()

                # Invert MeMasks:
                # best targets for Dice loss are: meMask == 0; others == 1
                # FIXME: generalize
                if patch_dws_fact[1] > 6:
                    patch_targets = 1. - patch_targets

                assert valid_batch_indices.max() < pred_patches.shape[
                    0], "Something went wrong, more target patches were collected than predicted: {} targets vs {} pred...".format(
                        valid_batch_indices.max(), pred_patches.shape[0])
                pred_embed = pred_patches[valid_batch_indices]
                pred_embed = pred_embed[:, :, 0, 0, 0]

                # ----------------------------
                # Expand embeddings to patches using PatchNet models:
                # ----------------------------
                if "model_number" in kwargs:
                    # FIXME: update this crap
                    # In this case we are training a stacked model:
                    mdl_num = kwargs["model_number"]
                    ptch_num = kwargs["patchNet_number"]
                    expanded_patches = data_parallel(
                        self.model.models[mdl_num].patch_models[ptch_num],
                        pred_embed, self.devices)
                else:
                    expanded_patches = data_parallel(
                        self.model.patch_models[nb_patch_net], pred_embed,
                        self.devices)
                # print(expanded_patches.shape)
                assert expanded_patches.shape[
                    1] == 1, "PatchNet should output only a one-channel mask!"

                # Some logs:
                if nb_stride == 0:
                    log_image("ptc_trg_l{}".format(nb_patch_net),
                              patch_targets)
                    log_image("ptc_pred_l{}".format(nb_patch_net),
                              expanded_patches)
                    # log_image("ptc_ign_l{}".format(nb_patch_net), patch_ignore_masks)
                    log_scalar("avg_targets_l{}".format(nb_patch_net),
                               patch_targets.float().mean())

                # Train only checkerboard pattern:
                if self.apply_checkerboard:
                    checkerboard = np.zeros(patch_shape_input)
                    # Verticals:
                    center_coord = [int(sh / 2) for sh in patch_shape_input]
                    checkerboard[:, center_coord[1], :] = 1
                    checkerboard[:, :, center_coord[2]] = 1
                    # Two diagonals:
                    indices = np.indices(patch_shape_input)
                    checkerboard[indices[1] == indices[2]] = 1
                    checkerboard[indices[1] == (patch_shape_input[2] -
                                                indices[2] - 1)] = 1
                    # Reduce z-context:
                    z_mask = np.zeros_like(checkerboard)
                    z_mask[center_coord[0]] = 1
                    for z in range(patch_shape_input[0]):
                        offs = abs(center_coord[0] - z)
                        if offs != 0:
                            z_mask[z, offs:-offs, offs:-offs] = 1
                    checkerboard[np.logical_not(z_mask)] = 0
                    # Expand channels and wrap:
                    checkerboard = torch.from_numpy(checkerboard).cuda(
                        patch_ignore_masks.get_device()).float()
                    checkerboard = checkerboard.unsqueeze(0).unsqueeze(0)
                    checkerboard = checkerboard.repeat(
                        *patch_ignore_masks.shape[:2], 1, 1, 1)

                # ----------------------------
                # Apply ignore mask and compute loss:
                # ----------------------------
                patch_valid_masks = 1. - patch_ignore_masks.float()
                if self.apply_checkerboard:
                    patch_valid_masks = patch_valid_masks * checkerboard
                expanded_patches = expanded_patches * patch_valid_masks
                patch_targets = patch_targets * patch_valid_masks
                with warnings.catch_warnings(record=True) as w:
                    loss_unet = data_parallel(
                        self.loss, (expanded_patches, patch_targets.float()),
                        self.devices).mean()

                loss = loss + loss_unet
                if nb_stride == 0:
                    log_scalar("loss_l{}".format(nb_patch_net), loss_unet)
                    log_scalar("nb_patches_l{}".format(nb_patch_net),
                               expanded_patches.shape[0])

        # print("Loss done, memory {}", torch.cuda.memory_allocated(0)/1000000)
        # TODO: use Callback from Roman to run it every N iterations
        gc.collect()
        return loss
import os
import h5py
from segmfriends.utils.various import parse_data_slice
from vaeAffs.utils.path_utils import get_abailoni_hci_home_path

from scipy.ndimage import zoom

crp_slices = {"B": ":,:-1,:", "C": ":,:,:-1"}

for sample in ["A", "B", "C", "0", "1", "2"]:
    print("Sample", sample)

    data_path = os.path.join(get_abailoni_hci_home_path(),
                             "datasets/new_cremi/sample{}.h5".format(sample))

    if sample in crp_slices:
        crp_slc = parse_data_slice(crp_slices[sample])
    else:
        crp_slc = slice(None)

    with h5py.File(data_path, 'r+') as f:
        print([atr for atr in f['volumes/labels']])
        #     glia = f['volumes/labels/glia'][:]
        # raw = f['volumes/raw'][crp_slc]
        # GT = f['volumes/labels/neuron_ids_fixed'][crp_slc]
        # various_masks = f['volumes/labels/various_masks'][crp_slc]
        various_masks = f['volumes/labels/various_masks_noDefects'][crp_slc]

        print("Now writing...")
        # f['volumes/raw_2x'] = zoom(raw, (1, 0.5, 0.5), order=3)
        # f['volumes/labels/neuron_ids_fixed_2x'] = zoom(GT, (1, 0.5, 0.5), order=0)
        if 'volumes/labels/various_masks_noDefects_2x' in f:
Example 11
    def forward(self, all_predictions, target):
        mdl = self.model

        nb_inputs = mdl.number_multiscale_inputs

        # Plot some patches with the raw:
        if self.model.return_input:
            raw_inputs = all_predictions[-nb_inputs:]
            all_predictions = all_predictions[:-nb_inputs]

        loss = 0

        if self.train_sparse_loss:
            raise NotImplementedError
            loss = loss + self.sparse_multilevelDiceLoss(all_predictions, target)
            # Delete affinities from targets:
            target = [tar[:, :2].int() for tar in target]

        # ----------------------------
        # Loss on patches:
        # ----------------------------
        for mask_dec_indx in range(len(all_predictions)):
            # ----------------------------
            # Initializations:
            # ----------------------------
            mask_dec = self.model.mask_decoders[mask_dec_indx]
            pred = all_predictions[mask_dec_indx]

            gt_segm = target[mask_dec.target_index]

            # Collect options from config:
            mask_shape = mask_dec.mask_shape
            mask_dws_fact = mask_dec.mask_dws_fact
            sample_strides = mask_dec.sample_strides
            pred_dws_fact = mask_dec.pred_dws_fact
            crop_slice_prediction = mask_dec.crop_slice_prediction
            limit_nb_decoded_masks_to = mask_dec.limit_nb_decoded_masks_to

            if crop_slice_prediction is not None:
                precrop_pred_slice = (slice(None), slice(None)) + parse_data_slice(crop_slice_prediction)
                pred = pred[precrop_pred_slice]

            max_random_crop = mask_dec.max_random_crop

            real_shape_mask = tuple(pt * fc for pt, fc in zip(mask_shape, mask_dws_fact))

            full_target_shape = gt_segm.shape[-3:]
            assert all([i <= j for i, j in zip(real_shape_mask, full_target_shape)]), "Real-sized patch is too large!"

            # ----------------------------
            # Deduce crop size of the prediction and select target patches accordingly:
            # ----------------------------
            # TODO: explain better what is going on here
            crop_slice_targets, crop_slice_prediction = get_slicing_crops(pred.shape[2:], full_target_shape,
                                                                          pred_dws_fact, real_shape_mask)
            gt_segm = gt_segm[crop_slice_targets]
            pred = pred[crop_slice_prediction]
            full_target_shape = gt_segm.shape[-3:]

            # # # ----------------------------
            # # # Plot some random patches with associated raw patch:
            # # # ----------------------------
            # if self.model.return_input and mask_dec_indx<5:
            #     # raw = raw_inputs[kwargs["nb_target"]][crop_slice_targets]
            #     # FIXME: raw is not correct for deeper ones
            #     raw = raw_inputs[0][crop_slice_targets]
            #     raw_to_plot, gt_labels_to_plot, gt_masks_to_plot, pred_emb_to_plot = [], [], [], []
            #     for n in range(40):
            #         # Select a random pixel and define sliding-window crop slices:
            #         selected_coord = [np.random.randint(shp) for shp in pred.shape[2:]]
            #         # selected_coord[0] = 4 # For plots, get always 4
            #         full_patch_slice = (slice(None), slice(0,1)) + tuple(
            #             slice(selected_coord[i], selected_coord[i] + real_shape_mask[i]) for i in range(len(selected_coord)))
            #         emb_slice = (slice(None), slice(0,1)) + tuple(slice(selected_coord[i] + int(real_shape_mask[i] / 2),
            #                                                             selected_coord[i] + int(
            #                                                                 real_shape_mask[i] / 2) + 1) for i in
            #                                                       range(len(selected_coord)))
            #         pred_center_coord = [int(selected_coord[i] / pred_dws_fact[i]) for i in range(len(selected_coord))]
            #         emb_slice_pred = (slice(None), slice(None)) + tuple(
            #             slice(pred_center_coord[i], pred_center_coord[i] + 1)
            #             for i in range(len(selected_coord)))
            #
            #         # Collect data for current sliding window:
            #         center_label = gt_segm[emb_slice]
            #         center_label_repeated = center_label.repeat(1, 1, *real_shape_mask)
            #         gt_patch_labels = gt_segm[full_patch_slice]
            #         gt_masks_to_plot.append(gt_patch_labels != center_label_repeated)
            #         gt_labels_to_plot.append(gt_patch_labels)
            #         # ignore_mask_patch = (gt_patch_labels == 0)
            #         pred_emb_to_plot.append(pred[emb_slice_pred])
            #
            #         raw_to_plot.append(raw[full_patch_slice])
            #
            #     # Highlight center pixel:
            #     raw_to_plot = torch.cat(raw_to_plot, dim=0)
            #     center_pixel_coord = (slice(None), 0) + tuple(int(shp / 2) for shp in real_shape_mask)
            #     raw_to_plot[center_pixel_coord] = raw_to_plot.min() - 1.
            #
            #     gt_labels_to_plot = torch.cat(gt_labels_to_plot, dim=0)
            #     gt_masks_to_plot = torch.cat(gt_masks_to_plot, dim=0)
            #     pred_emb_to_plot = torch.cat(pred_emb_to_plot, dim=0)
            #
            #     # Decode embeddings:
            #     ptch_num = kwargs["patchNet_number"]
            #     pred_patch_to_plot = data_parallel(self.model.patch_models[ptch_num], pred_emb_to_plot[:, :, 0, 0, 0], self.devices)
            #
            #     # Downscale and rescale targets:
            #     down_sc_slice = (slice(None), slice(None)) + tuple(
            #         slice(int(dws_fact / 2), None, dws_fact) for dws_fact in mask_dws_fact)
            #     gt_masks_to_plot = torch.nn.functional.interpolate(gt_masks_to_plot[down_sc_slice].float(), scale_factor=tuple(mask_dws_fact))
            #     pred_patch_to_plot = torch.nn.functional.interpolate(pred_patch_to_plot,
            #                                                          scale_factor=tuple(mask_dws_fact))
            #
            #     gt_masks_to_plot = 1. - gt_masks_to_plot
            #     if mask_dws_fact[1] <= 6:
            #         pred_patch_to_plot = 1. - pred_patch_to_plot
            #
            #     log_image("raw_patch_l{}".format(mask_dec_indx), raw_to_plot)
            #     log_image("gt_label_patch_l{}".format(mask_dec_indx), gt_labels_to_plot)
            #     log_image("gt_mask_patch_l{}".format(mask_dec_indx), gt_masks_to_plot)
            #     log_image("pred_patch_l{}".format(mask_dec_indx), pred_patch_to_plot)

            # # ----------------------------
            # # Patch-Loss:
            # # ----------------------------

            # If multiple strides were given, process all of them:
            sample_strides = sample_strides if isinstance(sample_strides[0], list) else [sample_strides]
            if limit_nb_decoded_masks_to is not None:
                limit_nb_decoded_masks_to = limit_nb_decoded_masks_to if isinstance(limit_nb_decoded_masks_to[0],
                                                                                    list) else [
                    limit_nb_decoded_masks_to]
            else:
                limit_nb_decoded_masks_to = [None for _ in sample_strides]

            for nb_stride, smpl_stride, max_nb_masks in zip(range(len(sample_strides)), sample_strides,
                                                            limit_nb_decoded_masks_to):

                # ----------------------------
                # Get some random prediction embeddings:
                # ----------------------------
                prediction_strides = get_prediction_strides(pred_dws_fact, smpl_stride)
                selected_embeddings, crop_slice_pred, nb_selected_masks = extract_patches_torch(pred, (1, 1, 1),
                                                                                                stride=prediction_strides,
                                                                                                max_random_crop=max_random_crop)

                # ----------------------------
                # Collect gt_segm patches and corresponding center labels:
                # ----------------------------
                crop_slice_targets = tuple(slice(sl.start, None) for sl in crop_slice_pred)
                gt_patches, _, _ = extract_patches_torch(gt_segm, real_shape_mask, stride=smpl_stride,
                                                         apply_specific_crop_slice=crop_slice_targets,
                                                         limit_patches_nb_to=nb_selected_masks)
                gt_patches = gt_patches[:, [0]]

                # Make sure to crop some additional border and get the centers correctly:
                # TODO: this can be now easily done by cropping the gt_patches...
                crop_slice_center_labels = (slice(None), slice(None)) + tuple(
                    slice(slc.start + int(sh / 2), slc.stop) for slc, sh in
                    zip(crop_slice_targets[2:], real_shape_mask))
                target_at_patch_center, _, _ = extract_patches_torch(gt_segm, (1, 1, 1), stride=smpl_stride,
                                                                     apply_specific_crop_slice=crop_slice_center_labels,
                                                                     limit_patches_nb_to=nb_selected_masks)
                # Get GT and other masks separately:
                label_at_patch_center = target_at_patch_center[:, [0]]
                mask_at_patch_center = target_at_patch_center[:, [1]]

                # ----------------------------
                # Ignore patches on the boundary or involving ignore-label:
                # ----------------------------
                # Ignore pixels involving ignore-labels:
                ignore_masks = (gt_patches == self.ignore_label)
                valid_patches = (label_at_patch_center != self.ignore_label)

                patch_is_on_boundary = None
                if self.boundary_label is not None:
                    patch_is_on_boundary = (mask_at_patch_center == self.boundary_label).repeat(1, 1, *real_shape_mask)

                # Delete non-valid patches from batch:
                valid_batch_indices = np.argwhere(valid_patches[:, 0, 0, 0, 0].cpu().detach().numpy())[:, 0]
                if max_nb_masks is not None:
                    limit = max_nb_masks[0]
                    if max_nb_masks[1] == 'number':
                        if valid_batch_indices.shape[0] > limit:
                            valid_batch_indices = np.random.choice(valid_batch_indices, limit, replace=False)
                    elif max_nb_masks[1] == 'factor':
                        assert limit <= 1. and limit >= 0.
                        valid_batch_indices = np.random.choice(valid_batch_indices,
                                                               int(limit * valid_batch_indices.shape[0]), replace=False)

                if valid_batch_indices.shape[0] == 0:
                    # Avoid problems if all patches are invalid and
                    # torch complaining that autograd cannot be performed:
                    loss += selected_embeddings.sum() * 0.
                    print("ZERO valid patches at level {}".format(mask_dec_indx))
                    continue

                # ----------------------------
                # Compute the actual (inverted) MeMasks targets: (0 is me, 1 are the others)
                # best targets for Dice loss (usually more me than others)
                # ----------------------------
                center_labels_repeated = label_at_patch_center.repeat(1, 1, *real_shape_mask)
                target_me_masks = gt_patches != center_labels_repeated

                if patch_is_on_boundary is not None:
                    # If on boundary, we make (inverted) me_masks completely 1 (split from everything)
                    target_me_masks = target_me_masks | patch_is_on_boundary

                # Downscaling patches:
                down_sc_slice = (slice(None), slice(None)) + tuple(
                    slice(int(dws_fact / 2), None, dws_fact) for dws_fact in mask_dws_fact)

                # Final targets:
                target_me_masks = target_me_masks[valid_batch_indices].float()[down_sc_slice]
                ignore_masks = ignore_masks[valid_batch_indices][down_sc_slice].byte()

                # Invert MeMasks:
                # best targets for Dice loss are: meMask == 0; others == 1
                # TODO: generalize
                if mask_dws_fact[1] > 6:
                    target_me_masks = 1. - target_me_masks

                assert valid_batch_indices.max() < selected_embeddings.shape[
                    0], "Something went wrong, more target patches were collected than those predicted: {} targets vs {} pred...".format(
                    valid_batch_indices.max(), selected_embeddings.shape[0])
                selected_embeddings = selected_embeddings[valid_batch_indices]
                selected_embeddings = selected_embeddings[:, :, 0, 0, 0]

                # ----------------------------
                # Decode the selected embeddings into masks using the decoder models:
                # ----------------------------
                decoded_masks = data_parallel(mask_dec, selected_embeddings, self.devices)
                # print(expanded_patches.shape)
                assert decoded_masks.shape[1] == 1, "MaskDecoder should output only single-channel masks!"

                # Some logs:
                if nb_stride == 0:
                    log_image("ptc_trg_l{}".format(mask_dec_indx), target_me_masks)
                    log_image("ptc_pred_l{}".format(mask_dec_indx), decoded_masks)
                    # log_image("ptc_ign_l{}".format(nb_patch_net), patch_ignore_masks)
                    log_scalar("avg_targets_l{}".format(mask_dec_indx), target_me_masks.float().mean())

                # ----------------------------
                # Apply ignore mask and compute loss:
                # ----------------------------
                valid_masks = 1. - ignore_masks.float()
                decoded_masks = decoded_masks * valid_masks
                target_me_masks = target_me_masks * valid_masks
                with warnings.catch_warnings(record=True) as w:
                    reconstruction_loss = data_parallel(self.loss, (decoded_masks, target_me_masks.float()),
                                                        self.devices).mean()

                loss = loss + reconstruction_loss
                if nb_stride == 0:
                    log_scalar("loss_l{}".format(mask_dec_indx), reconstruction_loss)
                    log_scalar("nb_patches_l{}".format(mask_dec_indx), decoded_masks.shape[0])

        gc.collect()
        return loss