import os

import numpy as np
from PIL import Image

from segmfriends.utils.various import writeHDF5


def load_stack_of_tiff_as_numpy(folder_path, output_filename):

    # path = "/home/abailoni_local/trendyTukan_localdata0/datasets/battery_data/Daten for AlbertoBailoni"
    # Shape: (Z, Y, X, channel)
    # TODO: generalize
    collected_dataset = np.empty((100, 1080, 1197, 2), dtype='uint8')
    for filename in os.listdir(folder_path):
        if filename.endswith(".tiff"):
            im = Image.open(os.path.join(folder_path, filename))
            imarray = np.array(im)
            parts = filename.split("_")
            slice_nb = int(parts[-1].split(".")[0]) - 1
            if parts[0] == "2ND":
                collected_dataset[slice_nb, :, 4:, 0] = imarray
            else:
                imarray = imarray[:-4]
                collected_dataset[slice_nb, :, :-4, 1] = imarray
    #         channel_nb = 0 if parts[0] == "2ND" else 1
    #         collected_dataset[slice_nb, :, ]
    #         print(slice_nb, channel_nb)
    #         print(imarray.shape, parts[0])
    #         all_arrays[]
    #          print(os.path.join(directory, filename))
    #         continue
        else:
            continue
    print(collected_dataset.max())

    writeHDF5(collected_dataset,
              os.path.join(folder_path, output_filename),
              "data")
Example #2
    def save_infer_output(self, output):
        print("Saving....")
        if self.get("export_path") is not None:
            dir_path = os.path.join(
                self.get("export_path"),
                self.get("name_experiment", default="generic_experiment"))
        else:
            try:
                # Only works for my experiments saving on trendyTukan, otherwise it will throw an error:
                trendyTukan_path = get_trendytukan_drive_dir()
            except ValueError:
                raise ValueError(
                    "TrendyTukan drive not found. Please specify an `export_path` in the config file."
                )
            dir_path = os.path.join(
                trendyTukan_path, "projects/pixel_embeddings",
                self.get("name_experiment", default="generic_experiment"))
        check_dir_and_create(dir_path)
        filename = os.path.join(
            dir_path,
            "predictions_sample_{}.h5".format(self.get("loaders/infer/name")))
        print("Writing to ", self.get("inner_path_output", 'data'))
        writeHDF5(output.astype(np.float16), filename,
                  self.get("inner_path_output", 'data'))
        print("Saved to ", filename)

        # Dump configuration to export folder:
        self.dump_configuration(os.path.join(dir_path,
                                             "prediction_config.yml"))
Example #3
    def save_infer_output(self, output):
        import numpy as np
        from segmfriends.utils.various import check_dir_and_create, writeHDF5

        print("Saving....")
        dir_path = os.path.join(
            get_trendytukan_drive_path(), "projects/pixel_embeddings",
            self.get("name_experiment", default="generic_experiment"))
        check_dir_and_create(dir_path)
        filename = os.path.join(
            dir_path,
            "predictions_sample_{}.h5".format(self.get("loaders/infer/name")))
        print("Writing to ", self.get("inner_path_output", 'data'))
        writeHDF5(output.astype(np.float16), filename,
                  self.get("inner_path_output", 'data'))
        print("Saved to ", filename)

        # Dump configuration to export folder:
        self.dump_configuration(os.path.join(dir_path,
                                             "prediction_config.yml"))
        "datasets/CREMI/official_test_samples/sample_{}_padded_20160601.hdf".
        format(sample))
    out_file = os.path.join(
        get_trendytukan_drive_path(),
        "datasets/CREMI/official_test_samples/full_aligned_samples/sample_{}_aligned_plus_raw_mask.hdf"
        .format(sample))

    import h5py
    GT_mask_file = os.path.join(
        get_trendytukan_drive_path(),
        "datasets/CREMI/official_test_samples/full_aligned_samples/sample_{}_GT_mask_temp.hdf"
        .format(sample))
    mask_inner_path = "volumes/labels/mask"
    GT_box = np.zeros(padded_shape, dtype="uint32")
    GT_box[slice_GT_mask] = 1

    from segmfriends.utils.various import writeHDF5
    writeHDF5(GT_box, GT_mask_file, mask_inner_path)

    from cremi_tools.alignment import realign

    # GT_mask_file = os.path.join(get_trendytukan_drive_path(), "datasets/CREMI/alignment_experiments/sample_{}_backaligned.hdf".format(sample))

    realign(raw_file,
            sample,
            out_file,
            labels_file=GT_mask_file,
            labels_key=mask_inner_path)

    os.remove(GT_mask_file)
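The GT mask written above is simply a binary box marking where ground truth exists inside the padded volume. For illustration, slice_GT_mask could be built from an offset and an extent like this (the shapes and offsets below are hypothetical, not the actual CREMI values):

import numpy as np

padded_shape = (125, 3072, 3072)                          # hypothetical padded volume shape
gt_offset, gt_shape = (37, 911, 911), (51, 1250, 1250)    # hypothetical GT region
slice_GT_mask = tuple(slice(o, o + s) for o, s in zip(gt_offset, gt_shape))

GT_box = np.zeros(padded_shape, dtype="uint32")
GT_box[slice_GT_mask] = 1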
Example #5
def run_method_on_graph(method_type,
                        true_assign,
                        k=None,
                        p=None,
                        n=None,
                        spectral_method_name=None,
                        linkage_criteria=None,
                        add_cannot_link_constraints=None,
                        signed_edge_weights=None,
                        multicut_solver_type="kernighanLin",
                        graph=None,
                        A_p=None,
                        A_n=None,
                        experiment_name=None,
                        eta=None,
                        gauss_sigma=None,
                        project_directory=None,
                        save_output_segm=False,
                        affinities=None,
                        offsets=None,
                        output_shape=None):
    # Run clustering:
    tick = time.time()
    # print(method_type)
    if method_type == "GASP":
        # TODO: assert
        print(linkage_criteria, add_cannot_link_constraints)
        node_labels, _ = run_GASP(
            graph,
            signed_edge_weights,
            # edge_sizes=edge_sizes,
            linkage_criteria=linkage_criteria,
            add_cannot_link_constraints=add_cannot_link_constraints,
            use_efficient_implementations=False,
            # **additional_kwargs
        )
    elif method_type == "multicut":
        print(multicut_solver_type)
        node_labels = multicut(graph,
                               None,
                               None,
                               signed_edge_weights,
                               solver_type=multicut_solver_type)
    elif method_type == "spectral":
        c = Cluster((A_p, A_n))
        print(spectral_method_name)
        try:
            if spectral_method_name == "BNC":
                node_labels = c.spectral_cluster_bnc(k=k, normalisation='sym')
            elif spectral_method_name == "L-sym":
                node_labels = c.spectral_cluster_laplacian(k=k,
                                                           normalisation='sym')
            elif spectral_method_name == "SPONGE":
                # FIXME: not sure about this...
                # node_labels = c.geproblem_laplacian(k = k, normalisation='additive')
                node_labels = c.SPONGE(k=k)
            elif spectral_method_name == "SPONGE-sym":
                # node_labels = c.geproblem_laplacian(k = k, normalisation='multiplicative')
                node_labels = c.SPONGE_sym(k=k)
            else:
                raise NotImplementedError
        except np.linalg.LinAlgError:
            print("#### LinAlgError ({}) ####".format(spectral_method_name))
            return
    else:
        raise NotImplementedError

    runtime = time.time() - tick

    # Compute scores and stats:
    RAND_score = adjusted_rand_score(node_labels, true_assign)
    counts = np.bincount(node_labels.astype('int64'))
    nb_clusters = (counts > 0).sum()
    biggest_clusters = np.sort(counts)[::-1][:10]

    # Save stuff/results...
    print(runtime, RAND_score)
    # print(nb_clusters, biggest_clusters)

    # Save config setup:
    new_results = {}

    # TODO: delete this crap
    new_results["method_type"] = method_type
    new_results["k"] = k
    new_results["p"] = p
    new_results["n"] = n
    new_results["eta"] = eta
    new_results["guass_sigma"] = gauss_sigma
    new_results["spectral_method_name"] = spectral_method_name
    new_results["linkage_criteria"] = linkage_criteria
    new_results["add_cannot_link_constraints"] = add_cannot_link_constraints
    new_results["multicut_solver_type"] = multicut_solver_type
    new_results["experiment_name"] = experiment_name
    new_results["project_directory"] = project_directory
    ID = str(np.random.randint(1000000000))
    new_results["ID"] = ID

    experiment_dir_path = os.path.join(project_directory, experiment_name)
    check_dir_and_create(experiment_dir_path)
    check_dir_and_create(os.path.join(experiment_dir_path, 'scores'))
    result_file = os.path.join(
        experiment_dir_path, 'scores',
        '{}_{}_{}_{}.json'.format(ID, method_type, spectral_method_name,
                                  linkage_criteria))

    # Actually save results:
    new_results["runtime"] = runtime
    new_results["RAND_score"] = RAND_score
    new_results["nb_clusters"] = int(nb_clusters)
    new_results["biggest_clusters"] = [int(size) for size in biggest_clusters]

    # Save output:
    if save_output_segm:
        assert output_shape is not None
        assert affinities is not None
        assert offsets is not None
        check_dir_and_create(os.path.join(experiment_dir_path, 'out_segms'))
        export_file = os.path.join(
            experiment_dir_path, 'out_segms',
            '{}_{}_{}.h5'.format(method_type, spectral_method_name,
                                 linkage_criteria))
        from segmfriends.utils.various import writeHDF5
        writeHDF5(node_labels.reshape(output_shape),
                  export_file,
                  'segm',
                  compression='gzip')

        # Delete small segments:
        from GASP.segmentation.watershed import SizeThreshAndGrowWithWS
        hmap_kwargs = {"offset_weights": [1.0, 1.0], "used_offsets": [1, 2]}
        size_grower = SizeThreshAndGrowWithWS(20,
                                              offsets,
                                              hmap_kwargs=hmap_kwargs)
        segm_WS = size_grower(affinities, node_labels.reshape(output_shape))
        writeHDF5(segm_WS, export_file, 'segm_WS', compression='gzip')

        RAND_score_WS = adjusted_rand_score(segm_WS.flatten(), true_assign)
        new_results["RAND_score_WS"] = RAND_score_WS

    with open(result_file, 'w') as f:
        json.dump(new_results, f, indent=4, sort_keys=True)
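Each call writes one JSON file into <project_directory>/<experiment_name>/scores, so results from many runs can be aggregated afterwards. A minimal sketch of such an aggregation (paths follow the convention above; nothing here is part of the original code):

import glob
import json
import os

project_directory, experiment_name = "./experiments", "cremi_comparison"   # illustrative
score_files = glob.glob(os.path.join(project_directory, experiment_name,
                                     "scores", "*.json"))
results = []
for path in score_files:
    with open(path) as f:
        results.append(json.load(f))

# Rank the runs by adjusted Rand score:
results.sort(key=lambda r: r["RAND_score"], reverse=True)
for r in results[:5]:
    print(r["method_type"], r.get("linkage_criteria"),
          r.get("spectral_method_name"), r["RAND_score"], r["runtime"])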
Example #6
def get_kwargs_iter_CREMI(fixed_kwargs,
                          kwargs_to_be_iterated,
                          init_kwargs_iter=None,
                          nb_iterations=1):
    kwargs_iter = init_kwargs_iter if isinstance(init_kwargs_iter,
                                                 list) else []

    iter_collected = {}

    KEYS_TO_ITER = [
        'method_type', 'spectral_method_name', 'linkage_criteria',
        'add_cannot_link_constraints'
    ]
    for key in KEYS_TO_ITER:
        if key in fixed_kwargs:
            iter_collected[key] = [fixed_kwargs[key]]
        elif key in kwargs_to_be_iterated:
            iter_collected[key] = kwargs_to_be_iterated[key]
        else:
            raise ValueError("Iter key {} was not passed!".format(key))

    fixed_kwargs = deepcopy(fixed_kwargs)
    dataset = fixed_kwargs.pop("dataset")

    # Load the data:
    for _ in range(nb_iterations):
        if dataset == "CREMI":
            from .load_datasets import get_dataset_data, CREMI_crop_slices, get_dataset_offsets
            affs, GT = get_dataset_data(
                dataset='CREMI',
                sample="B",
                crop_slice_str=":,18:28,1020:1120,990:1090",
                run_connected_components=True)

            n = 10000
            # p = fixed_kwargs.pop("p")
            print("Creating pixel graph:")
            from GASP.utils.graph import build_pixel_lifted_graph_from_offsets
            offsets = np.array(get_dataset_offsets("CREMI"))
            graph, is_local_edge, true_assign, edge_sizes = build_pixel_lifted_graph_from_offsets(
                GT.shape, offsets, GT_label_image=GT)
            true_assign = true_assign.astype('uint64')
            assert (edge_sizes == 1).all()
            uv_ids = graph.uvIds()

            k = np.unique(true_assign).shape[0]

            # TODO: check sign of affinities
            edge_weights = graph.edgeValues(np.rollaxis(affs, 0, 4))

            # FIXME: always use additive cost for the moment...
            # Compute log costs:
            signed_edge_weights = edge_weights - 0.5

            print("Create positive and negative graph adj. matrices:")

            A_p, A_n = from_edge_list_to_adj_matrix(uv_ids,
                                                    signed_edge_weights)
            A_p = sparse.csr_matrix(A_p)
            A_n = sparse.csr_matrix(A_n)

            # Dump data:
            experiment_dir_path = os.path.join(
                fixed_kwargs['project_directory'],
                fixed_kwargs['experiment_name'])
            check_dir_and_create(experiment_dir_path)
            check_dir_and_create(os.path.join(experiment_dir_path, 'segms'))
            check_dir_and_create(os.path.join(experiment_dir_path,
                                              'out_segms'))
            export_file = os.path.join(experiment_dir_path, 'out_segms',
                                       'inputs.h5')
            from segmfriends.utils.various import writeHDF5
            writeHDF5(affs.astype('float32'),
                      export_file,
                      'affs',
                      compression='gzip')
            writeHDF5(GT, export_file, 'gt', compression='gzip')

            # Start collecting kwargs:
            for method_type in iter_collected["method_type"]:
                if method_type == "spectral":
                    for spectral_method_name in iter_collected[
                            "spectral_method_name"]:
                        new_kwargs = {}
                        new_kwargs.update(fixed_kwargs)

                        iterated_kwargs = {
                            'method_type': method_type,
                            'true_assign': true_assign,
                            'spectral_method_name': spectral_method_name,
                            'A_p': A_p,
                            'A_n': A_n,
                            'n': n,
                            'eta': None,
                            'p': None,
                            'k': k,
                            'save_output_segm': True,
                            'output_shape': GT.shape,
                            'affinities': affs,
                            'offsets': get_dataset_offsets("CREMI")
                        }
                        new_kwargs.update({
                            k: v
                            for k, v in iterated_kwargs.items()
                            if k not in new_kwargs
                        })
                        kwargs_iter.append(new_kwargs)
                elif method_type == "GASP":
                    for linkage in iter_collected["linkage_criteria"]:
                        for CNC in iter_collected[
                                "add_cannot_link_constraints"]:
                            new_kwargs = {}
                            new_kwargs.update(fixed_kwargs)

                            iterated_kwargs = {
                                'method_type': method_type,
                                'true_assign': true_assign,
                                'linkage_criteria': linkage,
                                'add_cannot_link_constraints': CNC,
                                'signed_edge_weights': signed_edge_weights,
                                'graph': graph,
                                'eta': None,
                                'n': n,
                                'p': None,
                                'k': k,
                                'save_output_segm': True,
                                'output_shape': GT.shape,
                                'affinities': affs,
                                'offsets': get_dataset_offsets("CREMI")
                            }
                            new_kwargs.update({
                                k: v
                                for k, v in iterated_kwargs.items()
                                if k not in new_kwargs
                            })
                            kwargs_iter.append(new_kwargs)
                elif method_type == "multicut":
                    new_kwargs = {}
                    new_kwargs.update(fixed_kwargs)

                    iterated_kwargs = {
                        'method_type': method_type,
                        'true_assign': true_assign,
                        'signed_edge_weights': signed_edge_weights,
                        'graph': graph,
                        'eta': None,
                        'n': n,
                        'p': None,
                        'k': k,
                        'save_output_segm': True,
                        'output_shape': GT.shape,
                        'affinities': affs,
                        'offsets': get_dataset_offsets("CREMI")
                    }
                    new_kwargs.update({
                        k: v
                        for k, v in iterated_kwargs.items()
                        if k not in new_kwargs
                    })
                    kwargs_iter.append(new_kwargs)
                else:
                    raise NotImplementedError
        else:
            raise NotImplementedError

    return kwargs_iter
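A sketch of how the collected kwargs could be consumed together with run_method_on_graph from the previous example; the concrete fixed/iterated values here are made up for illustration, and the original project may dispatch the runs in parallel instead of looping:

fixed_kwargs = {
    "dataset": "CREMI",
    "project_directory": "./experiments",
    "experiment_name": "cremi_comparison",
    "method_type": "GASP",
    "spectral_method_name": None,
    "add_cannot_link_constraints": False,
}
kwargs_to_be_iterated = {"linkage_criteria": ["mean", "abs_max"]}

for kwargs in get_kwargs_iter_CREMI(fixed_kwargs, kwargs_to_be_iterated):
    run_method_on_graph(**kwargs)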
Example #7

    affs, affs_valid_mask = compute_affinities(GT.astype('int64'),
                                               offsets,
                                               ignore_label=0,
                                               have_ignore_label=True)

    # Where it is not valid, we should not predict a boundary label:
    affs[affs_valid_mask == 0] = 1

    # Combine left and right affinities:
    segment_mask = np.logical_and(affs[0], affs[1])

    # This function erodes the binary mask (segments 1, boundary 0):
    eroded_segment_mask = segment_mask.copy()
    for z in range(eroded_segment_mask.shape[0]):
        eroded_segment_mask[z] = vigra.filters.multiBinaryErosion(
            segment_mask[z], radius=2.)
    boundary_mask = np.logical_not(eroded_segment_mask)

    # Max int32 value:
    BOUNDARY_VALUE = 2147483647

    modified_GT = GT.copy()
    modified_GT[boundary_mask] = BOUNDARY_VALUE

    print("Done")

    from segmfriends.utils.various import writeHDF5
    legacy_mod = "_OLD" if sample == "B" and old_raw else ""
    writeHDF5(modified_GT, data_path,
              'segmentations/groundtruth_plus_boundary2' + legacy_mod)
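The boundary masks in this and the following examples are derived from short-range affinities of the ground-truth labels. A toy illustration of the same idea, assuming compute_affinities comes from affogato.affinities (which matches the keyword arguments used above):

import numpy as np
from affogato.affinities import compute_affinities

toy_GT = np.array([[[1, 1, 2, 2],
                    [1, 1, 2, 2],
                    [0, 0, 2, 2]]], dtype='int64')      # 0 is the ignore label
offsets = [[0, -1, 0], [0, 0, -1]]                      # direct neighbours in Y and X

affs, valid_mask = compute_affinities(toy_GT, offsets,
                                      ignore_label=0, have_ignore_label=True)
affs[valid_mask == 0] = 1                # never mark a boundary on invalid pixels
segment_mask = np.logical_and(affs[0], affs[1])   # 1 inside segments, 0 at boundaries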
Example #8

                    scores_dir = os.path.join(project_dir, exp_name, "scores")
                    prev_score_file = os.path.join(
                        scores_dir, filename.replace(".h5", ".yml"))

                    result_config = yaml2dict(prev_score_file)
                    result_config['score_WS'] = evals

                    new_score_file_path = prev_score_file.replace(
                        ".yml", "{}.yml".format(postfix))

                    with open(new_score_file_path, 'w') as f:
                        # json.dump(config_to_save, f, indent=4, sort_keys=True)
                        yaml.dump(result_config, f)

                # Save new segm:
                out_segm_path = pred_file.replace(".h5",
                                                  "{}.h5".format(postfix))
                writeHDF5(segm.astype('uint32'), out_segm_path, "segm_WS")

                if PREPARE_SUBMISSION:
                    from vaeAffs.postproc.utils import prepare_submission

                    path_bbox_slice = config["volume_config"][
                        "paths_padded_boxes"]
                    prepare_submission(sample,
                                       out_segm_path,
                                       inner_path_segm="segm_WS",
                                       path_bbox_slice=path_bbox_slice[sample],
                                       ds_factor=(1, 2, 2))
Example #9

    glia_mask = readHDF5(glia_prediction_path, "glia_mask")

    for exp_name, invert in EXP_NAMES:
        pred_dir = os.path.join(project_dir, exp_name)

        for item in os.listdir(pred_dir):
            if os.path.isfile(os.path.join(pred_dir, item)):
                filename = item
                if not filename.endswith(".h5") or filename.startswith(
                        ".") or not filename.startswith("predictions_sample"):
                    continue
                pred_file = os.path.join(pred_dir, filename)

                # Load glia mask and predictions:
                # TODO: add crop slice
                print("Loading affs for ", exp_name)
                affs = readHDF5(pred_file, "data")

                print("Computing...")
                new_affs = compute_boundary_mask_from_label_image(
                    affs,
                    glia_mask,
                    invert_affs=invert,
                    offsets=offsets,
                    combination_method=combination_method,
                    set_invalid_values_to=-1)

                print("Saving...")
                writeHDF5(new_affs, pred_file,
                          "affs_plus_glia_{}".format(combination_method))
Example #10
    affs[affs_valid_mask == 0] = 1

    # Combine left and right affinities:
    segment_mask = np.logical_and(affs[0], affs[1])

    # This function erodes the binary out_mask (segments 1, boundary 0):
    eroded_segment_mask = segment_mask.copy()
    for z in range(eroded_segment_mask.shape[0]):
        eroded_segment_mask[z] = vigra.filters.multiBinaryErosion(segment_mask[z], radius=2.)
    boundary_mask = np.logical_not(eroded_segment_mask)

    BOUNDARY_LABEL = 2
    DEFECTED_LABEL = 3
    out_mask = glia.copy()
    out_mask[boundary_mask] = BOUNDARY_LABEL

    # Mask defected slices:
    for slc in defected_slices[sample]:
        out_mask[parse_data_slice(slc)] = DEFECTED_LABEL


    # Copy GT from previous (to avoid weird connected components problems):
    for z in copy_from_previous[sample]:
        GT[z] = GT[z-1]

    print("Now writing...")

    from segmfriends.utils.various import writeHDF5
    writeHDF5(out_mask, data_path, 'volumes/labels/various_masks')
    writeHDF5(GT, data_path, 'volumes/labels/neuron_ids_fixed')
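Defected slices are addressed with parse_data_slice, which apparently turns a string such as "40:45, :, 500:1000" into a tuple of slice objects. A minimal sketch of such a parser, assuming exactly this behaviour (the real helper in segmfriends may handle more cases, e.g. step sizes):

def parse_data_slice_sketch(slice_str):
    # Convert "40:45, :, 500:1000" into (slice(40, 45), slice(None), slice(500, 1000)):
    slices = []
    for dim in slice_str.split(","):
        parts = [int(p) if p.strip() else None for p in dim.split(":")]
        slices.append(slice(*parts))
    return tuple(slices)

print(parse_data_slice_sketch("40:45, :, 500:1000"))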
Example #11

    with open(csv_file_path, mode='w') as f:
        slice_writer = csv.writer(f,
                                  delimiter=';',
                                  quotechar='"',
                                  quoting=csv.QUOTE_MINIMAL)
        for i in range(3):
            slice_writer.writerow(
                [str(crop_slice[i].start),
                 str(crop_slice[i].stop)])

    print(crop_slice)
    GT = GT[crop_slice]

    # Write affs and mask in target file:
    target_path = os.path.join(
        get_trendytukan_drive_path(),
        "datasets/CREMI/padded_data/cropped_aligned_samples/sample_{}.h5".
        format(sample))
    target_path_2x = os.path.join(
        get_trendytukan_drive_path(),
        "datasets/CREMI/padded_data/cropped_aligned_samples/sample_{}_2x.h5".
        format(sample))

    raw = readHDF5(source_path, "volumes/raw", crop_slice=crop_slice)
    print("Saving...")
    writeHDF5(GT, target_path, gt_inner_path)
    writeHDF5(raw, target_path, "volumes/raw")
    print("Downscaling and saving...")
    writeHDF5(zoom(GT, (1, 0.5, 0.5), order=0), target_path_2x, gt_inner_path)
    writeHDF5(zoom(raw, (1, 0.5, 0.5), order=3), target_path_2x, "volumes/raw")
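The downscaling above presumably uses scipy.ndimage.zoom: order=0 (nearest neighbour) is required for label volumes so that no new label values are interpolated, while order=3 (cubic) is fine for raw data. A tiny check of that behaviour:

import numpy as np
from scipy.ndimage import zoom

labels = np.array([[1, 1, 2, 2],
                   [1, 1, 2, 2]], dtype='uint32')
down = zoom(labels, (1, 0.5), order=0)
# Nearest-neighbour downscaling introduces no interpolated label values:
assert set(np.unique(down)) <= set(np.unique(labels))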
    print("Saving...")
    # target_path_old = os.path.join(get_hci_home_path(),
    #                            "datasets/CREMI/official_test_samples/cropped_aligned_samples/sample{}_cropped{}.h5".format(
    #                                sample, POSTFIX))
    target_path = os.path.join(get_hci_home_path(),
                             "datasets/CREMI/official_test_samples/cropped_aligned_samples/sample{}_cropped{}.h5".format(sample, POSTFIX))

    # if include_affs:
    #     affs_path = os.path.join(get_trendytukan_drive_path(), "datasets/CREMI/constantin_affs/test_samples/sample{}.h5".format(sample))
    #     affs_inner_path = "affinities"
    #     affs = readHDF5(affs_path, affs_inner_path, crop_slice=(slice(None), ) + crop_slice)
    #     writeHDF5(affs, target_path, "volumes/affinities")

    # raw = readHDF5(source_path, "volumes/raw")
    # # raw = readHDF5(target_path_old, "volumes/raw_2x")
    # if sample in blacked_out:
    #     for blk in blacked_out[sample]:
    #         print("blacking out ", blk)
    #         raw[blk] = 0


    # mask_gt = readHDF5(source_path, mask_inner_path, dtype="uint16", crop_slice=crop_slice)
    # writeHDF5(raw, target_path, "volumes/raw")
    # writeHDF5(mask_big_pad.astype('uint16'), target_path, "volumes/labels/mask_gt")
    # writeHDF5(mask_gt, target_path, "volumes/labels/mask_gt")

    if downscale:
        writeHDF5(zoom(mask_big_pad, (1, 0.5, 0.5), order=0), target_path, "volumes/labels/mask_raw_2x")
        # writeHDF5(zoom(raw, (1, 0.5, 0.5), order=3), target_path, "volumes/raw_2x")
        # writeHDF5(raw, target_path, "volumes/raw_2x")
    print("Saving...")
    target_path = os.path.join(
        get_trendytukan_drive_path(),
        "datasets/CREMI/official_test_samples/cropped_aligned_samples/sample{}_cropped.h5"
        .format(sample))

    if include_affs:
        affs_path = os.path.join(
            get_trendytukan_drive_path(),
            "datasets/CREMI/constantin_affs/test_samples/sample{}.h5".format(
                sample))
        affs_inner_path = "affinities"
        affs = readHDF5(affs_path,
                        affs_inner_path,
                        crop_slice=(slice(None), ) + crop_slice)
        writeHDF5(affs, target_path, "volumes/affinities")

    raw = readHDF5(source_path, "volumes/raw", crop_slice=crop_slice)
    if sample in blacked_out:
        for blk in blacked_out[sample]:
            raw[blk] = 0

    mask_gt = readHDF5(source_path,
                       mask_inner_path,
                       dtype="uint16",
                       crop_slice=crop_slice)
    writeHDF5(raw, target_path, "volumes/raw")
    writeHDF5(mask_big_pad[crop_slice].astype('uint16'), target_path,
              "volumes/labels/mask_big_pad")
    writeHDF5(mask_gt, target_path, "volumes/labels/mask_gt")
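Several of these snippets also use readHDF5 with a crop_slice and an optional dtype. As with writeHDF5 above, a minimal sketch of what such a reader might look like, assuming a thin h5py wrapper (the segmfriends implementation may differ):

import h5py

def readHDF5_sketch(file_path, inner_path, crop_slice=None, dtype=None):
    # Read a dataset (optionally only a crop of it) and optionally cast it:
    with h5py.File(file_path, 'r') as f:
        ds = f[inner_path]
        data = ds[crop_slice] if crop_slice is not None else ds[:]
    return data.astype(dtype) if dtype is not None else data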
Example #14
    # Combine left and right affinities:
    segment_mask = np.logical_and(affs[0], affs[1])

    # This function erodes the binary out_mask (segments 1, boundary 0):
    eroded_segment_mask = segment_mask.copy()
    for z in range(eroded_segment_mask.shape[0]):
        eroded_segment_mask[z] = vigra.filters.multiBinaryErosion(
            segment_mask[z], radius=1.)
    boundary_mask = np.logical_not(eroded_segment_mask)

    out_mask = glia.copy()
    out_mask[boundary_mask] = BOUNDARY_LABEL
    # # Make sure not to have added some boundary inside glia:
    # out_mask[glia == GLIA_LABEL] = GLIA_LABEL

    # Mask defected slices:
    for slc in defected_slices[sample]:
        out_mask[parse_data_slice(slc)] = DEFECTED_LABEL

    # # Copy GT from previous (to avoid weird connected components problems):
    # for z in copy_from_previous[sample]:
    #     GT[z] = GT[z-1]

    print("Now writing...")

    from segmfriends.utils.various import writeHDF5
    writeHDF5(out_mask, data_path,
              'volumes/labels/various_masks_noDefects_thinBound')
    # writeHDF5(GT, data_path, 'volumes/labels/neuron_ids_fixed')
    "A": [],
    "B": [15, 16, 44, 45],
    "C": [14, 74],
}

for sample in ["A", "B", "C"]:
    # -----------
    # LOAD data
    # -----------

    data_path = os.path.join(
        get_abailoni_hci_home_path(),
        "../ialgpu1_local_home/datasets/cremi/SOA_affinities/sample{}_train.h5"
        .format(sample))

    old_raw = True
    legacy_mod = "_OLD" if sample == "B" and old_raw else ""
    with h5py.File(data_path, 'r') as f:
        GT = f['segmentations/groundtruth_plus_boundary' + legacy_mod][:]

    # Max int32 value - 1:
    DEFECTED_VALUE = 2147483647 - 1

    for slc_idx in defected_slices[sample]:
        GT[slc_idx] = DEFECTED_VALUE

    from segmfriends.utils.various import writeHDF5

    writeHDF5(GT, data_path,
              'segmentations/groundtruth_plus_boundary_defectMod' + legacy_mod)
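A quick sanity check that could be appended inside the loop above, reading the new dataset back with h5py (purely illustrative, not part of the original script):

    with h5py.File(data_path, 'r') as f:
        gt_mod = f['segmentations/groundtruth_plus_boundary_defectMod' + legacy_mod][:]
    print("Defected voxels in sample {}: {}".format(
        sample, int((gt_mod == DEFECTED_VALUE).sum())))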