Example #1
def test_locate_module_and_cells_performance_caipB():
    # load data
    seq1, seq2, anns = caip_dataB()

    # perform detection
    seq1, boxes1 = locate_module_and_cells(seq1,
                                           estimate_distortion=False,
                                           return_bounding_boxes=True)
    seq2, boxes2 = locate_module_and_cells(seq2,
                                           estimate_distortion=False,
                                           return_bounding_boxes=True)

    # compute detection metrics (expected IoU > 0.93)
    ious = list()
    recalls = list()
    for img in seq1:
        iou, _, rec = objdetect_metrics(anns[img.path.name],
                                        boxes1[img.path.name],
                                        iou_thresh=CAIP_THRESH)
        ious.append(iou)
        recalls.append(rec)
    for img in seq2:
        iou, _, rec = objdetect_metrics(anns[img.path.name],
                                        boxes2[img.path.name],
                                        iou_thresh=CAIP_THRESH)
        ious.append(iou)
        recalls.append(rec)

    x = np.mean(recalls, axis=0)
    auc = metrics.auc(CAIP_THRESH, x)

    # TODO: Check why this performs worse than in the paper
    assert auc > 0.06
Example #2
def test_locate_partial_module():
    img = detection.locate_module_and_cells(data.datasets.poly10x6(1)[0])
    part = detection.segment_module_part(img, 0, 0, 3, 2, padding=0.5)
    part_det = detection.locate_module_and_cells(part, False)

    x1_true = [part_det.shape[1] * 1 / 8, part_det.shape[0] * 1 / 6]
    x2_true = [part_det.shape[1] * 7 / 8, part_det.shape[0] * 5 / 6]
    x1 = part_det.get_meta("transform")(np.array([[0.0, 0.0]])).flatten()
    x2 = part_det.get_meta("transform")(np.array([[3.0, 2.0]])).flatten()
    eps = 0.05 * part_det.shape[1]

    assert_equal(x1[0], x1_true[0], eps)
    assert_equal(x1[1], x1_true[1], eps)
    assert_equal(x2[0], x2_true[0], eps)
    assert_equal(x2[1], x2_true[1], eps)
Example #3
def locate_and_stitch_modules(
    images: ImageSequence,
    rows: int,
    cols: int,
    n_horizontal: int = 1,
    n_vertical: int = 1,
    overlap_horizontal: int = 0,
    overlap_vertical: int = 0,
    enable_background_suppression: bool = True,
    equalize_intensity: bool = False,
) -> Image:
    """Locate and stitch partial recordings of a module

    This method applies module localization followed by stitching. It relies on an exact specification
    of the visible module geometry (by means of rows, cols, n_horizontal, n_vertical, overlap_horizontal
    and overlap_vertical). Furthermore, the images need to be given in the specified order.

    Optionally, the intensities of the partial recordings can be equalized. This is disabled by default,
    since it alters the original intensities, which might be undesirable in some cases.

    Args:
        images (ImageSequence): Partial recordings to be stitched together (need to be ordered left -> right / top -> bottom)
        rows (int): Number of fully visible rows of cells in every partial recording
        cols (int): Number of fully visible columns of cells in every partial recording
        n_horizontal (int): Number of partial recordings in horizontal direction
        n_vertical (int): Number of partial recordings in vertical direction
        overlap_horizontal (int): Number of fully visible cells that overlap between any two partial recordings
            in horizontal direction
        overlap_vertical (int): Number of fully visible cells that overlap between any two partial recordings
            in vertical direction
        enable_background_suppression (bool): Enable the background suppression for the module detection. This sometimes causes
            problems with PL images and disabling it might help.
        equalize_intensity (bool): Match the median intensity of every partial recording to that of the first recording

    Returns:
        image (Image): The stitched image
    """

    modimages = ModuleImageSequence([
        ModuleImage(x.data, modality=Modality.EL_IMAGE, cols=cols, rows=rows)
        for x in images
    ])

    # locate
    modimages = locate_module_and_cells(
        modimages,
        orientation="horizontal",
        enable_background_suppresion=enable_background_suppression,
    )

    # stitch
    return stitch_modules(
        modimages,
        n_horizontal,
        n_vertical,
        overlap_horizontal,
        overlap_vertical,
        equalize_intensity,
    )
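
A minimal usage sketch for the function above. It assumes a 10x6 module captured as two side-by-side partial EL recordings that are already loaded into an ImageSequence named partial (a placeholder name, ordered left -> right as required). With cols=6 per recording and an overlap of 2 columns, the stitched result covers 2 * 6 - 2 = 10 columns.

# "partial" is assumed to hold the two partial recordings of one module,
# ordered left -> right; how they are loaded is up to the caller
stitched = locate_and_stitch_modules(
    partial,
    rows=6,                # fully visible rows of cells per recording
    cols=6,                # fully visible columns of cells per recording
    n_horizontal=2,        # two recordings side by side
    n_vertical=1,          # a single row of recordings
    overlap_horizontal=2,  # neighbouring recordings share two columns
)

# the stitched module covers 2 * 6 - 2 = 10 columns of cells
stitched.show()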
Example #4
def test_save_cell_images(tmp_path):
    cells = segment_cells(locate_module_and_cells(datasets.poly10x6(1)[0]))
    save_images(tmp_path, cells)

    for cell in cells:
        p = tmp_path / "{}_row{:02d}_col{:02d}{}".format(
            cell.path.stem, cell.row, cell.col, cell.path.suffix)
        # reading back the saved cell image must not fail
        img_read = read_module_image(p, EL_IMAGE)
Example #5
def test_segment_cells_single_image():
    seq = data.datasets.poly10x6(1)
    seq = detection.locate_module_and_cells(seq, True)
    cells = detection.segment_cells(seq[0])

    assert isinstance(cells, CellImageSequence)
    assert len(cells) == 60
    assert isinstance(cells[0], CellImage)
Example #6
def test_segment_padding_transform():
    img = detection.locate_module_and_cells(data.datasets.poly10x6(1)[0])
    part = detection.segment_module_part(img, 0, 0, 3, 2, padding=0.5, size=20)
    x1 = part.get_meta("transform")(np.array([[0.0, 1.0]])).flatten()
    x2 = part.get_meta("transform")(np.array([[2.0, 3.0]])).flatten()
    assert_equal(x1[0], 10)
    assert_equal(x1[1], 30)
    assert_equal(x2[0], 50)
    assert_equal(x2[1], 70)
Example #7
def test_segment_module_part():
    mod = data.datasets.poly10x6(1)[0]
    mod = detection.locate_module_and_cells(mod)
    part = detection.segment_module_part(mod, 1, 2, 2, 3)

    assert_equal(part.shape[1], 2 / 3 * part.shape[0], 0.1)
    assert part.first_col == 1
    assert part.first_row == 2
    assert part.cols == 2
    assert part.rows == 3
Example #8
def test_save_images_filename_hook(tmp_path: Path):
    cells = segment_cells(locate_module_and_cells(datasets.poly10x6(1)[0]))
    hook = lambda x: "{}_{:02d}{:02d}{}".format(x.path.stem, x.row, x.col,
                                                x.path.suffix)
    # pass the custom naming hook to save_images; the keyword name
    # filename_hook is assumed here, check the save_images signature
    save_images(tmp_path, cells, filename_hook=hook)

    for cell in cells:
        p = tmp_path / "{}_{:02d}{:02d}{}".format(cell.path.stem, cell.row,
                                                  cell.col, cell.path.suffix)
        # try read
        img_read = read_module_image(p, EL_IMAGE)
Example #9
def test_segment_cells():
    seq = data.datasets.poly10x6(2)
    seq = detection.locate_module_and_cells(seq, True)
    cells = detection.segment_cells(seq)

    assert isinstance(cells, CellImageSequence)
    assert len(cells) == 120
    assert isinstance(cells[0], CellImage)
    assert cells[0].row == 0
    assert cells[0].col == 0
    assert cells[1].col == 1
    assert cells[11].row == 1
    assert cells[0].has_meta("segment_module_original")
    assert (cells[0].get_meta("segment_module_original").has_meta(
        "segment_module_original_box"))
Example #10
def test_locate_full():
    seq = data.datasets.poly10x6(2)
    seq = detection.locate_module_and_cells(seq, True)

    assert isinstance(seq[0].get_meta("transform"), FullTransform)
    assert isinstance(seq[1].get_meta("transform"), FullTransform)
    assert seq[0].get_meta("transform").valid
    assert seq[1].get_meta("transform").valid

    # check correct origin
    x = seq[0].get_meta("transform")(np.array([[0.0, 0.0]])).flatten()
    assert x[0] > 1760 and x[0] < 1840
    assert x[1] > 80 and x[1] < 160
    x = seq[1].get_meta("transform")(np.array([[0.0, 0.0]])).flatten()
    assert x[0] > 1760 and x[0] < 1840
    assert x[1] > 80 and x[1] < 160
Example #11
def test_single_segmentation():
    # load images
    img = datasets.poly10x6(N=1)[0]

    # perform detection
    module = locate_module_and_cells(img)

    # check result
    assert module.has_meta("transform")

    # check that show result does not fail
    module.show()

    # perform segmentation into cells
    cells = segment_cells(module)

    # check
    assert len(cells) == 60
Example #12
def test_segment_modules():
    seq = data.datasets.poly10x6(2)
    seq = detection.locate_module_and_cells(seq, True)
    modules = detection.segment_modules(seq)

    assert isinstance(modules, ModuleImageSequence)
    assert isinstance(modules[0], ModuleImage)
    assert len(modules) == 2
    assert modules[0].path == seq[0].path
    assert modules.same_camera is False

    x = modules[0].get_meta("transform")(np.array([[0.0, 0.0]])).flatten()
    assert_equal(x[0], 0.0)
    assert_equal(x[1], 0.0)
    x = modules[0].get_meta("transform")(np.array([[10.0, 0.0]])).flatten()
    assert_equal(x[0], modules[0].shape[1])
    assert_equal(x[1], 0.0)
    x = modules[0].get_meta("transform")(np.array([[10.0, 6.0]])).flatten()
    assert_equal(x[0], modules[0].shape[1])
    assert_equal(x[1], modules[0].shape[0])
Example #13
def test_multi_segmentation():
    # load images
    _, imgs = datasets.multi_module_detection(N=2)

    # perform multi module segmentation
    modules = locate_multiple_modules(imgs, rows=6, cols=10)

    # check result
    assert len(modules) == 12
    assert isinstance(modules[0].get_meta("multimodule_original"), Image)
    assert modules[0].get_meta("multimodule_original").path == imgs[0].path

    # check that plotting does not fail
    modules[0].get_meta("multimodule_original").show()

    # perform precise module localization and segmentation
    modules = locate_module_and_cells(modules)
    modules_crop = segment_modules(modules)

    # check result
    assert len(modules) == 12
    assert_equal(
        modules_crop[0].get_meta("transform")(np.array([[0.0, 0.0]])),
        np.array([0.0, 0.0]),
    )

    # make sure that original is preserved
    assert isinstance(modules_crop[0].get_meta("multimodule_original"), Image)
    assert modules_crop[0].get_meta(
        "multimodule_original").path == imgs[0].path

    # check that displaying the sequence head does not fail
    modules_crop.head()

    # check segmentation into cells
    cells = segment_cells(modules_crop[0])
    assert len(cells) == 60

    # make sure that original is preserved
    assert isinstance(cells[0].get_meta("multimodule_original"), Image)
    assert cells[0].get_meta("multimodule_original").path == imgs[0].path
Example #14
def test_locate_module_and_cells_performance_caipD():
    # load data
    seq, anns = caip_dataD()

    # perform detection
    seq, boxes = locate_module_and_cells(seq,
                                         estimate_distortion=False,
                                         return_bounding_boxes=True)

    # compute detection metrics (expected IoU > 0.95)
    ious = list()
    recalls = list()
    for img in seq:
        iou, _, rec = objdetect_metrics(anns[img.path.name],
                                        boxes[img.path.name],
                                        iou_thresh=CAIP_THRESH)
        ious.append(iou)
        recalls.append(rec)

    x = np.mean(recalls, axis=0)
    auc = metrics.auc(CAIP_THRESH, x)

    assert auc > 0.07
Example #15
def test_segment_padding():
    img = detection.locate_module_and_cells(data.datasets.poly10x6(1)[0])
    part = detection.segment_module_part(img, 0, 0, 3, 2, padding=0.5)
    assert_equal(part.shape[1] / part.shape[0], 8 / 6)
Example #16
def test_segment_size():
    img = detection.locate_module_and_cells(data.datasets.poly10x6(1)[0])
    part = detection.segment_module_part(img, 1, 3, 2, 1, size=20)
    assert_equal(part.shape[1], 2 * 20)
    assert_equal(part.shape[0], 1 * 20)