Example #1
from morphocut import Pipeline
from morphocut.str import Format


def test_Format():
    # Assert that the arguments are appended in the right order
    fmt = "{},{},{},{},{},{},{a},{b},{c},{d}"
    args = (1, 2, 3)
    _args = (4, 5, 6)
    _kwargs = {"a": 7, "b": 8}
    kwargs = {"c": 9, "d": 10}

    with Pipeline() as pipeline:
        result = Format(fmt, *args, _args=_args, _kwargs=_kwargs, **kwargs)

    stream = pipeline.transform_stream()
    obj = next(stream)

    assert obj[result] == "1,2,3,4,5,6,7,8,9,10"

    # Assert that the keyword arguments replace as expected
    fmt = "{a},{b}"
    _kwargs = {"a": 1, "b": 2}
    kwargs = {"a": 3, "b": 4}

    with Pipeline() as pipeline:
        result = Format(fmt, _kwargs=_kwargs, **kwargs)

    stream = pipeline.transform_stream()
    obj = next(stream)

    assert obj[result] == "3,4"
Example #2
def test_ecotaxa(tmp_path):
    archive_fn = tmp_path / "ecotaxa.zip"
    print(archive_fn)

    # Create an archive
    with Pipeline() as p:
        i = Unpack(range(10))

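        # Assemble a metadata dictionary for every object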
        meta = Call(dict, i=i, foo="Sömé UTF-8 ſtríng…")
        image = BinaryBlobs()
        image_name = Format("image_{}.png", i)

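        # Write each image under its formatted name, together with the
        # metadata, into an EcoTaxa archive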
        EcotaxaWriter(archive_fn, (image_name, image), meta)

    result = [o.to_dict(meta=meta, image=image) for o in p.transform_stream()]

    # Read the archive
    with Pipeline() as p:
        image, meta = EcotaxaReader(archive_fn)

    roundtrip_result = [
        o.to_dict(meta=meta, image=image) for o in p.transform_stream()
    ]

    for meta_field in ("i", "foo"):
        assert [o["meta"][meta_field] for o in result
                ] == [o["meta"][meta_field] for o in roundtrip_result]

    assert_equal([o["image"] for o in result],
                 [o["image"] for o in roundtrip_result])
Example #3
def test_ecotaxa(tmp_path, ext):
    archive_fn = tmp_path / ("ecotaxa" + ext)
    print(archive_fn)

    # Create an archive
    with Pipeline() as p:
        i = Unpack(range(10))

        meta = Call(dict, i=i, foo="Sömé UTF-8 ſtríng…")
        image = BinaryBlobs()
        image_name = Format("image_{}.png", i)

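        # Constant metadata for the object/acq/process/sample levels is
        # added to every object in the archive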
        EcotaxaWriter(
            archive_fn,
            (image_name, image),
            meta,
            object_meta={"foo": 0},
            acq_meta={"foo": 1},
            process_meta={"foo": 2},
            sample_meta={"foo": 3},
        )

    result = [o.to_dict(meta=meta, image=image) for o in p.transform_stream()]

    # Read the archive
    with Pipeline() as p:
        image, meta = EcotaxaReader(archive_fn)

    roundtrip_result = [
        o.to_dict(meta=meta, image=image) for o in p.transform_stream()
    ]

    for meta_field in ("i", "foo"):
        assert [o["meta"][meta_field] for o in result
                ] == [o["meta"][meta_field] for o in roundtrip_result]

    for i, prefix in enumerate(("object_", "acq_", "process_", "sample_")):
        assert [o["meta"][prefix + "foo"]
                for o in result] == [i for _ in roundtrip_result]

    assert_equal([o["image"] for o in result],
                 [o["image"] for o in roundtrip_result])
Example #4
"""Process video data with MorphoCut and store the frames as individual image files."""

from morphocut import Pipeline
from morphocut.image import ImageWriter
from morphocut.pims import VideoReader
from morphocut.str import Format

with Pipeline() as p:
    # Read individual frames from a video file
    frame = VideoReader("/path/to/video.avi")

    # Format filename
    filename = Format("/output/path/frame_#{}.png", frame.frame_no)

    # Write individual frames as image files
    ImageWriter(filename, frame)

p.run()
Example #5
    # Show progress bar for frames
    #TQDM(Format("Frame {name}", name=name))
    
    # Apply running median to approximate the background image
    flat_field = RunningMedian(img, 5)

    # Correct image
    img = img / flat_field
    
    # Rescale intensities and convert to uint8 to speed up calculations
    img = RescaleIntensity(img, in_range=(0, 1.1), dtype="uint8")
    
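    # Keep only the variables that are needed further down the pipeline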
    FilterVariables(name, img)

    # Write the corrected frame to disk
    frame_fn = Format(os.path.join(CLEAN, "{name}.jpg"), name=name)
    ImageWriter(frame_fn, img)
    
    # Convert image to grayscale
    img_gray = RGB2Gray(img)

    # Convert to 8-bit unsigned integers
    img_gray = Call(img_as_ubyte, img_gray)

    # Canny edge detection
    img_canny = Call(cv2.Canny, img_gray, 50, 100)

    # Dilate
    kernel = Call(cv2.getStructuringElement, cv2.MORPH_ELLIPSE, (15, 15))
    img_dilate = Call(cv2.dilate, img_canny, kernel, iterations=2)
    
Example #6
        image = ImageReader(path)

        # Do some thresholding
        mask = image < 128

        # Find regions in the image
        region = FindRegions(mask, image)

        # Extract just the object
        roi_image = region.intensity_image

        # An object is identified by its label
        roi_label = region.label

        # Calculate a filename for the ROI image:
        # "RUNNING_NUMBER-SOURCE_BASENAME-ROI_LABEL"
        roi_name = Format(
            "{:d}-{}-{:d}.jpg", running_number, source_basename, roi_label
        )

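        # Calculate ZooProcess-compatible features of the region
        # (stored as object_* metadata fields)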
        meta = CalculateZooProcessFeatures(region, prefix="object_")
        # End of parallel execution

    # Store results
    EcotaxaWriter("archive.zip", (roi_name, roi_image), meta)

# After the Pipeline has been defined, it can be executed.
# A stream is created and transformed by the operations
# defined in the Pipeline.
p.run()
Example #7
        TQDM(lst_fn)

        obj = FlowCamReader(lst_fn)

        img = obj.image
        img_gray = RGB2Gray(img, True)

        mask = obj.mask

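        # Calculate region properties of the object from its mask and gray image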
        regionprops = ImageProperties(mask, img_gray)

        object_meta = obj.data

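        # Build the object identifier from the .lst file name and the
        # object's id field (supplied via _kwargs=object_meta)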
        object_id = Format("{lst_name}_{id}",
                           lst_name=obj.lst_name,
                           _kwargs=object_meta)
        object_meta["id"] = object_id
        object_meta = CalculateZooProcessFeatures(regionprops, object_meta)

        EcotaxaWriter(
            os.path.join(export_path, "export.zip"),
            [
                (Format("{object_id}.jpg", object_id=object_id), img),
                (Format("{object_id}_gray.jpg",
                        object_id=object_id), img_gray),
                (Format("{object_id}_mask.jpg", object_id=object_id), mask),
            ],
            object_meta=object_meta,
        )
Example #8
                    abs_path)

        # Read image
        img = ImageReader(abs_path)

        # Apply running median to approximate the background image
        flat_field = RunningMedian(img, 10)

        # Correct image
        img = img / flat_field

        # Rescale intensities and convert to uint8 to speed up calculations
        img = RescaleIntensity(img, in_range=(0, 1.1), dtype="uint8")

        # Show progress bar for frames
        TQDM(Format("Frame {name}", name=name))

        # Convert image to uint8 gray
        img_gray = RGB2Gray(img)
        img_gray = Call(img_as_ubyte, img_gray)

        # Apply threshold find objects
        threshold = 204  # Call(skimage.filters.threshold_otsu, img_gray)
        mask = img_gray < threshold

        # Write corrected frames
        frame_fn = Format(os.path.join(export_path, "{name}.jpg"), name=name)
        ImageWriter(frame_fn, img)

        # Find objects
        regionprops = FindRegions(mask,
Example #9
        # TODO: Draw object info

        # Extract a vignette from the image
        vignette = ExtractROI(img, regionprops)

        # # Extract features from vignette
        # model = resnet18(pretrained=True)
        # model = torch.nn.Sequential(OrderedDict(
        #     list(model.named_children())[:-2]))

        # features = PyTorch(lambda x: model(x).cpu().numpy())(vignette)

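        # Running index of the current object in the stream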
        i = Enumerate()
        object_id = Format(
            "{sample_id}_{sample_split:d}_{sample_nsplit:d}_{sample_subid}_{i:d}",
            _kwargs=meta,
            i=i,
        )
        meta["object_id"] = object_id
        meta = CalculateZooProcessFeatures(regionprops, meta, "object_")

        # EcotaxaWriter(
        #     os.path.join(import_path, "export.zip"), "{object_id}.jpg",
        #     vignette, meta
        # )

        TQDM(object_id)

    p.run()
Example #10
import numpy as np

from morphocut import Pipeline
from morphocut.image import ImageWriter, RescaleIntensity
from morphocut.pims import BioformatsReader
from morphocut.str import Format
from morphocut.stream import TQDM, Slice, Pack

if __name__ == "__main__":
    with Pipeline() as p:
        input_fn = "/home/moi/Work/0-Datasets/06_CD20_brightfield_6.cif"

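        # Read every frame and its series index from the Bio-Formats file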
        frame, series = BioformatsReader(input_fn, meta=False)

        # Every second frame is in fact a mask
        # TODO: Batch consecutive objects in the stream

        frame, mask = Pack(2, frame).unpack(2)

        image_fn = Format("/tmp/cif/{}-img.png", series)
        mask_fn = Format("/tmp/cif/{}-mask.png", series)

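        # Rescale to 8-bit so the frames can be written as PNG files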
        frame = RescaleIntensity(frame, dtype=np.uint8)
        mask = RescaleIntensity(mask, dtype=np.uint8)

        ImageWriter(image_fn, frame)
        ImageWriter(mask_fn, mask)

        TQDM()

    print(p)

    p.run()
Example #11
    # Find objects
    regionprops = FindRegions(mask,
                              img_gray,
                              min_area=1000,
                              padding=10,
                              warn_empty=name)

    Call(rgb, 255, 0, 255)
    # For an object, extract a vignette/ROI from the image
    roi_orig = ExtractROI(img, regionprops, bg_color=255)

    # Generate an object identifier
    i = Enumerate()
    #Call(print,i)

    object_id = Format("{name}_{i:d}", name=name, i=i)
    #Call(print,object_id)

    object_fn = Format(os.path.join("/home/pi/PlanktonScope/", "OBJECTS",
                                    "{name}.jpg"),
                       name=object_id)

    ImageWriter(object_fn, roi_orig)

    # Calculate features. The calculated features are added to global_metadata.
    # Returns a Variable representing a dict for every object in the stream.
    meta = CalculateZooProcessFeatures(regionprops,
                                       prefix="object_",
                                       meta=global_metadata)

    #     json_meta = Call(json.dumps,meta, sort_keys=True, default=str)