Example 1
from morphocut import Pipeline
from morphocut.file import Find


def test_Find(data_path):
    d = data_path / "images"
    with Pipeline() as pipeline:
        result = Find(d, [".png"])

    stream = pipeline.transform_stream()
    pipeline.run()
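
This test (and the next one) relies on a data_path pytest fixture that points at a directory of sample images. A minimal sketch of such a fixture, assuming the test data sits next to the test module (the location is an assumption, not taken from the examples):

import pathlib

import pytest


@pytest.fixture
def data_path():
    # Hypothetical: the test data directory is assumed to live beside this file.
    return pathlib.Path(__file__).parent / "data"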
Example 2
import re

from morphocut import Pipeline
from morphocut.file import Find


def test_Find(data_path, sort, verbose, capsys):
    d = data_path / "images"
    with Pipeline() as pipeline:
        filename = Find(d, [".png"], sort, verbose)

    stream = pipeline.transform_stream()

    filenames = [o[filename] for o in stream]

    if sort:
        assert filenames == sorted(filenames)

    if verbose:
        out = capsys.readouterr().out
        assert re.search(r"^Found \d+ files in .+\.$", out)
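
The sort and verbose arguments above are presumably supplied by pytest parametrization rather than by fixtures. A hedged sketch of how such a test could be driven; the concrete parameter values are an assumption:

import pytest


@pytest.mark.parametrize("verbose", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_Find(data_path, sort, verbose, capsys):
    ...  # body as in Example 2 above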
Example 3
    "acq_instrument_ID": "copepode",
    "acq_volume": 24,
    "acq_flowrate": "Unknown",
    "acq_camera.resolution" : "(3280, 2464)",
    "acq_camera.iso" : 60,
    "acq_camera.shutter_speed" : 100,
    "acq_camera.exposure_mode" : 'off',
    "acq_camera.awb_mode" : 'off',
    "acq_nb_frames" : 1000
}

# Define processing pipeline
with Pipeline() as p:
    # Recursively find .jpg files in import_path.
    # Sort to get consecutive frames.
    abs_path = Find(import_path, [".jpg"], sort=True, verbose=True)

    # Extract name from abs_path
    name = Call(lambda p: os.path.splitext(os.path.basename(p))[0], abs_path)

    # Read image
    img = ImageReader(abs_path)

    # Show progress bar for frames
    #TQDM(Format("Frame {name}", name=name))
    
    # Apply running median to approximate the background image
    flat_field = RunningMedian(img, 5)

    # Correct image
    img = img / flat_field
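
The last two lines perform a flat-field correction: each frame is divided by a running-median estimate of the background, so evenly lit background pixels come out close to 1.0 while darker objects stay below. A small NumPy illustration of that effect (the array values are made up):

import numpy as np

# Synthetic frame: uniform background of 0.5 with a darker "object" patch.
frame = np.full((4, 4), 0.5)
frame[1:3, 1:3] = 0.2

# Assume the running median recovered the background level exactly.
flat_field = np.full((4, 4), 0.5)

corrected = frame / flat_field
# Background pixels become 1.0; the object patch drops to 0.4.
print(corrected)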
Example 4
from morphocut import Pipeline
from morphocut.file import Find
from morphocut.image import ExtractROI, ImageProperties, ThresholdConst, RGB2Gray
from morphocut.pandas import JoinMetadata, PandasWriter
from morphocut.str import Format, Parse
from morphocut.stream import TQDM, Enumerate, PrintObjects, StreamBuffer
from morphocut.contrib.zooprocess import CalculateZooProcessFeatures
from morphocut.integration.flowcam import FlowCamReader

import_path = "../../morphocut/tests/data/flowcam"
export_path = "/tmp/flowcam"

if __name__ == "__main__":
    print("Processing images under {}...".format(import_path))

    with Pipeline() as p:
        # Find FlowCam .lst index files below import_path
        lst_fn = Find(import_path, [".lst"])

        # Show a progress bar labeled with the current .lst file
        TQDM(lst_fn)

        # Read the objects (image, mask, metadata) referenced by each .lst file
        obj = FlowCamReader(lst_fn)

        img = obj.image
        img_gray = RGB2Gray(img, True)

        mask = obj.mask

        # Calculate region properties of the object from mask and gray image
        regionprops = ImageProperties(mask, img_gray)

        # Per-object metadata from the .lst file
        object_meta = obj.data

        object_id = Format("{lst_name}_{id}",
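
The snippet breaks off inside the Format call. A hedged sketch of how such a call could be completed using only nodes already shown on this page; the lst_name helper and the "id" metadata key are assumptions, not taken from the source:

# Hypothetical continuation inside the same pipeline (also needs os and
# Call, from the standard library and morphocut respectively).
lst_name = Call(lambda fn: os.path.splitext(os.path.basename(fn))[0], lst_fn)
object_id = Format(
    "{lst_name}_{id}",
    lst_name=lst_name,
    id=Call(lambda d: d["id"], object_meta),
)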
Example 5
import os

from morphocut import Call, Pipeline
from morphocut.file import Find
from morphocut.pandas import JoinMetadata, PandasWriter
from morphocut.str import Format, Parse
from morphocut.stream import TQDM, Enumerate, PrintObjects, StreamBuffer
from morphocut.zooprocess import CalculateZooProcessFeatures

# import_path = "/data-ssd/mschroeder/Datasets/generic_zooscan_peru_kosmos_2017"
import_path = "/home/moi/Work/0-Datasets/generic_zooscan_peru_kosmos_2017"
# import_path = '/studi-tmp/mkhan/generic_zooscan_peru_kosmos_2017'

if __name__ == "__main__":
    image_root = os.path.join(import_path, "raw")
    print("Processing images under {}...".format(image_root))

    with Pipeline() as p:
        # Images are named <sampleid>/<anything>_<a|b>.tif
        # e.g. generic_Peru_20170226_slow_M1_dnet/Peru_20170226_M1_dnet_1_8_a.tif
        abs_path = Find(image_root, [".tif"])

        # Path of the image relative to image_root
        rel_path = Call(os.path.relpath, abs_path, image_root)

        # Parse sample metadata out of the relative path
        meta = Parse(
            "generic_{sample_id}/{:greedy}_{sample_split:d}_{sample_nsplit:d}_{sample_subid}.tif",
            rel_path,
        )

        # Join sample-level metadata from the Excel header file on sample_id
        meta = JoinMetadata(
            os.path.join(import_path,
                         "Morphocut_header_scans_peru_kosmos_2017.xlsx"),
            meta,
            "sample_id",
        )

        PandasWriter(
Example 6
                os.makedirs(ANNOTATED, exist_ok=True)

                OBJECTS = os.path.join(path_time, "OBJECTS")
                os.makedirs(OBJECTS, exist_ok=True)

                archive_fn = os.path.join(
                    directory, "export",
                    str(date) + "_" + str(time) + "_ecotaxa_export.zip")

                client.publish("receiver/segmentation", "Start")
                # Define processing pipeline
                with Pipeline() as p:
                    # Recursively find .jpg files in import_path.
                    # Sort to get consecutive frames.
                    abs_path = Find(RAW, [".jpg"], sort=True, verbose=True)

                    FilterVariables(abs_path)

                    # Extract name from abs_path
                    name = Call(
                        lambda p: os.path.splitext(os.path.basename(p))[0],
                        abs_path)

                    Call(rgb, 0, 255, 0)
                    # Read image
                    img = ImageReader(abs_path)

                    # Show progress bar for frames
                    #TQDM(Format("Frame {name}", name=name))
    "acq_camera_shutter_speed": camera.shutter_speed
}

with open('/home/pi/PlanktonScope/config.txt', 'r') as config_txt:
    node_red_metadata = json.loads(config_txt.read())

global_metadata = {**local_metadata, **node_red_metadata}

archive_fn = os.path.join("/home/pi/PlanktonScope/", "export",
                          "ecotaxa_export.zip")
# Define processing pipeline
with Pipeline() as p:
    # Recursively find .jpg files in import_path.
    # Sort to get consecutive frames.
    abs_path = Find("/home/pi/PlanktonScope/tmp", [".jpg"],
                    sort=True,
                    verbose=True)

    # Extract name from abs_path
    name = Call(lambda p: os.path.splitext(os.path.basename(p))[0], abs_path)

    Call(rgb, 0, 255, 0)
    # Read image
    img = ImageReader(abs_path)

    # Show progress bar for frames
    #TQDM(Format("Frame {name}", name=name))

    # Apply running median to approximate the background image
    flat_field = RunningMedian(img, 5)