    def test_manual_image_creation_from_file(self):

        from jicbioimage.core.image import Image

        # Preamble: let us define the path to a TIFF file and create a numpy
        # array from it.
        #       from libtiff import TIFF
        #       tif = TIFF.open(path_to_tiff, 'r')
        #       ar = tif.read_image()
        path_to_tiff = os.path.join(DATA_DIR, 'single-channel.ome.tif')
        use_plugin('freeimage')
        ar = imread(path_to_tiff)

        # It is possible to create an image from a file.
        image = Image.from_file(path_to_tiff)
        self.assertEqual(len(image.history), 0)
        self.assertEqual(image.history.creation,
                         'Created Image from {}'.format(path_to_tiff))

        # With name...
        image = Image.from_file(path_to_tiff, name='Test1')
        self.assertEqual(image.history.creation,
                         'Created Image from {} as Test1'.format(path_to_tiff))

        # Without history...
        image = Image.from_file(path_to_tiff, log_in_history=False)
        self.assertEqual(len(image.history), 0)

        # It is worth noting that the image can support multiple channels.
        # This is particularly important when reading in images in RGB format.
        fpath = os.path.join(DATA_DIR, 'tjelvar.png')
        image = Image.from_file(fpath)
        self.assertEqual(image.shape, (50, 50, 3))
    def test_scaling_of_written_files(self):
        from jicbioimage.core.image import Image3D, Image
        directory = os.path.join(TMP_DIR, "im3d")

        z0 = np.zeros((50, 50), dtype=np.uint8)
        z1 = np.ones((50, 50), dtype=np.uint8)

        stack = np.dstack([z0, z1])
        im3d = Image3D.from_array(stack)
        im3d.to_directory(directory)

        im0 = Image.from_file(os.path.join(directory, "z0.png"))
        im1 = Image.from_file(os.path.join(directory, "z1.png"))

        self.assertTrue(np.array_equal(z0, im0))
        self.assertTrue(np.array_equal(z1 * 255, im1))

        z2 = np.ones((50, 50), dtype=np.uint8) * 255

        stack = np.dstack([z0, z1, z2])
        im3d = Image3D.from_array(stack)
        im3d.to_directory(directory)

        im0 = Image.from_file(os.path.join(directory, "z0.png"))
        im1 = Image.from_file(os.path.join(directory, "z1.png"))
        im2 = Image.from_file(os.path.join(directory, "z2.png"))

        self.assertTrue(np.array_equal(z0, im0))
        self.assertTrue(np.array_equal(z1, im1))
        self.assertTrue(np.array_equal(z2, im2))
def sample_image_from_lines(image_file, lines_file, dilation, reduce_method):

    data_image = Image.from_file(image_file)
    line_image = Image.from_file(lines_file)

    segmented_lines = segment(line_image, dilation)

    with open("all_series.csv", "w") as fh:
        fh.write(csv_header())
        for n, line_region in enumerate(yield_line_masks(segmented_lines)):
            line_intensity = data_image * line_region
            if reduce_method == "max":
                line_profile = np.amax(line_intensity, axis=1)
            elif reduce_method == "mean":
                sum_intensity = np.sum(line_intensity, axis=1)
                sum_rows = np.sum(line_region, axis=1)
                line_profile = sum_intensity / sum_rows
            else:
                err_msg = "Unknown reduce method: {}".format(reduce_method)
                raise RuntimeError(err_msg)

            series_filename = "series_{:02d}.csv".format(n)
            save_line_profile(series_filename, line_profile, n)

            fh.write(csv_body(line_profile, n))
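# A tiny, self-contained illustration (toy arrays, not pipeline data) of the
# two reduce methods above: a row-wise maximum versus a mask-aware row-wise
# mean, where the mean divides by the number of mask pixels in each row.
import numpy as np

_toy_region = np.array([[1, 1, 0],
                        [0, 1, 1]], dtype=bool)
_toy_data = np.array([[2., 4., 9.],
                      [9., 6., 8.]])
_masked = _toy_data * _toy_region
print(np.amax(_masked, axis=1))                                # "max":  [4. 8.]
print(np.sum(_masked, axis=1) / np.sum(_toy_region, axis=1))   # "mean": [3. 7.]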
def find_kilobots(image_filename, output_filename):
    """Find kilobots in a still image file."""

    kilobot_image = Image.from_file(image_filename)
    red_only = kilobot_image[:,:,0]

    imsave('red.png', red_only)
    edges = find_edges(red_only)
    blurred = gaussian_filter(edges, sigma=2)

    # bot_template = blurred[135:185,485:535]

    # imsave('bot_template.png', bot_template)

    bot_template = load_bot_template('bot_template.png')

    match_result = skimage.feature.match_template(blurred, bot_template, pad_input=True)

    imsave('match_result.png', match_result)

    selected_area = match_result > 0.6

    imsave('selected_area.png', selected_area)

    ccs = find_connected_components(selected_area)
    centroids = component_centroids(ccs)

    return centroids
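# Hedged sketch only: component_centroids is not defined in this listing. A
# minimal equivalent (an assumption, not the project's own helper) could use
# skimage.measure.regionprops, which yields one centroid per labelled
# connected component.
import skimage.measure

def component_centroids_sketch(labelled_image):
    """Return a list of (row, col) centroids, one per labelled component."""
    return [props.centroid
            for props in skimage.measure.regionprops(labelled_image)]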
def find_grains(input_file, output_dir=None):
    """Return tuple of segmentaitons (grains, difficult_regions)."""
    name = fpath2name(input_file)
    name = "grains-" + name + ".png"
    if output_dir:
        name = os.path.join(output_dir, name)

    image = Image.from_file(input_file)
    intensity = mean_intensity_projection(image)
    image = remove_scalebar(intensity, np.mean(intensity))
    image = threshold_abs(image, 75)
    image = invert(image)
    image = fill_holes(image, min_size=500)
    image = erode_binary(image, selem=disk(4))
    image = remove_small_objects(image, min_size=500)
    image = dilate_binary(image, selem=disk(4))

    dist = distance(image)
    seeds = local_maxima(dist)
    seeds = dilate_binary(seeds)  # Merge spurious double peaks.
    seeds = connected_components(seeds, background=0)

    segmentation = watershed_with_seeds(dist, seeds=seeds, mask=image)
    # Need a copy to avoid over-writing original.
    initial_segmentation = np.copy(segmentation)

    # Remove spurious blobs.
    segmentation = remove_large_segments(segmentation, max_size=3000)
    segmentation = remove_small_segments(segmentation, min_size=500)
    props = skimage.measure.regionprops(segmentation)
    segmentation = remove_non_round(segmentation, props, 0.6)

    difficult = initial_segmentation - segmentation

    return segmentation, difficult
def analyse_file(fpath, output_directory):
    """Analyse a single file."""
    logging.info("Analysing file: {}".format(fpath))
    image = Image.from_file(fpath)

    image_output_fpath = os.path.join(output_directory, 'original.png')
    with open(image_output_fpath, 'wb') as fh:
        fh.write(image.png())

    segmentation = preprocess_and_segment(image)

    false_colour_fpath = os.path.join(output_directory, 'false_color.png')
    with open(false_colour_fpath, 'wb') as fh:
        fh.write(segmentation.png())

    rgb_segmentation_fpath = os.path.join(output_directory, 'segmentation.png')
    write_segmented_image_as_rgb(segmentation, rgb_segmentation_fpath)

    cell_info = parameterise_cells(segmentation)
    csv_fpath = os.path.join(output_directory, 'results.csv')
    write_cell_info_to_csv(cell_info, csv_fpath)

    label_image = generate_label_image(segmentation)
    label_image_fpath = os.path.join(output_directory, 'labels.png')
    with open(label_image_fpath, 'wb') as fh:
        fh.write(label_image.png())
def find_single_seed(image_filename, output_filename):

    image = Image.from_file(image_filename)

    w, h = 500, 500
    tube_section = image[1024-w:1024+w,1024-h:1024+h]

    threshold = threshold_otsu(tube_section)

    thresholded = tube_section > threshold

    x, y, r = find_inner_circle_parameters(thresholded, 400, 500)

    # FIXME - think routine is finding outer circle

    stripped = strip_outside_circle(thresholded, (x, y), 300)

    eroded = binary_erosion(stripped, structure=np.ones((10, 10)))

    float_coords = map(np.mean, np.where(eroded > 0))
    ix, iy = map(int, float_coords)

    w, h = 100, 100
    selected = tube_section[ix-w:ix+w,iy-h:iy+h]

    with open(output_filename, 'wb') as f:
        f.write(selected.view(Image).png())
def process_single_identifier(dataset, identifier, output_path):

    print("Processing {}".format(identifier))

    image = Image.from_file(dataset.abspath_from_identifier(identifier))

    seeds = generate_seed_image(image, dataset, identifier)

    segmentation = segment(image, seeds)
    segmentation = filter_sides(segmentation)
    segmentation = filter_touching_border(segmentation)

    output_filename = generate_output_filename(
        dataset,
        identifier,
        output_path,
        '-segmented'
    )
    save_segmented_image_as_rgb(segmentation, output_filename)

    false_colour_filename = generate_output_filename(
        dataset,
        identifier,
        output_path,
        '-false_colour'
    )
    with open(false_colour_filename, 'wb') as fh:
        fh.write(segmentation.png())
def generate_composite_image(base_image, trajectory_image):
    still_image = Image.from_file(base_image)
    trajectories = Image.from_file(trajectory_image)[:, :, 0]

    annotation_points = np.where(trajectories != 0)

    color = 255, 0, 0

    for x, y in zip(*annotation_points):
        still_image[x, y] = color
        still_image[x+1, y] = color
        still_image[x-1, y] = color
        still_image[x, y+1] = color
        still_image[x, y-1] = color

    imsave('annotated_image.png', still_image)
def separate_plots(dataset, identifier, resource_dataset, working_dir):

    fpath = dataset.item_content_abspath(identifier)
    segmentation = load_segmentation_from_rgb_image(fpath)

    original_id = dataset.get_overlay('from')[identifier]
    original_fpath = resource_dataset.item_content_abspath(original_id)
    original_image = Image.from_file(original_fpath)

    approx_plot_locs = find_approx_plot_locs(dataset, identifier)

    sid_to_label = generate_segmentation_identifier_to_label_map(
        approx_plot_locs,
        segmentation
    )

    outputs = []

    for identifier in segmentation.identifiers:

        image_section = generate_region_image(
            original_image,
            segmentation,
            identifier
        )

        fname = 'region_{}.png'.format(sid_to_label[identifier])
        output_fpath = os.path.join(working_dir, fname)

        imsave(output_fpath, image_section)

        outputs.append((fname, {'plot_number': sid_to_label[identifier]}))

    return outputs
def find_grains(input_file, output_dir=None):
    """Return tuple of segmentaitons (grains, difficult_regions)."""
    name = fpath2name(input_file)
    name = "grains-" + name + ".png"
    if output_dir:
        name = os.path.join(output_dir, name)

    image = Image.from_file(input_file)
    intensity = mean_intensity_projection(image)

# Median filter seems more robust than Otsu.
#   image = threshold_otsu(intensity)
    image = threshold_median(intensity, scale=0.8)

    image = invert(image)
    image = erode_binary(image, selem=disk(2))
    image = dilate_binary(image, selem=disk(2))
    image = remove_small_objects(image, min_size=200)
    image = fill_holes(image, min_size=50)

    dist = distance(image)
    seeds = local_maxima(dist)
    seeds = dilate_binary(seeds)  # Merge spurious double peaks.
    seeds = connected_components(seeds, background=0)

    segmentation = watershed_with_seeds(dist, seeds=seeds, mask=image)

    # Remove spurious blobs.
    segmentation = remove_large_segments(segmentation, max_size=3000)
    segmentation = remove_small_segments(segmentation, min_size=100)

    return segmentation
    def test_creating_transformations_from_scratch(self):

        # What if the default names of images was just the order in which they
        # were created?
        # Or perhaps the order + the function name, e.g.
        # 1_gaussian.png
        # 2_sobel.png
        # 3_gaussian.png
        # The order could be tracked in a class variable in an AutoName
        # object. The AutoName object could also store the output directory
        # as a class variable.

        from jicbioimage.core.image import Image
        from jicbioimage.core.transform import transformation
        from jicbioimage.core.io import AutoName
        AutoName.directory = TMP_DIR

        @transformation
        def identity(image):
            return image

        image = Image.from_file(os.path.join(DATA_DIR, 'tjelvar.png'))
        image = identity(image)
        self.assertEqual(len(image.history), 1, image.history)
        self.assertEqual(str(image.history[-1]), '<History.Event(identity(image))>')
        created_fpath = os.path.join(TMP_DIR, '1_identity.png')
        self.assertTrue(os.path.isfile(created_fpath),
                        'No such file: {}'.format(created_fpath))
def annotate_single_identifier(dataset, identifier, output_path):
    file_path = dataset.abspath_from_identifier(identifier)

    image = Image.from_file(file_path)
    grayscale = np.mean(image, axis=2)

    annotated = AnnotatedImage.from_grayscale(grayscale)
    xdim, ydim, _ = annotated.shape

    def annotate_location(fractional_coords):

        xfrac, yfrac = fractional_coords

        ypos = int(ydim * xfrac)
        xpos = int(xdim * yfrac)
        for x in range(-2, 3):
            for y in range(-2, 3):
                annotated.draw_cross(
                    (xpos+x, ypos+y),
                    color=(255, 0, 0),
                    radius=50
                )

    for loc in find_approx_plot_locs(dataset, identifier):
        annotate_location(loc)

    output_basename = os.path.basename(file_path)
    full_output_path = os.path.join(output_path, output_basename)
    with open(full_output_path, 'wb') as f:
        f.write(annotated.png())
def analyse_file(fpath, output_directory):
    """Analyse a single file."""
    logging.info("Analysing file: {}".format(fpath))

    AutoName.directory = output_directory

    image = Image.from_file(fpath)
    image = identity(image)
    def labels_to_joined_image(labels):
        images = []
        for plot_label in labels:
            image_fpath = dataset.item_content_abspath(label_to_id[plot_label])
            image = Image.from_file(image_fpath)
            images.append(image)

        return join_horizontally(images)
def load_and_downscale(input_filename):
    """Load the image, covert to grayscale and downscale as needed."""

    image = Image.from_file(input_filename)
    blue_channel = image[:,:,2]
    downscaled = downscale_local_mean(blue_channel, (2, 2))

    return downscaled
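# Quick self-contained check (toy array, not project data) of what
# downscale_local_mean does here: each non-overlapping 2x2 block is replaced
# by its mean, so a (4, 4) uint8 array becomes a (2, 2) float array.
import numpy as np
from skimage.transform import downscale_local_mean

_toy = np.arange(16, dtype=np.uint8).reshape(4, 4)
print(downscale_local_mean(_toy, (2, 2)))
# [[ 2.5  4.5]
#  [10.5 12.5]]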
    def identifiers_to_joined_image(identifiers):
        images = []
        for identifier in identifiers:
            image_fpath = dataset.item_content_abspath(identifier)
            image = Image.from_file(image_fpath)
            images.append(image)

        return join_horizontally(images)
def annotate(input_file, output_dir):
    """Write an annotated image to disk."""
    logger.info("---")
    logger.info('Input image: "{}"'.format(os.path.abspath(input_file)))
    image = Image.from_file(input_file)
    intensity = mean_intensity_projection(image)
    norm_intensity = normalise(intensity)
    norm_rgb = np.dstack([norm_intensity, norm_intensity, norm_intensity])

    name = fpath2name(input_file)
    png_name = name + ".png"
    csv_name = name + ".csv"
    png_path = os.path.join(output_dir, png_name)
    csv_path = os.path.join(output_dir, csv_name)

    tubes = find_tubes(input_file, output_dir)
    grains, difficult = find_grains(input_file, output_dir)
    tubes = remove_tubes_not_touching_grains(tubes, grains)
    tubes = remove_tubes_that_are_grains(tubes, grains)

    ann = AnnotatedImage.from_grayscale(intensity)

    num_grains = 0
    for n, i in enumerate(grains.identifiers):
        n = n + 1
        region = grains.region_by_identifier(i)
        ann.mask_region(region.inner.inner.inner.border.dilate(),
                        color=(0, 255, 0))
        num_grains = n

    num_tubes = 0
    for n, i in enumerate(tubes.identifiers):
        n = n + 1
        region = tubes.region_by_identifier(i)
        highlight = norm_rgb * pretty_color(i)
        ann[region] = highlight[region]
        ann.mask_region(region.dilate(3).border.dilate(3),
                        color=pretty_color(i))
        num_tubes = n

    ann.text_at("Num grains: {:3d}".format(num_grains), (10, 10),
                antialias=True, color=(0, 255, 0), size=48)
    logger.info("Num grains: {:3d}".format(num_grains))

    ann.text_at("Num tubes : {:3d}".format(num_tubes), (60, 10),
                antialias=True, color=(255, 0, 255), size=48)
    logger.info("Num tubes : {:3d}".format(num_tubes))

    logger.info('Output image: "{}"'.format(os.path.abspath(png_path)))
    with open(png_path, "wb") as fh:
        fh.write(ann.png())

    logger.info('Output csv: "{}"'.format(os.path.abspath(csv_path)))
    with open(csv_path, "w") as fh:
        fh.write("{},{},{}\n".format(png_name, num_grains, num_tubes))

    return png_name, num_grains, num_tubes
def annotate(input_file, output_dir):
    """Write an annotated image to disk."""
    logger.info("---")
    logger.info('Input image: "{}"'.format(os.path.abspath(input_file)))
    image = Image.from_file(input_file)
    intensity = mean_intensity_projection(image)

    name = fpath2name(input_file)
    png_name = name + ".png"
    csv_name = name + ".csv"
    png_path = os.path.join(output_dir, png_name)
    csv_path = os.path.join(output_dir, csv_name)

    grains = find_grains(input_file, output_dir)

    ann = AnnotatedImage.from_grayscale(intensity)

    # Determine the median grain size based on the segmented regions.
    areas = []
    for i in grains.identifiers:
        region = grains.region_by_identifier(i)
        areas.append(region.area)
    median_grain_size = np.median(areas)

    num_grains = 0
    for i in grains.identifiers:
        region = grains.region_by_identifier(i)
        color = pretty_color(i)
        num_grains_in_area = region.area / median_grain_size
        num_grains_in_area = int(round(num_grains_in_area))
        if num_grains_in_area == 0:
            continue

        outer_line = region.dilate().border
        outline = region.border.dilate() * np.logical_not(outer_line)
        ann.mask_region(outline, color=color)
        ann.text_at(str(num_grains_in_area), region.centroid,
                    color=(255, 255, 255))
        num_grains = num_grains + num_grains_in_area

    ann.text_at("Num grains: {:3d}".format(num_grains), (10, 10),
                antialias=True, color=(0, 255, 0), size=48)
    logger.info("Num grains: {:3d}".format(num_grains))

    logger.info('Output image: "{}"'.format(os.path.abspath(png_path)))
    with open(png_path, "wb") as fh:
        fh.write(ann.png())

    logger.info('Output csv: "{}"'.format(os.path.abspath(csv_path)))
    with open(csv_path, "w") as fh:
        fh.write("{},{}\n".format(png_name, num_grains))

    return png_name, num_grains
        def generate_column(numbers):
            images = []
            for i in numbers:
                i = selected[i]
                image_fpath = dataset.item_content_abspath(i)
                images.append(
                    downscale_local_mean(Image.from_file(image_fpath), (5, 5, 1))
                )

            column = join_horizontally(images)

            return column
def yield_stack_from_path(input_stack_path):
    """Yield individual images from stack path."""

    all_files = os.listdir(input_stack_path)
    image_files = filter(is_image_filename, all_files)
    sorted_image_files = sorted_nicely(image_files)
    full_image_paths = [os.path.join(input_stack_path, fn) 
                        for fn in sorted_image_files]

    images = (Image.from_file(f) for f in full_image_paths)

    return images
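# The helpers used above are not defined in this listing; the functions below
# are assumed, minimal sketches of what they might look like (the natural-sort
# key is the usual regex-based recipe), not the project's own code.
import re

def is_image_filename(filename):
    """Return True if the filename has a common image extension."""
    return filename.lower().endswith((".png", ".tif", ".tiff", ".jpg", ".jpeg"))

def sorted_nicely(filenames):
    """Return filenames sorted so that z2.png comes before z10.png."""
    def key(name):
        return [int(part) if part.isdigit() else part.lower()
                for part in re.split(r"(\d+)", name)]
    return sorted(filenames, key=key)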
def generate_plot_image_list(dataset, dates_to_identifiers):

    images = []
    sorted_dates = sorted(dates_to_identifiers)

    for date in sorted_dates:
        identifier = dates_to_identifiers[date]
        image_abspath = dataset.item_content_abspath(identifier)
        image = Image.from_file(image_abspath)
        image = generate_image_with_colour_summary(image)
        images.append(image)

    return images
def load_segmentation_from_rgb_image(filename):

    rgb_image = Image.from_file(filename)

    ydim, xdim, _ = rgb_image.shape

    segmentation = np.zeros((ydim, xdim), dtype=np.uint32)

    segmentation += rgb_image[:, :, 2]
    segmentation += rgb_image[:, :, 1].astype(np.uint32) * 256
    segmentation += rgb_image[:, :, 0].astype(np.uint32) * 256 * 256

    return segmentation.view(SegmentedImage)
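# Hedged sketch (an assumption, not the project's writer function): the
# inverse of the decoding above, packing up to 2**24 - 1 segment identifiers
# into the red (high byte), green (middle byte) and blue (low byte) channels.
import numpy as np

def pack_segmentation_as_rgb(segmentation):
    """Return a uint8 RGB array encoding the segmentation identifiers."""
    ids = np.asarray(segmentation, dtype=np.uint32)
    red = (ids >> 16) & 255
    green = (ids >> 8) & 255
    blue = ids & 255
    return np.dstack([red, green, blue]).astype(np.uint8)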
def generate_plots_image(dataset, dates_to_identifiers):

    images = []
    sorted_dates = sorted(dates_to_identifiers)

    for date in sorted_dates:
        identifier = dates_to_identifiers[date]
        image_abspath = dataset.item_content_abspath(identifier)
        image = Image.from_file(image_abspath)
        images.append(image)

    output_image = join_horizontally(images)

    return output_image
def test_plot_colour_summary(dataset, working_dir):

    identifiers = dataset.identifiers

    identifier = identifiers[2004]

    plot_image = Image.from_file(dataset.item_content_abspath(identifier))
    output_image = generate_image_with_colour_summary(plot_image)

    output_fpath = os.path.join(working_dir, 'colour.png')
    with open(output_fpath, 'wb') as fh:
        fh.write(output_image.view(Image).png())

    return [('colour.png', {})]
def analyse_file_org(fpath, output_directory):
    """Analyse a single file."""
    logging.info("Analysing file: {}".format(fpath))
    image = Image.from_file(fpath)
    image = identity(image)
    image = select_red(image)
    image = invert(image)
    image = threshold_otsu(image)

    seeds = remove_small_objects(image, min_size=1000)
    seeds = fill_small_holes(seeds, min_size=1000)
    seeds = erode_binary(seeds, selem=disk(30))
    seeds = connected_components(seeds, background=0)

    watershed_with_seeds(-image, seeds=seeds, mask=image)
def main():
    # Parse the command line arguments.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("input_file", help="Input file")
    parser.add_argument("mask_file", help="Mask file")
    parser.add_argument("parameters_file", help="Parameters file")
    parser.add_argument("output_dir", help="Output directory")
    parser.add_argument("--debug", default=False, action="store_true",
                        help="Write out intermediate images")
    args = parser.parse_args()

    # Check that the input file exists.
    if not os.path.isfile(args.input_file):
        parser.error("{} not a file".format(args.input_file))
    if not os.path.isfile(args.parameters_file):
        parser.error("{} not a file".format(args.parameters_file))

    # Read in the parameters.
    params = Parameters.from_file(args.parameters_file)

    # Create the output directory if it does not exist.
    if not os.path.isdir(args.output_dir):
        os.mkdir(args.output_dir)
    AutoName.directory = args.output_dir

    # Only write out intermediate images in debug mode.
    if not args.debug:
        AutoWrite.on = False

    # Setup a logger for the script.
    log_fname = "audit.log"
    log_fpath = os.path.join(args.output_dir, log_fname)
    logging_level = logging.INFO
    if args.debug:
        logging_level = logging.DEBUG
    logging.basicConfig(filename=log_fpath, level=logging_level)

    # Log some basic information about the script that is running.
    logging.info("Script name: {}".format(__file__))
    logging.info("Script version: {}".format(__version__))
    logging.info("Parameters: {}".format(params))

    # Run the analysis.
    mask_im = Image.from_file(args.mask_file)
    mask = Region.select_from_array(mask_im, 0)
    identity(mask)
    analyse_file(args.input_file, mask, args.output_dir, **params)
    def test_parse_manifest_raises_backwards_compatible_with_abs_paths(self):
        # Create manifest.json file without fpath.
        manifest_fp = os.path.join(TMP_DIR, 'manifest.json')
        shutil.copy(os.path.join(DATA_DIR, "tjelvar.png"), TMP_DIR)
        abs_im_fpath = os.path.join(TMP_DIR, 'tjelvar.png')
        entry = dict(filename=abs_im_fpath, series=0, channel=0, zslice=0,
                     timepoint=0)
        with open(manifest_fp, 'w') as fh:
            json.dump([entry], fh)

        from jicbioimage.core.image import ImageCollection, Image
        image_collection = ImageCollection()
        image_collection.parse_manifest(manifest_fp)
        im = image_collection[0].image
        expected_im = Image.from_file(abs_im_fpath)
        import numpy as np
        self.assertTrue(np.array_equal(im, expected_im))
def find_template_leader(filename):
    """Find template kilobot for matching. Currently hard-c"""

    kilobot_image = Image.from_file(filename)
    red_only = kilobot_image[:,:,0]

    edges = find_edges(red_only)
    blurred = gaussian_filter(edges, sigma=5)

    x1 = 255
    x2 = 325
    y1 = 650
    y2 = 710

    bot_template = blurred[x1:x2,y1:y2]

    return bot_template
def find_template(filename):
    """Find template kilobot for matching. Currently hard-c"""

    kilobot_image = Image.from_file(filename)
    red_only = kilobot_image[:,:,0]

    edges = find_edges(red_only)
    blurred = gaussian_filter(edges, sigma=2)

    x1 = 135
    x2 = 185
    y1 = 485
    y2 = 535

    bot_template = blurred[x1:x2, y1:y2]

    return bot_template
def read_image_and_output_json(input_segmentation_filename,
                               input_metadata_filename, input_has_filename):
    common_metadata = extract_common_metadata(input_metadata_filename)
    has_data = extract_has_data(input_has_filename)
    common_metadata.update(has_data)
    segmented_image = Image.from_file(input_segmentation_filename)
    identifier_image = convert_rgb_array_to_uint32(segmented_image)

    all_identifiers = np.unique(identifier_image)

    cell_dict = cell_dict_from_identifier_image(identifier_image)

    for cell in cell_dict.values():
        cell.update(common_metadata)

    all_cells = {"cells": cell_dict}

    print(json.dumps(all_cells, indent=2))
def highlight_plot(input_file, output_file, plot_id):
    """Highlight a particular plot in a field image"""
    image = Image.from_file(input_file)

    # Debug speed up.
#   image = image[0:500, 0:500]  # Quicker run time for debugging purposes.

    name, ext = os.path.splitext(input_file)

    plots = segment(image)

    ann = get_grayscale_ann(image)
    ann = color_in_plots(ann, image, plots)
    ann = outline_plots(ann, image, plots)
    ann = red_outline(ann, plots, plot_id)

    with open(output_file, "wb") as fh:
        fh.write(ann.png())
    def test_storing_array_argument_as_string(self):
        import numpy as np
        from jicbioimage.core.image import Image
        from jicbioimage.core.transform import transformation
        from jicbioimage.core.io import AutoName
        AutoName.directory = TMP_DIR

        @transformation
        def red_channel(image):
            return image[:, :, 0]

        @transformation
        def green_channel(image):
            return image[:, :, 1]

        @transformation
        def channel_diff(im1, im2):
            return np.abs(im1 - im2)

        org_im = Image.from_file(os.path.join(DATA_DIR, 'tjelvar.png'))

        green = green_channel(org_im)
        red = red_channel(org_im)

        # Test with args.
        diff = channel_diff(red, green)
        last_event = diff.history[-1]
        self.assertEqual(last_event.args[0], repr(green))
        pos = hex(id(green))
        expected = """<History.Event(red_channel(image))>
<History.Event(channel_diff(image, '<Image object at {}, dtype=uint8>'))>""".format(
            pos)
        actual = "\n".join([str(e) for e in diff.history])
        self.assertEqual(actual, expected)

        # Test with kwargs.
        diff = channel_diff(red, im2=green)
        last_event = diff.history[-1]
        self.assertEqual(last_event.kwargs["im2"], repr(green))
        expected = """<History.Event(red_channel(image))>
<History.Event(channel_diff(image, im2='<Image object at {}, dtype=uint8>'))>""".format(
            pos)
        actual = "\n".join([str(e) for e in diff.history])
        self.assertEqual(actual, expected)
    def test_repr_with_int_arg(self):

        from jicbioimage.core.image import Image
        from jicbioimage.core.transform import transformation

        from jicbioimage.core.io import AutoName
        AutoName.directory = TMP_DIR

        image = Image.from_file(os.path.join(DATA_DIR, 'tjelvar.png'))
        image = image[:, :, 0]

        @transformation
        def threshold_abs(image, cutoff):
            """Return thresholded image."""
            return image > cutoff

        image = threshold_abs(image, 50)

        event = image.history[0]
        self.assertEqual(repr(event),
                         "<History.Event(threshold_abs(image, 50))>")
def analyse_file(fpath, output_directory, test_data_only=False):
    """Analyse a single file."""
    logging.info("Analysing file: {}".format(fpath))
    AutoName.directory = output_directory

    image = Image.from_file(fpath)

    negative = get_negative_single_channel(image)
    seeds = find_seeds(negative)
    mask = find_mask(negative)

    eaten_leaf_segmentation = watershed_with_seeds(negative,
                                                   seeds=seeds,
                                                   mask=mask)
    whole_leaf_segmentation = post_process_segmentation(
        eaten_leaf_segmentation.copy())

    ann = annotate(image, whole_leaf_segmentation, eaten_leaf_segmentation)
    ann_fpath = os.path.join(output_directory, "annotated.png")
    with open(ann_fpath, "wb") as fh:
        fh.write(ann.png())
def analyse_file(fpath, output_directory):
    """Analyse a single file."""
    logging.info("Analysing file: {}".format(fpath))
    image = Image.from_file(fpath)
    image = identity(image)
    def test_16bit_tiff_file(self):
        from jicbioimage.core.image import Image
        im = Image.from_file(os.path.join(DATA_DIR, 'white-16bit.tiff'))
        self.assertEqual(im.dtype, np.uint16)
        self.assertEqual(np.max(im), np.iinfo(np.uint16).max)
    def test_png_type(self):
        from jicbioimage.core.image import Image
        fpath = os.path.join(DATA_DIR, 'tjelvar.png')
        image = Image.from_file(fpath)
        self.assertEqual(type(image.png()), bytes)