def write_cell_views(fpath_prefix, wall_projection, marker_projection,
                     region, celldata):
    """Write wall, marker and combined views of a cell to disk."""
    wall_ann = AnnotatedImage.from_grayscale(wall_projection, (1, 0, 0))
    marker_ann = AnnotatedImage.from_grayscale(marker_projection, (0, 1, 0))
    ann = wall_ann + marker_ann

    color = (200, 200, 200)
    ann.mask_region(region.border, color)
    ann.draw_cross(region.centroid, color)

    dilated_region = region.dilate(20)
    wall_ann[np.logical_not(dilated_region)] = (0, 0, 0)
    marker_ann[np.logical_not(dilated_region)] = (0, 0, 0)
    ann[np.logical_not(dilated_region)] = (0, 0, 0)

    # If the rotation is not 0, 90, 180 or 270 the image becomes larger than
    # the input and the scaling gets messed up. The scaling may not matter,
    # since downstream processing will use unit vectors around the centre of
    # the cell (the centroid), but for now I want to create annotations where
    # the user clicks are reflected in the output.
    # rotation = random.randrange(0, 360)
    rotation = random.choice([0, 90, 180, 270])
    celldata["rotation"] = rotation

    for suffix, annotation in [("-wall", wall_ann),
                               ("-marker", marker_ann),
                               ("-combined", ann)]:
        fpath = fpath_prefix + suffix + ".png"
        annotation = post_process_annotation(annotation, dilated_region,
                                              celldata, rotation)
        scipy.misc.imsave(fpath, annotation)
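# A minimal sketch (not part of the pipeline above) illustrating the comment in
# write_cell_views: scipy.ndimage.rotate grows the output canvas for angles that
# are not multiples of 90 degrees, which is why only 0/90/180/270 are sampled.
def _demo_rotation_canvas_growth():
    """Hypothetical helper; prints output shapes for two rotation angles."""
    import numpy as np
    import scipy.ndimage

    canvas = np.zeros((100, 200), dtype=np.uint8)
    # Multiples of 90 degrees merely swap the axes ...
    print(scipy.ndimage.rotate(canvas, 90, order=0).shape)   # (200, 100)
    # ... whereas other angles enlarge the canvas to fit the rotated corners.
    print(scipy.ndimage.rotate(canvas, 45, order=0).shape)   # roughly (213, 213)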
def save_annotated_leaf(input_dir, input_image, output_file, random, **kwargs):
    """Write out annotated leaf image."""
    microscopy_collection = get_microscopy_collection(input_image)
    wall_stack = microscopy_collection.zstack(c=kwargs["wall_channel"])
    surface = surface_from_stack(wall_stack, **kwargs)
    wall_projection = project_wall(wall_stack, surface, **kwargs)

    marker_stack = microscopy_collection.zstack(c=kwargs["marker_channel"])
    # Refactor with analysis script to ensure always in sync.
    marker_projection = project_marker(marker_stack, surface, **kwargs)

    wall_ann = AnnotatedImage.from_grayscale(wall_projection, (1, 0, 0))
    marker_ann = AnnotatedImage.from_grayscale(marker_projection, (0, 1, 0))
    ann = wall_ann + marker_ann

    json_fpaths = [
        os.path.join(input_dir, f)
        for f in os.listdir(input_dir)
        if f.endswith(".json")
    ]

    y_key = "normalised_marker_y_coord"
    x_key = "normalised_marker_x_coord"
    for fpath in json_fpaths:
        with open(fpath) as fh:
            celldata = json.load(fh)
        if y_key not in celldata:
            continue
        if x_key not in celldata:
            continue
        print(fpath)
        frac_pt = celldata[y_key], celldata[x_key]
        rel_pt = tuple([i - 0.5 for i in frac_pt])
        rotation = celldata["rotation"]
        if random:
            rotation = 0
        marker_pt = original_image_point(
            rel_point=rel_pt,
            rotation=rotation,
            ydim=celldata["ydim"],
            xdim=celldata["xdim"],
            dy_offset=celldata["dy_offset"],
            dx_offset=celldata["dx_offset"])
        ann.draw_line(marker_pt, celldata["centroid"], (255, 255, 255))
        ann.draw_cross(celldata["centroid"], (255, 255, 255))

    with open(output_file, "wb") as fh:
        fh.write(ann.png())
def generate_annotated_image(collection, cell_level_threshold):
    """Write out a projection annotated with the size of each segmented region."""
    zstack = collection.zstack_array(s=0, c=2)
    probe_stack = collection.zstack_array(s=0, c=0)
    # NOTE: the return value of this projection is discarded.
    max_intensity_projection(probe_stack)

    seeds = find_seeds(zstack)
    # probe_stack2 = collection.zstack_array(s=0, c=1)  # RI edit 2
    zstack = zstack + probe_stack  # + probe_stack2  # RI edit 3
    segmentation = segment_from_seeds(zstack, seeds, cell_level_threshold)

    projection = max_intensity_projection(zstack)
    projection_as_uint8 = uint8ify(projection)
    annotated_projection = AnnotatedImage.from_grayscale(projection_as_uint8)

    rids = np.unique(segmentation)
    for rid in rids[1:]:
        x, y, z = map(np.mean, np.where(segmentation == rid))
        size = len(np.where(segmentation == rid)[0])
        annotated_projection.text_at(str(size), y - 10, x)

    annotation_filename = 'annotated_image.png'
    with open(annotation_filename, 'wb') as f:
        f.write(annotated_projection.png())
def post_process_annotation(ann, dilated_region, celldata, rotation):
    """Crop, pad, enlarge and rotate the annotation, recording offsets in celldata."""
    # Crop box around region.
    yis, xis = dilated_region.index_arrays
    ymin, ymax = np.min(yis), np.max(yis)
    xmin, xmax = np.min(xis), np.max(xis)
    ann = ann[ymin:ymax, xmin:xmax]
    celldata["dy_offset"] = ymin
    celldata["dx_offset"] = xmin

    # Pad cropped box.
    ydim, xdim, zdim = ann.shape
    p = 25
    pydim = ydim + p + p
    pxdim = xdim + p + p
    padded = AnnotatedImage.blank_canvas(width=pxdim, height=pydim)
    padded[p:ydim + p, p:xdim + p] = ann
    ann = padded
    celldata["dy_offset"] -= p
    celldata["dx_offset"] -= p
    celldata["ydim"] = pydim
    celldata["xdim"] = pxdim

    # Enlarge padded cropped box.
    ann = scipy.misc.imresize(ann, 3.0, "nearest").view(AnnotatedImage)

    # Rotate the enlarged padded cropped box.
    ann = scipy.ndimage.rotate(ann, rotation, order=0).view(AnnotatedImage)

    return ann
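# A minimal sketch (assumed values, not used by post_process_annotation) showing
# what the recorded dy_offset/dx_offset mean: adding them to a coordinate in the
# padded crop recovers the coordinate in the original projection. The 3x resize
# and the rotation steps are ignored here.
def _demo_offset_bookkeeping():
    """Hypothetical helper illustrating the offset arithmetic above."""
    ymin, xmin, p = 40, 60, 25            # assumed crop origin and padding
    dy_offset = ymin - p                  # as recorded in celldata
    dx_offset = xmin - p
    padded_point = (30, 50)               # a pixel in the padded crop
    original_point = (padded_point[0] + dy_offset,
                      padded_point[1] + dx_offset)
    return original_point                 # (45, 85)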
def analyse_image(image):
    """Return a canvas with the convex hull of each detected object outlined."""
    image = normalise(image) * 255
    canvas = AnnotatedImage.from_grayscale(image)

    image = smooth_gaussian(image.astype(float), 5)
    image = threshold_abs(image, 30)
    image = erode_binary(image)
    image = remove_small_objects(image, 5)
    selem = skimage.morphology.disk(2)
    image = dilate_binary(image, selem)

    segmentation = connected_components(image, background=0)
    for i in segmentation.identifiers:
        color = pretty_color_from_identifier(i)
        region = segmentation.region_by_identifier(i)
        convex_hull = region.convex_hull
        outline = convex_hull.inner.border.dilate()
        canvas.mask_region(outline, color=color)

    return canvas
def annotate_with_set_of_points(image, points):
    grayscale = np.mean(image, axis=2)
    annotated = AnnotatedImage.from_grayscale(grayscale)
    xdim, ydim, _ = annotated.shape

    def annotate_location(fractional_coords):
        xfrac, yfrac = fractional_coords
        ypos = int(ydim * xfrac)
        xpos = int(xdim * yfrac)
        for x in range(-2, 3):
            for y in range(-2, 3):
                annotated.draw_cross(
                    (xpos + x, ypos + y),
                    color=(255, 0, 0),
                    radius=50
                )

    for loc in points:
        annotate_location(loc)

    return annotated
def annotate_single_identifier(dataset, identifier, output_path):
    file_path = dataset.abspath_from_identifier(identifier)
    image = Image.from_file(file_path)
    grayscale = np.mean(image, axis=2)
    annotated = AnnotatedImage.from_grayscale(grayscale)
    xdim, ydim, _ = annotated.shape

    def annotate_location(fractional_coords):
        xfrac, yfrac = fractional_coords
        ypos = int(ydim * xfrac)
        xpos = int(xdim * yfrac)
        for x in range(-2, 3):
            for y in range(-2, 3):
                annotated.draw_cross(
                    (xpos + x, ypos + y),
                    color=(255, 0, 0),
                    radius=50
                )

    for loc in find_approx_plot_locs(dataset, identifier):
        annotate_location(loc)

    output_basename = os.path.basename(file_path)
    full_output_path = os.path.join(output_path, output_basename)
    with open(full_output_path, 'wb') as f:
        f.write(annotated.png())
def quantify_yeast_growth(input_filename, annotation_filename,
                          profile_filename):
    """Quantify yeast growth, writing out the annotation and profile."""
    downscaled = load_and_downscale(input_filename)
    annotation = AnnotatedImage.from_grayscale(downscaled)

    circle = fit_central_circle(downscaled)
    circle_coords = circle_perimeter(*circle)
    annotation[circle_coords] = 0, 255, 0

    x, y, r = circle
    center = (x, y)
    xdim, ydim = downscaled.shape
    line_length = ydim - y

    mean_profile_line = find_mean_profile_line(downscaled, annotation,
                                               center,
                                               -math.pi / 4, math.pi / 4,
                                               line_length)

    record_line_profile(profile_filename, mean_profile_line)

    with open(annotation_filename, 'wb') as f:
        f.write(annotation.png())
def annotate_tensors(ydim, xdim, tensor_manager, fh):
    """Write out tensor image."""
    ann = AnnotatedImage.blank_canvas(width=xdim, height=ydim)
    for i in tensor_manager.identifiers:
        tensor = tensor_manager[i]
        color = pretty_color_from_identifier(tensor.cell_id)
        ann.draw_line(tensor.centroid, tensor.marker, color)
    fh.write(ann.png())
def annotate(input_file, output_dir):
    """Write an annotated image to disk."""
    logger.info("---")
    logger.info('Input image: "{}"'.format(os.path.abspath(input_file)))
    image = Image.from_file(input_file)
    intensity = mean_intensity_projection(image)
    norm_intensity = normalise(intensity)
    norm_rgb = np.dstack([norm_intensity, norm_intensity, norm_intensity])

    name = fpath2name(input_file)
    png_name = name + ".png"
    csv_name = name + ".csv"
    png_path = os.path.join(output_dir, png_name)
    csv_path = os.path.join(output_dir, csv_name)

    tubes = find_tubes(input_file, output_dir)
    grains, difficult = find_grains(input_file, output_dir)
    tubes = remove_tubes_not_touching_grains(tubes, grains)
    tubes = remove_tubes_that_are_grains(tubes, grains)

    ann = AnnotatedImage.from_grayscale(intensity)

    num_grains = 0
    for n, i in enumerate(grains.identifiers):
        n = n + 1
        region = grains.region_by_identifier(i)
        ann.mask_region(region.inner.inner.inner.border.dilate(),
                        color=(0, 255, 0))
        num_grains = n

    num_tubes = 0
    for n, i in enumerate(tubes.identifiers):
        n = n + 1
        region = tubes.region_by_identifier(i)
        highlight = norm_rgb * pretty_color(i)
        ann[region] = highlight[region]
        ann.mask_region(region.dilate(3).border.dilate(3),
                        color=pretty_color(i))
        num_tubes = n

    ann.text_at("Num grains: {:3d}".format(num_grains), (10, 10),
                antialias=True, color=(0, 255, 0), size=48)
    logger.info("Num grains: {:3d}".format(num_grains))

    ann.text_at("Num tubes : {:3d}".format(num_tubes), (60, 10),
                antialias=True, color=(255, 0, 255), size=48)
    logger.info("Num tubes : {:3d}".format(num_tubes))

    logger.info('Output image: "{}"'.format(os.path.abspath(png_path)))
    with open(png_path, "wb") as fh:
        fh.write(ann.png())

    logger.info('Output csv: "{}"'.format(os.path.abspath(csv_path)))
    with open(csv_path, "w") as fh:
        fh.write("{},{},{}\n".format(png_name, num_grains, num_tubes))

    return png_name, num_grains, num_tubes
def annotate(image, segmentation):
    """Return annotated image."""
    uint8_normalised = normalise(image) * 255
    annotation = AnnotatedImage.from_grayscale(uint8_normalised)
    for i in segmentation.identifiers:
        region = segmentation.region_by_identifier(i)
        annotation.mask_region(region.dilate(1).border, color=pretty_color(i))
    return annotation
def annotate_markers(markers, cells, fh):
    """Write out marker image."""
    ydim, xdim = markers.shape
    ann = AnnotatedImage.blank_canvas(width=xdim, height=ydim)
    for i in markers.identifiers:
        m_region = markers.region_by_identifier(i)
        cell_id = marker_cell_identifier(m_region, cells)
        color = pretty_color_from_identifier(cell_id)
        ann.mask_region(m_region, color)
    fh.write(ann.png())
def test_from_grayscale(self):
    from jicbioimage.illustrate import AnnotatedImage as AnnIm
    grayscale = np.array([
        [0, 10, 20],
        [30, 40, 50],
        [60, 70, 80]], dtype=np.uint8)
    zeros = np.zeros((3, 3), dtype=np.uint8)
    gray_expected = np.dstack([grayscale, grayscale, grayscale])
    red_expected = np.dstack([grayscale, zeros, zeros])
    cyan_expected = np.dstack([zeros, grayscale, grayscale])

    gray_canvas = AnnIm.from_grayscale(grayscale)
    self.assertTrue(np.array_equal(gray_canvas, gray_expected))

    red_canvas = AnnIm.from_grayscale(grayscale, (True, False, False))
    self.assertTrue(np.array_equal(red_canvas, red_expected))

    cyan_canvas = AnnIm.from_grayscale(grayscale, (False, True, True))
    self.assertTrue(np.array_equal(cyan_canvas, cyan_expected))
def annotate(input_file, output_dir):
    """Write an annotated image to disk."""
    logger.info("---")
    logger.info('Input image: "{}"'.format(os.path.abspath(input_file)))
    image = Image.from_file(input_file)
    intensity = mean_intensity_projection(image)

    name = fpath2name(input_file)
    png_name = name + ".png"
    csv_name = name + ".csv"
    png_path = os.path.join(output_dir, png_name)
    csv_path = os.path.join(output_dir, csv_name)

    grains = find_grains(input_file, output_dir)
    ann = AnnotatedImage.from_grayscale(intensity)

    # Determine the median grain size based on the segmented regions.
    areas = []
    for i in grains.identifiers:
        region = grains.region_by_identifier(i)
        areas.append(region.area)
    median_grain_size = np.median(areas)

    num_grains = 0
    for i in grains.identifiers:
        region = grains.region_by_identifier(i)
        color = pretty_color(i)

        num_grains_in_area = region.area / median_grain_size
        num_grains_in_area = int(round(num_grains_in_area))
        if num_grains_in_area == 0:
            continue

        outer_line = region.dilate().border
        outline = region.border.dilate() * np.logical_not(outer_line)
        ann.mask_region(outline, color=color)
        ann.text_at(str(num_grains_in_area), region.centroid,
                    color=(255, 255, 255))
        num_grains = num_grains + num_grains_in_area

    ann.text_at("Num grains: {:3d}".format(num_grains), (10, 10),
                antialias=True, color=(0, 255, 0), size=48)
    logger.info("Num grains: {:3d}".format(num_grains))

    logger.info('Output image: "{}"'.format(os.path.abspath(png_path)))
    with open(png_path, "wb") as fh:
        fh.write(ann.png())

    logger.info('Output csv: "{}"'.format(os.path.abspath(csv_path)))
    with open(csv_path, "w") as fh:
        fh.write("{},{}\n".format(png_name, num_grains))

    return png_name, num_grains
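# A minimal sketch (assumed areas, not part of annotate) of the grain-counting
# heuristic above: each segmented region is assumed to contain
# round(area / median_area) grains, so clumps of touching grains still
# contribute more than one grain to the total.
def _demo_grain_count_heuristic():
    """Hypothetical helper illustrating the area / median-area rounding."""
    import numpy as np

    areas = [95, 100, 105, 210, 310]      # assumed region areas in pixels
    median_area = np.median(areas)        # 105
    counts = [int(round(a / median_area)) for a in areas]
    return counts                         # [1, 1, 1, 2, 3]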
def generate_label_image(segmentation):
    """Return an image with each segmented region labelled at its centroid."""
    base_for_ann = 100 * (segmentation > 0)
    ann = AnnotatedImage.from_grayscale(base_for_ann)
    for sid in segmentation.identifiers:
        c = segmentation.region_by_identifier(sid).centroid
        # Cast the centroid to a concrete tuple of ints for text placement.
        ann.text_at(str(sid), tuple(map(int, c)), size=30,
                    color=(255, 255, 0), center=True)
    return ann
def annotate_segmentation(image, segmentation):
    """Write out an annotated segmentation image."""
    grayscale = normalise(image) * 255
    canvas = AnnotatedImage.from_grayscale(grayscale)
    for i in segmentation.identifiers:
        region = segmentation.region_by_identifier(i)
        outline = region.inner.border.dilate()
        color = pretty_color_from_identifier(i)
        canvas.mask_region(outline, color=color)
    fpath = os.path.join(AutoName.directory, "segmentation.png")
    with open(fpath, "wb") as fh:
        fh.write(canvas.png())
def annotate_segmentation(image, segmentation):
    """Return annotated segmentation."""
    annotation = AnnotatedImage.from_grayscale(image)
    for i in segmentation.identifiers:
        region = segmentation.region_by_identifier(i)
        color = pretty_color()
        annotation.mask_region(region.border.dilate(), color)

    props = skimage.measure.regionprops(segmentation)
    for p in props:
        try:
            minr, minc, maxr, maxc = p.bbox
            cval = int(p.centroid[1])
            line = skimage.draw.line(minr, cval, maxr, cval)
            annotation.mask_region(line, (0, 255, 0))
        except IndexError:
            # Don't draw line if it falls outside of the image.
            pass

    return annotation
def label_plots(dataset):
    identifier = "384b5421bc782259b218eaab39171d51462202fd"
    segmentation_file = "/output/DJI_0118-segmented.JPG"
    segmentation = load_segmentation_from_rgb_image(segmentation_file)
    approx_plot_locs = find_approx_plot_locs(dataset, identifier)
    xdim, ydim = segmentation.shape

    def image_coords_to_rel_coords(image, point):
        ydim, xdim = image.shape
        y_abs, x_abs = point
        x_rel = float(x_abs) / xdim
        y_rel = float(y_abs) / ydim
        return Point2D(x_rel, y_rel)

    centroids = []
    for sid in segmentation.identifiers:
        c = segmentation.region_by_identifier(sid).centroid
        centroids.append(image_coords_to_rel_coords(segmentation, c))

    loc_labels = {l: str(n) for n, l in enumerate(approx_plot_locs)}

    image = Image.from_file(dataset.abspath_from_identifier(identifier))
    annotated = annotate_with_set_of_points(image, centroids)

    def rel_coords_to_image_coords(image, point):
        ydim, xdim = image.shape
        x_rel, y_rel = point
        return Point2D(int(y_rel * ydim), int(x_rel * xdim))

    for l in approx_plot_locs:
        annotated.text_at(
            loc_labels[l],
            rel_coords_to_image_coords(segmentation, l),
            size=60,
            color=(0, 255, 0))

    def closest_loc_label(p):
        dists = [(p.distance(l), l) for l in approx_plot_locs]
        dists.sort()
        return loc_labels[dists[0][1]]

    for c in centroids:
        label = closest_loc_label(c)
        annotated.text_at(
            label,
            rel_coords_to_image_coords(segmentation, c) + Point2D(20, 20),
            size=60,
            color=(0, 255, 255))

    with open('/output/ann.png', 'wb') as f:
        f.write(annotated.png())

    grayscale = np.mean(image, axis=2)
    annotated2 = AnnotatedImage.from_grayscale(grayscale)

    for sid in segmentation.identifiers:
        region = segmentation.region_by_identifier(sid)
        annotated2.mask_region(region.border.dilate(), [255, 255, 0])

    for c in centroids:
        label = closest_loc_label(c)
        annotated2.text_at(
            label,
            rel_coords_to_image_coords(segmentation, c) - Point2D(30, 30),
            size=60,
            color=(0, 255, 255))

    with open('/output/ann_plots.png', 'wb') as f:
        f.write(annotated2.png())