def test_rectangle_perimiter_clip_bottom_right():
    # clip=False
    expected = np.array([[0, 0, 0, 0, 0],
                         [0, 1, 1, 1, 1],
                         [0, 1, 0, 0, 0],
                         [0, 1, 0, 0, 0],
                         [0, 1, 0, 0, 0]], dtype=np.uint8)
    img = np.zeros((5, 5), dtype=np.uint8)
    start = (2, 2)
    extent = (10, 10)
    rr, cc = rectangle_perimeter(start, extent=extent, shape=img.shape,
                                 clip=False)
    img[rr, cc] = 1
    assert_array_equal(img, expected)

    # clip=True
    expected = np.array([[0, 0, 0, 0, 0],
                         [0, 1, 1, 1, 1],
                         [0, 1, 0, 0, 1],
                         [0, 1, 0, 0, 1],
                         [0, 1, 1, 1, 1]], dtype=np.uint8)
    img = np.zeros((5, 5), dtype=np.uint8)
    rr, cc = rectangle_perimeter(start, extent=extent, shape=img.shape,
                                 clip=True)
    img[rr, cc] = 1
    assert_array_equal(img, expected)
def test_rectangle_perimiter_clip_top_left():
    # clip=False
    expected = np.array([[0, 0, 0, 1, 0],
                         [0, 0, 0, 1, 0],
                         [0, 0, 0, 1, 0],
                         [1, 1, 1, 1, 0],
                         [0, 0, 0, 0, 0]], dtype=np.uint8)
    img = np.zeros((5, 5), dtype=np.uint8)
    start = (-5, -5)
    end = (2, 2)
    rr, cc = rectangle_perimeter(start, end=end, shape=img.shape, clip=False)
    img[rr, cc] = 1
    assert_array_equal(img, expected)

    # clip=True
    expected = np.array([[1, 1, 1, 1, 0],
                         [1, 0, 0, 1, 0],
                         [1, 0, 0, 1, 0],
                         [1, 1, 1, 1, 0],
                         [0, 0, 0, 0, 0]], dtype=np.uint8)
    img = np.zeros((5, 5), dtype=np.uint8)
    rr, cc = rectangle_perimeter(start, end=end, shape=img.shape, clip=True)
    img[rr, cc] = 1
    assert_array_equal(img, expected)
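# A minimal, self-contained sketch of the clip behaviour the two tests above
# exercise (assumes only NumPy and skimage.draw.rectangle_perimeter; the array
# values are illustrative, not taken from the test suite):
import numpy as np
from skimage.draw import rectangle_perimeter

img = np.zeros((5, 5), dtype=np.uint8)
# The rectangle extends past the bottom-right image corner.  With clip=False
# only the perimeter pixels that fall inside `shape` are returned, so the
# outline stays open; with clip=True the outline is closed along the border.
rr, cc = rectangle_perimeter((2, 2), extent=(10, 10), shape=img.shape, clip=True)
img[rr, cc] = 1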
def test_rectangle_extent_negative():
    # These two tests should be done together.
    expected = np.array([[0, 0, 0, 0, 0, 0],
                         [0, 0, 1, 1, 1, 1],
                         [0, 0, 1, 2, 2, 1],
                         [0, 0, 1, 1, 1, 1],
                         [0, 0, 0, 0, 0, 0]], dtype=np.uint8)

    start = (3, 5)
    extent = (-1, -2)
    img = np.zeros(expected.shape, dtype=np.uint8)
    rr, cc = rectangle_perimeter(start, extent=extent, shape=img.shape)
    img[rr, cc] = 1

    rr, cc = rectangle(start, extent=extent, shape=img.shape)
    img[rr, cc] = 2
    assert_array_equal(img, expected)

    # Ensure that rr and cc have no overlap
    img = np.zeros(expected.shape, dtype=np.uint8)
    rr, cc = rectangle(start, extent=extent, shape=img.shape)
    img[rr, cc] = 2

    rr, cc = rectangle_perimeter(start, extent=extent, shape=img.shape)
    img[rr, cc] = 1
    assert_array_equal(img, expected)
def test_rectangle_perimiter():
    expected = np.array([[0, 0, 0, 0, 0, 0],
                         [0, 0, 1, 1, 1, 1],
                         [0, 0, 1, 0, 0, 1],
                         [0, 0, 1, 1, 1, 1],
                         [0, 0, 0, 0, 0, 0]], dtype=np.uint8)
    start = (2, 3)
    end = (2, 4)
    img = np.zeros(expected.shape, dtype=np.uint8)
    # Test that the default parameter is indeed end
    rr, cc = rectangle_perimeter(start, end, shape=img.shape)
    img[rr, cc] = 1
    assert_array_equal(img, expected)

    # Swap start and end
    img = np.zeros(expected.shape, dtype=np.uint8)
    rr, cc = rectangle_perimeter(end=start, start=end, shape=img.shape)
    img[rr, cc] = 1
    assert_array_equal(img, expected)

    img = np.zeros(expected.shape, dtype=np.uint8)
    start = (2, 3)
    extent = (1, 2)
    rr, cc = rectangle_perimeter(start, extent=extent, shape=img.shape)
    img[rr, cc] = 1
    assert_array_equal(img, expected)
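# A small sketch of the start/end vs. start/extent relationship that the test
# above relies on: for positive extents the same rectangle is addressed by
# end = start + extent - 1 (only skimage.draw.rectangle_perimeter is assumed):
from skimage.draw import rectangle_perimeter

start = (2, 3)
extent = (1, 2)
end = (start[0] + extent[0] - 1, start[1] + extent[1] - 1)   # (2, 4), as in the test

rr1, cc1 = rectangle_perimeter(start, end=end, shape=(5, 6))
rr2, cc2 = rectangle_perimeter(start, extent=extent, shape=(5, 6))
# Both forms address the same set of perimeter pixels.
assert set(zip(rr1.tolist(), cc1.tolist())) == set(zip(rr2.tolist(), cc2.tolist()))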
decoder = Decoder()
with open("examples/big_buck_bunny_360p24.h265", 'rb') as file:
    for k, image in enumerate(decoder.decode(file)):
        image: Image
        reconstruction = image.get_image()
        if k == 19:
            code_structure = CodeStructure(image)
            reconstruction = image.get_image()
            h, w, _ = reconstruction.shape

            # Outline every coding block in red on an RGB copy of frame 19
            cb_info = ycbcr2rgb(reconstruction)
            for cb in code_structure.iter_code_blocks():
                rr, cc = rectangle_perimeter(start=cb.position + 1,
                                             extent=cb.size - 1)
                rr = np.clip(rr, a_min=0, a_max=h - 1)
                cc = np.clip(cc, a_min=0, a_max=w - 1)
                cb_info[rr, cc] = (255, 25, 25)

            # Outline every prediction block in green and draw an anti-aliased
            # line from the block centre toward the offset given by pb.vec0
            pb_info = ycbcr2rgb(reconstruction)
            for pb in code_structure.iter_prediction_blocks():
                rr, cc = rectangle_perimeter(start=pb.position + 1,
                                             extent=pb.size - 1)
                rr = np.clip(rr, a_min=0, a_max=h - 1)
                cc = np.clip(cc, a_min=0, a_max=w - 1)
                pb_info[rr, cc] = (25, 255, 25)

                pb_center = pb.position + pb.size / 2
                pb_source = pb_center + pb.vec0
                rr, cc, val = line_aa(int(pb_center[0]), int(pb_center[1]),
                                      int(pb_source[0]), int(pb_source[1]))
dl = Loader('/home/ubelix/lejeune/data/medical-labeling/Dataset30')
label = 700
frame = 40
scale_factor = 2

sample = dl[frame]
im = sample['image']
labels = sample['labels'][..., 0]
sp = labels == label

contour = segmentation.find_boundaries(labels)
im[contour, ...] = (255, 0, 0)

bbox = sample['bboxes'][label]
rr, cc = draw.rectangle_perimeter(start=(bbox[1], bbox[0]),
                                  end=(bbox[3], bbox[2]),
                                  shape=labels.shape)
im[rr, cc, ...] = (0, 0, 255)

bbox_scaled = scale_boxes(sample['bboxes'], scale_factor)[label]
rr, cc = draw.rectangle_perimeter(start=(bbox_scaled[1], bbox_scaled[0]),
                                  end=(bbox_scaled[3], bbox_scaled[2]),
                                  shape=labels.shape)
im[rr, cc, ...] = (0, 255, 0)

print(bbox)
print(bbox_scaled)

plt.subplot(121)
plt.imshow(im)
plt.subplot(122)
def demo(path_to_data):
    """
    Demonstration of the pre-processing pipeline.
    Iterates over the training data and displays the original next to the
    pre-processed version of each image.

    :param path_to_data: Path to the folder containing "test.json",
        "training.json", and "images/"
    """
    color_mapping = {
        'difficult': [185, 51, 173],      # purple
        'gametocyte': [255, 99, 25],      # orange
        'leukocyte': [0, 0, 255],         # blue
        'red blood cell': [255, 0, 0],    # red
        'schizont': [252, 204, 10],       # yellow
        'ring': [153, 102, 51],           # brown
        'trophozoite': [0, 147, 60]       # green
    }

    with open(path_to_data + "/training.json") as json_file:
        files = json.load(json_file)

    fig, axs = plt.subplots(ncols=2, nrows=2)
    for file in files:
        # read the image
        file_path = file['image']['pathname']
        fig.suptitle(file_path + "\nclick on plot to see next image")
        print(f"processing file: {file_path}")
        start_time = timeit.default_timer()
        image = io.imread(path_to_data + file_path)

        # no background removal (top right)
        processed1 = apply_pre_processing(image, background_rem=False,
                                          morph_filter=False)
        axs[0][1].imshow(processed1)
        axs[0][1].set_title("no background rem.")

        # background removal, w/o morphological filter on background mask (bottom left)
        processed2 = apply_pre_processing(image, background_rem=True,
                                          morph_filter=False)
        axs[1][0].imshow(processed2)
        axs[1][0].set_title("background rem. (no morph)")

        # background removal with morph. filters on background mask (bottom right)
        processed3 = apply_pre_processing(image, background_rem=True,
                                          morph_filter=True)
        axs[1][1].imshow(processed3)
        axs[1][1].set_title("background rem. (with morph)")

        # draw bounding boxes
        for obj in file['objects']:
            bounding_box = obj['bounding_box']
            minimum = bounding_box['minimum']
            start = (minimum['r'], minimum['c'])
            maximum = bounding_box['maximum']
            stop = (maximum['r'] - 2, maximum['c'] - 2)
            rr, cc = draw.rectangle_perimeter(start=start, end=stop)
            image[rr, cc] = color_mapping[obj['category']]

        # image before pre-processing with bounding boxes (top left)
        axs[0][0].imshow(image)
        axs[0][0].set_title("input with ground truth")

        fig.show()
        print(f"elapsed time: {timeit.default_timer() - start_time} seconds")
        plt.ginput()  # wait for click before doing more processing
def coords(self):
    # Corners measured from the centre (self.x, self.y); x is paired with
    # height and y with width, i.e. (x, y) is treated as (row, column).
    top_left = self.x - (self.height // 2), self.y - (self.width // 2)
    bottom_right = self.x + (self.height // 2), self.y + (self.width // 2)  # not used below
    coords = draw.rectangle_perimeter(top_left,
                                      extent=(self.height, self.width))
    return coords
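# A minimal usage sketch for coords(), assuming a hypothetical instance `box`
# with integer x, y, height and width attributes and an RGB `canvas` large
# enough to contain the whole perimeter (no shape/clip is passed above, so
# coordinates outside the canvas are not clipped):
rr, cc = box.coords()
canvas[rr, cc] = (255, 0, 0)  # paint the outline in red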
def get_image_preview(self):
    """
    Get a preview of the image mask.

    Returns:
        An image representing the preview of the mask.
    """
    img = self._image.create_overlay_img()
    patch_size_x = self._image.patches[self._current_patch_index].patch.shape[0]
    patch_size_y = self._image.patches[self._current_patch_index].patch.shape[1]

    # Draw patch grid (built once and cached)
    if self._grid_img is None:
        self._grid_img = np.zeros(img.shape, dtype=bool)
        for i in range(self.NUM_PATCHES):
            for j in range(self.NUM_PATCHES):
                start_x = i * patch_size_x
                stop_x = start_x + patch_size_x
                start_y = j * patch_size_y
                stop_y = start_y + patch_size_y
                rec_start = (start_x, start_y)
                rec_end = (stop_x, stop_y)
                rr, cc = rectangle_perimeter(rec_start, end=rec_end,
                                             shape=self._grid_img.shape)
                self._grid_img[rr, cc] = True
    img[self._grid_img] = 207

    # Draw current patch
    start_x = self._image.patches[self._current_patch_index].patch_index[0] * patch_size_x
    stop_x = start_x + patch_size_x
    start_y = self._image.patches[self._current_patch_index].patch_index[1] * patch_size_y
    stop_y = start_y + patch_size_y
    rec_start = (start_x, start_y)
    rec_end = (stop_x, stop_y)
    rr, cc = rectangle_perimeter(rec_start, end=rec_end,
                                 shape=self._image.image.shape)
    img[rr, cc] = [255, 255, 0]
    # Thicken the highlight by drawing four nested perimeters
    for i in range(4):
        rec_start = (rec_start[0] + 1, rec_start[1] + 1)
        rec_end = (rec_end[0] - 1, rec_end[1] - 1)
        rr, cc = rectangle_perimeter(rec_start, end=rec_end,
                                     shape=self._image.image.shape)
        img[rr, cc] = [255, 255, 0]
    return img
def optimum_heading(self, target, additional_coords, display=False):
    flattened = np.copy(self.occupancy_grid)
    flattened[flattened > 0.8] = 1
    flattened[flattened <= 0.8] = 0

    # Clear the map border and the two corner regions
    rr, cc = rectangle_perimeter((1, 1), (78, 78), shape=flattened.shape)
    flattened[rr, cc] = 0
    rr, cc = rectangle((0, 0), (13, 13), shape=flattened.shape)
    flattened[rr, cc] = 0
    rr, cc = rectangle((0, 65), (13, 79), shape=flattened.shape)
    flattened[rr, cc] = 0

    # coords = np.where(flattened > 0.8)
    # rr, cc = ellipse(target[0][0], target[1][0], 4, 4)
    # for i in range(len(rr)):
    #     k1 = np.where(coords[0] == rr[i], True, False)
    #     k2 = np.where(coords[1] == cc[i], True, False)
    #     inter = np.logical_not(np.logical_and(k1, k2))
    #     coords = np.compress(inter, coords, axis=1)

    # Collect obstacle positions further than 4 cells from the target
    coords = []
    rr, cc = ellipse(target[0], target[1], 4, 4)  # result unused
    for i in range(len(self.blocks)):
        if np.linalg.norm(target[0:2] - self.blocks[i][0:2]) > 4:
            coords.append([self.blocks[i][0], self.blocks[i][1]])
    # coords.append(additional_coords)
    coords = np.array(coords)

    # vectors = []
    # for i in range(len(self.blocks)):
    #     vectors.append([target[0] - self.blocks[i][0], target[1] - self.blocks[i][1]])
    # vectors.append([target[0], 0])
    # vectors.append([0, target[1]])
    # vectors.append([79 - target[0], 0])
    # vectors.append([0, 79 - target[1]])

    # Vectors from each obstacle (and the four map borders) to the target
    if len(coords) > 0:
        vectors = np.subtract(target, coords)
        vectors = np.concatenate((vectors, np.array([[target[0], 0]])))
    else:
        vectors = np.array([[target[0], 0]])
    vectors = np.concatenate((vectors, np.array([[0, target[1]]])))
    vectors = np.concatenate((vectors, np.array([[-79 + target[0], 0]])))
    vectors = np.concatenate((vectors, np.array([[0, -79 + target[1]]])))

    distances = np.linalg.norm(vectors, axis=1)
    w = 200
    distance_weighting = 1 - np.tanh(np.divide(np.power(distances, 2), w))
    # probability_weighting = (np.exp(np.power(self.occupancy_grid[coords[0], coords[1]], 6)) - 1) / 1.718
    probability_weighting = 100
    overall_weighting = distance_weighting * probability_weighting + 0.0000001
    overall_weighting = np.expand_dims(overall_weighting, axis=1)

    # Weighted average of the unit vectors gives the repulsive heading
    normed = []
    for i in range(len(vectors)):
        norm = vectors[i] / np.linalg.norm(vectors[i])
        normed.append(norm)
    normed = np.array(normed)

    optimum_heading = np.sum(normed * overall_weighting, axis=0) / (len(overall_weighting))
    scaling_factor = 8
    normed_optimum_heading = scaling_factor * optimum_heading / np.linalg.norm(optimum_heading)
    optimum_target = np.array([
        target[0] + normed_optimum_heading[0] + 0.00001,
        target[1] + normed_optimum_heading[1] + 0.00001
    ])

    target_danger = np.linalg.norm(optimum_heading)
    if np.isnan(target_danger):
        target_danger = 0

    test = flattened
    if display == False:
        return optimum_target, target_danger
    else:
        return optimum_target, target_danger, flattened
def plot_grid(imgs, FOCUS=None, ZOOM=None, number_of_rows=1, margin=None,
              show=False, save_name=None, plot1d=None, dpi=300):
    """Grid plotter for 2D image arrays.

    Args:
        imgs ([type]): Array of 2D numpy ndarrays.
        FOCUS (func, optional): Focus function; draws a rectangle and shows a
            zoomed version of that area inside the image. Defaults to None.
        ZOOM (func, optional): Zoom function; draws a rectangle and crops the
            images to that area. Defaults to None.
        number_of_rows (int, optional): Number of rows for the images; the
            number of columns is calculated automatically. Defaults to 1.
        margin (int, optional): Margin width in pixels. Defaults to None.
        show (bool, optional): Whether to show the image grid. Defaults to False.
        save_name (str, optional): Path for the saved grid image; if not given,
            the image is not saved. Defaults to None.
        plot1d (int, optional): Row position for a 1D intensity plot.
            Defaults to None.
        dpi (int, optional): DPI of the saved image. Defaults to 300.
    """
    updated_imgs = []

    # FOCUS and ZOOM
    if FOCUS:
        for img in imgs:
            # find focus
            focused_img, coor = FOCUS(img)
            f_size_x, f_size_y = focused_img.shape
            focused_img = resize(focused_img, (f_size_x * 2, f_size_y * 2))
            f_size_x, f_size_y = focused_img.shape
            size_x, size_y = img.shape
            uimg = img.copy()
            # draw focus rect
            rr, cc = rectangle_perimeter(coor[0:2], coor[2:4])
            uimg[rr, cc] = 1
            # draw big focus rect
            rr, cc = rectangle_perimeter(
                (size_x - f_size_x, size_y - f_size_y),
                (size_x - 2, size_y - 2),
            )
            uimg[size_x - f_size_x:size_x, size_y - f_size_y:size_y] = focused_img
            uimg[rr, cc] = 1
            # append img
            updated_imgs.append(uimg)
    elif ZOOM:
        for img in imgs:
            focused_img, coor = ZOOM(img)  # find focus
            updated_imgs.append(focused_img)
    else:
        updated_imgs = imgs

    # GRID SHAPE
    number_of_columns = len(imgs) // number_of_rows
    updated_imgs = np.vstack([
        np.hstack(updated_imgs[i:i + number_of_columns])
        for i in range(0, len(imgs), number_of_columns)
    ])

    # 1D plot
    if plot1d:
        h, w = updated_imgs.shape
        plt.figure(figsize=(5, 1))
        plt.plot(updated_imgs[plot1d, :])
        plt.show()
        th_h = h // 20
        th_w = (w // number_of_columns) // 20
        for th in range(-th_h // 2, th_h // 2):
            updated_imgs[plot1d + th, [i for i in range(0, w, th_w)]] = 1
            updated_imgs[plot1d + th, [i + 1 for i in range(0, w, th_w)]] = 1

    # Save and show
    ims = np.clip(updated_imgs, 0, 1) * 255
    ims = ims.astype(np.uint8)
    if show:
        plt.figure()
        imshow(ims)
        plt.show()
    if save_name:
        imsave(save_name, ims)
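# A hedged usage sketch for plot_grid: `focus_center` is a hypothetical FOCUS
# callable returning (cropped_image, (r0, c0, r1, c1)), which matches how
# plot_grid unpacks `coor` above; the two input images are random placeholders.
import numpy as np

def focus_center(img):
    # Crop a fixed 32x32 window starting at a quarter of the image size and
    # report its corners as (r0, c0, r1, c1).
    r0, c0 = img.shape[0] // 4, img.shape[1] // 4
    r1, c1 = r0 + 32, c0 + 32
    return img[r0:r1, c0:c1], (r0, c0, r1, c1)

img1 = np.random.rand(128, 128)
img2 = np.random.rand(128, 128)
plot_grid([img1, img2], FOCUS=focus_center, number_of_rows=1, show=True)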
# Center cross, 2 pixel width
xc = int(width / 2)
yc = int(height / 2)
length = int(pixel_per_um * 8)
gap = int(pixel_per_um * 3)
# Vertical
rr, cc = rectangle((yc - length, xc - 1), (yc - gap, xc))
img[rr, cc] = 0
rr, cc = rectangle((yc + length, xc - 1), (yc + gap, xc))
img[rr, cc] = 0
# Horizontal
rr, cc = rectangle((yc - 1, xc - length), (yc, xc - gap))
img[rr, cc] = 0
rr, cc = rectangle((yc - 1, xc + length), (yc, xc + gap))
img[rr, cc] = 0

# ROI
for ROI in (ROI_1, ROI_2):
    ROI_width = int(ROI * pixel_per_um)
    ROI_height = int(ROI * pixel_per_um)
    x1 = int(width / 2 - ROI_width / 2)
    y1 = int(height / 2 - ROI_height / 2)
    x2 = int(width / 2 + ROI_width / 2)
    y2 = int(height / 2 + ROI_height / 2)
    rr, cc = rectangle_perimeter((y1, x1), (y2, x2))
    img[rr, cc] = 0

# Save
imageio.imsave(name + '.bmp', img)
# rr, cc = draw.disk((pos[1], pos[0]), 5)
# image[rr, cc] = [0, 255, 0]
# rr, cc = draw.rectangle_perimeter((boxmin[1], boxmin[0]), (boxmax[1], boxmax[0]))
# image[rr, cc] = [255, 0, 0]
# image[mask[1], mask[0]] = [100, 100, 100]
# ax = fig.add_subplot(5, 1, i + 1)
# ax.imshow(image)
# plt.show()

# gen = AugGenerator(img, data, (400, 400))
# s = next(gen)
ds = create_train_dataset(img, data, (400, 300), 1)
sample = ds.take(5).as_numpy_iterator()
fig = plt.figure()
for i, s in enumerate(sample):
    ax = fig.add_subplot(5, 1, i + 1)
    img = s[0]['image'][0].swapaxes(0, 1)
    height, width = img.shape[:2]
    pos = s[0]['pos'][0] * [width, height]
    xmin, ymin, xmax, ymax = s[1][0] * np.array([width, height, width, height])
    rr, cc = draw.disk((pos[1], pos[0]), 5, shape=img.shape[:2])
    img[rr, cc] = [0, 255, 0]
    rr, cc = draw.rectangle_perimeter((ymin, xmin), (ymax, xmax),
                                      shape=img.shape[:2])
    img[rr, cc] = [255, 0, 0]
    ax.imshow(img)
plt.show()
def quantify_single_image_redness(orig_image, grid, auto, t=1, d=3, s=1,
                                  negate=True, reportAll=False,
                                  hardImageThreshold=None,
                                  hardSizeThreshold=None):
    '''
    Process a single image (phloxine mode).
    '''
    # Prepare image
    image = prepare_redness_image(orig_image)

    # Create grid
    if auto:
        grid, griddist = make_grid_auto(image, grid)
    else:
        grid, griddist = make_grid(grid)

    # Make mask
    # Adjust the threshold slightly for redness images; this is just what works
    # in practice. The t parameter is still applied as an additional coefficient.
    mask = make_mask(image, t=1.02 * t, s=s,
                     hardImageThreshold=hardImageThreshold,
                     hardSizeThreshold=hardSizeThreshold, local=True)

    # Measure region properties
    data = {
        r.label: {
            p: r[p]
            for p in ['label', 'area', 'centroid', 'mean_intensity', 'perimeter']
        }
        for r in regionprops(mask, intensity_image=image)
    }
    data = pd.DataFrame(data).transpose()

    blob_to_pos = match_to_grid(data['label'], data['centroid'], grid, griddist,
                                d=d, reportAll=reportAll)

    # Select only those blobs which have a corresponding grid position
    data = data.loc[[l in blob_to_pos for l in data['label']]]

    # Add grid position information to table
    data['row'] = data['label'].map(lambda x: blob_to_pos[x].split('-')[0])
    data['column'] = data['label'].map(lambda x: blob_to_pos[x].split('-')[1])

    # Add circularity
    data['circularity'] = (4 * math.pi * data['area']) / (data['perimeter']**2)

    # Make QC image: add bounding boxes to blobs with a grid position assigned
    qc = np.copy(orig_image)
    for region in regionprops(mask):
        if region.label in data['label']:
            minr, minc, maxr, maxc = region.bbox
            bboxrows, bboxcols = rectangle_perimeter([minr, minc],
                                                     end=[maxr, maxc],
                                                     shape=image.shape,
                                                     clip=True)
            qc[bboxrows, bboxcols, :] = np.array((255, 255, 255))

    return (data, qc)
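# A distilled sketch of the QC-overlay pattern above, assuming a hypothetical
# integer label image `mask` and an RGB image `qc` with the same height and
# width (only skimage.measure.regionprops and skimage.draw.rectangle_perimeter
# are assumed; passing shape with clip=True keeps the box indices valid even
# when a region touches the image border):
from skimage.draw import rectangle_perimeter
from skimage.measure import regionprops

for region in regionprops(mask):
    minr, minc, maxr, maxc = region.bbox
    rr, cc = rectangle_perimeter((minr, minc), end=(maxr, maxc),
                                 shape=mask.shape, clip=True)
    qc[rr, cc] = (255, 255, 255)  # white bounding box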