def layer_dict_2_mask(rootDir, layer_dict, show=False, save_dir=None, show_circle=False):
    '''
    Render bead coordinates onto blank per-slice mask images.

    :param rootDir: experiment root; slice images are read from
        "<rootDir>/3 - Processed Images/7 - Counted Reoriented Stacks Renamed"
    :param layer_dict: mapping of z-coordinate (float) -> iterable of (x, y)
        bead points for the slice at that depth
    :param show: when True, display each annotated slice via show_img
    :param save_dir: when given, masks are written half-size, in reverse
        slice order, to "<save_dir>/bead/<index>.tif"
    :param show_circle: when True, also burn a circle around each bead into
        the saved mask itself (not only the display copy)
    :return: None
    '''
    img_dir = os.path.join(rootDir, "3 - Processed Images",
                           "7 - Counted Reoriented Stacks Renamed")

    def z_key(elem):
        # File names end with ", <z>.tif"; order slices by that z value.
        return float(elem.split(',')[-1].strip().split('.tif')[0])

    tif_list = sorted(os.listdir(img_dir), key=z_key)
    # A blank canvas shaped like the first slice is reused for every mask.
    img = cv2.imread(os.path.join(img_dir, tif_list[0]))
    canvas = get_blank_canvas(img)

    save_img_list = []
    for tif in tif_list:
        temp_canvas = canvas.copy()
        if show:
            show_canvas = temp_canvas.copy()
        z = z_key(tif)  # compute once instead of three times per slice
        if z in layer_dict:
            for point in layer_dict[z]:
                # Mark the bead pixel; point is (x, y), arrays index (row, col).
                temp_canvas[point[1]][point[0]] = 255
                if show_circle:
                    cv2.circle(temp_canvas, point, 15, (255, 0, 0), thickness=5)
                if show:
                    cv2.circle(show_canvas, point, 15, (255, 0, 0), thickness=5)
        if show:
            show_img(show_canvas, tif, True)
        # Downscale the mask to half size before collecting it for saving.
        temp_canvas = cv2.resize(temp_canvas,
                                 (int(temp_canvas.shape[1] * 0.5),
                                  int(temp_canvas.shape[0] * 0.5)))
        save_img_list.append(temp_canvas)

    if save_dir is not None:
        save_dir = os.path.join(save_dir, "bead")
        # makedirs(exist_ok=True) also creates missing parent directories and
        # avoids the exists()/mkdir race of the original code.
        os.makedirs(save_dir, exist_ok=True)
        # Masks are written deepest slice first (reverse of sort order).
        for index, mask in enumerate(reversed(save_img_list)):
            imsave(os.path.join(save_dir, str(index) + ".tif"), mask)
def plot_layer_dict_on_img(rootDir, layer_dict):
    '''
    Overlay bead positions as circles on the real tissue slice images.

    :param rootDir: experiment root; slices are read from "<rootDir>/processed"
    :param layer_dict: mapping of z-coordinate (float) -> iterable of bead
        points for the slice at that depth
    :return: None (each annotated slice is displayed via show_img)
    '''
    img_dir = os.path.join(rootDir, "processed")

    def z_key(name):
        # File names end with ", <z>.tif"; use the z value for ordering.
        return float(name.split(',')[-1].strip().split('.tif')[0])

    for tif in sorted(os.listdir(img_dir), key=z_key):
        # Skip slices that have no beads recorded at their depth.
        if z_key(tif) not in layer_dict.keys():
            continue
        layer = layer_dict[z_key(tif)]
        img = cv2.imread(os.path.join(img_dir, tif))
        for point in layer:
            cv2.circle(img, point, 15, (255, 0, 0), thickness=5)
        show_img(img, "bead location", False)
def preprocess_pair(img_frame, atlas_frame, ann_frame, show=False):
    '''
    Transform the position of the brain in the atlas frame to adapt it to the
    image frame: isolate the central tissue contour in the image, scale the
    atlas/annotation to the tissue bounding box, and center them on a canvas
    the size of the image.

    :param img_frame: BGR tissue image (converted to grayscale internally)
    :param atlas_frame: single-channel atlas slice
    :param ann_frame: single-channel annotation slice (uint16 labels)
    :param show: when True, display intermediate debug images
    :return: (tissue_frame, canvas_atlas, canvas_ann)
    :raises RuntimeError: when no central tissue contour of plausible area is
        found (previously this fell through a bare ``except:`` and crashed
        later with a confusing NameError)
    '''
    img_frame = cv2.cvtColor(img_frame, cv2.COLOR_BGR2GRAY)
    threshold = get_adaptive_threshold(img_frame)
    ret, th = cv2.threshold(img_frame, threshold, 255, cv2.THRESH_BINARY)
    kernel = np.ones((7, 7), np.uint8)
    # Erode to detach the tissue from border artifacts before contouring.
    th = cv2.erode(th, kernel, iterations=2)
    # NOTE: 3-value unpacking is the OpenCV 3.x findContours signature.
    _, contours, _ = cv2.findContours(th, cv2.RETR_TREE,
                                      cv2.CHAIN_APPROX_SIMPLE)
    tissue_frame = img_frame.copy()
    for i in range(len(contours)):
        # Keep only contours of plausible tissue area (2e5 .. 1e7 px).
        if cv2.contourArea(contours[i]) > 2e5 and cv2.contourArea(
                contours[i]) < 1e7:
            x_t, y_t, w_t, h_t = cv2.boundingRect(contours[i])
            point_t = (int(x_t + 0.5 * w_t), int(y_t + 0.5 * h_t))
            if is_in_center(point_t, tissue_frame):
                # Reuse the rect already computed above (the original called
                # cv2.boundingRect a second time on the same contour).
                x, y, w, h = x_t, y_t, w_t, h_t
                # Black out everything outside the bounding box ...
                tissue_frame[:, 0:x] = 0
                tissue_frame[:, x + w:] = 0
                tissue_frame[0:y, :] = 0
                tissue_frame[y + h:, :] = 0
                # ... then keep only pixels inside the contour itself.
                mask = np.zeros(tissue_frame.shape).astype(np.uint8)
                cv2.drawContours(mask, [contours[i]], -1, 255, -1)
                tissue_frame = cv2.bitwise_and(tissue_frame, tissue_frame,
                                               mask=mask)
                if show:
                    cv2.rectangle(tissue_frame, (x, y), (x + w, y + h),
                                  (255, 255, 0), 5)
                    show_img(tissue_frame, False)

    # Measure the occupied extent of the atlas: count rows/cols that contain
    # any nonzero pixel.
    cur_h = 0
    cur_w = 0
    (height, length) = atlas_frame.shape
    for row in range(height):
        if np.asarray(atlas_frame[row, :]).sum() > 0:
            cur_h += 1
    for col in range(length):
        if np.asarray(atlas_frame[:, col]).sum() > 0:
            cur_w += 1

    try:
        # NameError here means no central contour set w/h above;
        # ZeroDivisionError means the atlas slice is empty.
        w_factor = float(w) / float(cur_w)
        h_factor = float(h) / float(cur_h)
    except (NameError, ZeroDivisionError):
        # Show all candidate bounding boxes to help diagnose, then fail
        # loudly instead of falling through to an undefined w_factor.
        tissue_frame = img_frame.copy()
        for i in range(len(contours)):
            x, y, w, h = cv2.boundingRect(contours[i])
            cv2.rectangle(tissue_frame, (x, y), (x + w, y + h),
                          (255, 255, 0), 5)
        show_img(tissue_frame, False)
        raise RuntimeError(
            "preprocess_pair: no central tissue contour found or atlas "
            "slice empty; cannot compute scale factors") from None

    refactored_atlas_center = (int(ATLAS_CERTER_POSITION[2] * w_factor),
                               int(ATLAS_CERTER_POSITION[1] * h_factor))
    # INTER_NEAREST keeps annotation label values intact (no interpolation
    # of label ids).
    atlas_frame = cv2.resize(atlas_frame,
                             (int(atlas_frame.shape[1] * w_factor),
                              int(atlas_frame.shape[0] * h_factor)),
                             interpolation=cv2.INTER_NEAREST)
    ann_frame = cv2.resize(ann_frame,
                           (int(ann_frame.shape[1] * w_factor),
                            int(ann_frame.shape[0] * h_factor)),
                           interpolation=cv2.INTER_NEAREST)
    atlas_frame, atlas_center = get_pure_brain_atlas(atlas_frame,
                                                     refactored_atlas_center)
    ann_frame, _ = get_pure_brain_atlas(ann_frame, refactored_atlas_center)

    # Paste atlas/annotation into canvases the size of the tissue image.
    canvas_atlas = np.zeros(
        (img_frame.shape[0], img_frame.shape[1])).astype(np.uint8)
    canvas_ann = np.zeros(
        (img_frame.shape[0], img_frame.shape[1])).astype(np.uint16)
    atlas_size = atlas_frame.shape
    canvas_atlas[0:atlas_size[0], 0:atlas_size[1]] = atlas_frame
    ann_size = ann_frame.shape
    canvas_ann[0:ann_size[0], 0:ann_size[1]] = ann_frame

    # Translate both canvases so the atlas center lands on the canvas center.
    canvas_center = (int(canvas_atlas.shape[1] * 0.5),
                     int(canvas_atlas.shape[0] * 0.5))
    shift_col = canvas_center[0] - atlas_center[0]
    shift_row = canvas_center[1] - atlas_center[1]
    M = np.float32([[1, 0, shift_col], [0, 1, shift_row]])
    canvas_atlas = cv2.warpAffine(
        canvas_atlas, M, (canvas_atlas.shape[1], canvas_atlas.shape[0]))
    canvas_ann = cv2.warpAffine(canvas_ann, M,
                                (canvas_ann.shape[1], canvas_ann.shape[0]))
    return tissue_frame, canvas_atlas, canvas_ann
def preprocess_pair(img_frame, atlas_frame, ann_frame, show=False, strict=0.):
    '''
    Transform the position of the brain in the atlas frame to adapt image frame.

    GrabCut-based variant: isolates the central tissue via cv2.grabCut inside
    a padded bounding box, normalizes the atlas, crops atlas/annotation to the
    annotation's nonzero extent, rescales both to the tissue bounding box, and
    centers them on canvases the size of the image.

    :param img_frame: BGR tissue image (converted to grayscale internally)
    :param atlas_frame: single-channel atlas slice (min-max normalized to uint8)
    :param ann_frame: single-channel annotation slice (uint16 labels)
    :param show: when True, display intermediate debug images
    :param strict: in [0, 1]; attenuation applied to grabCut background pixels
        (0 keeps background untouched, 1 zeroes it out)
    :return: (tissue_frame, canvas_atlas, canvas_ann)
    '''
    assert strict <= 1.0 and strict >= 0.
    img_frame = cv2.cvtColor(img_frame, cv2.COLOR_BGR2GRAY)
    tissue_frame = copy.deepcopy(img_frame)
    threshold = get_adaptive_threshold(img_frame)
    ret, th = cv2.threshold(img_frame, threshold, 255, cv2.THRESH_BINARY)
    kernel = np.ones((7, 7), np.uint8)
    # Erode to detach the tissue from border artifacts before contouring.
    th = cv2.erode(th, kernel, iterations=2)
    # NOTE: 3-value unpacking is the OpenCV 3.x findContours signature.
    _, contours, _ = cv2.findContours(th, cv2.RETR_TREE,
                                      cv2.CHAIN_APPROX_SIMPLE)
    # Sort ascending by area so the largest qualifying contour is handled last
    # (its grabCut result is the one that sticks).
    contours.sort(key=cv2.contourArea)
    # show_img(th, False)
    # show_frame = copy.deepcopy(img_frame)
    # show_frame = cv2.cvtColor(show_frame, cv2.COLOR_GRAY2RGB)
    for i in range(len(contours)):
        # Keep only contours of plausible tissue area (2e5 .. 5e6 px).
        if cv2.contourArea(contours[i]) > 2e5 and cv2.contourArea(contours[i]) < 5e6:
            # x_t, y_t, w_t, h_t = cv2.boundingRect(contours[i])
            # point_t = (int(x_t + 0.5 * w_t), int(y_t + 0.5 * h_t))
            # Contour centroid from image moments.
            M = cv2.moments(contours[i])
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            # show_frame = cv2.drawContours(show_frame, [contours[i]], 0, (0,255,0), 3)
            # show_frame = cv2.circle(show_frame, (cX, cY), 3, (0, 0, 255), 3)
            if is_in_center((cX, cY), tissue_frame):
                x, y, w, h = cv2.boundingRect(contours[i])
                # cv2.rectangle(show_frame, (x - 75, y - 75), (x + w + 150, y + h + 150), (255, 0, 0), thickness=3, lineType=8)
                # show_img(th, False)
                # convex_contours = cv2.convexHull(contours[i])
                # mask = np.zeros(tissue_frame.shape).astype(np.uint8)
                # cv2.drawContours(mask, [convex_contours], -1, 255, -1)
                # tissue_frame = cv2.bitwise_and(tissue_frame, tissue_frame, mask=mask)
                # grabCut needs a 3-channel image.
                color_frame = cv2.cvtColor(tissue_frame, cv2.COLOR_GRAY2RGB)
                # rect = (x, y, w + x, h + y)
                # Pad the bounding box by 75 px on each side for grabCut's
                # initial rectangle.
                rect = (x - 75, y - 75, w + 150, h + 150)
                mask = np.zeros(tissue_frame.shape, np.uint8)
                bgdModel = np.zeros((1, 65), np.float64)
                fgdModel = np.zeros((1, 65), np.float64)
                cv2.grabCut(color_frame, mask, rect, bgdModel, fgdModel, 5,
                            cv2.GC_INIT_WITH_RECT)
                # mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
                # GC_BGD(0)/GC_PR_BGD(2) pixels are scaled by (1 - strict);
                # foreground pixels are kept at full intensity.
                mask2 = np.where((mask == 2) | (mask == 0), 1.0 - strict, 1)
                # Zero everything outside the padded bounding box.
                tissue_frame[:, 0: x - 75] = 0
                tissue_frame[:, x + w + 75:] = 0
                tissue_frame[0:y - 75, :] = 0
                tissue_frame[y + h + 75:, :] = 0
                tissue_frame = tissue_frame * mask2[:, :]
                tissue_frame = np.asarray(tissue_frame).astype(np.uint8)
                if show:
                    print("normal")
                    cv2.rectangle(tissue_frame, (x, y), (x + w, y + h),
                                  (255, 255, 0), 5)
                    show_img(tissue_frame, False)
    # show_img(tissue_frame, False)
    # Min-max normalize the atlas to the uint8 range.
    atlas_frame = np.asarray(atlas_frame, dtype=np.float32)
    atlas_frame = ((atlas_frame - atlas_frame.min()) /
                   (atlas_frame.max() - atlas_frame.min())) * 255
    atlas_frame = atlas_frame.astype(np.uint8)
    # Crop annotation to its nonzero extent and apply the same crop to the
    # atlas; translate the reference atlas center into the cropped frame.
    ann_frame, (row1, row2, col1, col2) = get_pure_brain_atlas(ann_frame,
                                                               threshold=0)
    atlas_frame = atlas_frame[row1:row2, col1:col2]
    ori_ann_center = (ATLAS_CERTER_POSITION[2] - row1,
                      ATLAS_CERTER_POSITION[1] - col1)
    cur_w = col2 - col1
    cur_h = row2 - row1
    print(ori_ann_center)
    try:
        # NameError if no central contour set w/h in the loop above;
        # ZeroDivisionError if the annotation crop is empty.
        w_factor = float(w) / float(cur_w)
        h_factor = float(h) / float(cur_h)
    except:
        # NOTE(review): bare except — after showing the debug image this
        # falls through and crashes at refactored_atlas_center below with a
        # NameError on w_factor; should raise a descriptive error instead.
        tissue_frame = img_frame.copy()
        for i in range(len(contours)):
            x, y, w, h = cv2.boundingRect(contours[i])
            cv2.rectangle(tissue_frame, (x, y), (x + w, y + h),
                          (255, 255, 0), 5)
        print("exception")
        show_img(tissue_frame, False)
    # print(w_factor, h_factor)
    # Center expressed as (col, row) after scaling.
    refactored_atlas_center = (int(ori_ann_center[1] * w_factor),
                               int(ori_ann_center[0] * h_factor))
    # INTER_NEAREST keeps annotation label values intact.
    atlas_frame = cv2.resize(atlas_frame,
                             (int(atlas_frame.shape[1] * w_factor),
                              int(atlas_frame.shape[0] * h_factor)),
                             interpolation=cv2.INTER_NEAREST)
    ann_frame = cv2.resize(ann_frame,
                           (int(ann_frame.shape[1] * w_factor),
                            int(ann_frame.shape[0] * h_factor)),
                           interpolation=cv2.INTER_NEAREST)
    # Paste atlas/annotation into canvases the size of the tissue image.
    canvas_atlas = np.zeros((img_frame.shape[0],
                             img_frame.shape[1])).astype(np.uint8)
    canvas_ann = np.zeros((img_frame.shape[0],
                           img_frame.shape[1])).astype(np.uint16)
    atlas_size = atlas_frame.shape
    canvas_atlas[0:atlas_size[0], 0:atlas_size[1]] = atlas_frame
    ann_size = ann_frame.shape
    canvas_ann[0:ann_size[0], 0:ann_size[1]] = ann_frame
    # show_img(canvas_atlas, False)
    # Translate both canvases so the atlas center lands on the canvas center.
    canvas_center = (int(canvas_atlas.shape[1] * 0.5),
                     int(canvas_atlas.shape[0] * 0.5))
    shift_col = canvas_center[0] - refactored_atlas_center[0]
    shift_row = canvas_center[1] - refactored_atlas_center[1]
    M = np.float32([[1, 0, shift_col], [0, 1, shift_row]])
    canvas_atlas = cv2.warpAffine(canvas_atlas, M,
                                  (canvas_atlas.shape[1],
                                   canvas_atlas.shape[0]))
    canvas_ann = cv2.warpAffine(canvas_ann, M,
                                (canvas_ann.shape[1], canvas_ann.shape[0]))
    # show_img(canvas_atlas, False)
    return tissue_frame, canvas_atlas, canvas_ann