def plot_segmentation(self, tag, epoch, label, store, method='def', jupyter=False):
    """Colorize a segmentation label map and store/log/display it.

    tag := tensorboard tag
    epoch := tensorboard global step; also prefixes the stored file name
    label := [H, W] class-id map (bool, float32, or integer dtype)
    store := True -> save image under self.p_visu
    method := 'def' -> store/log as usual; anything else -> return the
              colorized uint8 image instead (used by the left/right
              fusion decorator)
    jupyter := True -> display the image inline

    Returns the [H, W, 3] uint8 image when method != 'def', else None.
    """
    # np.bool was removed in NumPy 1.24; the builtin bool dtype is equivalent.
    if label.dtype == bool:
        col_map = SEG_COLORS_BIN
    else:
        col_map = SEG_COLORS
    if label.dtype == np.float32:
        label = label.round()
    image_out = np.zeros((label.shape[0], label.shape[1], 3), dtype=np.uint8)
    # Fill per unique class id (vectorized) instead of per pixel.
    for cls_id in np.unique(label):
        image_out[label == cls_id] = col_map[int(cls_id)][:3]
    if method != 'def':
        return image_out.astype(np.uint8)
    if store:
        save_image(image_out, tag=f"{epoch}_{tag}", p_store=self.p_visu)
    if self.writer is not None:
        self.writer.add_image(tag, image_out, global_step=epoch, dataformats="HWC")
    if jupyter:
        display(Image.fromarray(image_out.astype(np.uint8)))
def plot_bounding_box(self, tag, epoch, img, rmin=0, rmax=0, cmin=0, cmax=0, str_width=2, store=False, jupyter=False, b=None):
    """Draw an axis-aligned bounding box onto a copy of *img* and log/store it.

    tag := tensorboard tag
    epoch := tensorboard epoch; also prefixes the stored file name
    img := original image, [height, width, RGB]
    rmin, rmax, cmin, cmax := box extents in rows/columns (overridden by *b*)
    str_width := half line width of the box edges, in pixels
    store := True -> stores the image to the standard path (self.p_visu)
    jupyter := True -> display the image inline
    b := optional dict with keys 'rmin', 'rmax', 'cmin', 'cmax'
    """
    if isinstance(b, dict):
        rmin = b['rmin']
        rmax = b['rmax']
        cmin = b['cmin']
        cmax = b['cmax']
    # Work on a copy so the caller's image is never mutated.
    img_d = np.array(copy.deepcopy(img))
    c = [0, 0, 255]  # box color
    # Clamp each edge band to the image bounds.
    rmin_mi = max(0, rmin - str_width)
    rmin_ma = min(img_d.shape[0], rmin + str_width)
    rmax_mi = max(0, rmax - str_width)
    rmax_ma = min(img_d.shape[0], rmax + str_width)
    cmin_mi = max(0, cmin - str_width)
    cmin_ma = min(img_d.shape[1], cmin + str_width)
    cmax_mi = max(0, cmax - str_width)
    cmax_ma = min(img_d.shape[1], cmax + str_width)
    # Paint the four edges.
    img_d[rmin_mi:rmin_ma, cmin:cmax, :] = c
    img_d[rmax_mi:rmax_ma, cmin:cmax, :] = c
    img_d[rmin:rmax, cmin_mi:cmin_ma, :] = c
    img_d[rmin:rmax, cmax_mi:cmax_ma, :] = c
    img_d = img_d.astype(np.uint8)
    if store:
        # '_' separator keeps file naming consistent with the other plot_* methods.
        save_image(img_d, tag=str(epoch) + '_' + tag, p_store=self.p_visu)
    if jupyter:
        display(Image.fromarray(img_d))
    if self.writer is not None:
        self.writer.add_image(tag, img_d, global_step=epoch, dataformats='HWC')
def plot_estimated_pose(self, tag, epoch, img, trans=[[0, 0, 0]], rot_mat=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], cam_cx=0, cam_cy=0, cam_fx=0, cam_fy=0, store=False, jupyter=False, w=2, points=None):
    """Project object-model points through a pinhole camera and draw them as
    green squares on a copy of *img*; optionally store/log/display the result.

    tag := tensorboard tag
    epoch := tensorboard epoch; also prefixes the stored file name
    img := original image, [height, width, RGB]
    points := object model points, [N, 3] (x, y, z in camera frame after pose)
    trans := [1, 3] translation
    rot_mat := [3, 3] rotation matrix
    cam_cx, cam_cy, cam_fx, cam_fy := pinhole intrinsics
    w := half side length of the drawn square markers, in pixels
    """
    if type(rot_mat) == list:
        rot_mat = np.array(rot_mat)
    if type(trans) == list:
        trans = np.array(trans)
    img_d = copy.deepcopy(img)
    points = np.dot(points, rot_mat.T)
    points = np.add(points, trans[0, :])
    height, width = img_d.shape[0], img_d.shape[1]
    for i in range(points.shape[0]):
        p_x = points[i, 0]
        p_y = points[i, 1]
        p_z = points[i, 2]
        u = int(((p_x / p_z) * cam_fx) + cam_cx)
        v = int(((p_y / p_z) * cam_fy) + cam_cy)
        # Clip the marker to the image instead of relying on try/except:
        # slice assignment never raises, and negative indices would
        # silently wrap to the opposite border.
        r0, r1 = max(v - w, 0), min(v + w + 1, height)
        c0, c1 = max(u - w, 0), min(u + w + 1, width)
        if r0 < r1 and c0 < c1:
            img_d[r0:r1, c0:c1, 0] = 0
            img_d[r0:r1, c0:c1, 1] = 255
            img_d[r0:r1, c0:c1, 2] = 0  # was channel 0 twice; green marker intended
    if jupyter:
        display(Image.fromarray(img_d))
    if store:
        save_image(img_d, tag=str(epoch) + '_' + tag, p_store=self.p_visu)
    if self.writer is not None:
        self.writer.add_image(tag, img_d.astype(np.uint8), global_step=epoch, dataformats='HWC')
def plot_segmentation(self, tag, epoch, label, store):
    """Colorize a segmentation label map via SEG_COLORS and store/log it.

    tag := tensorboard tag
    epoch := tensorboard global step; also prefixes the stored file name
    label := [H, W] class-id map (float32 maps are rounded first)
    store := True -> save image under self.p_visu
    """
    if label.dtype == np.float32:
        label = label.round()
    image_out = np.zeros((label.shape[0], label.shape[1], 3), dtype=np.uint8)
    # Fill per unique class id (vectorized) instead of per pixel.
    for cls_id in np.unique(label):
        image_out[label == cls_id] = SEG_COLORS[int(cls_id)][:3]
    if store:
        save_image(image_out, tag=f"{epoch}_{tag}", p_store=self.p_visu)
    if self.writer is not None:
        self.writer.add_image(tag, image_out, global_step=epoch, dataformats="HWC")
def wrap(*args, **kwargs):
    # Decorator inner function (closes over `func` from the enclosing
    # decorator, which is outside this view). Merges the outputs of two
    # decorated visualisation calls tagged method='left' / method='right'
    # into one side-by-side image. args[0] is the visualiser instance
    # carrying storage_left / storage_right, p_visu and writer.
    if kwargs.get('method', 'def') == 'def':
        # Default path: plain pass-through.
        return func(*args, **kwargs)
    elif kwargs.get('method', 'def') == 'left':
        # Cache the rendered left half on the instance.
        res = func(*args, **kwargs)
        args[0].storage_left = res
    elif kwargs.get('method', 'def') == 'right':
        # Cache the rendered right half on the instance.
        res = func(*args, **kwargs)
        args[0].storage_right = res
    # Once both halves exist, paste them side by side and emit the result.
    if args[0].storage_right is not None and args[
            0].storage_left is not None:
        s = args[0].storage_right.shape
        img_f = np.zeros((int(s[0]), int(s[1] * 2), s[2]), dtype=np.uint8)
        img_f[:, :s[1]] = args[0].storage_left
        img_f[:, s[1]:] = args[0].storage_right
        # Reset so the next left/right pair starts fresh.
        args[0].storage_left = None
        args[0].storage_right = None
        if kwargs.get('store', True):
            save_image(
                img_f,
                tag=str(
                    kwargs.get('epoch', 'Epoch_Is_Not_Defined_By_Pos_Arg')) +
                '_' + kwargs.get('tag', 'Tag_Is_Not_Defined_By_Pos_Arg'),
                p_store=args[0].p_visu)
        if args[0].writer is not None:
            args[0].writer.add_image(
                kwargs.get('tag', 'Tag_Is_Not_Defined_By_Pos_Arg'),
                img_f.astype(np.uint8),
                global_step=kwargs.get('epoch',
                                       'Epoch_Is_Not_Defined_By_Pos_Arg'),
                dataformats='HWC')
        if kwargs.get('jupyter', False):
            display(Image.fromarray(img_f.astype(np.uint8)))
    # NOTE(review): this calls func a second time for 'left'/'right'
    # invocations (it already ran above to fill storage_*) — looks like the
    # wrapped call is re-run so the caller still gets its own image back,
    # but confirm this double execution is intended.
    return func(*args, **kwargs)
def plot_estimated_pose_on_bb(self, tag, epoch, img, points, tl, br, trans=[[0, 0, 0]], rot_mat=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], cam_cx=0, cam_cy=0, cam_fx=0, cam_fy=0, store=False, jupyter=False, w=2, K=None, H=None, method='def'):
    """Project object-model points into a bounding-box crop (rescaled to
    640x480) and draw them as green squares on a copy of *img*.

    tag := tensorboard tag
    epoch := tensorboard epoch; also prefixes the stored file name
    img := original image, [height, width, RGB]
    points := object model points, [N, 3]
    tl, br := top-left / bottom-right corners (row, col) of the crop
    trans := [1, 3] translation; rot_mat := [3, 3] rotation
    K := optional 3x3 intrinsics matrix (overrides cam_*)
    H := optional 4x4 homogeneous pose (overrides rot_mat/trans)
    method := 'def' -> store/log; anything else -> return the drawn uint8 image

    Raises Exception when H is given but its last row is not [0, 0, 0, 1].
    """
    if K is not None:
        cam_cx = K[0, 2]
        cam_cy = K[1, 2]
        cam_fx = K[0, 0]
        cam_fy = K[1, 1]
    if H is not None:
        rot_mat = H[:3, :3]
        trans = H[:3, 3][None, :]
        if H[3, 3] != 1:
            raise Exception
        if H[3, 0] != 0 or H[3, 1] != 0 or H[3, 2] != 0:
            raise Exception
    if type(rot_mat) == list:
        rot_mat = np.array(rot_mat)
    if type(trans) == list:
        trans = np.array(trans)
    img_d = copy.deepcopy(img)
    points = np.dot(points, rot_mat.T)
    points = np.add(points, trans[0, :])
    # Crop geometry: pixel coordinates are shifted by the top-left corner
    # and rescaled to a fixed 640x480 canvas.
    width = int(br[1] - tl[1])
    height = int(br[0] - tl[0])
    off_h = int(tl[0])
    off_w = int(tl[1])
    img_h, img_w = img_d.shape[0], img_d.shape[1]
    for i in range(points.shape[0]):
        p_x = points[i, 0]
        p_y = points[i, 1]
        p_z = points[i, 2]
        u = int((int(((p_x / p_z) * cam_fx) + cam_cx) - off_w) / width * 640)
        v = int((int(((p_y / p_z) * cam_fy) + cam_cy) - off_h) / height * 480)
        # Clip the marker to the image instead of relying on try/except:
        # slice assignment never raises, and negative indices would
        # silently wrap to the opposite border.
        r0, r1 = max(v - w, 0), min(v + w + 1, img_h)
        c0, c1 = max(u - w, 0), min(u + w + 1, img_w)
        if r0 < r1 and c0 < c1:
            img_d[r0:r1, c0:c1, 0] = 0
            img_d[r0:r1, c0:c1, 1] = 255
            img_d[r0:r1, c0:c1, 2] = 0  # was channel 0 twice; green marker intended
    if method != 'def':
        return img_d.astype(np.uint8)
    if jupyter:
        display(Image.fromarray(img_d.astype(np.uint8)))
    if store:
        save_image(img_d, tag=str(epoch) + '_' + tag, p_store=self.p_visu)
    if self.writer is not None:
        self.writer.add_image(tag, img_d.astype(np.uint8), global_step=epoch, dataformats='HWC')
def plot_contour(self, tag, epoch, img, points, cam_cx=0, cam_cy=0, cam_fx=0, cam_fy=0, trans=[[0, 0, 0]], rot_mat=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], store=False, jupyter=False, thickness=2, color=(0, 255, 0), method='def'):
    """Overlay the silhouette contour of a projected object model on *img*.

    The model points are projected with a pinhole camera, splatted into a
    binary occupancy mask, eroded, and the outline of the eroded mask is
    drawn onto a copy of the image.

    tag := tensorboard tag
    epoch := tensorboard epoch; also prefixes the stored file name
    img := original image, [height, width, RGB]
    points := object model points, [N, 3]
    trans := [1, 3] translation; rot_mat := [3, 3] rotation
    thickness := contour line thickness in pixels
    color := RGB contour color
    method := 'def' -> store/log; anything else -> return the drawn uint8 image
    """
    rot_mat = np.array(rot_mat)
    trans = np.array(trans)
    img_f = copy.deepcopy(img).astype(np.uint8)
    points = np.dot(points, rot_mat.T)
    points = np.add(points, trans[0, :])
    h = img_f.shape[0]
    w = img_f.shape[1]
    acc_array = np.zeros((h, w, 1), dtype=np.uint8)
    # Splat half-size; hoisted out of the loop so the erosion kernel below
    # is defined even when the point set is empty (was a NameError before).
    a = 10
    # Project the point cloud into a binary occupancy mask.
    for i in range(points.shape[0]):
        p_x = points[i, 0]
        p_y = points[i, 1]
        p_z = points[i, 2]
        u = int(((p_x / p_z) * cam_fx) + cam_cx)
        v = int(((p_y / p_z) * cam_fy) + cam_cy)
        # Clip to the image instead of try/except (negative indices would wrap).
        r0, r1 = max(v - a, 0), min(v + a + 1, h)
        c0, c1 = max(u - a, 0), min(u + a + 1, w)
        if r0 < r1 and c0 < c1:
            acc_array[r0:r1, c0:c1, 0] = 1
    kernel = np.ones((a * 2, a * 2), np.uint8)
    erosion = cv2.erode(acc_array, kernel, iterations=1)
    try:
        # cv2 >= 4.0 returns (contours, hierarchy)
        contours, hierarchy = cv2.findContours(np.expand_dims(erosion, 2),
                                               cv2.RETR_LIST,
                                               cv2.CHAIN_APPROX_NONE)
    except ValueError:
        # cv2 < 4.0 returns (image, contours, hierarchy)
        _, contours, hierarchy = cv2.findContours(np.expand_dims(erosion, 2),
                                                  cv2.RETR_LIST,
                                                  cv2.CHAIN_APPROX_NONE)
    # Draw the contour into a single-channel mask and transfer it with the
    # requested color/thickness (previously hardcoded to (0, 255, 0) / 3 and
    # copied with a per-pixel Python loop keyed on the green channel).
    mask = np.zeros((h, w), dtype=np.uint8)
    cv2.drawContours(mask, contours, -1, 255, thickness)
    img_f[mask == 255] = np.array(color, dtype=np.uint8)
    if method != 'def':
        return img_f.astype(np.uint8)
    if jupyter:
        display(Image.fromarray(img_f))
    if store:
        save_image(img_f, tag=str(epoch) + '_' + tag, p_store=self.p_visu)
    if self.writer is not None:
        self.writer.add_image(tag, img_f.astype(np.uint8), global_step=epoch, dataformats='HWC')