def _add_gt_image_summary(self):
    """Draw the cached ground-truth boxes onto the cached GT image and
    return it as a tensorboard image summary (CHW float tensor in [0, 1])."""
    # Refresh the cached ground-truth image before drawing on it.
    self._add_gt_image()
    boxed = draw_bounding_boxes(
        self._gt_image,
        self._image_gt_summaries['gt_boxes'],
        self._image_gt_summaries['im_info'])
    # Byte-range HWC image -> float in [0, 1], then HWC -> CHW for the summary.
    normalized = boxed[0].astype('float32') / 255.0
    chw = torch.from_numpy(normalized).permute(2, 0, 1)
    return tb.summary.image('GROUND_TRUTH', chw)
def demo(sess, net, image_name):
    """Run detection on one demo image and write the annotated result.

    Args:
        sess: TensorFlow session the network runs in.
        net: detection network handed to ``im_detect``.
        image_name: file name of an image inside ``data/horus-test/images``.

    Raises:
        IOError: if the image file cannot be read.
    """
    # Load the demo image.
    im_file = os.path.join(cfg.DATA_DIR, 'horus-test', 'images', image_name)
    im = cv2.imread(im_file)
    # cv2.imread silently returns None on a missing/unreadable file; fail
    # loudly here instead of crashing later inside im_detect.
    if im is None:
        raise IOError('Could not read image: {}'.format(im_file))

    # Detect all object classes and regress object bounds.
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(
        timer.total_time, boxes.shape[0]))

    # Visualize detections for each class above the confidence threshold.
    CONF_THRESH = 0.6
    for cls_ind, cls in enumerate(cfg.CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        inds = np.where(scores[:, cls_ind] >= CONF_THRESH)[0]
        cls_boxes = boxes[inds, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[inds, cls_ind]
        dets = np.hstack(
            (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, cfg.TEST.NMS)
        dets = dets[keep, :]
        print(dets)
        # Only draw when this class survived NMS with at least one box.
        if len(dets) != 0:
            im = draw_bounding_boxes(im, dets, cls_ind, cls)
    out_file = os.path.join(cfg.DATA_DIR, 'output', 'output_' + image_name)
    cv2.imwrite(out_file, im)
def _add_gt_image_summary(self):
    """Visualize the ground-truth boxes on the cached GT image and return a
    tensorboard image summary (CHW layout, values scaled to [0, 1])."""
    self._add_gt_image()
    drawn = draw_bounding_boxes(
        self._gt_image,
        self._image_gt_summaries['gt_boxes'],
        self._image_gt_summaries['im_info'])
    # transpose(2, 0, 1) is the single-call equivalent of the HWC -> CHW
    # swapaxes(1, 0).swapaxes(2, 0) pair.
    chw = drawn[0].astype('float32').transpose(2, 0, 1) / 255.0
    return tb.summary.image('GROUND_TRUTH', chw)
def _add_gt_image_summary(self):
    """Draw the ground-truth boxes on the cached GT image and emit it as a
    tensorboard image summary (float image scaled to [0, 1])."""
    self._add_gt_image()
    annotated = draw_bounding_boxes(
        self._gt_image,
        self._image_gt_summaries['gt_boxes'],
        self._image_gt_summaries['im_info'])
    scaled = annotated[0].astype('float32') / 255.0
    return tb.summary.image('GROUND_TRUTH', scaled)
def _add_gt_image_summary(self):
    """Emit the cached GT image as a tensorboard summary with no boxes
    drawn (an empty (0, 5) box array is passed to the visualizer)."""
    self._add_gt_image()
    empty_boxes = np.zeros((0, 5))  # no bounding_box ground_truth
    image = draw_bounding_boxes(
        self._gt_image, empty_boxes, self._image_gt_summaries['im_info'])
    scaled = image[0].astype('float32') / 255.0
    return tb.summary.image('GROUND_TRUTH', scaled)
imdb.set_proposal_method('gt')
print('Set proposal method: {:s}'.format("gt"))
# roidb = get_training_roidb(imdb)
import roi_data_layer.roidb as rdl_roidb

print('Preparing training data...')
rdl_roidb.prepare_roidb(imdb)
print('done')

from utils.visualization import draw_bounding_boxes
import cv2
import os
import numpy as np

roidb = imdb.roidb
for entry in roidb:
    # Each roidb entry carries the image path plus its gt 'boxes' and
    # 'gt_classes' arrays (see the sample entry documented below).
    im = cv2.imread(entry['image'])
    if im is None:
        # cv2.imread returns None on unreadable paths; skip rather than crash.
        print('Could not read image: {}'.format(entry['image']))
        continue
    # The original called draw_bounding_boxes() with no arguments, which
    # raises TypeError. Build the (image, gt_boxes, im_info) arguments the
    # visualizer is called with elsewhere in this file: gt_boxes is (N, 5)
    # with the class label in the last column -- TODO confirm against
    # utils.visualization.draw_bounding_boxes.
    gt_boxes = np.hstack(
        (entry['boxes'].astype(np.float32),
         entry['gt_classes'][:, np.newaxis].astype(np.float32)))
    im_info = np.array([entry['height'], entry['width'], 1.0])
    new_img = draw_bounding_boxes(im, gt_boxes, im_info)
    # Write one annotated file per source image; the original passed a bare
    # directory path ("data/table/") to cv2.imwrite, which cannot work.
    out_path = os.path.join('data', 'table', os.path.basename(entry['image']))
    cv2.imwrite(out_path, new_img)
"""
[{'boxes': array([[1079,  442, 1497,  517],
       [1101,  476, 1499,  549],
       [1171,  532, 1497,  642]], dtype=uint16), 'gt_classes': array([1, 1, 1], dtype=int32), 'gt_overlaps': <3x2 sparse matrix of type '<class 'numpy.float32'>'
        with 3 stored elements in Compressed Sparse Row format>, 'flipped': False, 'seg_areas': array([31844., 29526., 36297.], dtype=float32), 'image': '/1.jpg', 'width': 1463, 'height': 1198, 'max_classes': array([1, 1, 1]), 'max_overlaps': array([1., 1., 1.], dtype=float32)}]
"""