def demo():
    """Run the detector on every ``*.jpg`` under ``data_f`` and display results.

    Relies on module-level globals: ``data_f``, ``target_size``, ``device``,
    ``num_classes``, ``coco_label_map_list`` and the project helpers
    ``build_model``, ``ValTransform``, ``postprocess`` and
    ``visualize_det_cv2_part``.  Blocks on a key press per image.
    """
    model = build_model()
    if not os.path.isdir(data_f):
        # NOTE(review): only a directory input is handled here — confirm a
        # single-file path was never meant to be supported by demo().
        return
    all_imgs = glob.glob(os.path.join(data_f, '*.jpg'))
    # The transform is input-independent; build it once instead of per image.
    transform = ValTransform(rgb_means=(0.485, 0.456, 0.406),
                             std=(0.229, 0.224, 0.225))
    for img in all_imgs:
        print('~~~~~ predict on img: {}'.format(img))
        im = cv2.imread(img)
        if im is None:
            # cv2.imread returns None for unreadable files; skip, don't crash.
            continue
        height, width, _ = im.shape
        im_input, _ = transform(im, None, target_size)
        im_input = im_input.to(device).unsqueeze(0)
        with torch.no_grad():
            out = model(im_input)
        outputs = postprocess(out, num_classes, 0.01, 0.65)
        if outputs[0] is None:
            # No detection survived the confidence/NMS thresholds.
            continue
        outputs = outputs[0].cpu().data
        bboxes = outputs[:, 0:4]
        # Boxes come back in network-input coordinates; rescale to the
        # original image size before drawing.
        bboxes[:, 0::2] *= width / target_size[0]
        bboxes[:, 1::2] *= height / target_size[1]
        cls = outputs[:, 6]
        scores = outputs[:, 4] * outputs[:, 5]
        if isinstance(bboxes, torch.Tensor):
            bboxes = bboxes.cpu().numpy()
        res = visualize_det_cv2_part(
            im, scores, cls, bboxes, coco_label_map_list[1:], 0.1)
        cv2.imshow('rr', res)
        cv2.waitKey(0)
def _detect_and_show(img_f):
    """Read *img_f*, run ``predictor`` on it, print and visualize detections."""
    ori_img = cv2.imread(img_f)
    b = predictor(ori_img)['instances']
    boxes = b.pred_boxes.tensor.cpu().numpy()
    scores = b.scores.cpu().numpy()
    classes = b.pred_classes.cpu().numpy()
    print('b.pred_boxes: {}'.format(boxes))
    print('b.scores: {}'.format(scores))
    print('b.pred_classes: {}'.format(classes))
    visualize_det_cv2_part(ori_img, scores, classes, boxes,
                           class_names=coco_label_map_list,
                           thresh=0.16, is_show=True)


# Input is either a directory of .jpg images or a single image file;
# defaults to ./images when no argument is given.
if len(sys.argv) > 1:
    data_f = sys.argv[1]
else:
    data_f = './images'

if os.path.isdir(data_f):
    for img_f in glob.glob(os.path.join(data_f, '*.jpg')):
        _detect_and_show(img_f)
    sys.exit(0)
else:
    _detect_and_show(data_f)
data_f = sys.argv[1] else: data_f = './images' if os.path.isdir(data_f): img_files = glob.glob(os.path.join(data_f, '*.jpg')) for img_f in img_files: ori_img = cv2.imread(img_f) b = predictor(ori_img)['instances'] boxes = b.pred_boxes.tensor.cpu().numpy() scores = b.scores.cpu().numpy() classes = b.pred_classes.cpu().numpy() visualize_det_cv2_part(ori_img, scores, classes, boxes, class_names=categories, thresh=0.16, force_color=force_color, line_thickness=1, is_show=True, wait_t=100) else: ori_img = cv2.imread(data_f) b = predictor(ori_img)['instances'] boxes = b.pred_boxes.tensor.cpu().numpy() scores = b.scores.cpu().numpy() classes = b.pred_classes.cpu().numpy() visualize_det_cv2_part(ori_img, scores, classes, boxes, class_names=categories,