def model_test(test_folder,
               cfg_path,
               checkpoint,
               save_folder,
               opt_shape_param=None,
               max_workspace_size=1 << 25,
               device="cuda:0",
               score_thr=0.3,
               fp16=True,
               enable_mask=False):
    """Convert an mmdet model to TensorRT, then run it over a folder of images.

    The converted engine is saved to ``save_folder/trt_model.pth``; every
    .jpg/.png image in ``test_folder`` is run through the engine and an
    annotated copy is written to ``save_folder`` under the same file name.

    Args:
        test_folder: directory containing input .jpg/.png images.
        cfg_path: mmdet config file path.
        checkpoint: mmdet checkpoint file path.
        save_folder: output directory for the engine and annotated images.
        opt_shape_param: optional min/opt/max input-shape spec for mmdet2trt.
        max_workspace_size: TensorRT builder workspace size in bytes.
        device: CUDA device used for conversion and inference.
        score_thr: minimum detection score for a box to be drawn.
        fp16: build the engine in fp16 mode.
        enable_mask: enable mask-head conversion in mmdet2trt.
    """
    # makedirs(exist_ok=True) avoids the exists()/mkdir() race and also
    # creates missing parent directories, unlike the original os.mkdir().
    os.makedirs(save_folder, exist_ok=True)
    trt_model_path = osp.join(save_folder, 'trt_model.pth')

    logger.info("creating {} trt model.".format(cfg_path))
    trt_model = mmdet2trt(cfg_path,
                          checkpoint,
                          opt_shape_param=opt_shape_param,
                          max_workspace_size=int(max_workspace_size),
                          fp16_mode=fp16,
                          device=device,
                          enable_mask=enable_mask)
    logger.info("finish, save trt_model in {}".format(trt_model_path))
    torch.save(trt_model.state_dict(), trt_model_path)

    trt_model = init_detector(trt_model_path)

    for file_name in tqdm.tqdm(os.listdir(test_folder)):
        # BUG FIX: the original test
        #   not name.endswith('.jpg') or name.endswith('.png')
        # skipped every .png file because of operator precedence.
        # Accept both extensions with a single tuple-endswith check.
        if not file_name.lower().endswith(('.jpg', '.png')):
            continue

        image_path = osp.join(test_folder, file_name)

        result = inference_detector(trt_model, image_path, cfg_path, device)

        # result layout: (num_detections, bboxes, scores, classes); index [0]
        # selects batch element 0.
        num_detections = result[0].item()
        trt_bbox = result[1][0]
        trt_score = result[2][0]
        trt_cls = result[3][0]

        image = cv2.imread(image_path)
        for i in range(num_detections):
            score = trt_score[i].item()
            cls_id = int(trt_cls[i].item())
            if score < score_thr:
                continue
            bbox = tuple(int(v) for v in trt_bbox[i])

            # Derive a distinct BGR color from the low six bits of the class id.
            color = ((cls_id >> 2 & 1) * 128 + (cls_id >> 5 & 1) * 128,
                     (cls_id >> 1 & 1) * 128 + (cls_id >> 4 & 1) * 128,
                     (cls_id >> 0 & 1) * 128 + (cls_id >> 3 & 1) * 128)
            cv2.rectangle(image, bbox[:2], bbox[2:], color, thickness=5)
        cv2.imwrite(osp.join(save_folder, file_name), image)
# Example #2
def main():
    """CLI entry point: convert an mmdet model to TensorRT and display the
    detections for a single image.

    Positional args: image path, mmdet config, mmdet checkpoint, and the path
    where the serialized TensorRT model state dict is saved.
    """

    def str2bool(value):
        # BUG FIX: argparse's type=bool treats ANY non-empty string as True,
        # so "--fp16 False" silently enabled fp16. Parse the text explicitly
        # while keeping the original default of True.
        return str(value).lower() in ('1', 'true', 'yes', 'y')

    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='mmdet Config file')
    parser.add_argument('checkpoint', help='mmdet Checkpoint file')
    parser.add_argument('save_path', help='tensorrt model save path')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--score-thr',
                        type=float,
                        default=0.3,
                        help='bbox score threshold')
    parser.add_argument("--fp16",
                        type=str2bool,
                        default=True,
                        help="enable fp16 inference")
    args = parser.parse_args()

    cfg_path = args.config

    trt_model = mmdet2trt(cfg_path,
                          args.checkpoint,
                          fp16_mode=args.fp16,
                          device=args.device)
    torch.save(trt_model.state_dict(), args.save_path)

    trt_model = init_detector(args.save_path)

    image_path = args.img

    result = inference_detector(trt_model, image_path, cfg_path, args.device)

    # result layout: (num_detections, bboxes, scores, classes); [0] = batch 0.
    num_detections = result[0].item()
    trt_bbox = result[1][0]
    trt_score = result[2][0]
    trt_cls = result[3][0]

    image = cv2.imread(image_path)
    input_image_shape = image.shape
    for i in range(num_detections):
        score = trt_score[i].item()
        cls_id = int(trt_cls[i].item())
        if score < args.score_thr:
            continue
        bbox = tuple(int(v) for v in trt_bbox[i])

        # Derive a distinct BGR color from the low six bits of the class id.
        color = ((cls_id >> 2 & 1) * 128 + (cls_id >> 5 & 1) * 128,
                 (cls_id >> 1 & 1) * 128 + (cls_id >> 4 & 1) * 128,
                 (cls_id >> 0 & 1) * 128 + (cls_id >> 3 & 1) * 128)
        cv2.rectangle(image, bbox[:2], bbox[2:], color, thickness=5)

    # BUG FIX: image.shape is (height, width, channels). The original guard
    # compared height against 1280 and width against 720 — the limits were
    # swapped relative to the 720/height, 1280/width scale factors below.
    if input_image_shape[0] > 720 or input_image_shape[1] > 1280:
        scale = min(720 / image.shape[0], 1280 / image.shape[1])
        image = cv2.resize(image, (0, 0), fx=scale, fy=scale)
    cv2.imshow('image', image)
    cv2.waitKey()
def inference_test(trt_model,
                   cfg_path,
                   device,
                   test_folder,
                   save_folder,
                   score_thr=0.3):
    """Run an already-initialized TensorRT detector over a folder of images.

    Every .jpg/.png image in ``test_folder`` is processed; an annotated copy
    is written to ``save_folder`` under the same file name.

    Args:
        trt_model: detector returned by ``init_detector``.
        cfg_path: mmdet config file path (needed by ``inference_detector``).
        device: CUDA device used for inference.
        test_folder: directory containing input .jpg/.png images.
        save_folder: output directory for annotated images (must exist).
        score_thr: minimum detection score for a box to be drawn.
    """
    for file_name in tqdm.tqdm(os.listdir(test_folder)):
        # BUG FIX: the original test
        #   not name.endswith('.jpg') or name.endswith('.png')
        # skipped every .png file because of operator precedence.
        # Accept both extensions with a single tuple-endswith check.
        if not file_name.lower().endswith(('.jpg', '.png')):
            continue

        image_path = osp.join(test_folder, file_name)

        result = inference_detector(trt_model, image_path, cfg_path, device)

        # result layout: (num_detections, bboxes, scores, classes); [0] = batch 0.
        num_detections = result[0].item()
        trt_bbox = result[1][0]
        trt_score = result[2][0]
        trt_cls = result[3][0]

        image = cv2.imread(image_path)
        for i in range(num_detections):
            score = trt_score[i].item()
            cls_id = int(trt_cls[i].item())
            if score < score_thr:
                continue
            bbox = tuple(int(v) for v in trt_bbox[i])

            # Derive a distinct BGR color from the low six bits of the class id.
            color = ((cls_id >> 2 & 1) * 128 + (cls_id >> 5 & 1) * 128,
                     (cls_id >> 1 & 1) * 128 + (cls_id >> 4 & 1) * 128,
                     (cls_id >> 0 & 1) * 128 + (cls_id >> 3 & 1) * 128)
            cv2.rectangle(image, bbox[:2], bbox[2:], color, thickness=5)
        cv2.imwrite(osp.join(save_folder, file_name), image)
# Example #4
def main():
    """CLI entry point: convert an mmdet model to TensorRT with explicit
    optimization shapes and save the detections for a single image.

    NOTE(review): this is the second ``def main()`` in the file; at import
    time it shadows the earlier one, so this is the version the
    ``__main__`` guard actually runs.
    """
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='mmdet Config file')
    parser.add_argument('checkpoint', help='mmdet Checkpoint file')
    parser.add_argument('save_path', help='tensorrt model save path')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    parser.add_argument('--score-thr',
                        type=float,
                        default=0.3,
                        help='bbox score threshold')
    parser.add_argument('--fp16',
                        action='store_true',
                        help="enable fp16 inference")
    args = parser.parse_args()

    cfg_path = args.config
    opt_shape_param = None

    if opt_shape_param is None:
        # BUG FIX: in the original, this if-body contained only comments and
        # the assignment below was dedented, which is an IndentationError.
        # The default shape spec now lives inside the branch as intended.
        opt_shape_param = [
            [
                [1, 3, 224, 224],    # min tensor shape
                [1, 3, 800, 1312],   # shape used to do int8 calib
                [1, 3, 1344, 1344],  # max tensor shape
            ]
        ]
    trt_model = mmdet2trt(cfg_path,
                          args.checkpoint,
                          opt_shape_param=opt_shape_param,
                          fp16_mode=args.fp16,
                          device=args.device)
    torch.save(trt_model.state_dict(), args.save_path)

    trt_model = init_detector(args.save_path)

    image_path = args.img

    result = inference_detector(trt_model, image_path, cfg_path, args.device)

    # result layout: (num_detections, bboxes, scores, classes); [0] = batch 0.
    num_detections = result[0].item()
    trt_bbox = result[1][0]
    trt_score = result[2][0]
    trt_cls = result[3][0]

    image = cv2.imread(image_path)
    input_image_shape = image.shape
    for i in range(num_detections):
        score = trt_score[i].item()
        cls_id = int(trt_cls[i].item())
        if score < args.score_thr:
            continue
        bbox = tuple(int(v) for v in trt_bbox[i])

        # Derive a distinct BGR color from the low six bits of the class id.
        color = ((cls_id >> 2 & 1) * 128 + (cls_id >> 5 & 1) * 128,
                 (cls_id >> 1 & 1) * 128 + (cls_id >> 4 & 1) * 128,
                 (cls_id >> 0 & 1) * 128 + (cls_id >> 3 & 1) * 128)
        cv2.rectangle(image, bbox[:2], bbox[2:], color, thickness=5)

    # BUG FIX: image.shape is (height, width, channels). The original guard
    # compared height against 1280 and width against 720 — the limits were
    # swapped relative to the 720/height, 1280/width scale factors below.
    if input_image_shape[0] > 720 or input_image_shape[1] > 1280:
        scale = min(720 / image.shape[0], 1280 / image.shape[1])
        image = cv2.resize(image, (0, 0), fx=scale, fy=scale)
    # BUG FIX: imwrite needs a file extension to pick an encoder; the bare
    # path 'image' would fail to write anything.
    cv2.imwrite('image.jpg', image)


# Script entry point. NOTE(review): the file defines main() twice; the later
# definition shadows the earlier one, so that is the version invoked here.
if __name__ == '__main__':
    main()