Example #1
def run_demo(cfg, ckpt, score_threshold, images_dir, output_dir, dataset_type):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')
    device = torch.device(cfg.MODEL.DEVICE)

    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    # dummy_input = torch.randn(1, 3, 300, 300, device='cuda')
    # input_names = ["input"]
    # output_names = ["output"]
    # torch.onnx.export(model, dummy_input, "vgg_ssd300_voc.onnx", verbose=True, input_names=input_names, output_names=output_names)

    image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))
    mkdir(output_dir)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()
    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(images.to(device))[0]
        inference_time = time.time() - start

        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result[
            'scores']

        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]
        meters = ' | '.join([
            'objects {:02d}'.format(len(boxes)),
            'load {:03d}ms'.format(round(load_time * 1000)),
            'inference {:03d}ms'.format(round(inference_time * 1000)),
            'FPS {}'.format(round(1.0 / inference_time))
        ])
        print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths),
                                              image_name, meters))

        drawn_image = draw_boxes(image, boxes, labels, scores,
                                 class_names).astype(np.uint8)
        Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name))
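For reference, a minimal sketch of a command-line entry point that wires run_demo up, mirroring the cfg.merge_from_file / cfg.freeze pattern used in the main() examples further down; the flag names and defaults here are assumptions, not the repo's exact interface:

import argparse

def demo_main():
    # Hypothetical CLI wrapper around run_demo; flag names are assumptions.
    parser = argparse.ArgumentParser(description="SSD demo")
    parser.add_argument("--config-file", default="", metavar="FILE", type=str)
    parser.add_argument("--ckpt", default=None, type=str,
                        help="checkpoint path; the latest checkpoint is used if omitted")
    parser.add_argument("--score_threshold", default=0.7, type=float)
    parser.add_argument("--images_dir", default="demo", type=str)
    parser.add_argument("--output_dir", default="demo/result", type=str)
    parser.add_argument("--dataset_type", default="voc", type=str)
    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)  # cfg comes from the repo's config module
    cfg.freeze()
    run_demo(cfg, args.ckpt, args.score_threshold, args.images_dir,
             args.output_dir, args.dataset_type)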
Example #2
def do_run(cfg, model, distributed, **kwargs):
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model = model.module
    model.eval()
    device = torch.device(cfg.MODEL.DEVICE)
    data_loaders_val = make_data_loader(cfg,
                                        is_train=False,
                                        distributed=distributed)
    for dataset_name, data_loader in zip(cfg.DATASETS.TEST, data_loaders_val):
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
        if not os.path.exists(output_folder):
            mkdir(output_folder)
        run(model, data_loader, dataset_name, device, output_folder, **kwargs)
Example #3
def do_evaluation(cfg, model, distributed, **kwargs):
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model = model.module
    model.eval()
    device = torch.device(cfg.MODEL.DEVICE)
    data_loaders_val = make_data_loader(cfg,
                                        is_train=False,
                                        distributed=distributed)
    eval_results = []
    timer = Timer()
    timer.tic()
    for dataset_name, data_loader in zip(cfg.DATASETS.TEST, data_loaders_val):
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
        if not os.path.exists(output_folder):
            mkdir(output_folder)

        eval_result = inference(model, data_loader, dataset_name, device,
                                output_folder, **kwargs)
        eval_results.append(eval_result)
    print("\nTotal detection speed1: %.1f FPS" % (4952 / timer.toc()))
    return eval_results
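The Timer helper used here (and again in Example #14) is not shown; a minimal implementation consistent with the tic()/toc()/clear() calls might look like the sketch below — an assumption inferred from usage, not the repo's actual class:

import time

class Timer:
    # Minimal tic/toc stopwatch matching how the examples call it (assumed implementation).
    def __init__(self):
        self.start_time = 0.0

    def tic(self):
        # start (or restart) timing
        self.start_time = time.time()

    def toc(self):
        # elapsed seconds since the last tic()
        return time.time() - self.start_time

    def clear(self):
        self.start_time = 0.0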
Example #4
def run_demo(cfg, ckpt, score_threshold, images_dir, output_dir, onnx_dir, dataset_type):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')
    device = torch.device(cfg.MODEL.DEVICE)
    device = "cpu" if not torch.cuda.is_available() else device

    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))
    mkdir(output_dir)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()

    # get model ready for onnx export
    mkdir(onnx_dir)
    model_onnx = build_detection_model(cfg)
    model_onnx = model_onnx.to(device)
    checkpointer_onnx = CheckPointer(model_onnx, save_dir=cfg.OUTPUT_DIR)
    checkpointer_onnx.load(ckpt, use_latest=ckpt is None)
    # replace the SSD box head postprocessor with the onnx version for exporting
    model_onnx.box_head.post_processor = PostProcessorOnnx(cfg)
    model_onnx.eval()

    # export with ONNX
    # the onnx model takes the name of the .pth checkpoint file
    model_onnx_name = os.path.basename(weight_file).split('.')[0] + ".onnx"
    model_onnx_path = os.path.join(onnx_dir, model_onnx_name)
    if not os.path.exists(model_onnx_path):
        print(f'Exporting model as onnx to {model_onnx_path}')
        dummy_input = torch.zeros(
            [1, 3, cfg.INPUT.IMAGE_SIZE, cfg.INPUT.IMAGE_SIZE]).to(device)
        torch.onnx.export(model_onnx,
                          dummy_input,
                          model_onnx_path,
                          export_params=True,
                          do_constant_folding=True,
                          opset_version=11,
                          input_names=['input'],
                          output_names=['boxes', 'scores', 'labels'],
                          dynamic_axes={
                              'input': {0: 'batch_size', 2: "height", 3: "width"}},
                          verbose=False)

    # load exported onnx model for inference test
    print(
        f'Loading exported onnx model from {model_onnx_path} for inference comparison test')
    onnx_runtime_sess = onnxruntime.InferenceSession(model_onnx_path)

    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)

        load_time = time.time() - start

        start = time.time()
        result = model(images.to(device))[0]
        inference_time = time.time() - start
        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result['scores']

        indices = scores > score_threshold
        boxes, labels, scores = boxes[indices], labels[indices], scores[indices]
        meters = ' | '.join(
            [
                'objects {:02d}'.format(len(boxes)),
                'load {:03d}ms'.format(round(load_time * 1000)),
                'inference {:03d}ms'.format(round(inference_time * 1000)),
                'FPS {}'.format(round(1.0 / inference_time))
            ]
        )
        print('Pytorch: ({:04d}/{:04d}) {}: {}'.format(i + 1,
                                                       len(image_paths), image_name, meters))
        drawn_image = draw_boxes(image, boxes, labels,
                                 scores, class_names).astype(np.uint8)
        Image.fromarray(drawn_image).save(
            os.path.join(output_dir, "pytorch_" + image_name))

        """
        Compute ONNX Runtime output prediction
        """

        start = time.time()
        ort_inputs = {onnx_runtime_sess.get_inputs()[0].name: np.array(images)}
        boxes, scores, labels = onnx_runtime_sess.run(None, ort_inputs)
        inference_time = time.time() - start

        indices = scores > score_threshold
        boxes, labels, scores = boxes[indices], labels[indices], scores[indices]
        # resize bounding boxes to size of the original image
        boxes[:, 0::2] *= (width)
        boxes[:, 1::2] *= (height)
        meters = ' | '.join(
            [
                'objects {:02d}'.format(len(boxes)),
                'load {:03d}ms'.format(round(load_time * 1000)),
                'inference {:03d}ms'.format(round(inference_time * 1000)),
                'FPS {}'.format(round(1.0 / inference_time))
            ]
        )
        print('Onnx:    ({:04d}/{:04d}) {}: {}'.format(i + 1,
                                                       len(image_paths),
                                                       image_name, meters))
        drawn_image = draw_boxes(image, boxes, labels,
                                 scores, class_names).astype(np.uint8)
        Image.fromarray(drawn_image).save(
            os.path.join(output_dir, "onnx_" + image_name))
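Beyond eyeballing the two sets of drawn boxes, the backends can also be compared numerically. A sketch under the same assumptions as the example above (normalized ONNX boxes, a CPU input tensor, model on the same device as the input); the helper name and tolerances are arbitrary:

import numpy as np
import torch

def compare_backends(model, onnx_runtime_sess, images, width, height, cpu_device):
    # Sanity-check that PyTorch and ONNX Runtime agree on one input (sketch).
    with torch.no_grad():
        result = model(images)[0]
    result = result.resize((width, height)).to(cpu_device).numpy()

    ort_inputs = {onnx_runtime_sess.get_inputs()[0].name: images.numpy()}
    ort_boxes, ort_scores, ort_labels = onnx_runtime_sess.run(None, ort_inputs)
    ort_boxes = ort_boxes.copy()
    ort_boxes[:, 0::2] *= width   # the ONNX post-processor returns normalized boxes,
    ort_boxes[:, 1::2] *= height  # as in the resizing step of the example above

    np.testing.assert_allclose(result['scores'], ort_scores, rtol=1e-3, atol=1e-4)
    np.testing.assert_allclose(result['boxes'], ort_boxes, rtol=1e-2, atol=1.0)
    print('PyTorch and ONNX Runtime outputs agree within tolerance')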
Example #5
def run_demo(cfg, ckpt, score_threshold, images_dir, output_dir):
    class_names = VOCDataset.class_names
    device = torch.device(cfg.MODEL.DEVICE)
    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    image_paths = glob.glob(os.path.join(images_dir, '*.bmp'))
    mkdir(output_dir)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()

    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(images.to(device))[0]
        inference_time = time.time() - start

        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result[
            'scores']

        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        meters = ' | '.join([
            'objects {:02d}'.format(len(boxes)),
            'load {:03d}ms'.format(round(load_time * 1000)),
            'inference {:03d}ms'.format(round(inference_time * 1000)),
            'FPS {}'.format(round(1.0 / inference_time))
        ])
        print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths),
                                              image_name, meters))

        text = ['__background__']
        resDic = {}
        for j in range(len(boxes)):
            xmin = int(boxes[j, 0])
            ymin = int(boxes[j, 1])
            xmax = int(boxes[j, 2])
            ymax = int(boxes[j, 3])

            if labels[j] == 1:
                xmin += 140
                xmax -= 130
            elif labels[j] == 2:
                xmin += 130
            elif labels[j] == 4:
                xmin += 40

            box_height = ymax - ymin
            box_width = xmax - xmin

            cropImg = image[ymin:ymin + box_height, xmin:xmin + box_width]
            cropImg = local_threshold(cropImg)

            boxes[j, 0] = xmin
            boxes[j, 1] = ymin
            boxes[j, 2] = xmax
            boxes[j, 3] = ymax
            text_tmp = crnnOcr(Image.fromarray(cropImg))

            if labels[j] == 2:
                text_tmp = re.sub('[^\x00-\xff]', '/', text_tmp)

            text.append(text_tmp)
            resDic[class_names[labels[j]]] = text_tmp

        result = json.dumps(resDic, ensure_ascii=False)
        print(result)
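local_threshold, which binarizes each crop before the CRNN OCR step, is not defined in this snippet; a plausible stand-in based on OpenCV's adaptive thresholding — a guess at the helper's behavior, not the original implementation:

import cv2

def local_threshold(crop):
    # Binarize an RGB crop with locally adaptive (Gaussian) thresholding.
    gray = cv2.cvtColor(crop, cv2.COLOR_RGB2GRAY)
    return cv2.adaptiveThreshold(gray, 255,
                                 cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY,
                                 31,  # odd neighborhood size used to compute each local threshold
                                 10)  # constant subtracted from the local weighted mean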
Example #6
def main():
    parser = argparse.ArgumentParser(
        description='Single Shot MultiBox Detector Training With PyTorch')
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument('--log_step',
                        default=10,
                        type=int,
                        help='Print logs every log_step')
    parser.add_argument('--save_step',
                        default=2500,
                        type=int,
                        help='Save checkpoint every save_step')
    parser.add_argument(
        '--eval_step',
        default=2500,
        type=int,
        help='Evaluate dataset every eval_step, disabled when eval_step < 0')
    parser.add_argument('--use_tensorboard', default=True, type=str2bool)
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    args.num_gpus = num_gpus

    if torch.cuda.is_available():
        # This flag allows you to enable the inbuilt cudnn auto-tuner to
        # find the best algorithm to use for your hardware.
        torch.backends.cudnn.benchmark = True
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()

    # Train distance regression network
    train_distance_regr()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    if cfg.OUTPUT_DIR:
        mkdir(cfg.OUTPUT_DIR)

    logger = setup_logger("SSD", dist_util.get_rank(), cfg.OUTPUT_DIR)
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)

    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    model = train(cfg, args)

    if not args.skip_test:
        logger.info('Start evaluating...')
        torch.cuda.empty_cache()  # speed up evaluating after training finished
        do_evaluation(cfg, model, distributed=args.distributed)
Example #7
def run_demo(cfg, ckpt, score_threshold, images_dir, output_dir, dataset_type):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')
    device = torch.device(cfg.MODEL.DEVICE)
    smoke_name_dic = ('__background__', '一次性快餐盒', '书籍纸张', '充电宝', '剩饭剩菜', '包',
                      '垃圾桶', '塑料器皿', '塑料玩具', '塑料衣架', '大骨头', '干电池', '快递纸袋',
                      '插头电线', '旧衣服', '易拉罐', '枕头', '果皮果肉', '毛绒玩具', '污损塑料',
                      '污损用纸', '洗护用品', '烟蒂', '牙签', '玻璃器皿', '砧板', '筷子', '纸盒纸箱',
                      '花盆', '茶叶渣', '菜帮菜叶', '蛋壳', '调料瓶', '软膏', '过期药物', '酒瓶',
                      '金属厨具', '金属器皿', '金属食品罐', '锅', '陶瓷器皿', '鞋', '食用油桶', '饮料瓶',
                      '鱼骨')

    model = build_detection_model(cfg)
    cpu_device = torch.device("cpu")
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))
    mkdir(output_dir)

    transforms = build_transforms(cfg, is_train=False)
    model.eval()
    miss = 0

    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)
        cv_image = cv2.imread(image_path)
        PIL_image = Image.open(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(images.to(device))[0]
        inference_time = time.time() - start

        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result[
            'scores']

        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]

        miss = miss + max(0, 1 - len(boxes))  # assumes one object per image: count it as missed when nothing is detected
        meters = ' | '.join([
            'objects {:02d}'.format(len(boxes)),
            'load {:03d}ms'.format(round(load_time * 1000)),
            'inference {:03d}ms'.format(round(inference_time * 1000)),
            'FPS {}'.format(round(1.0 / inference_time))
        ])
        print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths),
                                              image_name, meters))

        draw_ = ImageDraw.Draw(PIL_image)
        for c in range(len(scores)):
            text = smoke_name_dic[labels[c]]
            font = ImageFont.truetype(
                '/usr/share/fonts/truetype/arphic/uming.ttc', 40)
            draw_.text((int(boxes[c][0]) + 2, int(boxes[c][1]) - 2),
                       text, (255, 0, 0),
                       font=font)

        cv_image = cv2.cvtColor(np.asarray(PIL_image), cv2.COLOR_RGB2BGR)
        for c in range(len(scores)):
            cv2.rectangle(cv_image, (int(boxes[c][0]), int(boxes[c][1])),
                          (int(boxes[c][2]), int(boxes[c][3])), (0, 0, 255), 4)
        cv2.imwrite(os.path.join(output_dir, image_name), cv_image)
    smoke_count = len(image_paths)
    print("出现:%d 漏掉: %d 漏检率:%.2f" % (smoke_count, miss, miss / smoke_count))
Example #8
def run_demo(cfg, model, score_threshold, images_dir, output_dir):
    device = torch.device(cfg.MODEL.DEVICE)
    class_names = VOCDataset.class_names
    mkdir(output_dir)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()

    start = time.time()
    image_name = os.path.basename(images_dir)

    image = np.array(Image.open(images_dir).convert("RGB"))
    height, width = image.shape[:2]
    images = transforms(image)[0].unsqueeze(0)
    load_time = time.time() - start

    start = time.time()
    result = model(images.to(device))[0]
    inference_time = time.time() - start

    result = result.resize((width, height)).to(cpu_device).numpy()
    boxes, labels, scores = result['boxes'], result['labels'], result['scores']

    indices = scores > score_threshold
    boxes = boxes[indices]
    labels = labels[indices]
    meters = ' | '.join([
        'objects {:02d}'.format(len(boxes)),
        'load {:03d}ms'.format(round(load_time * 1000)),
        'inference {:03d}ms'.format(round(inference_time * 1000)),
        'FPS {}'.format(round(1.0 / inference_time))
    ])
    print('{}: {}'.format(image_name, meters))

    text = ['__background__']
    resDic = {}
    for j in range(len(boxes)):
        xmin = int(boxes[j, 0])
        ymin = int(boxes[j, 1])
        xmax = int(boxes[j, 2])
        ymax = int(boxes[j, 3])

        if labels[j] == 1:
            xmin += 140
            xmax -= 130
        elif labels[j] == 2:
            xmin += 130
        elif labels[j] == 4:
            xmin += 40

        box_height = ymax - ymin
        box_width = xmax - xmin

        cropImg = image[ymin:ymin + box_height, xmin:xmin + box_width]
        cropImg = local_threshold(cropImg)

        text_tmp = crnnOcr(Image.fromarray(cropImg))

        if labels[j] == 2:
            text_tmp = re.sub('[^\x00-\xff]', '/', text_tmp)

        text.append(text_tmp)
        resDic[class_names[labels[j]]] = text_tmp

    return json.dumps(resDic, ensure_ascii=False).encode('utf-8')
Example #9
def main():
    # Parse the command line and read the config file
    '''
    Defines the model's basic hyperparameters. There are 20 object classes plus
    background, so NUM_CLASSES is 21. The input size: images are resized/padded
    to a 300x300 image so the originals are not distorted. Training uses the
    VOC 2007 and 2012 trainval splits; testing uses the VOC 2007 test split.
    The maximum number of iterations is 120000, plus the learning rate, gamma,
    and the rest of the solver hyperparameters, and the output directory.
    MODEL:
        NUM_CLASSES: 21
    INPUT:
        IMAGE_SIZE: 300
    DATASETS:
        TRAIN: ("voc_2007_trainval", "voc_2012_trainval")
        TEST: ("voc_2007_test", )
    SOLVER:
        MAX_ITER: 120000
        LR_STEPS: [80000, 100000]
        GAMMA: 0.1
        BATCH_SIZE: 32
        LR: 1e-3
    OUTPUT_DIR: 'outputs/vgg_ssd300_voc0712'
    Returns:
    '''
    parser = argparse.ArgumentParser(description='Single Shot MultiBox Detector Training With PyTorch')
    parser.add_argument(
        "--config-file",
        default="configs/vgg_ssd300_voc0712.yaml",
        # default="configs/vgg_ssd300_visdrone0413.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    # Save a checkpoint and run evaluation every 2500 steps; print logs every 10 steps.
    # TensorBoard logging (via tensorboardX) can be turned off if you do not need it.
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument('--log_step', default=10, type=int, help='Print logs every log_step')
    parser.add_argument('--save_step', default=2500, type=int, help='Save checkpoint every save_step')
    parser.add_argument('--eval_step', default=2500, type=int, help='Evaluate dataset every eval_step, disabled when eval_step < 0')
    parser.add_argument('--use_tensorboard', default=True, type=str2bool)
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    # Parse the arguments; multi-GPU training is supported
    args = parser.parse_args()
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    args.num_gpus = num_gpus

    # Run the necessary checks before starting
    if torch.cuda.is_available():
        # This flag allows you to enable the inbuilt cudnn auto-tuner to
        # find the best algorithm to use for your hardware.
        torch.backends.cudnn.benchmark = True
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
        synchronize()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # Create the model output directory
    if cfg.OUTPUT_DIR:
        mkdir(cfg.OUTPUT_DIR)

    # Use a logger for recording
    logger = setup_logger("SSD", dist_util.get_rank(), cfg.OUTPUT_DIR)
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)

    # Load the configuration file
    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    # Train the model
    model = train(cfg, args)

    # Start evaluation
    if not args.skip_test:
        logger.info('Start evaluating...')
        torch.cuda.empty_cache()  # speed up evaluating after training finished
        do_evaluation(cfg, model, distributed=args.distributed)
Example #10
def run_demo(cfg, ckpt, score_threshold, images_dir, dataset_type):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == "pick":
        class_names = PICKDataset.class_names
    elif dataset_type == "cotb":
        class_names = COTBDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')
    device = torch.device(cfg.MODEL.DEVICE)

    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    # assumes a checkpoint path like outputs/<run>/model_<epoch>.pth
    train_epoch = weight_file.split('/')[2]
    train_epoch = train_epoch.split('.')[0].split('_')[1]
    save_path = os.path.join('demo', dataset_type, cfg.MODEL.BACKBONE.NAME,
                             train_epoch)

    image_paths = glob.glob(os.path.join(images_dir, '*.jpg')) + glob.glob(
        os.path.join(images_dir, '*.jpeg'))
    mkdir(save_path)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()
    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(images.to(device))[0]
        inference_time = time.time() - start

        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result[
            'scores']

        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]
        meters = ' | '.join([
            'objects {:02d}'.format(len(boxes)),
            'load {:03d}ms'.format(round(load_time * 1000)),
            'inference {:03d}ms'.format(round(inference_time * 1000)),
            'FPS {}'.format(round(1.0 / inference_time))
        ])
        print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths),
                                              image_name, meters))

        drawn_image = draw_boxes(image, boxes, labels, scores,
                                 class_names).astype(np.uint8)
        Image.fromarray(drawn_image).save(os.path.join(save_path, image_name))
Example #11
def run_demo(cfg, ckpt, score_threshold, images_dir, output_dir, dataset_type, gen_heatmap):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')

    if torch.cuda.is_available():
        device = torch.device(cfg.MODEL.DEVICE)
    else:
        device = torch.device("cpu")

    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    mkdir(output_dir)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()

    dist_regr_model = DistanceRegrNet(2)
    dist_regr_model = load_model_weight(dist_regr_model, device)  # load weights
    dist_regr_model.eval()
    X_scaler = load_standardizer(Standardizer())
    person_label_idx = class_names.index('person')
    centroid_tracker = CentroidTracker()

    capture = cv2.VideoCapture(0)

    while capture.isOpened():
        ret, frame = capture.read()
        single_frame_render_time = 0
        if ret:
            image = frame
            height, width = image.shape[:2]
            start_time = time.time()
            images = transforms(frame)[0].unsqueeze(0)
            result = model(images.to(device))[0]
            result = result.resize((width, height)).to(cpu_device).numpy()
            single_frame_render_time += round((time.time() - start_time) * 1000, 3)
            print(f"MobileNet SSD Inference time {round((time.time() - start_time) * 1000, 3)}ms")
            boxes, labels, scores = result['boxes'], result['labels'], result['scores']

            # remove all non person class detections
            indices = np.logical_and(scores > score_threshold,
                                     labels == person_label_idx)
            boxes = boxes[indices]
            labels = labels[indices]
            scores = scores[indices]
            distances = None

            # create gaussian mixture models and kde plots only if centers detected
            if len(boxes) != 0:
                centers = np.apply_along_axis(get_mid_point, 1, boxes)
                image = draw_points(image, centers)  # draw center points on image

                # Distance Regression
                start_time = time.time()
                # As boxes is in (xmin, ymin, xmax, ymax) format
                # X should always have width, height format
                width = boxes[:, 2] - boxes[:, 0]
                height = boxes[:, 3] - boxes[:, 1]
                X = np.column_stack((width, height))
                X_scaled = X_scaler.transform(X)
                distances = dist_regr_model(torch.Tensor(X_scaled).to(device)).to(cpu_device).numpy()
                single_frame_render_time += round((time.time() - start_time) * 1000, 3)
                print(f"Distance Regression Inference time {round(time.time() - start_time, 4) * 1000}ms")

                # object tracking with centroids
                start_time = time.time()

                objects = centroid_tracker.update(centers, distances)
                # loop over the tracked objects
                # for (objectID, centroid) in objects.items():
                #     print("Center Distances tracked overtime")
                #     print(centroid_tracker.obj_distance_counts[objectID])
                single_frame_render_time += round((time.time() - start_time) * 1000, 3)
                print(f"Centroid Tracking Update time {round(time.time() - start_time, 4) * 1000}ms")

                if len(centers) > 1:
                    # reset center point ranges to a min of 0 and max of 100
                    _x = centers[:, 0]
                    _y = centers[:, 1]
                    centers[:, 0] = reset_range(max(_x), min(_x), 100, 0, _x)
                    centers[:, 1] = reset_range(max(_y), min(_y), 100, 0, _y)

                # DBSCAN Clustering
                start_time = time.time()
                dbscan_center = DBSCAN(eps=18)
                dbscan_center.fit(centers)

                # print("DBSCAN Clusters", dbscan_center._labels)
                # print("Unique number of clusters", len(set(dbscan_center._labels)))
                single_frame_render_time += round((time.time() - start_time) * 1000, 3)
                print(f"DBSCAN Clustering time {round((time.time() - start_time) * 1000, 3)}ms")

                if gen_heatmap:
                    image = generate_cv2_heatmap(centers, dbscan_center._labels, None, None,
                                                 len(set(dbscan_center._labels)),
                                                 covariance_type='diag')
                    cv2.imshow("frame", image)

            if not gen_heatmap:
                drawn_image = draw_boxes(image, boxes, labels, scores, distances, class_names).astype(np.uint8)
                cv2.imshow("frame", drawn_image)

            print(f"Total time to render one frame {single_frame_render_time}." +
                  f"FPS {round(1 / (single_frame_render_time / 1000))}")

            key = cv2.waitKey(1)
            if key & 0xFF == ord('x'):
                break
        else:
            break

    print("Distance counts for tracked objects")
    print(centroid_tracker.obj_distance_counts)

    write_file = f'{output_dir}/dist_regr_results/{round(time.time())}.txt'
    print(f"Writing the distance values to file {write_file}")
    os.makedirs(f'{output_dir}/dist_regr_results', exist_ok=True)
    with open(write_file, 'w') as fw:
        for key, arr in centroid_tracker.obj_distance_counts.items():
            arr = [str(v) for v in arr]
            fw.write(str(key) + ',' + ','.join(arr))
            fw.write('\n')

    capture.release()
    cv2.destroyAllWindows()
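Two helpers used above, get_mid_point and reset_range, are not shown; minimal sketches consistent with how they are called (assumptions inferred from usage):

import numpy as np

def get_mid_point(box):
    # Center of an (xmin, ymin, xmax, ymax) box; applied row-wise via np.apply_along_axis.
    xmin, ymin, xmax, ymax = box
    return np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])

def reset_range(old_max, old_min, new_max, new_min, values):
    # Linearly rescale values from [old_min, old_max] into [new_min, new_max]
    # (assumes old_max != old_min, which the len(centers) > 1 guard above helps with).
    scale = (new_max - new_min) / float(old_max - old_min)
    return new_min + (values - old_min) * scale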
Example #12
def run_demo(cfg,
             ckpt,
             score_threshold,
             images_dir,
             output_dir,
             dataset_type,
             model_path=None):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        class_names = TxtDataset(dataset_name=dataset_type).class_names
    # else:
    #     raise NotImplementedError('Not implemented now.')
    device = torch.device(cfg.MODEL.DEVICE)

    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    # The checkpoint is loaded below, depending on whether model_path is given.
    # model.backbone.bn_fuse()  # demo.py must be modified to call bn_fuse, since the FPGA side has no BN layers
    # model.to(device)
    if model_path is None:
        checkpointer.load(ckpt, use_latest=ckpt is None)
        weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
        print('Loaded weights from {}'.format(weight_file))
    else:
        model.load_state_dict(torch.load(model_path))

    if cfg.TEST.BN_FUSE is True:
        print('BN_FUSE.')
        model.backbone.bn_fuse()
        model.to(device)
    image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))  #.png
    mkdir(output_dir)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()
    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(images.to(device))[0]
        inference_time = time.time() - start

        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result[
            'scores']

        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]
        meters = ' | '.join([
            'objects {:02d}'.format(len(boxes)),
            'load {:03d}ms'.format(round(load_time * 1000)),
            'inference {:03d}ms'.format(round(inference_time * 1000)),
            'FPS {}'.format(round(1.0 / inference_time))
        ])
        print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths),
                                              image_name, meters))

        drawn_image = draw_boxes(image, boxes, labels, scores,
                                 class_names).astype(np.uint8)
        Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name))
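The bn_fuse() step folds each BatchNorm layer into the preceding convolution, which is why it is required before deploying to an FPGA pipeline with no BN support. A generic sketch of the folding arithmetic for one conv/BN pair — not this repo's implementation:

import torch

@torch.no_grad()
def fuse_conv_bn(conv, bn):
    # Fold BatchNorm2d statistics and affine parameters into a Conv2d (generic sketch).
    # BN(conv(x)) == conv'(x) with: W' = W * s, b' = (b - mean) * s + beta,
    # where s = gamma / sqrt(running_var + eps), applied per output channel.
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    conv.weight.mul_(scale.reshape(-1, 1, 1, 1))
    bias = conv.bias if conv.bias is not None else torch.zeros_like(bn.running_mean)
    fused_bias = (bias - bn.running_mean) * scale + bn.bias
    if conv.bias is None:
        conv.bias = torch.nn.Parameter(fused_bias)
    else:
        conv.bias.copy_(fused_bias)
    return conv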
Example #13
def run_demo(cfg, ckpt, score_threshold, images_dir, output_dir, dataset_type):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')

    if torch.cuda.is_available():
        device = torch.device(cfg.MODEL.DEVICE)
    else:
        device = torch.device("cpu")

    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    images_dir = 'datasets/MOT16/train/MOT16-02/img1'
    image_paths = sorted(glob.glob(os.path.join(images_dir, '*.jpg')))
    mkdir(output_dir)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()

    person_label_idx = class_names.index('person')
    centroid_tracker = CentroidTracker()
    wfile = open('py-motmetrics/motmetrics/data/MOT16/predicted/MOT16-02.txt',
                 'w')
    inference_times = []

    for i, image_path in enumerate(image_paths):
        image_name = os.path.basename(image_path)
        start_time = time.time()
        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        result = model(images.to(device))[0]

        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result[
            'scores']

        # remove all non person class detections
        indices = np.logical_and(scores > score_threshold,
                                 labels == person_label_idx)
        boxes = boxes[indices]
        distances = None

        inference_times.append(time.time() - start_time)
        print(time.time() - start_time)

        if len(boxes) != 0:
            centers = np.apply_along_axis(get_mid_point, 1, boxes)

            # object tracking with centroids
            centroid_tracker.update(centers, distances, boxes)

            fnum = int(image_name.split('.')[0])
            # loop over the tracked objects
            for (objID, bbox_) in centroid_tracker.obj_bbox.items():
                xm, ym = bbox_[0], bbox_[1]
                w, h = bbox_[2] - bbox_[0], bbox_[3] - bbox_[1]
                output = f"{fnum},{objID},{xm},{ym},{w},{h},-1,-1,-1\n"
                wfile.write(output)

            # drawn_image = draw_boxes(image, boxes, labels, scores, distances, class_names).astype(np.uint8)
            # Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name))

    framerates = [1 / tm for tm in inference_times]
    print(
        f"Avg frame rate is {sum(framerates) / len(framerates)} for {len(framerates)} frames"
    )
    wfile.close()
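Each line written to MOT16-02.txt follows the MOTChallenge plain-text layout, frame,id,bb_left,bb_top,bb_width,bb_height followed by -1 placeholders for the unused fields; a small hypothetical reader for those files:

def read_mot_results(path):
    # Parse lines like '1,3,794.0,247.0,71.0,174.0,-1,-1,-1' back into tuples (sketch).
    rows = []
    with open(path) as f:
        for line in f:
            parts = line.strip().split(',')
            frame, obj_id = int(parts[0]), int(parts[1])
            left, top, w, h = map(float, parts[2:6])
            rows.append((frame, obj_id, left, top, w, h))
    return rows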
Example #14
def run_demo(cfg, ckpt, score_threshold, images_dir, output_dir, dataset_type):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')
    device = torch.device(cfg.MODEL.DEVICE)

    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))
    mkdir(output_dir)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()

    _t = {'im_detect': Timer()}
    timer = Timer()
    timer.tic()

    inference_time_list = []
    load_time_list = []

    for image_path in image_paths:
        start = time.time()
        image_name = os.path.basename(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start
        load_time_list.append(1000 * load_time)

        _t['im_detect'].tic()
        result = model(images.to(device))[0]
        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result['scores']

        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]
        inference_time = _t['im_detect'].toc()
        inference_time_list.append(1000 * inference_time)

        meters = ' | '.join([
            'objects {:02d}'.format(len(boxes)),
            'load {:03d}ms'.format(round(load_time * 1000)),
            'inference {:03d}ms'.format(round(inference_time * 1000)),
            'FPS {}'.format(round(1.0 / inference_time))
        ])
        # print('{}: {}'.format(image_name, meters))
        # drawn_image = draw_boxes(image, boxes, labels, scores, class_names).astype(np.uint8)
        # Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name))
        _t['im_detect'].clear()

    # report the mean over the fastest half of the runs, which excludes warm-up
    N = len(inference_time_list) // 2
    total_time_list = np.array(inference_time_list) + np.array(load_time_list)

    total_time_list.sort()
    inference_time_list.sort()

    det_time = np.mean(total_time_list[:N])
    best_det_time = np.min(total_time_list)

    print("Total test time: %.2f s" % (timer.toc()))
    print("\nTotal detection speed: %.1f FPS" % (len(inference_time_list) / timer.toc()))
    print("\nAvg detection speed: %.1f FPS" % (1000. / det_time))
    print("Best detection speed: %.1f FPS" % (1000. / best_det_time))
Example #15
def run_demo(cfg, ckpt, score_threshold, images_dir, output_dir, dataset_type):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')
    device = torch.device(cfg.MODEL.DEVICE)
    cpu_device = torch.device("cpu")
    model = build_detection_model(cfg)
    model = model.to(cpu_device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))
    mkdir(output_dir)

    transforms = build_transforms(cfg, is_train=False)
    model.eval()
    # `color` and `fontStyle` were undefined in the original snippet; assumed values
    # (the font path is borrowed from Example #7):
    color = (255, 0, 0)
    fontStyle = ImageFont.truetype('/usr/share/fonts/truetype/arphic/uming.ttc', 40)
    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(images.to(cpu_device))[0]
        inference_time = time.time() - start

        result = result.resize((width, height)).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result[
            'scores']

        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]
        meters = ' | '.join([
            'objects {:02d}'.format(len(boxes)),
            'load {:03d}ms'.format(round(load_time * 1000)),
            'inference {:03d}ms'.format(round(inference_time * 1000)),
            'FPS {}'.format(round(1.0 / inference_time))
        ])
        print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths),
                                              image_name, meters))
        for j in range(len(labels)):
            # `label_name` was undefined in the original; class_names (set above) is used instead
            text = str(class_names[labels[j]]) + str(round(scores[j], 2))
            box = [int(v) for v in boxes[j]]
            cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), color, 3)
            image = Image.fromarray(image)
            draw = ImageDraw.Draw(image)
            draw.text((box[0], box[1] - 40), text, color, font=fontStyle)
            image = np.array(image)
        cv2.imshow('drawn_image', image)
        cv2.waitKey(1)  # give the window a chance to refresh
        # drawn_image = draw_boxes(image, boxes, labels, scores, class_names).astype(np.uint8)
        Image.fromarray(image).save(os.path.join(output_dir, image_name))
Example #16
def run_demo(cfg, ckpt, score_threshold, images_dir, output_dir, dataset_type,
             gen_heatmap):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')

    if torch.cuda.is_available():
        device = torch.device(cfg.MODEL.DEVICE)
    else:
        device = torch.device("cpu")

    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))
    mkdir(output_dir)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()

    dist_regr_model = DistanceRegrNet(2)
    dist_regr_model = load_model_weight(dist_regr_model,
                                        device)  # load weights
    dist_regr_model.eval()
    X_scaler = load_standardizer(Standardizer())

    person_label_idx = class_names.index('person')

    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(images.to(device))[0]
        inference_time = time.time() - start

        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result[
            'scores']

        # remove all non person class detections
        indices = np.logical_and(scores > score_threshold,
                                 labels == person_label_idx)
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]
        distances = None

        # create gaussian mixture models and kde plots only if centers detected
        if len(boxes) != 0:
            centers = np.apply_along_axis(get_mid_point, 1, boxes)
            image = draw_points(image, centers)  # draw center points on image

            # reset center point ranges to a min of 0 and max of 100
            _x = centers[:, 0]
            _y = centers[:, 1]
            centers[:, 0] = reset_range(max(_x), min(_x), 100, 0, _x)
            centers[:, 1] = reset_range(max(_y), min(_y), 100, 0, _y)

            # DBSCAN Clustering
            start = time.time()
            dbscan_center = DBSCAN(eps=18)
            dbscan_center.fit(centers)
            # print("dbscan clusters", dbscan_center._labels)
            # print("Unique number of clusters", len(set(dbscan_center._labels)))
            print(
                f"DBSCAN clustering time {round((time.time() - start) * 1000, 3)}ms"
            )

            # Distance Regression
            start_time = time.time()
            # As boxes is in (xmin, ymin, xmax, ymax) format
            # X should always have width, height format
            width = boxes[:, 2] - boxes[:, 0]
            height = boxes[:, 3] - boxes[:, 1]
            X = np.column_stack((width, height))
            X_scaled = X_scaler.transform(X)
            distances = dist_regr_model(torch.Tensor(X_scaled).to(device))
            print(
                f"Distance Regr Inference time {round(time.time() - start_time, 4) * 1000}ms"
            )

            if gen_heatmap:
                generate_sns_kde_heatmap(centers[:, 0], centers[:, 1], i,
                                         image_name)

                generate_sk_gaussian_mixture(centers,
                                             dbscan_center._labels,
                                             i,
                                             image_name,
                                             len(set(dbscan_center._labels)),
                                             covariance_type='diag')

                generate_cv2_heatmap(centers,
                                     dbscan_center._labels,
                                     i,
                                     image_name,
                                     len(set(dbscan_center._labels)),
                                     covariance_type='diag')

        meters = ' | '.join([
            'objects {:02d}'.format(len(boxes)),
            'load {:03d}ms'.format(round(load_time * 1000)),
            'inference {:03d}ms'.format(round(inference_time * 1000)),
            'FPS {}'.format(round(1.0 / inference_time))
        ])
        print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths),
                                              image_name, meters))

        drawn_image = draw_boxes(image, boxes, labels, scores, distances,
                                 class_names).astype(np.uint8)
        Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name))
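Standardizer (restored via load_standardizer) scales the width/height features before they reach DistanceRegrNet; a minimal compatible sketch assuming plain z-score scaling, which may differ from the repo's own class:

import numpy as np

class Standardizer:
    # Z-score feature scaler matching the X_scaler.transform(X) usage above (assumed).
    def __init__(self):
        self.mean = None
        self.std = None

    def fit(self, X):
        self.mean = X.mean(axis=0)
        self.std = X.std(axis=0)
        return self

    def transform(self, X):
        return (X - self.mean) / self.std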