Example no. 1
0
def build_dataset(args, train_size, val_size, device):
    """Build the training dataset and its matching evaluator.

    Args:
        args: parsed CLI namespace; reads ``args.dataset`` ('voc' or
            'coco'), ``args.root`` (dataset root directory) and
            ``args.mosaic`` (mosaic-augmentation flag).
        train_size: input image size used for the training transforms.
        val_size: input image size used for the validation transforms.
        device: torch device handed to the evaluator.

    Returns:
        Tuple of ``(dataset, evaluator, num_classes)``.

    Exits the process with a non-zero status for an unsupported dataset.
    """
    if args.dataset == 'voc':
        data_dir = os.path.join(args.root, 'VOCdevkit')
        num_classes = 20
        dataset = VOCDetection(data_dir=data_dir,
                               img_size=train_size,
                               transform=TrainTransforms(train_size),
                               color_augment=ColorTransforms(train_size),
                               mosaic=args.mosaic)

        evaluator = VOCAPIEvaluator(data_dir=data_dir,
                                    img_size=val_size,
                                    device=device,
                                    transform=ValTransforms(val_size))

    elif args.dataset == 'coco':
        data_dir = os.path.join(args.root, 'COCO')
        num_classes = 80
        dataset = COCODataset(data_dir=data_dir,
                              img_size=train_size,
                              image_set='train2017',
                              transform=TrainTransforms(train_size),
                              color_augment=ColorTransforms(train_size),
                              mosaic=args.mosaic)

        evaluator = COCOAPIEvaluator(data_dir=data_dir,
                                     img_size=val_size,
                                     device=device,
                                     transform=ValTransforms(val_size))

    else:
        # Was exit(0) with a typo'd message: a fatal configuration error
        # must report a non-zero exit status so calling scripts notice.
        print('unknown dataset !! Only support voc and coco !!')
        exit(1)

    return dataset, evaluator, num_classes
Example no. 2
0
def voc_test(model, data_dir, device, img_size):
    """Evaluate *model* on the PASCAL VOC benchmark.

    Builds a VOCAPIEvaluator over ``data_dir`` with a validation
    transform sized to ``img_size`` and runs it on the model.
    """
    val_transform = ValTransforms(img_size)
    voc_evaluator = VOCAPIEvaluator(
        data_root=data_dir,
        img_size=img_size,
        device=device,
        transform=val_transform,
        display=True,
    )

    # Run the VOC mAP evaluation.
    voc_evaluator.evaluate(model)
Example no. 3
0
def coco_test(model, data_dir, device, img_size, test=False):
    """Evaluate *model* on COCO.

    Args:
        model: detector to evaluate.
        data_dir: COCO dataset root directory.
        device: torch device used by the evaluator.
        img_size: input image size for the validation transform.
        test: if True, evaluate on the test-dev 2017 split; otherwise
            run the regular validation evaluation.
    """
    if test:
        print('test on test-dev 2017')

    # The original two branches differed only in the `testset` flag,
    # so build the evaluator once and pass the flag through directly.
    evaluator = COCOAPIEvaluator(
                    data_dir=data_dir,
                    img_size=img_size,
                    device=device,
                    testset=test,
                    transform=ValTransforms(img_size)
                    )

    # COCO evaluation
    evaluator.evaluate(model)
Example no. 4
0
def run():
    """Entry point: parse CLI args, build YOLO-Nano, load weights, detect."""
    args = parse_args()

    # Pick the compute device from the CLI flag.
    device = torch.device("cuda" if args.cuda else "cpu")

    # Only the yolo_nano version is supported here; bail out early
    # for anything else (same behavior as the original else branch).
    if args.version != 'yolo_nano':
        print('Unknown version !!!')
        exit()

    # Build the model.
    from models.yolo_nano import YOLONano
    model = YOLONano(device=device,
                     input_size=args.img_size,
                     num_classes=80,
                     anchor_size=config.MULTI_ANCHOR_SIZE_COCO,
                     backbone='1.0x')

    # Load the trained weights (non-strict so extra keys are tolerated).
    checkpoint = torch.load(args.trained_model, map_location=device)
    model.load_state_dict(checkpoint, strict=False)
    model.to(device).eval()
    print('Finished loading model!')

    # Run detection in the requested mode.
    detect(net=model,
           device=device,
           transform=ValTransforms(args.img_size),
           mode=args.mode,
           path_to_img=args.path_to_img,
           path_to_vid=args.path_to_vid,
           path_to_save=args.path_to_save,
           thresh=args.visual_threshold)
Example no. 5
0
        exit(0)

    # build model
    model = yolo_net(device=device,
                     input_size=args.img_size,
                     num_classes=num_classes,
                     trainable=False,
                     conf_thresh=args.conf_thresh,
                     nms_thresh=args.nms_thresh,
                     anchor_size=anchor_size)

    # load weight
    if args.trained_model:
        model.load_state_dict(
            torch.load(args.trained_model, map_location=device))
        print('Finished loading model!')
    else:
        print('The path to trained_model file is None !')
        exit(0)
    model = model.to(device).eval()

    # fuse conv bn
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)

    # run
    test(net=model,
         device=device,
         testset=dataset,
         transform=ValTransforms(img_size))
Esempio n. 6
0
                       anchor_size=anchor_size,
                       backbone=backbone)
        print('Let us train yolo_nano ......')

    else:
        print('Unknown version !!!')
        exit()

    # load weight
    net.load_state_dict(torch.load(args.trained_model, map_location=device))
    net.to(device).eval()
    print('Finished loading model!')

    # TTA
    test_aug = TestTimeAugmentation(
        num_classes=num_classes) if args.test_aug else None

    # run
    test(args=args,
         net=net,
         device=device,
         dataset=dataset,
         transforms=ValTransforms(args.img_size),
         vis_thresh=args.visual_threshold,
         class_colors=class_colors,
         class_names=class_names,
         class_indexs=class_indexs,
         show=args.show,
         test_aug=test_aug,
         dataset_name=args.dataset)