def test_detection_model_output_shape(model_name, dataset_name, datasplit_kwargs, output_shapes, mock_dataset_path):
    model = get_model_by_name(
        model_name=model_name,
        dataset_name=dataset_name,
        pretrained=True,
        progress=False,
        device="cpu",
    )
    if model_name in MODEL_NAME_DATASPLIT_FN_ARG_MAP:
        model_name = MODEL_NAME_DATASPLIT_FN_ARG_MAP[model_name]
    train_loader = get_data_splits_by_name(
        data_root=mock_dataset_path,
        dataset_name=dataset_name,
        model_name=model_name,
        batch_size=TEST_BATCH_SIZE,
        num_workers=0,
        device="cpu",
        **datasplit_kwargs,
    )["train"]

    if "yolo" in model_name:
        dataset = train_loader.dataset
        img, _, _, _ = dataset[0]
        y = model(torch.unsqueeze(img, dim=0))
        assert y[0].shape == (1, *output_shapes[0])
        assert y[1].shape == (1, *output_shapes[1])
        assert y[2].shape == (1, *output_shapes[2])
    else:
        img, _, _ = next(iter(train_loader))
        model.eval()
        y1, y2 = model(img)
        assert y1.shape == (TEST_BATCH_SIZE, *output_shapes[0])
        assert y2.shape == (TEST_BATCH_SIZE, *output_shapes[1])
def test_segmentation_model_output_shape(model_name, dataset_name, datasplit_kwargs, output_shape):
    model = get_model_by_name(
        model_name=model_name,
        dataset_name=dataset_name,
        pretrained=True,
        progress=False,
        device="cpu",
    )
    if model_name in MODEL_NAME_DATASPLIT_FN_ARG_MAP:
        model_name = MODEL_NAME_DATASPLIT_FN_ARG_MAP[model_name]
    test_loader = get_data_splits_by_name(
        data_root=MOCK_DATASETS_PATH if "voc" in dataset_name else MOCK_CARVANA_PATH,
        dataset_name=dataset_name,
        model_name=model_name,
        num_workers=0,
        device="cpu",
        **datasplit_kwargs,
    )["test"]
    dataset = test_loader.dataset

    if "unet" in model_name:
        img, msk, _ = dataset[0]
    else:
        img, msk = dataset[0]

    model.eval()
    y = model(torch.unsqueeze(img, dim=0))
    assert y.shape == (*output_shape, *msk.shape)
def test_mb2_ssd_coco_6(self):
    model = get_model_by_name(
        model_name="mb2_ssd",
        dataset_name="coco_gm_6",
        pretrained=True,
        progress=False,
    )
    test_loader = get_data_splits_by_name(
        data_root="/home/ehsan/data/",
        dataset_name="coco_gm",
        model_name="mb2_ssd",
        batch_size=32,
        train_ann_file="train_data_COCO.json",
        train_dir="images/train",
        val_ann_file="test_data_COCO.json",
        val_dir="images/test",
        classes=["class1", "class2", "class3", "class4", "class5", "class6"],
    )["test"]
    cocoGt = COCO("/home/ehsan/data/test_data_COCO.json")
    eval_fn = get_eval_function("mb2_ssd", "coco_gm")
    APs = eval_fn(
        model,
        test_loader,
        gt=cocoGt,
        _set="coco",
    )
    self.assertEqual(abs(APs["mAP"] - 0.227) < 0.001, True)
def test_mb2_ssd_coco_80(self):
    model = get_model_by_name(
        model_name="mb2_ssd",
        dataset_name="coco_80",
        pretrained=True,
        progress=False,
    )
    from deeplite_torch_zoo.src.objectdetection.datasets.coco_config import (
        DATA,
        MISSING_IDS,
    )
    test_loader = get_data_splits_by_name(
        data_root="/neutrino/datasets/coco2017/",
        dataset_name="coco",
        model_name="mb2_ssd",
        batch_size=32,
        missing_ids=MISSING_IDS,
        classes=DATA["CLASSES"],
    )["test"]
    cocoGt = COCO("/neutrino/datasets/coco2017/annotations/instances_val2017.json")
    eval_fn = get_eval_function("mb2_ssd", "coco_80")
    APs = eval_fn(
        model,
        test_loader,
        gt=cocoGt,
        _set="coco",
    )
    print(APs)
    self.assertEqual(abs(APs["mAP"] - 0.138) < 0.001, True)
def test_yolov5_6s_voc(self):
    model = get_model_by_name(
        model_name="yolo5_6s",
        dataset_name="voc_20",
        pretrained=True,
        progress=False,
    )
    eval_fn = get_eval_function("yolo5_6s", "voc_20")
    APs = eval_fn(model, "/neutrino/datasets/VOCdevkit/VOC2007/", _set="voc")
    print(APs)
    self.assertEqual(abs(APs["mAP"] - 0.821) < 0.001, True)
def test_classification_model_output_shape(model_name, dataset_name, input_resolution, num_inp_channels, target_output_shape):
    model = get_model_by_name(
        model_name=model_name,
        dataset_name=dataset_name,
        pretrained=True,
        progress=False,
        device="cpu",
    )
    y = model(
        torch.randn(TEST_BATCH_SIZE, num_inp_channels, input_resolution, input_resolution)
    )
    assert y.shape == (TEST_BATCH_SIZE, target_output_shape)
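# A hypothetical pytest parametrization for the test above. The concrete
# (model, dataset, resolution, channels, num_classes) tuples come from the
# repo's own parametrize tables, so the values below are illustrative
# assumptions only, not the actual fixture data:
#
# @pytest.mark.parametrize(
#     ("model_name", "dataset_name", "input_resolution",
#      "num_inp_channels", "target_output_shape"),
#     [
#         ("resnet18", "cifar100", 32, 3, 100),
#     ],
# )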
def test_mb3_small_vww(self):
    model = get_model_by_name(
        model_name="mobilenetv3_small",
        dataset_name="vww",
        pretrained=True,
        progress=False,
    )
    test_loader = get_data_splits_by_name(
        data_root="/neutrino/datasets/vww",
        dataset_name="vww",
        batch_size=128,
    )["test"]
    eval_fn = get_eval_function("mobilenetv3_small", "vww")
    ACC = eval_fn(model, test_loader)
    self.assertEqual(abs(ACC["acc"] - 0.892) < 0.001, True)
def test_mb2_ssd_voc_20(self):
    model = get_model_by_name(
        model_name="mb2_ssd",
        dataset_name="voc_20",
        pretrained=True,
        progress=False,
    )
    test_loader = get_data_splits_by_name(
        data_root="/neutrino/datasets/VOCdevkit",
        dataset_name="voc",
        model_name="mb2_ssd_lite",
        batch_size=32,
    )["test"]
    eval_fn = get_eval_function("mb2_ssd", "voc_20")
    APs = eval_fn(model, test_loader)
    self.assertEqual(abs(APs["mAP"] - 0.443) < 0.001, True)
def test_resnet50_tinyimagenet(self):
    model = get_model_by_name(
        model_name="resnet50",
        dataset_name="tinyimagenet",
        pretrained=True,
        progress=False,
    )
    test_loader = get_data_splits_by_name(
        data_root="/neutrino/datasets/TinyImageNet/",
        dataset_name="tinyimagenet",
        batch_size=128,
        num_workers=0,
    )["val"]
    eval_fn = get_eval_function("resnet50", "tinyimagenet")
    ACC = eval_fn(model, test_loader)
    print(ACC)
    self.assertEqual(abs(ACC["acc"] - 0.730) < 0.001, True)
def test_vgg16_ssd_wider_face(self):
    model = get_model_by_name(
        model_name="vgg16_ssd",
        dataset_name="wider_face",
        pretrained=True,
        progress=False,
    )
    test_loader = get_data_splits_by_name(
        data_root="/neutrino/datasets/wider_face",
        dataset_name="wider_face",
        model_name="vgg16_ssd",
        batch_size=8,
    )["test"]
    eval_fn = get_eval_function("vgg16_ssd", "wider_face")
    APs = eval_fn(model, test_loader)
    print(APs)
    self.assertEqual(abs(APs["mAP"] - 0.7071) < 0.001, True)
def test_unet_scse_resnet18_carvana(self):
    model = get_model_by_name(
        model_name="unet_scse_resnet18",
        dataset_name="carvana",
        pretrained=True,
        progress=False,
    )
    test_loader = get_data_splits_by_name(
        data_root="/neutrino/datasets/carvana",
        dataset_name="carvana",
        model_name="unet",
        num_workers=1,
    )["test"]
    eval_fn = get_eval_function("unet_scse_resnet18", "carvana")
    acc = eval_fn(model, test_loader, net="unet_scse_resnet18")
    miou = acc["miou"]
    print(miou)
    self.assertEqual(abs(miou - 0.989) < 0.001, True)
def test_unet_carvana(self):
    model = get_model_by_name(
        model_name="unet",
        dataset_name="carvana",
        pretrained=True,
        progress=False,
    )
    test_loader = get_data_splits_by_name(
        data_root="/neutrino/datasets/carvana",
        dataset_name="carvana",
        model_name="unet",
        num_workers=1,
    )["test"]
    eval_fn = get_eval_function("unet", "carvana")
    acc = eval_fn(model, test_loader, net="unet")
    dc = acc["dice_coeff"]
    print(dc)
    self.assertEqual(abs(dc - 0.983) < 0.001, True)
def test_fasterrcnn_resnet50_fpn_coco(self):
    model = get_model_by_name(
        model_name="fasterrcnn_resnet50_fpn",
        dataset_name="coco_80",
        pretrained=True,
        progress=False,
    )
    test_loader = get_data_splits_by_name(
        data_root="/neutrino/datasets/coco2017/",
        dataset_name="coco",
        model_name="fasterrcnn_resnet50_fpn",
        batch_size=32,
    )["test"]
    cocoGt = COCO("/neutrino/datasets/coco2017/annotations/instances_val2017.json")
    eval_fn = get_eval_function("fasterrcnn_resnet50_fpn", "coco_80")
    APs = eval_fn(model, test_loader, gt=cocoGt)
    self.assertEqual(abs(APs["mAP"] - 0.369) < 0.001, True)
def test_deeplab_mobilenet_voc_20(self):
    model = get_model_by_name(
        model_name="deeplab_mobilenet",
        dataset_name="voc_20",
        pretrained=True,
        progress=False,
    )
    test_loader = get_data_splits_by_name(
        data_root="/neutrino/datasets",
        sbd_root=None,
        dataset_name="voc",
        model_name="deeplab_mobilenet",
        num_workers=2,
        backbone="vgg",
    )["test"]
    eval_fn = get_eval_function("deeplab_mobilenet", "voc_20")
    acc = eval_fn(model, test_loader, net="deeplab")
    miou = acc["miou"]
    print(miou)
    self.assertEqual(abs(miou - 0.571) < 0.001, True)
def test_fcn32_voc_20(self):
    model = get_model_by_name(
        model_name="fcn32",
        dataset_name="voc_20",
        pretrained=True,
        progress=False,
    )
    test_loader = get_data_splits_by_name(
        data_root="/neutrino/datasets",
        dataset_name="voc",
        model_name="fcn32",
        num_workers=1,
        batch_size=1,
        backbone="vgg",
    )["test"]
    eval_fn = get_eval_function("fcn32", "voc_20")
    acc = eval_fn(model, test_loader, net="fcn32")
    miou = acc["miou"]
    print(miou)
    self.assertEqual(abs(miou - 0.713) < 0.001, True)
def main():
    # Training settings
    parser = argparse.ArgumentParser(description="PyTorch training Example")
    parser.add_argument(
        "--batch-size",
        type=int,
        default=64,
        metavar="N",
        help="input batch size for training (default: 64)",
    )
    parser.add_argument("--dataset", metavar="DATASET", default="cifar100",
                        help="dataset to use")
    parser.add_argument(
        "-j",
        "--workers",
        type=int,
        metavar="N",
        default=4,
        help="number of data loading workers",
    )
    parser.add_argument("-r", "--data_root", metavar="PATH", default="",
                        help="dataset data root path")
    parser.add_argument(
        "--test-batch-size",
        type=int,
        default=1000,
        metavar="N",
        help="input batch size for testing (default: 1000)",
    )
    parser.add_argument(
        "--epochs",
        type=int,
        default=14,
        metavar="N",
        help="number of epochs to train (default: 14)",
    )
    parser.add_argument(
        "--lr",
        type=float,
        default=0.1,
        metavar="LR",
        help="learning rate (default: 0.1)",
    )
    parser.add_argument(
        "--gamma",
        type=float,
        default=0.7,
        metavar="M",
        help="Learning rate step gamma (default: 0.7)",
    )
    parser.add_argument(
        "--log-interval",
        type=int,
        default=100,
        metavar="N",
        help="how many batches to wait before logging training status",
    )
    parser.add_argument("-a", "--arch", metavar="ARCH", default="vgg19",
                        help="model architecture")
    args = parser.parse_args()

    device = torch.device("cuda")
    data_splits = get_data_splits_by_name(
        dataset_name=args.dataset,
        data_root=args.data_root,
        batch_size=args.batch_size,
        num_torch_workers=args.workers,
    )
    model = get_model_by_name(
        model_name=args.arch,
        dataset_name=args.dataset,
        pretrained=True,
        progress=True,
        device=device,
    )
    model.to(device)

    optimizer = optim.SGD(model.parameters(), lr=args.lr)
    criterion = CrossEntropyLoss()
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, data_splits["train"], optimizer, criterion, epoch)
        test(model, device, data_splits["test"])
        scheduler.step()

    torch.save(model.state_dict(), "{}_checkpoint.pt".format(args.arch))
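# Hypothetical command-line usage of the training entry point above; the
# script filename, dataset path, and argument values are illustrative
# assumptions, not taken from the repo:
#
#   python train_classifier.py --dataset cifar100 --data_root ./data \
#       -a resnet18 --batch-size 128 --epochs 20 --lr 0.1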