def run():
    """Run CenterNet detection in camera, image, or video mode.

    Parses CLI args, builds the model, loads trained weights, and
    dispatches to ``detect`` according to ``args.mode``.
    """
    args = parse_args()

    # Select the compute device requested on the command line.
    if args.cuda:
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # Square input resolution.
    input_size = [args.input_size, args.input_size]

    # Build the network (COCO-style 80 classes).
    if args.version == 'centernet':
        from models.centernet import CenterNet
        net = CenterNet(device,
                        input_size=input_size,
                        num_classes=80,
                        conf_thresh=args.conf_thresh,
                        nms_thresh=args.nms_thresh,
                        use_nms=args.use_nms)

    # BUG FIX: map_location was hard-coded to 'cuda', which raises on
    # CPU-only machines even though the CPU branch above is supported.
    # Map the checkpoint onto the selected device instead (consistent
    # with the other loaders in this file).
    net.load_state_dict(torch.load(args.trained_model, map_location=device))
    net.to(device).eval()
    print('Finished loading model!')

    # Dispatch on run mode; unknown modes fall through silently.
    if args.mode == 'camera':
        detect(net, device, BaseTransform(net.input_size),
               thresh=args.visual_threshold, mode=args.mode)
    elif args.mode == 'image':
        detect(net, device, BaseTransform(net.input_size),
               thresh=args.visual_threshold, mode=args.mode,
               path_to_img=args.path_to_img)
    elif args.mode == 'video':
        detect(net, device, BaseTransform(net.input_size),
               thresh=args.visual_threshold, mode=args.mode,
               path_to_vid=args.path_to_vid,
               path_to_save=args.path_to_saveVid)
def test():
    """Evaluate a trained CenterNet model on the VOC2007 test split.

    Relies on the module-level ``args`` namespace already being parsed.
    """
    # Choose the device; enable cuDNN autotuning only on GPU.
    if args.cuda:
        print('use cuda')
        cudnn.benchmark = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # Dataset: VOC2007 test images with annotation transform, no image transform.
    num_classes = len(VOC_CLASSES)
    dataset = VOCDetection(args.voc_root,
                           [('2007', 'test')],
                           None,
                           VOCAnnotationTransform())
    cfg = config.voc_cfg

    # Build the requested model variant.
    if args.version == 'centernet':
        from models.centernet import CenterNet
        model = CenterNet(device,
                          input_size=cfg['min_dim'],
                          num_classes=num_classes)

    # Restore trained weights onto the selected device, then freeze for eval.
    state = torch.load(args.trained_model, map_location=device)
    model.load_state_dict(state)
    model.to(device).eval()
    print('Finished loading model!')

    # Run evaluation with ImageNet-style normalization (BGR ordering).
    transform = BaseTransform(model.input_size,
                              mean=(0.406, 0.456, 0.485),
                              std=(0.225, 0.224, 0.229))
    test_net(model, device, dataset, transform,
             thresh=args.visual_threshold)
def run():
    """Run CenterNet detection (camera/image/video) with a VOC or COCO setup."""
    args = parse_args()

    device = torch.device("cuda") if args.cuda else torch.device("cpu")

    # Resolve dataset style; anything else is unsupported and exits.
    if args.setup == 'VOC':
        print('use VOC style')
        cfg = config.voc_cfg
        num_classes = 20
    elif args.setup == 'COCO':
        print('use COCO style')
        cfg = config.coco_cfg
        num_classes = 80
    else:
        print('Only support VOC and COCO !!!')
        exit(0)

    # Build the model for the chosen configuration.
    if args.version == 'centernet':
        from models.centernet import CenterNet
        net = CenterNet(device,
                        input_size=cfg['min_dim'],
                        num_classes=num_classes,
                        use_nms=True)

    net.load_state_dict(torch.load(args.trained_model, map_location=device))
    net.to(device).eval()
    print('Finished loading model!')

    # Every mode shares the same normalization transform.
    transform = BaseTransform(net.input_size,
                              mean=(0.406, 0.456, 0.485),
                              std=(0.225, 0.224, 0.229))

    # Collect mode-specific keyword arguments, then dispatch once.
    mode_kwargs = {}
    if args.mode == 'image':
        mode_kwargs['path_to_img'] = args.path_to_img
    elif args.mode == 'video':
        mode_kwargs['path_to_vid'] = args.path_to_vid
        mode_kwargs['path_to_save'] = args.path_to_saveVid

    if args.mode in ('camera', 'image', 'video'):
        detect(net, device, transform,
               thresh=args.vis_thresh,
               mode=args.mode,
               setup=args.setup,
               **mode_kwargs)
def test():
    """Evaluate a trained CenterNet model on COCO val2017 or VOC2007 test.

    Relies on the module-level ``args`` namespace already being parsed.
    """
    # get device
    if args.cuda:
        print('use cuda')
        cudnn.benchmark = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # load dataset
    # NOTE(review): num_classes stays 80 even on the VOC branch — looks like
    # it should be 20 for VOC; confirm against the trained checkpoint.
    num_classes = 80
    if args.dataset == 'COCO':
        cfg = config.coco_cfg
        testset = COCODataset(
            data_dir=args.dataset_root,
            json_file='instances_val2017.json',
            name='val2017',
            img_size=cfg['min_dim'][0],
            debug=args.debug)
    elif args.dataset == 'VOC':
        cfg = config.voc_cfg
        testset = VOCDetection(VOC_ROOT,
                               [('2007', 'test')],
                               None,
                               VOCAnnotationTransform())

    # build net
    if args.version == 'centernet':
        from models.centernet import CenterNet
        net = CenterNet(device,
                        input_size=cfg['min_dim'],
                        num_classes=num_classes)

    # BUG FIX: map_location was hard-coded to 'cuda', which raises on
    # CPU-only machines even though the CPU branch above is supported.
    # Map the checkpoint onto the selected device instead (consistent
    # with the other loaders in this file).
    net.load_state_dict(torch.load(args.trained_model, map_location=device))
    net.to(device).eval()
    print('Finished loading model!')

    # evaluation
    test_net(net, device, testset,
             BaseTransform(net.input_size,
                           mean=(0.406, 0.456, 0.485),
                           std=(0.225, 0.224, 0.229)),
             thresh=args.visual_threshold)
device = torch.device("cuda") else: device = torch.device("cpu") # input size input_size = [args.input_size, args.input_size] # load net if args.version == 'centernet': from models.centernet import CenterNet net = CenterNet(device, input_size=input_size, num_classes=num_classes, backbone=args.backbone, use_nms=args.use_nms) # load net net.load_state_dict(torch.load(args.trained_model, map_location='cuda')) net.eval() print('Finished loading model!') net = net.to(device) # evaluation with torch.no_grad(): if args.dataset == 'voc': voc_test(net, device, input_size) elif args.dataset == 'coco-val': coco_test(net, device, input_size, test=False) elif args.dataset == 'coco-test': coco_test(net, device, input_size, test=True)
name='val2017', img_size=input_size[0]) class_colors = [(np.random.randint(255), np.random.randint(255), np.random.randint(255)) for _ in range(num_classes)] # load net if args.version == 'centernet': from models.centernet import CenterNet net = CenterNet(device, input_size=input_size, num_classes=num_classes, conf_thresh=args.conf_thresh, nms_thresh=args.nms_thresh, use_nms=args.use_nms) net.load_state_dict(torch.load(args.trained_model, map_location=device)) net.to(device).eval() print('Finished loading model!') # evaluation test(net=net, device=device, testset=dataset, transform=BaseTransform(input_size), thresh=args.visual_threshold, class_colors=class_colors, class_names=class_names, class_indexs=class_indexs, dataset=args.dataset)