Example no. 1
def init_detector(config, checkpoint=None, device='cuda:0'):
    """Initialize a detectors from config file.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.

    Returns:
        nn.Module: The constructed detectors.
    """
    if isinstance(config, str):
        config = Config.fromfile(config)
    elif not isinstance(config, Config):
        raise TypeError('config must be a filename or Config object, '
                        'but got {}'.format(type(config)))
    config.model.pretrained = None
    # attempt_load restores the serialized model from the checkpoint file,
    # so a valid checkpoint path is expected here despite the None default
    model = attempt_load(checkpoint, map_location=device)
    model.cfg = config  # save the config in the model for convenience
    return model
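A minimal usage sketch of init_detector; the config and checkpoint paths below are placeholders, not files guaranteed to exist in this repository:

# Hypothetical paths shown for illustration; substitute your own files.
cfg_file = 'cfg/yolov5_hand_gpu.py'
ckpt_file = 'work_dirs/yolov5-hand/latest.pth'
detector = init_detector(cfg_file, checkpoint=ckpt_file, device='cuda:0')
print(type(detector))  # the constructed nn.Module, with detector.cfg attached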
Example no. 2
    for bbox in bboxes:
        # bbox comes in as normalized (cx, cy, w, h); convert to corner
        # coordinates, then scale by the image width/height to get pixels
        cx, cy, _w, _h = bbox
        x, y = cx - _w / 2, cy - _h / 2
        x1, y1, x2, y2 = x, y, x + _w, y + _h
        x1, y1, x2, y2 = int(x1 * w), int(y1 * h), int(x2 * w), int(y2 * h)
        img = cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 1)
    return img
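For a quick sanity check of the conversion above, here is a standalone sketch of the same normalized (cx, cy, w, h) to pixel (x1, y1, x2, y2) mapping with made-up numbers; the helper name is ours, not part of the codebase:

def cxcywh_norm_to_xyxy_pixels(box, img_w, img_h):
    # box holds normalized center coordinates and size
    cx, cy, bw, bh = box
    x1, y1 = (cx - bw / 2) * img_w, (cy - bh / 2) * img_h
    x2, y2 = (cx + bw / 2) * img_w, (cy + bh / 2) * img_h
    return int(x1), int(y1), int(x2), int(y2)

print(cxcywh_norm_to_xyxy_pixels((0.5, 0.5, 0.2, 0.4), 640, 480))  # (256, 144, 384, 336)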

file = '/disk2/project/pytorch-YOLOv4/cfg/dataset_test.py'

cfg = Config.fromfile(file)

dataset = build_from_dict(cfg.data.train, DATASET)

dataloader = build_dataloader(dataset, data=cfg.data)

for i, data_batch in enumerate(dataloader):
    if i > 30:
        break
    for idx, data in enumerate(data_batch['img']):
        gt = data_batch['gt_bboxes'][idx]
        gt_xywh = xyxy2xywh(gt)  # x, y, w, h
        # count ground-truth rows that are not all-zero padding
        n_gt = (gt.sum(dim=-1) > 0).sum(dim=-1)
        n = int(n_gt)
        if n == 0:
            continue
Example no. 3
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.device = args.device  # may be None when not given on the CLI
    device = select_device(cfg.device)
    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8

    # create work_dir
    file_utils.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    logger = Logging.getLogger()

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([('{}: {}'.format(k, v))
                          for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['batch_size'] = cfg.data.batch_size
    meta['subdivisions'] = cfg.data.subdivisions
    meta['multi_scale'] = args.multi_scale
    # log some basic info
    logger.info('Config:\n{}'.format(cfg.text))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}, deterministic: {}'.format(
            args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    model = build_from_dict(cfg.model, DETECTORS)

    # move the model to the selected device; .to() also covers the CPU case
    model = model.to(device)
    if device.type != 'cpu' and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    model.device = device

    datasets = [build_from_dict(cfg.data.train, DATASET)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_from_dict(val_dataset, DATASET))
    if cfg.checkpoint_config is not None:
        # save the config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(config=cfg.text,
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    train_detector(model,
                   datasets,
                   cfg,
                   validate=args.validate,
                   timestamp=timestamp,
                   meta=meta)
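For reference, the autoscale_lr branch above applies the linear scaling rule (the learning rate grows with the number of GPUs, with 8 GPUs as the reference setup). A quick numeric sketch with made-up values:

# Linear scaling rule with hypothetical numbers (not taken from any config here).
base_lr = 0.01      # stands in for cfg.optimizer['lr']
num_gpus = 4        # stands in for len(cfg.gpu_ids)
scaled_lr = base_lr * num_gpus / 8
print(scaled_lr)    # 0.005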
Example no. 4
                        help='augmented inference')
    parser.add_argument('--merge', action='store_true', help='use Merge NMS')
    parser.add_argument('--verbose',
                        action='store_true',
                        help='report mAP by class')
    parser.add_argument('--half',
                        action='store_true',
                        help='fp16 half precision')
    opt = parser.parse_args()

    print(opt)

    # config = '/disk2/project/mmdetection/mount/pytorch-YOLOv4/cfg/yolov5_coco_gpu.py'
    # checkpoint = '/disk2/project/pytorch-YOLOv4/work_dirs/yolov5-l_epoch_24.pth'

    cfg = Config.fromfile(opt.config)
    cfg.data.val.train = False
    val_dataset = build_from_dict(cfg.data.val, DATASET)
    val_dataloader = build_dataloader(val_dataset,
                                      data=cfg.data,
                                      shuffle=False)
    device = select_device(opt.device)
    model = init_detector(opt.config, checkpoint=opt.checkpoint, device=device)
    result = single_gpu_test(model,
                             val_dataloader,
                             half=opt.half,
                             conf_thres=opt.conf_thres,
                             iou_thres=opt.iou_thres,
                             merge=opt.merge,
                             save_json=opt.save_json,
Example no. 5
            ┃      ┻      ┃
            ┗━┓      ┏━┛
                ┃      ┗━━━-┓
                ┃Beast god bless┣┓
                ┃ Never BUG ! ┏┛
                ┗┓┓┏━┳┓┏┛
                  ┃┫┫  ┃┫┫
                  ┗┻┛  ┗┻┛
=================================================='''
from yolodet.apis.inference import init_detector
from yolodet.apis.test import single_gpu_test
from yolodet.dataset.loader.build_dataloader import build_dataloader
from yolodet.utils.config import Config
from yolodet.utils.newInstance_utils import build_from_dict
from yolodet.utils.registry import DATASET
# config = '/disk2/project/pytorch-YOLOv4/cfg/yolov4_hand_gpu.py'
# config = '/disk2/project/pytorch-YOLOv4/cfg/yolov5_hand_gpu.py'
config = '/disk2/project/pytorch-YOLOv4/cfg/ppyolo_hand_gpu.py'
# checkpoint = '/disk2/project/pytorch-YOLOv4/work_dirs/yolov4-hand/latest.pth'
checkpoint = '/disk2/project/pytorch-YOLOv4/work_dirs/ppyolo_hand/latest.pth'
# checkpoint = '/disk2/project/pytorch-YOLOv4/work_dirs/test/latest.pth'
cfg = Config.fromfile(config)
cfg.data.val.train = False
val_dataset = build_from_dict(cfg.data.val, DATASET)
val_dataloader = build_dataloader(val_dataset, data=cfg.data, shuffle=False)

model = init_detector(config, checkpoint=checkpoint, device='cuda:0')

results = single_gpu_test(model, val_dataloader, show=False)
print(results)
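A minimal sketch for keeping the evaluation output around for later comparison; it only assumes that results is picklable and says nothing about its internal structure:

import pickle

# persist the raw output of single_gpu_test for later inspection
with open('/disk2/project/pytorch-YOLOv4/work_dirs/ppyolo_hand/eval_results.pkl', 'wb') as f:
    pickle.dump(results, f)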