Example #1
def create_datasets_and_loaders(args, model_config):
    input_config = resolve_input_config(args, model_config=model_config)

    dataset_train, dataset_eval = create_dataset(args.dataset, args.root)

    # setup labeler in loader/collate_fn if not enabled in the model bench
    labeler = None
    if not args.bench_labeler:
        labeler = AnchorLabeler(Anchors.from_config(model_config),
                                model_config.num_classes,
                                match_threshold=0.5)

    loader_train = create_loader(
        dataset_train,
        input_size=input_config['input_size'],
        batch_size=args.batch_size,
        is_training=True,
        use_prefetcher=args.prefetcher,
        re_prob=args.reprob,
        re_mode=args.remode,
        re_count=args.recount,
        # color_jitter=args.color_jitter,
        # auto_augment=args.aa,
        interpolation=args.train_interpolation
        or input_config['interpolation'],
        fill_color=input_config['fill_color'],
        mean=input_config['mean'],
        std=input_config['std'],
        num_workers=args.workers,
        distributed=args.distributed,
        pin_mem=args.pin_mem,
        anchor_labeler=labeler,
    )

    if args.val_skip > 1:
        dataset_eval = SkipSubset(dataset_eval, args.val_skip)
    loader_eval = create_loader(
        dataset_eval,
        input_size=input_config['input_size'],
        batch_size=args.batch_size,
        is_training=False,
        use_prefetcher=args.prefetcher,
        interpolation=input_config['interpolation'],
        fill_color=input_config['fill_color'],
        mean=input_config['mean'],
        std=input_config['std'],
        num_workers=args.workers,
        distributed=args.distributed,
        pin_mem=args.pin_mem,
        anchor_labeler=labeler,
    )

    evaluator = create_evaluator(args.dataset,
                                 loader_eval.dataset,
                                 distributed=args.distributed,
                                 pred_yxyx=False)

    return loader_train, loader_eval, evaluator
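
The helper above only reads a handful of attributes from args. Below is a minimal sketch of calling it with an argparse-style namespace; the field names follow the snippet, while the values are illustrative assumptions and model_config is assumed to come from the created model bench (as in the validate examples further down).

from types import SimpleNamespace

# Illustrative defaults only; in the real training script these come from argparse.
args = SimpleNamespace(
    dataset='coco', root='/data/coco',       # forwarded to create_dataset
    bench_labeler=False,                     # label anchors in the loader, not the model bench
    batch_size=16, workers=4, pin_mem=True,
    prefetcher=True, distributed=False,
    reprob=0.0, remode='pixel', recount=1,   # random-erasing options
    train_interpolation='',                  # falls back to input_config['interpolation']
    val_skip=0,                              # keep the full eval set
)

loader_train, loader_eval, evaluator = create_datasets_and_loaders(args, model_config)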
Example #2
def validate(args):
    setup_default_logging()

    if args.amp:
        if has_apex:
            args.apex_amp = True
        elif has_native_amp:
            args.native_amp = True
    assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set."
    args.pretrained = args.pretrained or not args.checkpoint  # might as well try to validate something
    args.prefetcher = not args.no_prefetcher

    # create model
    with set_layer_config(scriptable=args.torchscript):
        bench = create_model(
            args.model,
            bench_task='predict',
            num_classes=args.num_classes,
            pretrained=args.pretrained,
            redundant_bias=args.redundant_bias,
            soft_nms=args.soft_nms,
            checkpoint_path=args.checkpoint,
            checkpoint_ema=args.use_ema,
        )
    model_config = bench.config

    param_count = sum([m.numel() for m in bench.parameters()])
    print('Model %s created, param count: %d' % (args.model, param_count))

    bench = bench.cuda()

    amp_autocast = suppress
    if args.apex_amp:
        bench = amp.initialize(bench, opt_level='O1')
        print('Using NVIDIA APEX AMP. Validating in mixed precision.')
    elif args.native_amp:
        amp_autocast = torch.cuda.amp.autocast
        print('Using native Torch AMP. Validating in mixed precision.')
    else:
        print('AMP not enabled. Validating in float32.')

    if args.num_gpu > 1:
        bench = torch.nn.DataParallel(bench,
                                      device_ids=list(range(args.num_gpu)))

    dataset = create_dataset(args.dataset, args.root, args.split)
    input_config = resolve_input_config(args, model_config)
    loader = create_loader(dataset,
                           input_size=input_config['input_size'],
                           batch_size=args.batch_size,
                           use_prefetcher=args.prefetcher,
                           interpolation=input_config['interpolation'],
                           fill_color=input_config['fill_color'],
                           mean=input_config['mean'],
                           std=input_config['std'],
                           num_workers=args.workers,
                           pin_mem=args.pin_mem)

    evaluator = create_evaluator(args.dataset, dataset, pred_yxyx=False)
    bench.eval()
    batch_time = AverageMeter()
    end = time.time()
    last_idx = len(loader) - 1
    with torch.no_grad():
        for i, (input, target) in enumerate(loader):
            with amp_autocast():
                output = bench(input, img_info=target)
            evaluator.add_predictions(output, target)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.log_freq == 0 or i == last_idx:
                print(
                    'Test: [{0:>4d}/{1}]  '
                    'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s)  '
                    .format(i,
                            len(loader),
                            batch_time=batch_time,
                            rate_avg=input.size(0) / batch_time.avg))

    mean_ap = 0.
    if dataset.parser.has_labels:
        mean_ap = evaluator.evaluate()
    else:
        evaluator.save(args.results)

    return mean_ap
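
The amp_autocast default above is contextlib.suppress, which, when called with no arguments, acts as a do-nothing context manager, so the same with-statement works whether or not native AMP was selected. A minimal sketch of that pattern, with use_native_amp standing in for args.native_amp:

from contextlib import suppress

import torch

use_native_amp = False  # stand-in for args.native_amp in the snippet above
amp_autocast = torch.cuda.amp.autocast if use_native_amp else suppress

with amp_autocast():
    # the forward pass goes here; it runs in mixed precision only when autocast was selected
    pass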
Example #3
def create_datasets_and_loaders(
    args,
    model_config,
    transform_train_fn=None,
    transform_eval_fn=None,
    collate_fn=None,
):
    """ Setup datasets, transforms, loaders, evaluator.

    Args:
        args: Command line args / config for training
        model_config: Model specific configuration dict / struct
        transform_train_fn: Override default image + annotation transforms (see note in loaders.py)
        transform_eval_fn: Override default image + annotation transforms (see note in loaders.py)
        collate_fn: Override default fast collate function

    Returns:
        Train loader, validation loader, evaluator
    """
    input_config = resolve_input_config(args, model_config=model_config)

    dataset_train, dataset_eval = create_dataset(args.dataset, args.root)

    # setup labeler in loader/collate_fn if not enabled in the model bench
    labeler = None
    if not args.bench_labeler:
        labeler = AnchorLabeler(Anchors.from_config(model_config),
                                model_config.num_classes,
                                match_threshold=0.5)

    loader_train = create_loader(
        dataset_train,
        input_size=input_config['input_size'],
        batch_size=args.batch_size,
        is_training=True,
        use_prefetcher=args.prefetcher,
        re_prob=args.reprob,
        re_mode=args.remode,
        re_count=args.recount,
        # color_jitter=args.color_jitter,
        # auto_augment=args.aa,
        interpolation=args.train_interpolation
        or input_config['interpolation'],
        fill_color=input_config['fill_color'],
        mean=input_config['mean'],
        std=input_config['std'],
        num_workers=args.workers,
        distributed=args.distributed,
        pin_mem=args.pin_mem,
        anchor_labeler=labeler,
        transform_fn=transform_train_fn,
        collate_fn=collate_fn,
    )

    if args.val_skip > 1:
        dataset_eval = SkipSubset(dataset_eval, args.val_skip)
    loader_eval = create_loader(
        dataset_eval,
        input_size=input_config['input_size'],
        batch_size=args.batch_size,
        is_training=False,
        use_prefetcher=args.prefetcher,
        interpolation=input_config['interpolation'],
        fill_color=input_config['fill_color'],
        mean=input_config['mean'],
        std=input_config['std'],
        num_workers=args.workers,
        distributed=args.distributed,
        pin_mem=args.pin_mem,
        anchor_labeler=labeler,
        transform_fn=transform_eval_fn,
        collate_fn=collate_fn,
    )

    evaluator = create_evaluator(args.dataset,
                                 loader_eval.dataset,
                                 distributed=args.distributed,
                                 pred_yxyx=False)

    return loader_train, loader_eval, evaluator
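
This variant additionally accepts transform and collate overrides. A sketch of wiring in a custom eval-side transform follows; the (img, annotation) signature is an assumption here (the exact contract is described in the note in loaders.py), and my_eval_transform is a hypothetical callable.

def my_eval_transform(img, annotation):
    # custom image + annotation preprocessing would go here
    return img, annotation

loader_train, loader_eval, evaluator = create_datasets_and_loaders(
    args, model_config,
    transform_eval_fn=my_eval_transform,   # override only the eval-side transform
)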
Example #4
def validate(args):
    setup_default_logging()

    if args.amp:
        if has_apex:
            args.apex_amp = True
        elif has_native_amp:
            args.native_amp = True
    assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set."
    args.pretrained = args.pretrained or not args.checkpoint  # might as well try to validate something
    args.prefetcher = not args.no_prefetcher

    # create model
    with set_layer_config(scriptable=args.torchscript):
        bench = create_model(
            args.model,
            bench_task='predict',
            num_classes=args.num_classes,
            pretrained=args.pretrained,
            redundant_bias=args.redundant_bias,
            soft_nms=args.soft_nms,
            checkpoint_path=args.checkpoint,
            checkpoint_ema=args.use_ema,
        )
    model_config = bench.config

    param_count = sum([m.numel() for m in bench.parameters()])
    print('Model %s created, param count: %d' % (args.model, param_count))

    bench = bench.cuda()

    amp_autocast = suppress
    if args.apex_amp:
        bench = amp.initialize(bench, opt_level='O1')
        print('Using NVIDIA APEX AMP. Validating in mixed precision.')
    elif args.native_amp:
        amp_autocast = torch.cuda.amp.autocast
        print('Using native Torch AMP. Validating in mixed precision.')
    else:
        print('AMP not enabled. Validating in float32.')

    if args.num_gpu > 1:
        bench = torch.nn.DataParallel(bench,
                                      device_ids=list(range(args.num_gpu)))

    dataset = create_dataset(args.dataset, args.root, args.split)
    input_config = resolve_input_config(args, model_config)
    loader = create_loader(dataset,
                           input_size=input_config['input_size'],
                           batch_size=args.batch_size,
                           use_prefetcher=args.prefetcher,
                           interpolation=input_config['interpolation'],
                           fill_color=input_config['fill_color'],
                           mean=input_config['mean'],
                           std=input_config['std'],
                           num_workers=args.workers,
                           pin_mem=args.pin_mem)

    evaluator = create_evaluator(args.dataset, dataset, pred_yxyx=False)
    bench.eval()
    batch_time = AverageMeter()
    end = time.time()
    last_idx = len(loader) - 1
    imgs = []
    with torch.no_grad():
        for i, (input, target) in enumerate(loader):
            for b in range(input.shape[0]):
                imgs.append(input[b].cpu().numpy())
                # targets.append(target[b].cpu().numpy())

            with amp_autocast():
                output = bench(input, img_info=target)
            evaluator.add_predictions(output, target)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.log_freq == 0 or i == last_idx:
                print(
                    'Test: [{0:>4d}/{1}]  '
                    'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s)  '
                    .format(i,
                            len(loader),
                            batch_time=batch_time,
                            rate_avg=input.size(0) / batch_time.avg))

    mean_ap = 0.
    if dataset.parser.has_labels:
        preds = [p[:2, :] for p in evaluator.predictions]
        anns = evaluator.coco_api.imgToAnns
        targets = [
            np.asarray((anns[k][0]['bbox'], anns[k][1]['bbox']))
            for k in range(len(imgs))
        ]
        mean_ap = evaluator.evaluate()
        if not os.path.exists(args.out_dir):
            os.mkdir(args.out_dir)
        for i, img in enumerate(imgs):
            img_m = np.mean(img, axis=0)  # average over channels -> one grayscale plane
            for c in range(3):
                img[c] = img_m  # broadcast the grayscale plane back to all three channels
            img_ = img.transpose(1, 2, 0)  # CHW -> HWC for OpenCV
            m = img_.min()
            M = img_.max()
            img_ = ((img_ - m) / (M - m) * 255).astype('uint8').copy()  # min-max scale to 0-255
            img_ = draw_bbox(img_, preds[i], targets[i])
            cv2.imwrite(os.path.join(args.out_dir, '%d.jpg' % i), img_)
    else:
        evaluator.save(args.results)

    return mean_ap
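
The draw_bbox helper used above is defined elsewhere in this project. Below is a minimal sketch of what such a helper could look like with OpenCV, under the assumption that predictions are rows of [x_min, y_min, x_max, y_max, score, class] and ground-truth boxes use the COCO [x, y, width, height] convention; the name draw_bbox_sketch is illustrative, not the project's actual function.

import cv2


def draw_bbox_sketch(img, preds, gt_boxes):
    for det in preds:
        x1, y1, x2, y2 = map(int, det[:4])
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)      # predictions in green
    for box in gt_boxes:
        x, y, w, h = map(int, box)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)  # ground truth in red
    return img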
Example #5
def validate_det(args):
    setup_default_logging()

    # might as well try to validate something
    args.pretrained = args.checkpoint
    args.prefetcher = not args.no_prefetcher
    # create model

    bench = create_model(
        args.model,
        bench_task='predict',
        num_classes=args.num_classes,
        pretrained=args.pretrained,
        redundant_bias=args.redundant_bias,
        checkpoint_path=args.checkpoint,
        checkpoint_ema=args.use_ema,
    )
    model_config = bench.config
    input_size = bench.config.image_size

    param_count = sum([m.numel() for m in bench.parameters()])
    #print('Model %s created, param count: %d' % (args.model, param_count))

    bench = bench.cuda()
    '''if has_amp:
        print('Using AMP mixed precision.')
        bench = amp.initialize(bench, opt_level='O1')
    else:
        print('AMP not installed, running network in FP32.')'''

    if args.num_gpu > 1:
        bench = torch.nn.DataParallel(bench,
                                      device_ids=list(range(args.num_gpu)))
    '''if 'test' in args.anno:
        annotation_path = os.path.join(args.data, 'annotations', f'image_info_{args.anno}.json')
        image_dir = 'test2017'''
    '''annotation_path = os.path.join(args.data, 'Highway_drop_inlets.v1-vdot_coco.coco/coco_and_dropinlets_annotations/test_annotations', f'{args.anno}.json')
    image_dir = os.path.join(args.data, 'Highway_drop_inlets.v1-vdot_coco.coco/coco_and_dropinlets/test/')
    dataset = CocoDetection(image_dir, annotation_path)'''

    annotation_path = os.path.join(args.data, f'{args.anno}.json')
    image_dir = os.path.join(args.data, 'others_mix_set')
    dataset = VdotTestDataset(image_dir, annotation_path)

    loader = create_loader(dataset,
                           input_size=input_size,
                           batch_size=args.batches_size,
                           use_prefetcher=args.prefetcher,
                           interpolation=args.interpolation,
                           fill_color=args.fill_color,
                           num_workers=args.workers,
                           pin_mem=args.pin_mem)

    img_ids = []
    results = []
    bench.eval()

    #example_input = torch.randn((1, 3, 512, 512), requires_grad=True)
    #bench(example_input.cuda())
    '''tracingModelInput  = torch.ones(1,3,512,512)
    torch.onnx._export(bench.model, tracingModelInput.cuda(), './effdet0_checkonly.onnx', opset_version=11, export_params=True)
    print('\nDone exporting ONNX model!')
    onnx_model='./effdet0_checkonly.onnx'
    onnx.checker.check_model(onnx_model)'''
    '''dummy_input = torch.randn(1, 3, 512, 512, device='cuda')
    input =["input"]
    output=["class_1","class_2","class_3","class_4", "class_5", "box_1", "box_2", "box_3","box_4","box_5"]
    #dynamic_axes = {"actual_input_1":{input:"batch_size"}, "output1":{output:"batch_size"}}
    torch.onnx.export(bench.model, dummy_input, "effdet0_upsamplenormal.onnx", verbose=True, opset_version=11,
                  input_names=input, output_names=output)'''
    #dynamic_axes=dynamic_axes)
    batch_time = AverageMeter()
    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(loader):
            output = bench(input, img_info=target)
            output = output.cpu()
            sample_ids = target['img_id'].cpu()
            for index, sample in enumerate(output):
                image_id = int(sample_ids[index])
                for det in sample:
                    score = float(det[4])
                    if score < 0.36:  # detections are sorted by descending score, so stop at the first one below the threshold (0.5 works well for 91 classes)
                        break
                    coco_det = dict(image_id=image_id,
                                    bbox=det[0:4].tolist(),
                                    score=score,
                                    category_id=int(det[5]))
                    img_ids.append(image_id)
                    results.append(coco_det)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.log_freq == 0:
                print(
                    'Test: [{0:>4d}/{1}]  '
                    'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s)  '
                    .format(
                        i,
                        len(loader),
                        batch_time=batch_time,
                        rate_avg=input.size(0) / batch_time.avg,
                    ))

    json.dump(results, open(args.results, 'w'), indent=4)
    '''if 'test' not in args.anno:
        coco_results = dataset.coco.loadRes(args.results)
        coco_eval = COCOeval(dataset.coco, coco_results, 'bbox')
        coco_eval.params.imgIds = img_ids  # score only ids we've used
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()'''

    return results
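
The commented-out block above sketches how the saved detections were meant to be scored. A standalone version with pycocotools follows, assuming annotation_path, args.results, and img_ids from the function above:

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

coco_gt = COCO(annotation_path)            # ground-truth annotations
coco_dt = coco_gt.loadRes(args.results)    # detections written by validate_det
coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
coco_eval.params.imgIds = img_ids          # score only the images that were run
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()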