Example #1
    for k in range(5):  # warmup
        model(dummy_data[k])

    max_iter = 300
    timer = Timer()
    with tqdm.tqdm(total=max_iter) as pbar:
        for idx, d in enumerate(f()):
            if idx == max_iter:
                break
            model(d)
            pbar.update()
    logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds()))


if __name__ == "__main__":
    parser = default_argument_parser()
    parser.add_argument("--task", choices=["train", "eval", "data"], required=True)
    args = parser.parse_args()
    assert not args.eval_only

    if args.task == "data":
        f = benchmark_data
    elif args.task == "train":
        """
        Note: training speed may not be representative.
        The training cost of an R-CNN model varies with the content of the data
        and the quality of the model.
        """
        f = benchmark_train
    elif args.task == "eval":
        f = benchmark_eval
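
The excerpt stops after selecting the benchmark function; the call that actually runs it is not shown. A minimal sketch of how the end of this `__main__` block could look, assuming each `benchmark_*` function takes the parsed `args` and that detectron2's `launch` helper is imported:

    # Sketch (assumption): run the selected benchmark; only the training
    # benchmark really benefits from a distributed launch.
    if args.task == "train":
        launch(
            f,
            args.num_gpus,
            num_machines=args.num_machines,
            machine_rank=args.machine_rank,
            dist_url=args.dist_url,
            args=(args,),
        )
    else:
        f(args)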
Example #2
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        res = MoDetTrainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            res.update(MoDetTrainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    trainer = MoDetTrainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks(
            [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
        )
    return trainer.train()


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
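
For reference, `default_argument_parser()` already defines the flags these scripts rely on: `--config-file`, `--resume`, `--eval-only`, `--num-gpus`, `--num-machines`, `--machine-rank`, `--dist-url`, plus trailing `opts` that override config values. A typical invocation can therefore be sketched as follows (paths and values are illustrative only):

args = default_argument_parser().parse_args([
    "--config-file", "configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml",
    "--num-gpus", "1",
    "--eval-only",
    "MODEL.WEIGHTS", "/path/to/model.pkl",  # trailing opts become config overrides
])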
Example #3
                parameter_count_table(model, max_depth=5))


def do_structure(cfg):
    model = build_model(cfg)
    logger.info("Model Structure:\n" + str(model))


if __name__ == "__main__":
    parser = default_argument_parser(epilog="""
Examples:

To show parameters of a model:
$ ./analyze_model.py --tasks parameter \\
    --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml

Flops and activations are data-dependent, therefore inputs and model weights
are needed to count them:

$ ./analyze_model.py --num-inputs 100 --tasks flop \\
    --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \\
    MODEL.WEIGHTS /path/to/model.pkl
""")
    parser.add_argument(
        "--tasks",
        choices=["flop", "activation", "parameter", "structure"],
        required=True,
        nargs="+",
    )
    parser.add_argument(
        "-n",
        "--num-inputs",
Example #4
def setup_arg_parser():
    """
    Sets up the argument parser for Python scripts.

    Returns:
        arg_parser (ArgumentParser): Argument parser updated with probabilistic detectron args.

    """
    arg_parser = default_argument_parser()

    arg_parser.add_argument("--dataset-dir",
                            type=str,
                            default="",
                            help="path to dataset directory")

    arg_parser.add_argument(
        "--random-seed",
        type=int,
        default=0,
        help="random seed to be used for all scientific computing libraries")

    # Inference arguments, will not be used during training.
    arg_parser.add_argument(
        "--inference-config",
        type=str,
        default="",
        help=
        "Inference parameter: Path to the inference config, which is different from training config. Check readme for more information."
    )

    arg_parser.add_argument(
        "--test-dataset",
        type=str,
        default="",
        help=
        "Inference parameter: Dataset used for testing. Can be one of the following: 'coco_2017_custom_val', 'openimages_val', 'openimages_ood_val' "
    )

    arg_parser.add_argument(
        "--image-corruption-level",
        type=int,
        default=0,
        help=
        "Inference parameter: Image corruption level between 0-5. Default is no corruption, level 0."
    )

    # Evaluation arguments, will not be used during training.
    arg_parser.add_argument(
        "--iou-min",
        type=float,
        default=0.1,
        help=
        "Evaluation parameter: IOU threshold bellow which a detection is considered a false positive."
    )

    arg_parser.add_argument(
        "--iou-correct",
        type=float,
        default=0.5,
        help=
        "Evaluation parameter: IOU threshold above which a detection is considered a true positive."
    )

    arg_parser.add_argument(
        "--min-allowed-score",
        type=float,
        default=0.0,
        help=
        "Evaluation parameter:Minimum classification score for which a detection is considered in the evaluation."
    )

    return arg_parser
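
A small usage sketch for this helper (the flag values are illustrative only):

if __name__ == "__main__":
    parser = setup_arg_parser()
    args = parser.parse_args(["--test-dataset", "coco_2017_custom_val", "--iou-min", "0.2"])
    # argparse exposes the dashed flags as underscore attribute names.
    print(args.test_dataset, args.iou_min, args.random_seed)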
Example #5
else:
    DETECTRON2_DIR = '/home/jupyter/detectron2'
CONFIG_FILE = os.path.join(
    DETECTRON2_DIR,
    'configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')

if __name__ == "__main__":
    if '--config-file' in sys.argv:
        CLI_ARGS = sys.argv[1:]
    else:
        CLI_ARGS = [
            '--config-file', f'{CONFIG_FILE}', '--num-gpus', f'{N_GPU}',
            'MODEL.WEIGHTS', './weights/model_final_f10217.pkl',
            'MODEL.DEVICE', 'cpu'
        ]
    ARGS = default_argument_parser().parse_args(CLI_ARGS)

    args = ARGS
    print('_' * 60 + f'\nmain <- {args}')

    if 'setup(args)':  # a non-empty string is always truthy; used here just to group the setup code
        cfg = get_cfg()
        cfg.merge_from_file(args.config_file)
        cfg.merge_from_list(args.opts)
        cfg.freeze()
        default_setup(
            cfg, args
        )  # if you don't like any of the default setup, write your own setup code
        global CONFIG
        CONFIG = cfg


def main(parsed_args):
    # Assumption: this 'main' (the target of launch() at the bottom of the file)
    # reuses the module-level CONFIG built above; its original first lines are
    # not shown in the excerpt.
    cfg = CONFIG
    if parsed_args.eval_only:

        # Load the model
        model: nn.Module = Trainer.build_model(cfg)

        # Load the latest weights
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=parsed_args.resume)
        res: dict = Trainer.test(cfg, model)
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    # Training
    trainer: Trainer = Trainer(cfg)
    trainer.resume_or_load(resume=parsed_args.resume)
    return trainer.train()


if __name__ == "__main__":
    parser: ArgumentParser = default_argument_parser()

    parsed_args: Namespace = parser.parse_args()

    launch(main,
           parsed_args.num_gpus,
           num_machines=parsed_args.num_machines,
           machine_rank=parsed_args.machine_rank,
           dist_url=parsed_args.dist_url,
           args=(parsed_args, ))
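
Unlike Example #2, this script constructs a fallback command line when none is supplied, so it can be run directly (for example from a notebook) without any flags. The same idea in isolation, as a minimal sketch with made-up paths:

import sys

from detectron2.engine import default_argument_parser

FALLBACK_CLI = ['--config-file', 'configs/example.yaml', '--num-gpus', '1']  # hypothetical values

# Use the real command line if a config was passed, otherwise fall back to defaults.
cli_args = sys.argv[1:] if '--config-file' in sys.argv else FALLBACK_CLI
args = default_argument_parser().parse_args(cli_args)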
Example #7
def main_worker(args):
    global best_acc1

    # create model
    argss = default_argument_parser().parse_args()
    argss.config_file = 'mv_to_new_home/configs/RearrNet_50.yaml'
    cfg = setup(argss)
    # model = build_gtnet_backbone_pretrain(cfg, 3, 1000)
    # model = build_rearrnet_backbone_pretrain(cfg, 3, 100)
    # model = build_defenet_backbone_pretrain(cfg, 3, 100)
    # model = build_oidnet_backbone_pretrain(cfg, 3, 100)
    # model = build_rpnet_backbone_pretrain(cfg, 3, 100)
    # model = build_realnet_backbone_pretrain(cfg, 3, 100)
    model = build_oinet_backbone_pretrain(cfg, 3, 100)
    # model = build_deformnet_backbone_pretrain(cfg, 3, 100)
    model = torch.nn.DataParallel(model.cuda())

    # args.evaluate = True
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            best_acc1 = best_acc1.to()  # note: .to() without a device argument leaves the tensor where it is
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # Data loading code
    data_path = '/ws/data/imagenet'
    traindir = os.path.join(data_path, 'train')
    valdir = os.path.join(data_path, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    input_size = 128
    cifar_data_path = '/ws/data/open_datasets/classification/cifar100'
    train_dataset = datasets.CIFAR100(
        cifar_data_path,
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.RandomHorizontalFlip(),
            # transforms.RandomVerticalFlip(),
            transforms.RandomRotation(30),
            transforms.Resize((int(input_size * 1.4), int(input_size * 1.4))),
            transforms.CenterCrop((input_size, input_size)),
            transforms.ToTensor(),
            transforms.RandomErasing(),
            transforms.Normalize((0.5, ), (0.5, ))
        ]))
    val_dataset = datasets.CIFAR100(
        cifar_data_path,
        train=False,
        download=True,
        transform=transforms.Compose([
            # transforms.RandomRotation(90),
            # transforms.RandomHorizontalFlip(),
            transforms.Resize((int(input_size * 1.4), int(input_size * 1.4))),
            transforms.CenterCrop((input_size, input_size)),
            transforms.Resize((input_size, input_size)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, ), (0.5, )),
        ]))
    # train_dataset = datasets.ImageFolder(
    #     traindir,
    #     transforms.Compose([
    #         transforms.RandomResizedCrop(size=299, scale=(0.08, 1), ratio=(0.75, 4/3)),
    #         transforms.RandomHorizontalFlip(p=0.5),
    #         transforms.RandomVerticalFlip(p=0.5),
    #         transforms.ColorJitter(brightness=[0.5, 1.5], contrast=[0.5, 1.5], saturation=[0.5, 1.5], hue=[-0.1, 0.1]),
    #         transforms.RandomRotation(degrees=(-45, 45)),
    #         transforms.ToTensor(),
    #         normalize,
    #     ]))

    # val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
    #         transforms.Resize(324),
    #         transforms.CenterCrop(299),
    #         transforms.ToTensor(),
    #         normalize,
    #     ]))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=1)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=1)

    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            filename='/ws/data/deformed/rp_all_ckpt.pt')
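
The excerpt relies on `train`, `validate`, `adjust_learning_rate`, and `save_checkpoint` defined elsewhere in the script. For orientation, `save_checkpoint` in this style of training loop is usually a thin wrapper along the following lines (a sketch; the actual helper in this repository is not shown):

import shutil

import torch


def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    # Persist the latest state; keep a copy of the best-performing checkpoint so far.
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, filename + '.best')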