Example #1
def main(args):
    if args.im_size in [300, 512]:
        from model.detection.ssd_config import get_config
        cfg = get_config(args.im_size)
    else:
        print_error_message('{} image size not supported'.format(args.im_size))
        exit(-1)

    if args.dataset in ['voc', 'pascal']:
        from data_loader.detection.voc import VOC_CLASS_LIST
        num_classes = len(VOC_CLASS_LIST)
        object_names = VOC_CLASS_LIST

        cfg.conf_threshold = 0.4
    elif args.dataset == 'coco':
        from data_loader.detection.coco import COCO_CLASS_LIST
        num_classes = len(COCO_CLASS_LIST)
        object_names = COCO_CLASS_LIST

        cfg.conf_threshold = 0.3
    else:
        print_error_message('{} dataset not supported.'.format(args.dataset))
        exit(-1)

    cfg.NUM_CLASSES = num_classes

    # -----------------------------------------------------------------------------
    # Model
    # -----------------------------------------------------------------------------
    model = ssd(args, cfg)
    if args.weights_test:
        weight_dict = torch.load(args.weights_test, map_location='cpu')
        model.load_state_dict(weight_dict)
    else:
        print_error_message(
            'Please provide the location of the weight file using the --weights-test argument'
        )
        exit(-1)

    num_gpus = torch.cuda.device_count()
    device = 'cuda' if num_gpus >= 1 else 'cpu'

    if num_gpus >= 1:
        model = torch.nn.DataParallel(model)
        model = model.to(device)
        if torch.backends.cudnn.is_available():
            import torch.backends.cudnn as cudnn
            cudnn.benchmark = True
            cudnn.deterministic = True
    predictor = BoxPredictor(cfg=cfg, device=device)

    if args.live:
        main_live(predictor=predictor, model=model, object_names=object_names)
    else:
        if not os.path.isdir(args.save_dir):
            os.makedirs(args.save_dir)
        main_images(predictor=predictor,
                    model=model,
                    object_names=object_names,
                    in_dir=args.im_dir,
                    out_dir=args.save_dir)
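# ---------------------------------------------------------------------------
# For reference: a minimal argparse sketch that supplies the attributes this
# main() reads (im_size, dataset, weights_test, live, im_dir, save_dir). The
# flag names, defaults, and help strings are assumptions for illustration;
# the real script's parser may differ, and ssd(args, cfg) may read further
# attributes (e.g. model name and scale) that are omitted here.
# ---------------------------------------------------------------------------
import argparse

def build_demo_argument_parser():
    parser = argparse.ArgumentParser(description='SSD detection demo (sketch)')
    parser.add_argument('--im-size', dest='im_size', type=int, default=300,
                        help='input image size (300 and 512 are supported)')
    parser.add_argument('--dataset', default='pascal',
                        choices=['voc', 'pascal', 'coco'],
                        help='dataset whose class list should be used')
    parser.add_argument('--weights-test', dest='weights_test', default='',
                        help='path to the trained weight file')
    parser.add_argument('--live', action='store_true',
                        help='run on a live camera feed instead of an image directory')
    parser.add_argument('--im-dir', dest='im_dir', default='sample_images/',
                        help='directory with input images')
    parser.add_argument('--save-dir', dest='save_dir', default='results/',
                        help='directory where annotated images are written')
    return parser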
Example #2
def main(args):
    if args.im_size in [300, 512]:
        from model.detection.ssd_config import get_config
        cfg = get_config(args.im_size)
    else:
        print_error_message('{} image size not supported'.format(args.im_size))
        exit(-1)

    if args.dataset in ['voc', 'pascal']:
        from data_loader.detection.voc import VOC_CLASS_LIST
        num_classes = len(VOC_CLASS_LIST)
    elif args.dataset == 'coco':
        from data_loader.detection.coco import COCO_CLASS_LIST
        num_classes = len(COCO_CLASS_LIST)
    else:
        print_error_message('{} dataset not supported.'.format(args.dataset))
        exit(-1)

    cfg.NUM_CLASSES = num_classes

    # -----------------------------------------------------------------------------
    # Model
    # -----------------------------------------------------------------------------
    model = ssd(args, cfg)

    if args.weights_test:
        weight_dict = torch.load(args.weights_test, map_location='cpu')
        model.load_state_dict(weight_dict)

    num_params = model_parameters(model)
    flops = compute_flops(model,
                          input=torch.Tensor(1, 3, cfg.image_size,
                                             cfg.image_size))
    print_info_message(
        'FLOPs for an input of size {}x{}: {:.2f} million'.format(
            cfg.image_size, cfg.image_size, flops))
    print_info_message('Network Parameters: {:.2f} million'.format(num_params))

    num_gpus = torch.cuda.device_count()
    device = 'cuda' if num_gpus >= 1 else 'cpu'

    if num_gpus >= 1:
        model = torch.nn.DataParallel(model)
        model = model.to(device)
        if torch.backends.cudnn.is_available():
            import torch.backends.cudnn as cudnn
            cudnn.benchmark = True
            cudnn.deterministic = True

    # -----------------------------------------------------------------------------
    # Dataset
    # -----------------------------------------------------------------------------
    if args.dataset in ['voc', 'pascal']:
        from data_loader.detection.voc import VOCDataset, VOC_CLASS_LIST
        dataset_class = VOCDataset(root_dir=args.data_path,
                                   transform=None,
                                   is_training=False,
                                   split="VOC2007")
        class_names = VOC_CLASS_LIST
    else:
        from data_loader.detection.coco import COCOObjectDetection, COCO_CLASS_LIST
        dataset_class = COCOObjectDetection(root_dir=args.data_path,
                                            transform=None,
                                            is_training=False)
        class_names = COCO_CLASS_LIST

    # -----------------------------------------------------------------------------
    # Evaluate
    # -----------------------------------------------------------------------------
    predictor = BoxPredictor(cfg=cfg, device=device)
    predictions = eval(model=model, dataset=dataset_class, predictor=predictor)

    result_info = evaluate(dataset=dataset_class,
                           predictions=predictions,
                           output_dir=None,
                           dataset_name=args.dataset)

    # -----------------------------------------------------------------------------
    # Results
    # -----------------------------------------------------------------------------
    if args.dataset in ['voc', 'pascal']:
        mAP = result_info['map']
        ap = result_info['ap']
        for i, c_name in enumerate(class_names):
            if i == 0:  # skip the background class
                continue
            print_info_message('{}: {}'.format(c_name, ap[i]))

        print_info_message('* mAP: {}'.format(mAP))
    elif args.dataset == 'coco':
        print_info_message('AP_IoU=0.50:0.95: {}'.format(result_info.stats[0]))
        print_info_message('AP_IoU=0.50: {}'.format(result_info.stats[1]))
        print_info_message('AP_IoU=0.75: {}'.format(result_info.stats[2]))
    else:
        print_error_message('{} not supported'.format(args.dataset))

    print_log_message('Done')
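# ---------------------------------------------------------------------------
# The COCO branch above indexes result_info.stats. Assuming the repo's
# evaluate() returns a pycocotools COCOeval object for COCO (an assumption),
# its 12-element stats vector follows the standard layout, so the remaining
# entries could be reported the same way. print_coco_summary is a
# hypothetical helper, not part of the original script.
# ---------------------------------------------------------------------------
def print_coco_summary(coco_eval):
    labels = [
        'AP_IoU=0.50:0.95', 'AP_IoU=0.50', 'AP_IoU=0.75',
        'AP_small', 'AP_medium', 'AP_large',
        'AR_maxDets=1', 'AR_maxDets=10', 'AR_maxDets=100',
        'AR_small', 'AR_medium', 'AR_large',
    ]
    for name, value in zip(labels, coco_eval.stats):
        print_info_message('{}: {:.4f}'.format(name, value))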
Example #3
def main(args):
    if args.im_size in [300, 512]:
        from model.detection.ssd_config import get_config
        cfg = get_config(args.im_size)
    else:
        print_error_message('{} image size not supported'.format(args.im_size))
        exit(-1)

    # -----------------------------------------------------------------------------
    # Dataset
    # -----------------------------------------------------------------------------
    train_transform = TrainTransform(cfg.image_size)
    target_transform = MatchPrior(
        PriorBox(cfg)(), cfg.center_variance, cfg.size_variance,
        cfg.iou_threshold)
    val_transform = ValTransform(cfg.image_size)

    if args.dataset in ['voc', 'pascal']:
        from data_loader.detection.voc import VOCDataset, VOC_CLASS_LIST
        train_dataset_2007 = VOCDataset(root_dir=args.data_path,
                                        transform=train_transform,
                                        target_transform=target_transform,
                                        is_training=True,
                                        split="VOC2007")
        train_dataset_2012 = VOCDataset(root_dir=args.data_path,
                                        transform=train_transform,
                                        target_transform=target_transform,
                                        is_training=True,
                                        split="VOC2012")
        train_dataset = torch.utils.data.ConcatDataset(
            [train_dataset_2007, train_dataset_2012])
        val_dataset = VOCDataset(root_dir=args.data_path,
                                 transform=val_transform,
                                 target_transform=target_transform,
                                 is_training=False,
                                 split="VOC2007")
        num_classes = len(VOC_CLASS_LIST)
    elif args.dataset == 'coco':
        from data_loader.detection.coco import COCOObjectDetection, COCO_CLASS_LIST
        train_dataset = COCOObjectDetection(root_dir=args.data_path,
                                            transform=train_transform,
                                            target_transform=target_transform,
                                            is_training=True)
        val_dataset = COCOObjectDetection(root_dir=args.data_path,
                                          transform=val_transform,
                                          target_transform=target_transform,
                                          is_training=False)
        num_classes = len(COCO_CLASS_LIST)
    else:
        print_error_message('{} dataset is not supported yet'.format(
            args.dataset))
        exit()
    cfg.NUM_CLASSES = num_classes

    # -----------------------------------------------------------------------------
    # Dataset loader
    # -----------------------------------------------------------------------------
    print_info_message('Training samples: {}'.format(len(train_dataset)))
    print_info_message('Validation samples: {}'.format(len(val_dataset)))
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=True)
    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True)
    # -----------------------------------------------------------------------------
    # Model
    # -----------------------------------------------------------------------------
    model = ssd(args, cfg)
    if args.finetune:
        if os.path.isfile(args.finetune):
            print_info_message('Loading weights for finetuning from {}'.format(
                args.finetune))
            weight_dict = torch.load(args.finetune, map_location='cpu')
            model.load_state_dict(weight_dict)
            print_info_message('Done')
        else:
            print_warning_message('No file for finetuning. Please check.')

    if args.freeze_bn:
        print_info_message('Freezing batch normalization layers')
        for m in model.modules():
            if isinstance(m, torch.nn.BatchNorm2d):
                m.eval()
                m.weight.requires_grad = False
                m.bias.requires_grad = False
    # -----------------------------------------------------------------------------
    # Optimizer and Criterion
    # -----------------------------------------------------------------------------
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.wd)

    criterion = MultiBoxLoss(neg_pos_ratio=cfg.neg_pos_ratio)

    # writer for logs
    writer = SummaryWriter(log_dir=args.save,
                           comment='Training and Validation logs')
    try:
        writer.add_graph(model,
                         input_to_model=torch.Tensor(1, 3, cfg.image_size,
                                                     cfg.image_size))
    except Exception:
        print_log_message(
            "Not able to generate the graph. Likely because your model is not supported by ONNX"
        )

    #model stats
    num_params = model_parameters(model)
    flops = compute_flops(model,
                          input=torch.Tensor(1, 3, cfg.image_size,
                                             cfg.image_size))
    print_info_message(
        'FLOPs for an input of size {}x{}: {:.2f} million'.format(
            cfg.image_size, cfg.image_size, flops))
    print_info_message('Network Parameters: {:.2f} million'.format(num_params))

    num_gpus = torch.cuda.device_count()
    device = 'cuda' if num_gpus >= 1 else 'cpu'

    min_val_loss = float('inf')
    start_epoch = 0  # start from epoch 0 or last epoch
    if args.resume:
        if os.path.isfile(args.resume):
            print_info_message("=> loading checkpoint '{}'".format(
                args.resume))
            checkpoint = torch.load(args.resume,
                                    map_location=torch.device('cpu'))
            model.load_state_dict(checkpoint['state_dict'])
            min_val_loss = checkpoint['min_loss']
            start_epoch = checkpoint['epoch']
        else:
            print_warning_message("=> no checkpoint found at '{}'".format(
                args.resume))

    if num_gpus >= 1:
        model = torch.nn.DataParallel(model)
        model = model.to(device)
        if torch.backends.cudnn.is_available():
            import torch.backends.cudnn as cudnn
            cudnn.benchmark = True
            cudnn.deterministic = True

    # -----------------------------------------------------------------------------
    # Scheduler
    # -----------------------------------------------------------------------------
    if args.lr_type == 'poly':
        from utilities.lr_scheduler import PolyLR
        lr_scheduler = PolyLR(base_lr=args.lr,
                              max_epochs=args.epochs,
                              power=args.power)
    elif args.lr_type == 'hybrid':
        from utilities.lr_scheduler import HybirdLR
        lr_scheduler = HybirdLR(base_lr=args.lr,
                                max_epochs=args.epochs,
                                clr_max=args.clr_max,
                                cycle_len=args.cycle_len)
    elif args.lr_type == 'clr':
        from utilities.lr_scheduler import CyclicLR
        lr_scheduler = CyclicLR(min_lr=args.lr,
                                cycle_len=args.cycle_len,
                                steps=args.steps,
                                gamma=args.gamma,
                                step=True)
    elif args.lr_type == 'cosine':
        from utilities.lr_scheduler import CosineLR
        lr_scheduler = CosineLR(base_lr=args.lr, max_epochs=args.epochs)
    else:
        print_error_message('{} scheduler not yet supported'.format(
            args.lr_type))
        exit()

    print_info_message(lr_scheduler)

    # -----------------------------------------------------------------------------
    # Training and validation loop
    # -----------------------------------------------------------------------------

    extra_info_ckpt = '{}_{}'.format(args.model, args.s)
    for epoch in range(start_epoch, args.epochs):
        curr_lr = lr_scheduler.step(epoch)
        optimizer.param_groups[0]['lr'] = curr_lr

        print_info_message('Running epoch {} at LR {}'.format(epoch, curr_lr))
        train_loss, train_cl_loss, train_loc_loss = train(train_loader,
                                                          model,
                                                          criterion,
                                                          optimizer,
                                                          device,
                                                          epoch=epoch)
        val_loss, val_cl_loss, val_loc_loss = validate(val_loader,
                                                       model,
                                                       criterion,
                                                       device,
                                                       epoch=epoch)
        # Save checkpoint
        is_best = val_loss < min_val_loss
        min_val_loss = min(val_loss, min_val_loss)

        weights_dict = (model.module.state_dict()
                        if device == 'cuda' else model.state_dict())
        save_checkpoint(
            {
                'epoch': epoch,
                'model': args.model,
                'state_dict': weights_dict,
                'min_loss': min_val_loss
            }, is_best, args.save, extra_info_ckpt)

        writer.add_scalar('Detection/LR/learning_rate', round(curr_lr, 6),
                          epoch)
        writer.add_scalar('Detection/Loss/train', train_loss, epoch)
        writer.add_scalar('Detection/Loss/val', val_loss, epoch)
        writer.add_scalar('Detection/Loss/train_cls', train_cl_loss, epoch)
        writer.add_scalar('Detection/Loss/val_cls', val_cl_loss, epoch)
        writer.add_scalar('Detection/Loss/train_loc', train_loc_loss, epoch)
        writer.add_scalar('Detection/Loss/val_loc', val_loc_loss, epoch)
        # Log the best validation loss against model complexity: here the
        # x-axis is FLOPs / parameter count, so different model
        # configurations can be compared on a single chart.
        writer.add_scalar('Detection/Complexity/Flops', min_val_loss,
                          math.ceil(flops))
        writer.add_scalar('Detection/Complexity/Params', min_val_loss,
                          math.ceil(num_params))

    writer.close()
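# ---------------------------------------------------------------------------
# The training loop above expects lr_scheduler.step(epoch) to *return* the
# learning rate for that epoch, which is then written into the optimizer by
# hand. Below is a minimal sketch of a polynomial-decay scheduler that
# satisfies this contract; the actual utilities.lr_scheduler.PolyLR may
# differ in its details.
# ---------------------------------------------------------------------------
class PolyLRSketch(object):
    def __init__(self, base_lr, max_epochs, power=0.9):
        self.base_lr = base_lr
        self.max_epochs = max_epochs
        self.power = power

    def step(self, epoch):
        # Decay from base_lr towards 0 following (1 - epoch / max_epochs)^power
        return self.base_lr * ((1.0 - float(epoch) / self.max_epochs) ** self.power)

    def __repr__(self):
        return '{}(base_lr={}, max_epochs={}, power={})'.format(
            self.__class__.__name__, self.base_lr, self.max_epochs, self.power)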