Example #1
def evaluate(model, path, datasets, iou_thres, conf_thres, nms_thres, img_size, batch_size):
    model.eval()

    # Get dataloader
    # Evaluation should run without augmentation or multi-scale resizing
    dataset = VOCDetection(root=path, image_sets=datasets, augment=False, multiscale=False)
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=dataset.collate_fn
    )

    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor

    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    for batch_i, (_, imgs, targets) in enumerate(tqdm.tqdm(dataloader, desc="Detecting objects")):

        # Extract labels
        labels += targets[:, 1].tolist()
        # Rescale target
        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= img_size

        imgs = imgs.type(Tensor)  # torch.autograd.Variable is deprecated; plain tensors suffice

        with torch.no_grad():
            outputs = model(imgs)
            outputs = non_max_suppression(outputs, conf_thres=conf_thres, nms_thres=nms_thres)

        sample_metrics += get_batch_statistics(outputs, targets, iou_threshold=iou_thres)

    # Concatenate sample statistics
    true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))]
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)

    return precision, recall, AP, f1, ap_class
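A minimal usage sketch for the evaluate() function above; the dataset root, image set, thresholds, and the already-constructed model are illustrative assumptions, not values from the original project:

# Hypothetical call to evaluate(); `model` is assumed to be a loaded detector
# and VOC_CLASSES a list mapping class indices to names.
precision, recall, AP, f1, ap_class = evaluate(
    model,
    path="data/VOCdevkit",          # assumed VOC root directory
    datasets=[("2007", "test")],    # VOC image set(s) to evaluate on
    iou_thres=0.5,
    conf_thres=0.001,
    nms_thres=0.5,
    img_size=416,
    batch_size=8,
)
for i, c in enumerate(ap_class):
    print("AP for class {}: {:.4f}".format(VOC_CLASSES[c], AP[i]))
print("mAP: {:.4f}".format(AP.mean()))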
Example #2
def main_worker(gpu, ngpus_per_node, args):
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            # args.rank = int(os.environ["RANK"])
            args.rank = 1
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    checkpoint = []
    if (args.resume is not None):
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
        params = checkpoint['parser']
        args.num_class = params.num_class
        args.network = params.network
        args.start_epoch = params.start_epoch + 1
        del params

    model = EfficientDet(num_classes=args.num_class,
                         network=args.network,
                         W_bifpn=EFFICIENTDET[args.network]['W_bifpn'],
                         D_bifpn=EFFICIENTDET[args.network]['D_bifpn'],
                         D_class=EFFICIENTDET[args.network]['D_class'],
                         gpu=args.gpu)
    if (args.resume is not None):
        model.load_state_dict(checkpoint['state_dict'])
    del checkpoint
    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(
                (args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu], find_unused_parameters=True)
            print('Run with DistributedDataParallel with device_ids....')
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
            print('Run with DistributedDataParallel without device_ids....')
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        print('Run with DataParallel ....')
        model = torch.nn.DataParallel(model).cuda()

    # Training dataset
    train_dataset = []
    if (args.dataset == 'VOC'):
        #         train_dataset = VOCDetection(root=args.dataset_root,
        #                                      transform=get_augumentation(phase='train', width=EFFICIENTDET[args.network]['input_size'], height=EFFICIENTDET[args.network]['input_size']))
        train_dataset = VOCDetection(root=args.dataset_root,
                                     transform=transforms.Compose([
                                         Normalizer(),
                                         Augmenter(),
                                         Resizer()
                                     ]))

    elif (args.dataset == 'COCO'):
        train_dataset = CocoDataset(
            root_dir=args.dataset_root,
            set_name='train2017',
            transform=get_augumentation(
                phase='train',
                width=EFFICIENTDET[args.network]['input_size'],
                height=EFFICIENTDET[args.network]['input_size']))


#     train_loader = DataLoader(train_dataset,
#                                   batch_size=args.batch_size,
#                                   num_workers=args.workers,
#                                   shuffle=True,
#                                   collate_fn=detection_collate,
#                                   pin_memory=True)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              num_workers=args.workers,
                              shuffle=True,
                              collate_fn=collater,
                              pin_memory=True)
    # define optimizer and learning-rate scheduler
    optimizer = optim.AdamW(model.parameters(), lr=args.lr)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     patience=3,
                                                     verbose=True)
    cudnn.benchmark = True

    for epoch in range(args.start_epoch, args.num_epoch):
        train(train_loader, model, scheduler, optimizer, epoch, args)
        state = {
            'epoch': epoch,
            'parser': args,
            'state_dict': get_state_dict(model)
        }
        torch.save(
            state,
            './weights/checkpoint_{}_{}_{}.pth'.format(args.dataset,
                                                       args.network, epoch))
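main_worker follows the standard per-process pattern from the PyTorch ImageNet example, so it is normally launched once per GPU. A minimal launcher sketch, assuming the usual argparse flags (multiprocessing_distributed, world_size, gpu) that the function above already reads:

import torch
import torch.multiprocessing as mp

def launch(args):
    # spawn() passes the process index (the local GPU id) as the first argument,
    # matching the signature main_worker(gpu, ngpus_per_node, args).
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # total number of processes across all nodes
        args.world_size = ngpus_per_node * args.world_size
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        main_worker(args.gpu, ngpus_per_node, args)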
Example #3
                        help='Checkpoint state_dict file to resume training from')
    args = parser.parse_args()

    if(args.weight is not None):
        resume_path = str(args.weight)
        print("Loading checkpoint: {} ...".format(resume_path))
        checkpoint = torch.load(
            args.weight, map_location=lambda storage, loc: storage)
        params = checkpoint['parser']
        args.num_class = params.num_class
        args.network = params.network
        model = EfficientDet(
            num_classes=args.num_class,
            network=args.network,
            W_bifpn=EFFICIENTDET[args.network]['W_bifpn'],
            D_bifpn=EFFICIENTDET[args.network]['D_bifpn'],
            D_class=EFFICIENTDET[args.network]['D_class'],
            is_training=False,
            threshold=args.threshold,
            iou_threshold=args.iou_threshold)
        model.load_state_dict(checkpoint['state_dict'])
    model = model.cuda()
    if(args.dataset == 'VOC'):
        valid_dataset = VOCDetection(root=args.dataset_root, image_sets=[('2007', 'test')],
                                     transform=transforms.Compose([Normalizer(), Resizer()]))
        evaluate(valid_dataset, model)
    else:
        valid_dataset = CocoDataset(root_dir=args.dataset_root, set_name='val2017',
                                    transform=transforms.Compose([Normalizer(), Resizer()]))
        evaluate_coco(valid_dataset, model)
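Example #3 starts mid-way through its argument parser, so the flags it relies on are not visible. A minimal parser sketch covering only the attributes the snippet reads (weight, threshold, iou_threshold, dataset, dataset_root); the defaults are illustrative assumptions:

import argparse

parser = argparse.ArgumentParser(description='EfficientDet evaluation (sketch)')
parser.add_argument('--dataset', default='VOC', choices=['VOC', 'COCO'],
                    help='Dataset to evaluate on')
parser.add_argument('--dataset_root', default='datasets/',
                    help='Root directory of the VOC or COCO data')
parser.add_argument('--weight', default=None, type=str,
                    help='Checkpoint state_dict file to resume training from')
parser.add_argument('--threshold', default=0.5, type=float,
                    help='Confidence threshold for detections')
parser.add_argument('--iou_threshold', default=0.5, type=float,
                    help='IoU threshold used during NMS')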
Example #4
    # Get data configuration
    class_names = VOC_CLASSES

    # Initiate model
    model = Darknet(opt.model_def).to(device)
    model.apply(weights_init_normal)

    # If specified we start from checkpoint
    if opt.pretrained_weights:
        if opt.pretrained_weights.endswith(".pth"):
            model.load_state_dict(torch.load(opt.pretrained_weights))
        else:
            model.load_darknet_weights(opt.pretrained_weights)

    # Get dataloader
    dataset = VOCDetection(root=opt.root, augment=True, multiscale=opt.multiscale_training)
    #dataset = ListDataset(train_path, augment=True, multiscale=opt.multiscale_training)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.n_cpu,
        pin_memory=True,
        collate_fn=dataset.collate_fn,
    )

    optimizer = torch.optim.Adam(model.parameters())

    metrics = [
        "grid_size",
        "loss",
Example #5
def get_state_dict(model):
    if type(model) == torch.nn.DataParallel:
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    return state_dict
checkpoint = []
if(args.resume is not None):
    resume_path = str(args.resume)
    print("Loading checkpoint: {} ...".format(resume_path))
    checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage)

train_dataset = []
if(args.dataset=='VOC'):
    train_dataset = VOCDetection(root = args.dataset_root,
                                transform = get_augumentation(phase='train'))
elif(args.dataset=='COCO'):
    train_dataset = COCODetection(root = args.dataset_root,
                                transform = get_augumentation(phase='train'))
train_dataloader = DataLoader(train_dataset, 
                            batch_size=args.batch_size,
                            num_workers=args.num_worker,
                            shuffle=True,
                            collate_fn=detection_collate,
                            pin_memory=True)

model = EfficientDet(num_classes=args.num_classes, network=args.network)
if args.resume is not None:
    # num_class = checkpoint['num_classes']
    # network = checkpoint['network']
    # NOTE: the resumed model is hard-coded to 21 classes / 'efficientdet-d0'
    # instead of reading those values from the checkpoint loaded above.
    model = EfficientDet(num_classes=21, network='efficientdet-d0')
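Because get_state_dict() strips the DataParallel wrapper before saving, the stored keys carry no 'module.' prefix and can be loaded back into a bare model. A small restore sketch; the checkpoint file name and the re-wrapping step are assumptions:

# Hypothetical restore of a checkpoint saved with get_state_dict()
checkpoint = torch.load('./weights/checkpoint_VOC_efficientdet-d0_10.pth',
                        map_location='cpu')
model = EfficientDet(num_classes=args.num_classes, network=args.network)
model.load_state_dict(checkpoint['state_dict'])   # keys have no 'module.' prefix
model = torch.nn.DataParallel(model).cuda()       # re-wrap after loading if needed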
Example #6
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    # Initiate model
    model = Yolo(num_classes=20).to(device)

    # If specified we start from checkpoint
    if args.pretrained_weights:
        if args.pretrained_weights.endswith('.pth'):
            model.load_state_dict(torch.load(args.pretrained_weights))
        else:
            model.load_darknet_weights(args.pretrained_weights)

    # Get dataloader
    train_dataset = VOCDetection(args.train_path, args.img_size)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.num_workers,
                              collate_fn=train_dataset.collate_fn)
    val_dataset = VOCDetection(args.val_path, args.img_size)
    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.num_workers,
                            collate_fn=val_dataset.collate_fn)

    optimizer = optim.SGD(model.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum,
Example #7
def get_state_dict(model):
    if type(model) == torch.nn.DataParallel:
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    return state_dict


checkpoint = []
if(args.resume is not None):
    resume_path = str(args.resume)
    print("Loading checkpoint: {} ...".format(resume_path))
    checkpoint = torch.load(
        args.resume, map_location=lambda storage, loc: storage)
    args.num_class = checkpoint['num_class']
    args.network = checkpoint['network']

train_dataset = []
if(args.dataset == 'VOC'):
    train_dataset = VOCDetection(root=args.dataset_root,
                                 transform=get_augumentation(phase='train', width=EFFICIENTDET[args.network]['input_size'], height=EFFICIENTDET[args.network]['input_size']))

elif(args.dataset == 'COCO'):
    train_dataset = CocoDataset(root_dir=args.dataset_root, set_name='train2017', transform=get_augumentation(
        phase='train', width=EFFICIENTDET[args.network]['input_size'], height=EFFICIENTDET[args.network]['input_size']))

train_dataloader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              num_workers=args.num_worker,
                              shuffle=True,
                              collate_fn=detection_collate,
                              pin_memory=True)

model = EfficientDet(num_classes=args.num_classes,
                     network=args.network,
                     W_bifpn=EFFICIENTDET[args.network]['W_bifpn'],
Example #8
            "Warning: There\'s no GPU available on this machine, training will be performed on CPU.")
        n_gpu_use = 0
    if n_gpu_use > n_gpu:
        print(
            "Warning: The number of GPU\'s configured to use is {}, but only {} are available on this machine.".format(
                n_gpu_use, n_gpu))
        n_gpu_use = n_gpu
    list_ids = device
    device = torch.device('cuda:{}'.format(
        device[0]) if n_gpu_use > 0 else 'cpu')

    return device, list_ids


if (args.dataset == 'VOC'):
    valid_dataset = VOCDetection(root=args.dataset_root,
                                 transform=get_augumentation(phase='valid'))
elif (args.dataset == 'COCO'):
    valid_dataset = COCODetection(root=args.dataset_root,
                                  transform=get_augumentation(phase='valid'))
valid_dataloader = DataLoader(valid_dataset,
                              batch_size=1,
                              num_workers=args.num_worker,
                              shuffle=False,
                              collate_fn=detection_collate,
                              pin_memory=False)
if (args.weights is not None):
    resume_path = str(args.weights)
    print("Loading checkpoint: {} ...".format(resume_path))
    checkpoint = torch.load(
        args.weights, map_location=lambda storage, loc: storage)
    args.num_class = checkpoint['num_class']
    args.network = checkpoint['network']
    model = EfficientDet(num_classes=args.num_class,
                         network=args.network,
                         W_bifpn=EFFICIENTDET[args.network]['W_bifpn'],
                         D_bifpn=EFFICIENTDET[args.network]['D_bifpn'],
                         D_class=EFFICIENTDET[args.network]['D_class'],
                         is_training=False,
                         threshold=args.threshold,
                         iou_threshold=args.iou_threshold)
    model.load_state_dict(checkpoint['state_dict'])

if (args.dataset == 'VOC'):
    valid_dataset = VOCDetection(
        root=args.dataset_root,
        image_sets=[('2007', 'test')],
        transform=get_augumentation(
            phase='valid',
            width=EFFICIENTDET[args.network]['input_size'],
            height=EFFICIENTDET[args.network]['input_size']))
elif (args.dataset == 'COCO'):
    valid_dataset = COCODetection(
        root=args.dataset_root,
        transform=get_augumentation(
            phase='valid',
            width=EFFICIENTDET[args.network]['input_size'],
            height=EFFICIENTDET[args.network]['input_size']))

valid_dataloader = DataLoader(valid_dataset,
                              batch_size=1,
                              num_workers=args.num_worker,
                              shuffle=False,