Example #1
0
def prefetch_test(opt):
    """Run single-image inference over a COCO split with a prefetching loader,
    collect per-image detections, report per-stage timings, and run evaluation."""
    os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpus)
    opt = opts().update_dataset_info_and_set_heads(opt, COCO)
    print(opt)
    Logger(opt)

    # 'trainval' switches evaluation from the val split to the test split.
    split = 'test' if opt.trainval else 'val'
    dataset = COCO(opt, split)
    detector = CtdetDetector(opt)

    data_loader = torch.utils.data.DataLoader(
        PrefetchDataset(opt, dataset, pre_process),
        batch_size=1,
        shuffle=False,
        num_workers=1,
        pin_memory=True)

    results = {}
    num_iters = len(dataset)
    bar = Bar(f'{opt.exp_id}', max=num_iters)
    # Running averages for each timed stage of the detector pipeline.
    avg_time_stats = {
        stat: AverageMeter() for stat in ('tot', 'net', 'dec', 'post', 'merge')}

    for idx, (img_id, images) in enumerate(data_loader):
        ret = detector.run(images)
        # Key results by the scalar image id (batch size is 1).
        results[img_id.numpy().astype(np.int32)[0]] = ret['results']
        suffix = f'{idx}/{num_iters}|Elapsed: {bar.elapsed_td} |ETA: {bar.eta_td} '

        for stat, meter in avg_time_stats.items():
            meter.update(ret[stat])
            suffix = suffix + '|{} {tm.val:.3f} ({tm.avg:.3f}) '.format(stat, tm=meter)
        bar.suffix = suffix
        bar.next()
    bar.finish()

    dataset.run_eval(results, opt.save_dir)
Example #2
0
def get_train_dataset(opt, transforms=None):
    """Build the training dataset selected by ``opt.dataset``.

    Args:
        opt: options object; reads ``dataset``, ``dataset_path``,
            ``annotation_path``, ``train_json``, ``image_size`` and
            ``no_multi_scale``.
        transforms: optional transforms forwarded to the dataset.

    Returns:
        The constructed dataset instance.

    Raises:
        NotImplementedError: if ``opt.dataset`` names an unsupported dataset.
    """
    if opt.dataset == 'coco':
        from data.coco import COCO
        dataset = COCO(
            opt.dataset_path, os.path.join(opt.annotation_path, opt.train_json), subset='train',
            image_size=opt.image_size, multi_scale=(not opt.no_multi_scale), transforms=transforms)
    else:
        # BUG FIX: the message previously interpolated ``opt.dataset_mode``,
        # an attribute never consulted by the dispatch above (which checks
        # ``opt.dataset``) — reporting the wrong value, or raising
        # AttributeError if ``dataset_mode`` does not exist.
        raise NotImplementedError('the dataset [%s] is not implemented' % opt.dataset)
    print("train dataset [%s] was created" % (dataset.name()))
    return dataset
Example #3
0
def get_val_dataset(opt, transforms=None):
    """Build the validation dataset selected by ``opt.dataset``.

    Args:
        opt: options object; reads ``dataset``, ``dataset_path``,
            ``annotation_path`` and ``image_size``.
        transforms: optional transforms forwarded to the dataset.

    Returns:
        The constructed dataset instance (multi-scale disabled for eval).

    Raises:
        NotImplementedError: if ``opt.dataset`` names an unsupported dataset.
    """
    if opt.dataset == 'coco':
        from data.coco import COCO
        dataset = COCO(
            opt.dataset_path, opt.annotation_path, subset='val',
            image_size=opt.image_size, multi_scale=False, transforms=transforms)
    else:
        # BUG FIX: the message previously interpolated ``opt.dataset_mode``,
        # an attribute never consulted by the dispatch above (which checks
        # ``opt.dataset``) — reporting the wrong value, or raising
        # AttributeError if ``dataset_mode`` does not exist.
        raise NotImplementedError('the dataset [%s] is not implemented' % opt.dataset)
    print("dataset [%s] was created" % (dataset.name()))
    return dataset
Example #4
0
def main():
    """Train the detector on COCO-2014, saving the full model whenever the
    validation loss improves."""
    # data
    root = Path(args.data_root)

    def build_loader(split, shuffle):
        # One COCO split wrapped in a DataLoader with the shared settings.
        dataset = COCO(
            annFile=str(root / f'annotations/instances_{split}2014.json'),
            root=str(root / f'{split}2014/'),
            image_size=args.img_size)
        return DataLoader(dataset=dataset,
                          batch_size=args.batch_size,
                          shuffle=shuffle,
                          num_workers=args.nworkers,
                          pin_memory=args.cuda,
                          collate_fn=collate_fn)

    train_loader = build_loader('train', shuffle=True)
    val_loader = build_loader('val', shuffle=False)

    # model
    model = Detector(args.nclasses + 1)
    optimizer = optim.Adam(params=model.parameters(), lr=args.lr)

    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    best_loss = np.inf
    for epoch in range(args.epoch):
        print('{:3d}/{:3d} epoch'.format(epoch + 1, args.epoch))

        train(model, train_loader, optimizer)
        loss = validate(model, val_loader)

        # Checkpoint only on improvement.
        if loss < best_loss:
            torch.save(model, 'model.save')
            best_loss = loss
Example #5
0
# --- training setup (script level) ---
# Scale the base learning rate linearly with batch size (reference batch = 32).
lr = (args.batch_size / 32) * cfg.init_lr

# BUG FIX: the model was created as ``net`` but every subsequent line
# referenced ``model``, which is undefined here — ``load_model(model, ...)``
# and ``CtdetTrainer(args, model, ...)`` would raise NameError. Use one name
# consistently.
model = create_model(args.backbone, cfg.heads, head_channel)
optimizer = torch.optim.Adam(model.parameters(), lr)

start_epoch = 0

# Optionally restore model/optimizer state and the epoch counter.
if args.resume:
    model, optimizer, start_epoch = load_model(model, args.resume, optimizer,
                                               lr, cfg.lr_step)

trainer = CtdetTrainer(args, model, optimizer)
# NOTE(review): ``opt`` is not defined in this chunk while the surrounding
# code uses ``args`` — confirm ``opt`` exists upstream or unify on ``args``.
trainer.set_device(opt.gpus, opt.chunk_sizes, device)

print('Setting up data...')
val_loader = data.DataLoader(COCO(opt, 'val'),
                             batch_size=1,
                             shuffle=False,
                             num_workers=4,
                             pin_memory=True)

train_loader = data.DataLoader(COCO(opt, 'train'),
                               batch_size=opt.batch_size,
                               shuffle=False,
                               num_workers=1,
                               pin_memory=True,
                               drop_last=True)

print('Starting training...')
# Best validation metric so far; initialized high so the first epoch wins.
best = 1e10