Example No. 1
def prefetch_test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpus)
    opt = opts().update_dataset_info_and_set_heads(opt, COCO)
    print(opt)
    Logger(opt)

    split = 'val' if not opt.trainval else 'test'
    dataset = COCO(opt, split)
    detector = CtdetDetector(opt)

    data_loader = torch.utils.data.DataLoader(PrefetchDataset(opt, dataset, detector.pre_process),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=1,
                                              pin_memory=True)

    results = {}
    num_iters = len(dataset)
    bar = Bar(f'{opt.exp_id}', max=num_iters)
    time_stats = ['tot', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}

    for i, (img_id, images) in enumerate(data_loader):
        ret = detector.run(images)
        results[img_id.numpy().astype(np.int32)[0]] = ret['results']
        bar.suffix = f'{i}/{num_iters}|Elapsed: {bar.elapsed_td} |ETA: {bar.eta_td} '

        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            bar.suffix = bar.suffix + '|{} {tm.val:.3f} ({tm.avg:.3f}) '.format(t, tm=avg_time_stats[t])
        bar.next()
    bar.finish()

    dataset.run_eval(results, opt.save_dir)
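
Examples No. 1, 3 and 5 hand the detector's pre-processing function to a PrefetchDataset so that it runs inside the DataLoader workers, but the wrapper class itself is not shown on this page. A simplified sketch of such a wrapper, matching only the constructor signature used above (the __getitem__ details, in particular the image-loading helper, are assumptions):

import cv2
import torch.utils.data


class PrefetchDataset(torch.utils.data.Dataset):
    # Wraps a detection dataset so that per-image pre-processing happens
    # inside the DataLoader workers instead of the main process.
    def __init__(self, opt, dataset, pre_process_func):
        self.opt = opt
        self.dataset = dataset
        self.pre_process_func = pre_process_func

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img_id = self.dataset.images[index]  # assumes the dataset keeps its image ids in a list
        image = cv2.imread(self.dataset.image_path(index))  # image_path() is a hypothetical helper
        return img_id, self.pre_process_func(image)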
Example No. 2
def main(opt):
    wandb.init(project="centernet_easy_tf")

    TF_ctdet_temp = TF_ctdet()

    opt = opts().update_dataset_info_and_set_heads(opt, TF_ctdet_temp)
    print(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = 'cuda'

    print('Creating model...')

    TF_dataloader_ = TF_dataloader(
        './data/coco/images/train2017',
        './data/coco/annotations/instances_train2017.json', 1, opt)

    model = create_model(opt.backbone, opt.heads, opt.head_conv)
    optimizer = tf.keras.optimizers.Adam(learning_rate=5e-4)
    Custom_loss = CtdetLoss(opt)

    model.compile(loss=Custom_loss, optimizer=optimizer)

    for epoch in range(1, 100):
        # NOTE: this inner loop has no break, so execution never reaches the
        # next epoch (or the wandb.watch call below)
        while True:
            batch = TF_dataloader_.get_coco_data('train')
            history = model.fit(batch['input'], batch['hm'])
            print(history.history['loss'][-1])

    wandb.watch(model)
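
Because the inner while loop above never breaks, training stays in epoch 1 and the wandb.watch call is never reached. A minimal sketch of a bounded variant, assuming a user-chosen steps_per_epoch and keeping the original assumption that get_coco_data('train') returns batch dicts with 'input' and 'hm' entries (train_on_batch performs a single gradient step and returns the scalar loss):

    steps_per_epoch = 500  # hypothetical value, not taken from the original
    for epoch in range(1, 100):
        for step in range(steps_per_epoch):
            batch = TF_dataloader_.get_coco_data('train')
            # one gradient step on this batch
            loss = model.train_on_batch(batch['input'], batch['hm'])
        print('epoch {} finished, last batch loss {:.4f}'.format(epoch, loss))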
Example No. 3
def prefetch_test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt = opts().update_dataset_info_and_set_heads(opt, CTDetDataset)
    print(opt)
    split = 'val'

    dataset = CTDetDataset(opt, split)
    detector = CtdetDetector(opt)

    result = {}
    if not os.path.exists(os.path.join(opt.save_dir, 'result.json')):
        data_loader = torch.utils.data.DataLoader(
            PrefetchDataset(opt, dataset, detector.pre_process),
            batch_size=1,
            shuffle=False,
            pin_memory=True)

        for ind, (img_id, pre_processes_images) in enumerate(data_loader):
            ret = detector.run(pre_processes_images)
            result[img_id.numpy().astype(np.int32)[0]] = ret['results']
            print(ind)

    dataset.run_eval(result, os.path.join(opt.save_dir, 'result.json'))
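
As in Example No. 5, this variant would typically be driven from a __main__ guard after parsing the options:

    if __name__ == '__main__':
        opt = opts().parse()
        prefetch_test(opt)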
Example No. 4
            cv2.imshow('input', img)
            ret = detector.run(img)
            time_str = ''
            for stat in time_stats:
                time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
            print(time_str)
            if cv2.waitKey(1) == 27:
                return  # esc to quit
    else:
        if os.path.isdir(opt.demo):
            image_names = []
            ls = os.listdir(opt.demo)
            for file_name in sorted(ls):
                ext = file_name[file_name.rfind('.') + 1:].lower()
                if ext in image_ext:
                    image_names.append(os.path.join(opt.demo, file_name))
        else:
            image_names = [opt.demo]

        for (image_name) in image_names:
            ret = detector.run(image_name)
            time_str = ''
            for stat in time_stats:
                time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
            print(time_str)


if __name__ == '__main__':
    opt = opts().init()
    demo(opt)
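
The snippet above uses image_ext and time_stats without showing their definitions (presumably module-level constants in the full file). Plausible values, with the timing keys borrowed from Example No. 1 and the extension list an assumption:

    image_ext = ['jpg', 'jpeg', 'png', 'webp']           # assumed set of accepted file extensions
    time_stats = ['tot', 'net', 'dec', 'post', 'merge']  # timing keys, as in Example No. 1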
Example No. 5
    data_loader = torch.utils.data.DataLoader(PrefetchDataset(opt, dataset, detector.pre_process),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=1,
                                              pin_memory=True)

    results = {}
    num_iters = len(dataset)
    bar = Bar(f'{opt.exp_id}', max=num_iters)
    time_stats = ['tot', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}

    for i, (img_id, images) in enumerate(data_loader):
        ret = detector.run(images)
        results[img_id.numpy().astype(np.int32)[0]] = ret['results']
        bar.suffix = f'{i}/{num_iters}|Elapsed: {bar.elapsed_td} |ETA: {bar.eta_td} '

        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            bar.suffix = bar.suffix + '|{} {tm.val:.3f} ({tm.avg:.3f}) '.format(t, tm=avg_time_stats[t])
        bar.next()
    bar.finish()

    dataset.run_eval(results, opt.save_dir)


if __name__ == '__main__':
    opt = opts().parse()
    prefetch_test(opt)
Example No. 6
def main(opt):
    wandb.init(project="centernet_easy")


    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = True
    opt = opts().update_dataset_info_and_set_heads(opt, CTDetDataset)
    print(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = 'cuda'

    print('Creating model...')

    model = create_model(opt.backbone, opt.heads, opt.head_conv, opt.tensorrt)
    wandb.watch(model)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)
    Trainer = CtdetTrainer
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    print('Setting up data...')
    val_loader = torch.utils.data.DataLoader(
        CTDetDataset(opt, 'val'),
        batch_size=1,
        shuffle=False,
        num_workers=1,
        pin_memory=True
    )

    if opt.test:
        _, preds = trainer.val(0, val_loader)
        val_loader.dataset.run_eval(preds, opt.save_dir)
        return

    train_loader = torch.utils.data.DataLoader(
        CTDetDataset(opt, 'train'),
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True
    )

    print('Starting training...')
    best = 1e10

    if not os.path.exists(os.path.join(opt.root_dir, 'exp')):
        os.mkdir(os.path.join(opt.root_dir, 'exp'))
    if not os.path.exists(opt.exp_dir):
        os.mkdir(opt.exp_dir)
    if not os.path.exists(opt.save_dir):
        os.mkdir(opt.save_dir)

    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'

        log_dict_train, _ = trainer.train(epoch, train_loader, wandb)
        if wandb is not None:
            wandb.log({'train loss': log_dict_train[opt.metric]})
        #print('epoch: {}, loss: {} |'.format(epoch, avg_loss))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader, wandb)
                if wandb is not None:
                    wandb.log({'val loss': log_dict_val[opt.metric]})
            if log_dict_val[opt.metric] < best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'),
                           epoch, model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'),
                       epoch, model, optimizer)
        if epoch in opt.lr_step:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                       epoch, model, optimizer)
            lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
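
For illustration, with hypothetical values opt.lr = 1.25e-4 and opt.lr_step = [90, 120] (assumptions; the actual option defaults are not shown in this snippet), the step decay above gives:

    # epoch 90:  lr = 1.25e-4 * 0.1 ** 1 = 1.25e-5
    # epoch 120: lr = 1.25e-4 * 0.1 ** 2 = 1.25e-6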