Example #1
            if datapath.endswith('300W_LP'):
                pts = load_lua(osp.join(base_dir, f.split('_')[0], f[:-4] + '.t7'))[0]
            else:
                print("Given data set do not have 3D annotations.")
                exit()
        else:
            pts = load_lua(f)
        all_gts[i, :, :] = pts
    print('Loaded {} samples from {}'.format(len(lines), base_dir))

    return all_gts, lines


if __name__ == "__main__":
    import opts
    args = opts.argparser()
    dataset = args.data.split('/')[-1]
    save_dir = osp.join(args.checkpoint, dataset)
    print("save dictory: " + save_dir)
    preds = torch.from_numpy(loadpreds_if_exists(osp.join(save_dir, 'preds_valid.mat')))
    gts, _ = loadgts(args.data, args.pointType)
    norm = np.ones(preds.size(0))
    for i, gt in enumerate(gts):
        norm[i] = _get_bboxsize(gt)

    if dataset in ('LS3D-W', '300VW-3D'):
        for i in range(3):
            if dataset == 'LS3D-W':
                category = ['Easy', 'Medium', 'Hard'][i]
                l, f = 2400*i, 2400*(i+1)
            else:
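The evaluation above normalizes each sample's error by `_get_bboxsize(gt)`, which the example does not define. A minimal sketch of such a helper, assuming the usual NME normalization by the geometric mean of the ground-truth bounding box's width and height (the body here is an assumption, not the example's actual code):

import numpy as np

def _get_bboxsize(pts):
    # pts: (N, 2) or (N, 3) landmark array; only x/y are used for the box.
    mins = pts[:, :2].min(axis=0)
    maxs = pts[:, :2].max(axis=0)
    # Geometric mean of box width and height, a common NME normalizer.
    return np.sqrt((maxs[0] - mins[0]) * (maxs[1] - mins[1]))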
Example #2
            mean /= self.total
            std /= self.total
            ms = {
                'mean': mean,
                'std': std,
            }
            torch.save(ms, meanstd_file)  # cache the statistics so later runs can skip recomputation
        if self.is_train:
            print('\tMean: %.4f, %.4f, %.4f' %
                  (ms['mean'][0], ms['mean'][1], ms['mean'][2]))
            print('\tStd:  %.4f, %.4f, %.4f' %
                  (ms['std'][0], ms['std'][1], ms['std'][2]))
        return ms['mean'], ms['std']


if __name__ == "__main__":
    import opts, demo
    args = opts.argparser()
    dataset = W300LP(args, 'test')
    crop_win = None
    for i in range(len(dataset)):
        input, target, meta = dataset[i]
        input = (input.numpy().transpose(1, 2, 0) * 255.).astype(np.uint8)  # imshow expects uint8 for 0-255 data
        target = target.numpy()
        if crop_win is None:
            crop_win = plt.imshow(input)
        else:
            crop_win.set_data(input)
        plt.pause(0.5)
        plt.draw()
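Example #2 picks up after the accumulation loop has already filled `mean`, `std`, and `self.total`. A minimal sketch of the kind of loop that typically precedes it, summing per-channel statistics image by image (`image_paths` and `load_image` are hypothetical stand-ins for the dataset's own members):

import torch

def compute_meanstd(image_paths, load_image):
    # Sum per-channel mean/std over the training images, then average;
    # mirrors the truncated code above `mean /= self.total`.
    mean, std = torch.zeros(3), torch.zeros(3)
    for path in image_paths:
        img = load_image(path)  # assumed to return a 3xHxW float tensor
        flat = img.view(img.size(0), -1)
        mean += flat.mean(1)
        std += flat.std(1)
    total = len(image_paths)
    return mean / total, std / total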
Example #3
import os

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.tensorboard import SummaryWriter  # older setups use `from tensorboardX import SummaryWriter`

# Model, Dataset, argparser, l2_loss, train, validate, save_checkpoint and
# check_log_folders are assumed to be defined elsewhere in this project.


def main():
    args = argparser()
    args.log_dir = os.path.join(args.log_path, 'log')
    args.checkpoint_dir = os.path.join(args.log_path, 'checkpoint')
    args.model_dir = os.path.join(args.log_path, 'model')
    check_log_folders(args)

    model = Model()

    # Save a TensorBoard graph visualization of the model.
    model_writer = SummaryWriter(os.path.join(args.log_path, 'model'))
    inputs = torch.rand(8, 10, 2)  # dummy batch matching the model's expected input shape
    model_writer.add_graph(model, inputs)
    model_writer.close()

    if args.device_ids != '' and torch.cuda.device_count() > 1:
        args.device_ids = [int(x) for x in args.device_ids.split(",")]
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        print(args.device_ids)
        torch.cuda.set_device(args.device_ids[0])
        model = nn.DataParallel(model, device_ids=args.device_ids).cuda()
    else:
        args.device = torch.device(f"cuda:{args.device}" if torch.cuda.is_available() else "cpu")
        model.to(args.device)

    train_dataset = Dataset()
    val_dataset = Dataset()

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True,
        drop_last=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    optimizer = optim.SGD(
        model.parameters(),
        args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay)
    criterion = l2_loss

    scheduler = lr_scheduler.CosineAnnealingLR(
        optimizer, args.epochs, eta_min=0, last_epoch=-1
    )
    tf_writer = SummaryWriter(log_dir=os.path.join(args.log_path, 'log'))

    for epoch in range(args.epochs):
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args, tf_writer)

        # evaluate on validation set
        if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
            validate(val_loader, model, criterion, epoch*len(train_loader), args, tf_writer)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, epoch, args)

        scheduler.step()
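`l2_loss` is passed as the criterion but never defined in this example. A minimal sketch, assuming it is a plain mean-squared-error over predictions and targets (the name comes from the example; the body is an assumption):

import torch.nn.functional as F

def l2_loss(pred, target):
    # Plain MSE, averaged over all elements.
    return F.mse_loss(pred, target)

With this definition, `train` and `validate` can call `criterion(output, target)` on any pair of same-shaped tensors.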