# Example 1
# Read the pose mean and stdev used to un-normalize network predictions.
pose_stats_file = osp.join(opt.data_dir, opt.dataset, opt.scene, 'stats.txt')
pose_m, pose_s = np.loadtxt(pose_stats_file)  # mean and stdev

# Load the evaluation dataset (train=False throughout).
kwargs = dict(scene=opt.scene,
              data_path=opt.data_dir,
              train=False,
              transform=data_transform,
              target_transform=target_transform,
              seed=opt.seed)
if opt.model == 'AtLoc':
    # Single-image datasets, selected by name.
    if opt.dataset == '7Scenes':
        data_set = SevenScenes(**kwargs)
    elif opt.dataset == 'RobotCar':
        data_set = RobotCar(**kwargs)
    elif opt.dataset == 'Topo':
        data_set = Topo(**kwargs)
    elif opt.dataset == 'comballaz':
        data_set = Topo2(**kwargs)
    elif opt.dataset == 'EPFL':
        data_set = Topo3(**kwargs)
    else:
        raise NotImplementedError
elif opt.model == 'AtLocPlus':
    # Multi-frame (sequence) variant: extend the base kwargs with the
    # sequence-sampling parameters and let MF pick the underlying dataset.
    kwargs = dict(kwargs,
                  dataset=opt.dataset,
                  skip=opt.skip,
                  steps=opt.steps,
                  variable_skip=opt.variable_skip)
    data_set = MF(real=opt.real, **kwargs)
else:
    # BUG FIX: an unrecognized model previously fell through silently,
    # leaving `data_set` undefined and causing a NameError downstream.
    raise NotImplementedError
opt = Options().parse()

# Announce which split is being processed and with how many worker threads.
msg = 'processing VAL data using {:d} cores' if opt.val else 'processing TRAIN data using {:d} cores'
print(msg.format(opt.nThreads))

# Images are resized and handed downstream as raw numpy arrays
# (no tensor conversion here).
transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize(opt.cropsize),
    transforms.Lambda(lambda im: np.asarray(im)),
])

# RobotCar loader over the requested split; undistort raw images on read.
dset = RobotCar(scene=opt.scene, data_path=opt.data_dir,
                train=not opt.val, transform=transform, undistort=True)
loader = DataLoader(dset, batch_size=opt.batchsize, num_workers=opt.nThreads)

# Work out which sequence list drives the output filenames.
base_dir = osp.join(opt.data_dir, opt.dataset, opt.scene)
split_file = 'test_split.txt' if opt.val else 'train_split.txt'
split_filename = osp.join(base_dir, split_file)
with open(split_filename, 'r') as f:
    # Sequence names, one per line; '#'-prefixed lines are comments.
    seqs = [line.rstrip() for line in f if not line.startswith('#')]

im_filenames = []
for seq in seqs:
# Example 3
                           'pose_stats.txt')
pose_m, pose_s = np.loadtxt(pose_stats_file)  # 获取pose统计文件中的均值和标准差

# Load the test dataset (train=False throughout).
kwargs = dict(scene=opt.scene,
              data_path=opt.data_dir,
              train=False,
              transform=data_transform,
              target_transform=target_transform,
              seed=opt.seed)

if opt.model == 'AtLoc':
    if opt.dataset == '7Scenes':
        data_set = SevenScenes(**kwargs)  # pre-processed 7Scenes data
    elif opt.dataset == 'RobotCar':
        data_set = RobotCar(**kwargs)  # pre-processed RobotCar data
    else:
        # BUG FIX: an unsupported dataset previously left `data_set`
        # undefined (only an unknown model raised), causing a NameError
        # at len(data_set) below.
        raise NotImplementedError
else:
    raise NotImplementedError

L = len(data_set)

# num_workers: number of worker subprocesses (0 = load in the main process).
# pin_memory: copy tensors into CUDA pinned memory, which speeds up
# host-to-GPU transfers; only requested when CUDA is available.
kwargs = {'num_workers': opt.nThreads, 'pin_memory': True} if cuda else {}

# Dataset yields one sample per __getitem__; DataLoader wraps it with
# batching. Evaluate sample-by-sample, in dataset order (no shuffle).
loader = DataLoader(data_set, batch_size=1, shuffle=False, **kwargs)

pred_poses = np.zeros((L, 7))  # save all predicted poses
# Example 4
from torch.utils.data import DataLoader
from tools.options import Options

opt = Options().parse()

# Resize then random-crop to a square of side opt.cropsize, and convert
# to a float tensor in [0, 1].
data_transform = transforms.Compose([
    transforms.Resize(opt.cropsize),
    transforms.RandomCrop(opt.cropsize),
    transforms.ToTensor()])

# dataset loader: dispatch the dataset name to its loader class.
kwargs = dict(scene=opt.scene, data_path=opt.data_dir, train=True, real=False, transform=data_transform)
dataset_classes = {'7Scenes': SevenScenes, 'RobotCar': RobotCar}
if opt.dataset not in dataset_classes:
    raise NotImplementedError
dset = dataset_classes[opt.dataset](**kwargs)

# Accumulate per-pixel sums and squared sums over the whole training set;
# downstream code derives the channel-wise mean/std images from these.
loader = DataLoader(dset, batch_size=opt.batch_size, num_workers=opt.nThreads)
stat_shape = (3, opt.cropsize, opt.cropsize)
acc = np.zeros(stat_shape)
sq_acc = np.zeros(stat_shape)
for i, (imgs, _) in enumerate(loader):
    batch = imgs.numpy()
    acc += batch.sum(axis=0)
    sq_acc += np.square(batch).sum(axis=0)

    # Periodic progress report.
    if i % 50 == 0:
        print('Accumulated {:d} / {:d}'.format(i * opt.batch_size, len(dset)))
# Example 5
tforms.append(transforms.Normalize(mean=stats[0], std=np.sqrt(stats[1])))
data_transform = transforms.Compose(tforms)
target_transform = transforms.Lambda(lambda x: torch.from_numpy(x).float())

# Load the train and val splits of the chosen dataset; both share every
# keyword argument except the `train` flag.
kwargs = dict(scene=opt.scene,
              data_path=opt.data_dir,
              transform=data_transform,
              target_transform=target_transform,
              seed=opt.seed)
if opt.model == 'AtLoc':
    # Map each dataset name to its loader class once, instead of an
    # elif ladder that duplicates the train/val construction per branch.
    dataset_by_name = {
        '7Scenes': SevenScenes,
        'RobotCar': RobotCar,
        'Topo': Topo,
        'comballaz': Topo2,
        'EPFL': Topo3,
    }
    if opt.dataset not in dataset_by_name:
        raise NotImplementedError
    dataset_cls = dataset_by_name[opt.dataset]
    train_set = dataset_cls(train=True, **kwargs)
    val_set = dataset_cls(train=False, **kwargs)
elif opt.model == 'AtLocPlus':
    kwargs = dict(kwargs,