Code Example #1
# datasets
data_dir = osp.join('..', 'data', 'deepslam_data', args.dataset)
kwargs = dict(dataset=args.dataset,
              scene=args.scene,
              data_path=data_dir,
              transform=data_transform,
              target_transform=target_transform,
              reduce=args.reduce,
              seed=seed)

if args.model == 'posenet':
    train_set = Env(train=True, **kwargs)
    val_set = Env(train=False, **kwargs)
elif args.model.find('mapnet') >= 0:
    kwargs = dict(kwargs, skip=skip, steps=steps)
    train_set = MF(train=True, **kwargs)
    val_set = MF(train=False, **kwargs)
else:
    raise NotImplementedError

# trainer
config_name = args.config_file.split('/')[-1]
config_name = config_name.split('.')[0]
if args.reduce is None:
    experiment_name = '{:s}_{:s}_{:s}_{:s}'.format(args.dataset, args.scene,
                                                   args.model, config_name)
else:
    experiment_name = '{:s}_{:s}_{:s}_{:s}_reduce'.format(
        args.dataset, args.scene, args.model, config_name)
trainer = Trainer(model,
                  optimizer,
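
A side note on the dict(kwargs, skip=skip, steps=steps) call in the mapnet branch above: dict(mapping, key=value, ...) returns a new dictionary containing the original entries plus the extra keyword arguments, so the shared kwargs are extended without being mutated. A minimal illustration with made-up values:

# dict(mapping, key=value, ...) copies the mapping and adds/overrides keys;
# the original dictionary is left unchanged.
base = dict(dataset='7Scenes', scene='chess', seed=7)
extended = dict(base, skip=10, steps=3)

print(sorted(extended))  # ['dataset', 'scene', 'seed', 'skip', 'steps']
print('skip' in base)    # False: base is not modified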
Code Example #2
File: eval.py  Project: zjudzl/geomapnet
kwargs = dict(scene=args.scene,
              data_path=data_dir,
              train=train,
              transform=data_transform,
              target_transform=target_transform,
              seed=seed)
if (args.model.find('mapnet') >= 0) or args.pose_graph:
    if args.pose_graph:
        assert real
        kwargs = dict(kwargs, vo_lib=vo_lib)
    vo_func = calc_vos_safe_fc if fc_vos else calc_vos_safe
    data_set = MF(dataset=args.dataset,
                  steps=steps,
                  skip=skip,
                  real=real,
                  variable_skip=variable_skip,
                  include_vos=args.pose_graph,
                  vo_func=vo_func,
                  no_duplicates=False,
                  **kwargs)
    L = len(data_set.dset)
elif args.dataset == '7Scenes':
    from dataset_loaders.seven_scenes import SevenScenes
    data_set = SevenScenes(**kwargs)
    L = len(data_set)
elif args.dataset == 'RobotCar':
    from dataset_loaders.robotcar import RobotCar
    data_set = RobotCar(**kwargs)
    L = len(data_set)
else:
    raise NotImplementedError
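
After this block, L holds the number of frames in the evaluation set. The excerpt stops before L is used; a plausible continuation, shown purely as an assumption rather than code from the project, is to pre-allocate arrays for predicted and ground-truth poses:

import numpy as np

# Hypothetical continuation (not in the excerpt): one row per evaluation frame,
# 7 values per pose (3 for translation, 4 for the orientation quaternion).
pred_poses = np.zeros((L, 7))
targ_poses = np.zeros((L, 7))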
Code Example #3
File: plot_activations.py  Project: a1302z/geomapnet
        input_types=input_types,
        output_types=output_types,
        train_split=train_split,
        #concatenate_inputs=True
    )
    if args.dataset == 'AachenDayNight':
        kwargs['night_augmentation'] = None
        kwargs['resize'] = resize
if (args.model.find('mapnet') >= 0) or (args.model.find('semantic') >= 0) or (
        args.model.find('multitask') >= 0):
    vo_func = calc_vos_safe_fc if fc_vos else calc_vos_safe
    data_set = MF(dataset=args.dataset,
                  steps=steps,
                  skip=skip,
                  real=real,
                  variable_skip=variable_skip,
                  include_vos=False,
                  vo_func=vo_func,
                  no_duplicates=False,
                  **kwargs)
    L = len(data_set.dset)
if args.dataset == '7Scenes':
    from dataset_loaders.seven_scenes import SevenScenes
    data_set = SevenScenes(**kwargs)
elif args.dataset == 'DeepLoc':
    from dataset_loaders.deeploc import DeepLoc
    data_set = DeepLoc(**kwargs)
elif args.dataset == 'RobotCar':
    from dataset_loaders.robotcar import RobotCar
    data_set = RobotCar(**kwargs)
elif args.dataset == 'AachenDayNight':
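
Two small idioms recur in these excerpts: each dataset loader is imported lazily inside the branch that needs it, and model names are matched with str.find, which returns -1 when the substring is absent, so find(...) >= 0 is simply a substring test. The strings below are only illustrative:

model = 'mapnet++'

# str.find returns the index of the first match, or -1 if there is none.
print(model.find('mapnet') >= 0)  # True
print('mapnet' in model)          # True: the more idiomatic substring check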
Code Example #4
        val_set = RobotCar(train=False, **kwargs)
    else:
        raise NotImplementedError
elif args.model.find('mapnet') >= 0:
    kwargs = dict(kwargs,
                  dataset=args.dataset,
                  skip=skip,
                  steps=steps,
                  variable_skip=variable_skip)
    if args.model.find('++') >= 0:
        train_set = MFOnline(vo_lib=vo_lib,
                             gps_mode=(vo_lib == 'gps'),
                             **kwargs)
        val_set = None
    else:
        train_set = MF(train=True, real=real, **kwargs)
        val_set = MF(train=False, real=real, **kwargs)
else:
    raise NotImplementedError

# trainer
config_name = args.config_file.split('/')[-1]
config_name = config_name.split('.')[0]
experiment_name = '{:s}_{:s}_{:s}_{:s}'.format(args.dataset, args.scene,
                                               args.model, config_name)
if args.learn_beta:
    experiment_name = '{:s}_learn_beta'.format(experiment_name)
if args.learn_gamma:
    experiment_name = '{:s}_learn_gamma'.format(experiment_name)
experiment_name += args.suffix
trainer = Trainer(model,
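
To make the naming scheme above concrete, here is the same logic traced with made-up argument values (the values are assumptions, not taken from the project):

# Hypothetical argument values, used only to trace the string formatting above.
dataset, scene, model_name, config_name = '7Scenes', 'chess', 'mapnet++', 'mapnet'
learn_beta, learn_gamma, suffix = True, True, '_run1'

experiment_name = '{:s}_{:s}_{:s}_{:s}'.format(dataset, scene, model_name, config_name)
if learn_beta:
    experiment_name = '{:s}_learn_beta'.format(experiment_name)
if learn_gamma:
    experiment_name = '{:s}_learn_gamma'.format(experiment_name)
experiment_name += suffix

print(experiment_name)  # 7Scenes_chess_mapnet++_mapnet_learn_beta_learn_gamma_run1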
Code Example #5
File: eval.py  Project: yjinyyzyq/PoseEstimation
# read mean and stdev for un-normalizing predictions
pose_stats_file = osp.join(data_dir, args.scene, 'pose_stats.txt')
pose_m, pose_s = np.loadtxt(pose_stats_file)  # mean and stdev of the poses

# dataset
train = not args.val
if train:
    print('Running {:s} on Training Dataset'.format(args.model))
else:
    print('Running {:s} on Validation Dataset'.format(args.model))

data_dir = osp.join('..', 'data', 'deepslam_data', args.dataset)
kwargs = dict(dataset=args.dataset,
              scene=args.scene,
              data_path=data_dir,
              train=train,
              transform=data_transform,
              target_transform=target_transform,
              reduce=args.reduce,
              seed=seed)

if args.model.find('mapnet') >= 0:
    data_set = MF(steps=steps, skip=skip, **kwargs)
    L = len(data_set.dset)
else:
    data_set = Env(**kwargs)
    L = len(data_set)

# loader (batch_size MUST be 1)
batch_size = 1
assert batch_size == 1
loader = DataLoader(data_set,
                    batch_size=batch_size,
                    shuffle=False,
                    num_workers=10,
                    pin_memory=True)

# activate GPUs
CUDA = torch.cuda.is_available()
torch.manual_seed(seed)
if CUDA:
    torch.cuda.manual_seed(seed)
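
The pose_m and pose_s values loaded at the top of this example are the pose mean and standard deviation used to un-normalize predictions. The excerpt ends before they are applied; the usual un-normalization step, shown here as an assumption rather than code from the project, looks like:

import numpy as np

# Hypothetical un-normalization: map a normalized translation prediction back
# to metric coordinates with the stored statistics (x * stdev + mean).
pred_t_normalized = np.zeros(3)               # placeholder network output
pred_t = pred_t_normalized * pose_s + pose_m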