Example #1
def main():
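  # assumes module-level imports from the original file: `data` (presumably
  # torch.utils.data), `osp` (os.path) and the RobotCar dataset class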
  from common.vis_utils import show_batch
  from torchvision.utils import make_grid
  import torchvision.transforms as transforms
  import matplotlib.pyplot as plt
  from mapnet.config import MapNetConfigurator
  from common.utils import get_configuration
  scene = 'full'
  num_workers = 4
  transform = transforms.Compose([
    transforms.Scale(256),
    # transforms.CenterCrop(224),
    transforms.ToTensor()])

  config = get_configuration(MapNetConfigurator())
  config.uniform_sampling = False
  config.mask_sampling = False
  config.mask_image = True

  data_path = "/home/drinkingcoder/Dataset/robotcar/"
  dset = RobotCar(scene, data_path, train=True, real=False,
          transform=transform, data_dir=osp.join("..", "data", "RobotCar"),
          config=config)
  print('Loaded RobotCar scene {:s}, length = {:d}'.format(scene, len(dset)))

  # plot the poses
  plt.figure()
  plt.plot(dset.poses[:, 0], dset.poses[:, 1])
  plt.show()

  print(len(dset))
  data_loader = data.DataLoader(dset, batch_size=10, shuffle=True,
                                num_workers=num_workers)
  #plt.imshow(dset.muimg.data.numpy()[0, :, :])
  #plt.imshow(dset[0].data.numpy())

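  # show the first N minibatches as image grids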
  batch_count = 0
  N = 2
  for batch in data_loader:
    print('Minibatch {:d}'.format(batch_count))
    show_batch(make_grid(batch[0], nrow=5, padding=25, normalize=True))

    batch_count += 1
    if batch_count >= N:
      break
Example #2
def main():
    """
    visualizes the dataset
    """
    from common.vis_utils import show_batch, show_stereo_batch
    from torchvision.utils import make_grid
    import torchvision.transforms as transforms
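    # assumes module-level imports from the original file: `data` (presumably
    # torch.utils.data) and the SevenScenes dataset class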
    seq = 'chess'
    mode = 2
    num_workers = 6
    transform = transforms.Compose([
        transforms.Scale(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    dset = SevenScenes(seq,
                       '../data/deepslam_data/7Scenes',
                       True,
                       transform,
                       mode=mode)
    print('Loaded 7Scenes sequence {:s}, length = {:d}'.format(seq, len(dset)))

    data_loader = data.DataLoader(dset,
                                  batch_size=10,
                                  shuffle=True,
                                  num_workers=num_workers)

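    # mode < 2: single-image batches; mode == 2: the two input modalities are
    # shown side by side as a stereo-style pair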
    batch_count = 0
    N = 2
    for batch in data_loader:
        print('Minibatch {:d}'.format(batch_count))
        if mode < 2:
            show_batch(make_grid(batch[0], nrow=1, padding=25, normalize=True))
        elif mode == 2:
            lb = make_grid(batch[0][0], nrow=1, padding=25, normalize=True)
            rb = make_grid(batch[0][1], nrow=1, padding=25, normalize=True)
            show_stereo_batch(lb, rb)

        batch_count += 1
        if batch_count >= N:
            break
Example #3
def main():
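    # assumes module-level imports from the original file: `data` (presumably
    # torch.utils.data), `osp` (os.path) and the Cambridge dataset class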
    from common.vis_utils import show_batch
    from torchvision.utils import make_grid
    import torchvision.transforms as transforms
    import matplotlib.pyplot as plt
    from mapnet.config import MapNetConfigurator
    from common.utils import get_configuration
    scene = 'KingsCollege'
    num_workers = 4
    transform = transforms.Compose([
        transforms.Scale(256),
        # transforms.CenterCrop(224),
        transforms.ToTensor()])

    config = get_configuration(MapNetConfigurator())
    data_path = "/home/drinkingcoder/Dataset/Cambridge/"
    dset = Cambridge(scene, data_path, train=True, real=False,
                    transform=transform, data_dir=osp.join("..", "data", "Cambridge", "KingsCollege"),
                    config=config)
    print('Loaded Cambridge scene {:s}, length = {:d}'.format(scene, len(dset)))

    # plot the poses
    plt.figure()
    plt.scatter(dset.poses[:, 0], dset.poses[:, 1])
    plt.show()
    plt.figure()
    plt.scatter(dset.poses[:, 3], dset.poses[:, 5])
    plt.show()

    print(len(dset))
    data_loader = data.DataLoader(dset, batch_size=10, shuffle=True,
                                  num_workers=num_workers)

    batch_count = 0
    N = 2
    for batch in data_loader:
        print('Minibatch {:d}'.format(batch_count))
        show_batch(make_grid(batch[0], nrow=5, padding=25, normalize=True))

        batch_count += 1
        if batch_count >= N:
            break
Example #4
def main():
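    # `data` (presumably torch.utils.data), `osp` (os.path) and the RobotCar
    # dataset class are expected to come from module-level imports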
    from common.vis_utils import show_batch
    from torchvision.utils import make_grid
    import torchvision.transforms as transforms
    import matplotlib.pyplot as plt
    scene = 'loop'
    num_workers = 4
    transform = transforms.Compose([
        transforms.Scale(256),
        transforms.CenterCrop(224),
        transforms.ToTensor()
    ])

    data_path = osp.join('..', 'data', 'deepslam_data', 'RobotCar')
    dset = RobotCar(scene,
                    data_path,
                    train=True,
                    real=True,
                    transform=transform)
    print('Loaded RobotCar scene {:s}, length = {:d}'.format(scene, len(dset)))

    # plot the poses
    plt.figure()
    plt.plot(dset.poses[:, 0], dset.poses[:, 1])
    plt.show()

    data_loader = data.DataLoader(dset,
                                  batch_size=10,
                                  shuffle=True,
                                  num_workers=num_workers)

    batch_count = 0
    N = 2
    for batch in data_loader:
        print('Minibatch {:d}'.format(batch_count))
        show_batch(make_grid(batch[0], nrow=5, padding=25, normalize=True))

        batch_count += 1
        if batch_count >= N:
            break
Example #5
def main():
    """
    visualizes the dataset
    """
    from common.vis_utils import show_batch, show_stereo_batch
    from torchvision.utils import make_grid
    import torchvision.transforms as transforms
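    # `sys`, `data` (presumably torch.utils.data) and the DeepLoc dataset class
    # are assumed to be imported at module level; the input mode is taken from
    # the command line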
    mode = int(sys.argv[1])
    num_workers = 6
    transform = transforms.Compose([
        transforms.Scale(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    concatenate_inputs = (mode == 3)

    # note: `dual_output` is not defined in this function; it is assumed to be
    # a module-level flag in the original source
    if mode == 0:
        input_types = 'left'
    elif mode == 1 and not dual_output:
        input_types = 'depth'
    elif mode == 2 and not dual_output:
        input_types = ['left', 'depth']
    elif mode == 3:
        input_types = ['left', 'label_colorized']

    dset = DeepLoc('../data/deepslam_data/DeepLoc',
                   True,
                   transform,
                   input_types=input_types,
                   output_types=[],
                   concatenate_inputs=concatenate_inputs)

    print('Loaded DeepLoc, length = {:d}'.format(len(dset)))

    data_loader = data.DataLoader(dset,
                                  batch_size=10,
                                  shuffle=True,
                                  num_workers=num_workers)

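    # mode < 2: single-input batches; mode == 2: the two inputs are shown side
    # by side; mode == 3: the concatenated RGB + colorized-label channels are
    # split back into two grids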
    batch_count = 0
    N = 2
    for batch in data_loader:
        print('Minibatch {:d}'.format(batch_count))
        if mode < 2:
            show_batch(make_grid(batch[0], nrow=1, padding=25, normalize=True))
        elif mode == 2:
            lb = make_grid(batch[0][0], nrow=1, padding=25, normalize=True)
            rb = make_grid(batch[0][1], nrow=1, padding=25, normalize=True)
            show_stereo_batch(lb, rb)
        elif mode == 3:
            print(len(batch))
            for elm in batch:
                print(elm.shape)
            lb = make_grid(batch[0][0][:3], nrow=1, padding=25, normalize=True)
            rb = make_grid(batch[0][0][3:], nrow=1, padding=25, normalize=True)
            show_stereo_batch(lb, rb)

        batch_count += 1
        if batch_count >= N:
            break