Example No. 1
def load_building(dataset_name, building_name):
    dataset = factory.get_dataset(dataset_name)

    navtask = get_args()
    cp = navtask.camera_param
    rgb_shader, d_shader = renderer.get_shaders(cp.modalities)
    r_obj = SwiftshaderRenderer()
    r_obj.init_display(width=cp.width,
                       height=cp.height,
                       fov=cp.fov,
                       z_near=cp.z_near,
                       z_far=cp.z_far,
                       rgb_shader=rgb_shader,
                       d_shader=d_shader)
    r_obj.clear_scene()
    b = VisualNavigationEnv(robot=navtask.robot,
                            env=navtask.env,
                            task_params=navtask.task_params,
                            building_name=building_name,
                            flip=False,
                            logdir=None,
                            building_loader=dataset,
                            r_obj=r_obj)
    b.load_building_into_scene()
    b.set_building_visibility(False)
    return b
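A minimal usage sketch (the 'sbpd' dataset name matches Example No. 2 below; the building name is a placeholder, and the module imports and data on disk are assumed to be available):

b = load_building('sbpd', 'area3')  # 'area3' is a placeholder building name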
Example No. 2
def _write_map_files(b_in, b_out, transform):
  cats = get_categories()

  env = utils.Foo(padding=10, resolution=5, num_point_threshold=2,
                  valid_min=-10, valid_max=200, n_samples_per_face=200)
  robot = utils.Foo(radius=15, base=10, height=140, sensor_height=120,
                    camera_elevation_degree=-15)

  building_loader = factory.get_dataset('sbpd')
  for flip in [False, True]:
    b = nav_env.Building(b_out, robot, env, flip=flip,
                         building_loader=building_loader)
    logging.info("building_in: %s, building_out: %s, transform: %d", b_in,
                 b_out, transform)
    maps = _get_semantic_maps(b_in, transform, b.map, flip, cats)
    maps = np.transpose(np.array(maps), axes=[1,2,0])

    # Write the semantic maps to the cache file.
    file_name = '{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.pkl'
    file_name = file_name.format(b.building_name, b.map.size[0], b.map.size[1],
                                 b.map.origin[0], b.map.origin[1],
                                 b.map.resolution, flip)
    out_file = os.path.join(DATA_DIR, 'processing', 'class-maps', file_name)
    logging.info('Writing semantic maps to %s.', out_file)
    save_variables(out_file, [maps, cats], ['maps', 'cats'], overwrite=True)
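For reference, a sketch of the cache file name that the format string above produces (the values are illustrative only):

name = '{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.pkl'.format('area3', 300, 300, 0, 0, 5, False)
# -> 'area3_300_300_0_0_5_0.pkl'  (the boolean flip flag renders as 0 or 1 under {:d})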
Example No. 4
def accuracyVsIterations(network_name, dataset_name):
    # Fractions of parameters kept retrainable; 8.0 through 15.0 can be appended to sweep further.
    percentRetrainable = [1.0, 2.0, 4.0, 5.0, 6.0, 7.0]
    stddevVar = 0.8
    num_levels = 32
    tf.reset_default_graph()
    dataset = get_dataset(dataset_name)
    network = get_network(network_name)
    max_iters = iters[dataset_name]
    output_dir = get_output_dir()
    path = 'experiments/out/accuracyVsIterations/' + network_name + '_' + dataset_name
    # Train once per retrainable fraction and log accuracy against iterations to a CSV file.
    for per in percentRetrainable:
        path1 = path + str(int(per * 10)) + '.csv'
        acc, iterations = train_net_v1(network, dataset, output_dir, max_iters,
                                       stddevVar, per, dataset_name,
                                       num_levels)
        with open(path1, 'wb+') as fid:
            for i in range(len(acc)):
                fid.write(str(iterations[i]) + ',' + str(acc[i]) + '\n')
Example No. 5
def createModelWithVariation(percentRetrainable,
                             stddevVar,
                             network_name,
                             dataset_name,
                             num_levels=32):
    tf.reset_default_graph()
    dataset = get_dataset(dataset_name)
    network = get_network(network_name)
    max_iters = iters[dataset_name]
    output_dir = get_output_dir()
    acc = train_net(network, dataset, output_dir, max_iters, stddevVar,
                    percentRetrainable, dataset_name, num_levels)
    return acc
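A usage sketch (the network and dataset names are placeholders; get_dataset, get_network, get_output_dir, iters, and train_net must be importable as in the snippet above):

acc = createModelWithVariation(percentRetrainable=5.0, stddevVar=0.8,
                               network_name='lenet', dataset_name='mnist')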
Example No. 6
def readVerifyTopN(network_name, dataset_name):
    stddevVar = 0.8
    num_levels = 32
    tf.reset_default_graph()
    dataset = get_dataset(dataset_name)
    network = get_network(network_name)
    output_dir = get_output_dir()
    # Percentages of parameters to read-and-verify: 1% through 99% in 1% steps.
    topNs = [float(n) for n in range(1, 100)]
    accuracy = []
    p = 'experiments/out/readVerifyTopN/' + network_name + '_' + dataset_name + '.csv'
    with open(p, 'wb+') as fid:
        for topN in topNs:
            print 'Doing read and verify for top {} of parameters'.format(topN)
            print 'readVerifyTopN | stddev', str(stddevVar)
            acc = verifyTopN(network, dataset, output_dir, stddevVar,
                             dataset_name, num_levels, topN)
            fid.write(str(acc) + '\n')
            accuracy.append(acc)

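    # writeToPickle is assumed to be a module-level flag defined elsewhere in this script.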
    if writeToPickle:
        result = []
        metaData = 'accuracy, read Verified %, stddevVar,' + network_name + ',' + dataset_name + ', num_levels'
        result.append(metaData)
        result.append(accuracy)
        result.append(topNs)
        result.append(stddevVar)
        result.append(num_levels)
        path = 'experiments/out/readVerifyTopN/' + network_name + '_' + dataset_name + '_' + str(
            num_levels) + '_.pkl'
        with open(path, 'wb') as fid:
            cPickle.dump(result, fid, cPickle.HIGHEST_PROTOCOL)

    plt.plot(topNs, accuracy, 'xb-')
    plt.ylabel('Accuracy')
    plt.xlabel('% of parameters read_verified')
    plt.show()
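A sketch of reading the saved pickle back (Python 2, matching the cPickle usage above; path is the .pkl path built in the snippet):

with open(path, 'rb') as fid:
    metaData, accuracy, topNs, stddevVar, num_levels = cPickle.load(fid)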
Example No. 7
def load_building(dataset_name, building_name):
  dataset = factory.get_dataset(dataset_name)

  navtask = get_args()
  cp = navtask.camera_param
  rgb_shader, d_shader = renderer.get_shaders(cp.modalities)
  r_obj = SwiftshaderRenderer()
  r_obj.init_display(width=cp.width, height=cp.height,
                     fov=cp.fov, z_near=cp.z_near, z_far=cp.z_far,
                     rgb_shader=rgb_shader, d_shader=d_shader)
  r_obj.clear_scene()
  b = VisualNavigationEnv(robot=navtask.robot, env=navtask.env,
                          task_params=navtask.task_params,
                          building_name=building_name, flip=False,
                          logdir=None, building_loader=dataset,
                          r_obj=r_obj)
  b.load_building_into_scene()
  b.set_building_visibility(False)
  return b
Example No. 8
def process_navtask_str(navtask_str):
    navtask = nec.nav_env_base_config()

    # Clobber with overrides from strings.
    navtask_vars = get_navtask_vars(navtask_str)

    navtask.task_params.n_ori = int(navtask_vars.n_ori)
    navtask.task_params.max_dist = int(navtask_vars.max_dist)
    navtask.task_params.num_steps = int(navtask_vars.num_steps)
    navtask.task_params.step_size = int(navtask_vars.step_size)
    navtask.task_params.data_augment.delta_xy = int(
        navtask_vars.step_size) / 2.
    n_aux_views_each = int(navtask_vars.aux_views[2])
    aux_delta_thetas = np.concatenate(
        (np.arange(n_aux_views_each) + 1, -1 - np.arange(n_aux_views_each)))
    aux_delta_thetas = aux_delta_thetas * np.deg2rad(navtask.camera_param.fov)
    navtask.task_params.aux_delta_thetas = aux_delta_thetas

    if navtask_vars.data_aug == 'aug':
        navtask.task_params.data_augment.structured = False
    elif navtask_vars.data_aug == 'straug':
        navtask.task_params.data_augment.structured = True
    else:
        logging.fatal('Unknown navtask_vars.data_aug %s.',
                      navtask_vars.data_aug)
        assert (False)

    navtask.task_params.num_history_frames = int(navtask_vars.history[1:])
    navtask.task_params.n_views = 1 + navtask.task_params.num_history_frames

    navtask.task_params.goal_channels = int(navtask_vars.n_ori)

    if navtask_vars.task == 'hard':
        navtask.task_params.type = 'rng_rejection_sampling_many'
        navtask.task_params.rejection_sampling_M = 2000
        navtask.task_params.min_dist = 10
    elif navtask_vars.task == 'r2r':
        navtask.task_params.type = 'room_to_room_many'
    elif navtask_vars.task == 'ST':
        # Semantic task at hand.
        navtask.task_params.goal_channels = \
            len(navtask.task_params.semantic_task.class_map_names)
        navtask.task_params.rel_goal_loc_dim = \
            len(navtask.task_params.semantic_task.class_map_names)
        navtask.task_params.type = 'to_nearest_obj_acc'
    else:
        logging.fatal('navtask_vars.task should be one of hard, r2r, or ST.')
        assert False

    if navtask_vars.modality == 'rgb':
        navtask.camera_param.modalities = ['rgb']
        navtask.camera_param.img_channels = 3
    elif navtask_vars.modality == 'd':
        navtask.camera_param.modalities = ['depth']
        navtask.camera_param.img_channels = 2

    navtask.task_params.img_height = navtask.camera_param.height
    navtask.task_params.img_width = navtask.camera_param.width
    navtask.task_params.modalities = navtask.camera_param.modalities
    navtask.task_params.img_channels = navtask.camera_param.img_channels
    navtask.task_params.img_fov = navtask.camera_param.fov

    navtask.dataset = factory.get_dataset(navtask_vars.dataset_name)
    return navtask
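For reference, the fields that get_navtask_vars must supply, as consumed above: n_ori, max_dist, num_steps, step_size, aux_views, data_aug ('aug' or 'straug'), history, task ('hard', 'r2r', or 'ST'), modality ('rgb' or 'd'), and dataset_name; the exact encoding of navtask_str is defined elsewhere in the project.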
Example No. 9
    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)

    print('Using config:')
    pprint.pprint(cfg)

    if not args.randomize:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(cfg.RNG_SEED)

    # prepare dataset
    cfg.MODE = 'TRAIN'
    dataset = get_dataset(args.dataset_name)
    worker_init_fn = dataset.worker_init_fn if hasattr(dataset, 'worker_init_fn') else None
    num_workers = 4
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=cfg.TRAIN.IMS_PER_BATCH, shuffle=True, 
        num_workers=num_workers, worker_init_fn=worker_init_fn)
    print('Use dataset `{:s}` for training'.format(dataset.name))

    # overwrite intrinsics
    if len(cfg.INTRINSICS) > 0:
        K = np.array(cfg.INTRINSICS).reshape(3, 3)
        dataset._intrinsic_matrix = K
        print(dataset._intrinsic_matrix)

    output_dir = get_output_dir(dataset, None)
    print('Output will be saved to `{:s}`'.format(output_dir))
    if not os.path.exists(output_dir):
Example No. 10
        torch.manual_seed(cfg.RNG_SEED)

    # device
    cfg.gpu_id = 0
    cfg.device = torch.device('cuda:{:d}'.format(cfg.gpu_id))
    print('GPU device {:d}'.format(args.gpu_id))
    
    cfg.classes = cfg.TEST.CLASSES
    # prepare dataset
    if cfg.TEST.VISUALIZE:
        shuffle = True
    else:
        shuffle = False 
    cfg.MODE = 'TEST'
    
    dataset = get_dataset(args.dataset_name)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=cfg.TEST.IMS_PER_BATCH, shuffle=shuffle, num_workers=0)
    print('Use dataset `{:s}` for testing'.format(dataset.name))

    # background dataset
    if cfg.TEST.SYNTHESIZE:
        if cfg.TRAIN.SYN_BACKGROUND_SPECIFIC:
            background_dataset = get_dataset(args.dataset_background_name)
        else:
            background_dataset = get_dataset('background_coco')
        background_loader = torch.utils.data.DataLoader(background_dataset, batch_size=cfg.TRAIN.IMS_PER_BATCH,
                                                        shuffle=True, num_workers=4)
    else:
        background_loader = None

    cfg.TEST.MODEL = args.pretrained.split('/')[-1]
Example No. 11
if __name__ == '__main__':

    args = parse_args()

    root = '../data/YCB_Video/data/0009/'
    image_ids = [1]
    num_images = 1
    angle = 45
    height = 480
    width = 640

    cfg.TRAIN.CLASSES = [10, 14, 15]
    cfg.MODE = 'TEST'
    cfg.TEST.SYNTHESIZE = False
    dataset = get_dataset('ycb_video_train')

    # prepare renderer
    print('loading 3D models')
    cfg.renderer = YCBRenderer(width=cfg.TRAIN.SYN_WIDTH,
                               height=cfg.TRAIN.SYN_HEIGHT,
                               render_marker=False,
                               gpu_id=args.gpu_id)
    cfg.renderer.load_objects(dataset.model_mesh_paths_target,
                              dataset.model_texture_paths_target,
                              dataset.model_colors_target)
    print(dataset.model_mesh_paths_target)
    cfg.renderer.set_camera_default()

    for i in image_ids:
Example No. 12
def crossBarTests(dataset_name, max_iters, network_name, output_dir, stddev=0.1, percentRetrain=5.0):
    tf.reset_default_graph()
    network = get_network(network_name)
    dataset = get_dataset(dataset_name)
    acc = train_net(network, dataset, output_dir, max_iters, stddev, percentRetrain, dataset_name)
    return acc
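A usage sketch with placeholder arguments (the dataset name, iteration count, and network name are illustrative):

acc = crossBarTests('mnist', 10000, 'lenet', get_output_dir(), stddev=0.1, percentRetrain=5.0)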