Example #1
                        num_heading_bin=DC.num_heading_bin,
                        num_size_cluster=DC.num_size_cluster,
                        mean_size_arr=DC.mean_size_arr).to(device)
    print('Constructed model.')

    # Load checkpoint
    optimizer = optim.Adam(net.parameters(), lr=0.001)
    checkpoint = torch.load(checkpoint_path)
    net.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    print("Loaded checkpoint %s (epoch: %d)" % (checkpoint_path, epoch))

    # Load and preprocess input point cloud
    net.eval()  # set model to eval mode (affects batch norm and dropout)
    point_cloud = read_ply(pc_path)
    pc = preprocess_point_cloud(point_cloud)
    print('Loaded point cloud data: %s' % (pc_path))

    # Model inference
    inputs = {'point_clouds': torch.from_numpy(pc).to(device)}
    tic = time.time()
    with torch.no_grad():
        end_points = net(inputs)
    toc = time.time()
    print('Inference time: %f' % (toc - tic))
    end_points['point_clouds'] = inputs['point_clouds']
    pred_map_cls = parse_predictions(end_points, eval_config_dict)
    print('Finished detection. %d objects detected.' % (len(pred_map_cls[0])))

    dump_dir = os.path.join(demo_dir, '%s_results' % (FLAGS.dataset))
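
# Hedged sketch (not part of the snippet above): what preprocess_point_cloud()
# is assumed to do here, modeled on VoteNet's demo.py -- keep XYZ, append a
# height-above-floor feature (the model's one extra input feature), subsample
# to a fixed point count, and add a batch dimension. num_point is illustrative.
import numpy as np

def preprocess_point_cloud(point_cloud, num_point=20000):
    point_cloud = point_cloud[:, 0:3]                         # keep XYZ only
    floor_height = np.percentile(point_cloud[:, 2], 0.99)     # approximate floor z
    height = point_cloud[:, 2] - floor_height                 # per-point height feature
    point_cloud = np.concatenate(
        [point_cloud, np.expand_dims(height, 1)], axis=1)     # (N, 4)
    choices = np.random.choice(len(point_cloud), num_point,
                               replace=len(point_cloud) < num_point)
    point_cloud = point_cloud[choices]                         # (num_point, 4)
    return np.expand_dims(point_cloud.astype(np.float32), 0)  # (1, num_point, 4)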
Example #2
    def __init__(self, point_path, patch_size=2048, patch_num=100, normalization=False, add_noise=False):
        print(point_path)
        self.name = os.path.splitext(os.path.basename(point_path))[0]
        self.data = pc_util.read_ply(point_path)
        self.data = self.data[:, 0:3]  # keep XYZ coordinates only

        ##### optional fixed test rotation about x/y/z (disabled) #####
        # angles = np.asarray([0.25 * np.pi, 0.25 * np.pi, 0.25 * np.pi])
        # Rx = np.array([[1, 0, 0],
        #                [0, np.cos(angles[0]), -np.sin(angles[0])],
        #                [0, np.sin(angles[0]), np.cos(angles[0])]])
        # Ry = np.array([[np.cos(angles[1]), 0, np.sin(angles[1])],
        #                [0, 1, 0],
        #                [-np.sin(angles[1]), 0, np.cos(angles[1])]])
        # Rz = np.array([[np.cos(angles[2]), -np.sin(angles[2]), 0],
        #                [np.sin(angles[2]), np.cos(angles[2]), 0],
        #                [0, 0, 1]])
        # rotation_matrix = np.dot(Rz, np.dot(Ry, Rx))
        # self.data = np.dot(self.data, rotation_matrix)
        #####
        
        self.clean_data = self.data

        # self.data = self.data[np.random.permutation(len(self.data))[:100000]]
        self.centroid = np.mean(self.data, axis=0, keepdims=True)
        self.furthest_distance = np.amax(np.sqrt(np.sum((self.data - self.centroid) ** 2, axis=-1)), keepdims=True)

        if normalization:
            print("Normalize the point data")
            self.data = (self.data-self.centroid)/self.furthest_distance
            self.clean_data = self.data
        if add_noise:
            print("Adding Gaussian noise to the points")
            # jitter in a batch of one, then drop the batch dimension again
            self.data = jitter_perturbation_point_cloud(
                np.expand_dims(self.data, axis=0),
                sigma=self.furthest_distance * 0.004,
                clip=self.furthest_distance * 0.01)
            self.data = self.data[0]

        print("Total %d points" % len(self.data))

        self.patch_size = patch_size
        self.patch_num = patch_num

        start = time.time()
        self.nbrs = spatial.cKDTree(self.clean_data)
        # dists,idxs = self.nbrs.query(self.clean_data,k=6,distance_upper_bound=0.2)
        dists, idxs = self.nbrs.query(self.clean_data, k=16)
        self.graph = []  # per-point neighbor sets (not used further in this snippet)
        for item, dist in zip(idxs, dists):
            item = item[dist < 0.07]  # distance cutoff is model-dependent (e.g. 0.03 for the chair7 model)
            self.graph.append(set(item))
        print("Building the neighbor sets took %f seconds" % (time.time() - start))

        start = time.time()
        self.graph2 = pygraph.classes.graph.graph()
        self.graph2.add_nodes(range(len(self.clean_data)))
        for sid, (idx, dist) in enumerate(zip(idxs, dists)):
            for eid, d in zip(idx, dist):
                if eid < len(self.clean_data) and not self.graph2.has_edge((sid, eid)):
                    self.graph2.add_edge((sid, eid), d)
        print("Building the edge-weighted graph took %f seconds" % (time.time() - start))

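    # Hedged sketch (not in the original): one plausible way to cut a patch
    # with the kD-tree built above -- take the patch_size nearest neighbors of
    # a random seed point. The name extract_patch is illustrative only.
    def extract_patch(self):
        seed_idx = np.random.randint(len(self.clean_data))  # random seed point
        _, nn_idx = self.nbrs.query(self.clean_data[seed_idx],
                                    k=self.patch_size)      # neighbor indices
        return self.data[nn_idx]                            # (patch_size, 3) patch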
Example #3
def _votenet_inference(queue):

    # Set file paths and dataset config
    demo_dir = os.path.join(BASE_DIR, 'demo_files')

    # Use the SUN RGB-D dataset config
    sys.path.append(os.path.join(ROOT_DIR, 'sunrgbd'))
    from sunrgbd_detection_dataset import DC  # dataset config
    checkpoint_path = os.path.join(demo_dir,
                                   'pretrained_votenet_on_sunrgbd.tar')

    eval_config_dict = {
        'remove_empty_box': True,
        'use_3d_nms': True,
        'nms_iou': 0.25,
        'use_old_type_nms': False,
        'cls_nms': False,
        'per_class_proposal': False,
        'conf_thresh': 0.5,
        'dataset_config': DC
    }
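    # With this config, parse_predictions keeps proposals whose objectness
    # confidence exceeds conf_thresh (0.5) and deduplicates them with
    # class-agnostic 3D NMS at IoU 0.25 (use_3d_nms=True, cls_nms=False).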

    # Init the model and optimizer
    MODEL = importlib.import_module('votenet')  # import network module
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = MODEL.VoteNet(
        num_proposal=256,
        input_feature_dim=1,
        vote_factor=1,
        # sampling='seed_fps' is an alternative sampling strategy
        sampling='vote_fps',
        num_class=DC.num_class,
        num_heading_bin=DC.num_heading_bin,
        num_size_cluster=DC.num_size_cluster,
        mean_size_arr=DC.mean_size_arr).to(device)
    print('Constructed model.')

    # Load checkpoint
    optimizer = optim.Adam(net.parameters(), lr=0.001)
    checkpoint = torch.load(checkpoint_path)
    net.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    print("Loaded checkpoint %s (epoch: %d)" % (checkpoint_path, epoch))

    # Load and preprocess input point cloud
    net.eval()  # set model to eval mode (affects batch norm and dropout)

    filename = queue.get()  # blocks until the parent process sends a filename
    print(filename)
    pc_dir = os.path.join(BASE_DIR, 'point_cloud')
    pc_path = os.path.join(pc_dir, filename)

    point_cloud = read_ply(pc_path)
    pc = preprocess_point_cloud(point_cloud)
    print('Loaded point cloud data: %s' % (pc_path))

    # Model inference
    inputs = {'point_clouds': torch.from_numpy(pc).to(device)}
    tic = time.time()
    with torch.no_grad():
        # with profiler.profile(with_stack=True, profile_memory=True) as prof:
        end_points = net(inputs)
    toc = time.time()
    print('Inference time: %f' % (toc - tic))

    end_points['point_clouds'] = inputs['point_clouds']
    pred_map_cls = parse_predictions(end_points, eval_config_dict)
    print('Finished detection. %d objects detected.' % (len(pred_map_cls[0])))

    #dump_dir = os.path.join(demo_dir, '%s_results'%('sunrgbd'))
    #if not os.path.exists(dump_dir): os.mkdir(dump_dir)
    #MODEL.dump_results(end_points, dump_dir, DC, True)
    #print('Dumped detection results to folder %s'%(dump_dir))

    #return pred_map_cls

    queue.put(pred_map_cls)  # send predictions back to the parent process
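
# Hedged sketch (not in the original): driving _votenet_inference from a
# parent process. A single shared Queue carries both the request (a filename)
# and the response (pred_map_cls), so the parent must not call get() before
# the worker has consumed the filename; polling empty() is a crude guard, and
# two separate queues would be the safer design. 'sample.ply' is a placeholder.
if __name__ == '__main__':
    import multiprocessing as mp
    queue = mp.Queue()
    queue.put('sample.ply')  # a .ply file under BASE_DIR/point_cloud
    proc = mp.Process(target=_votenet_inference, args=(queue,))
    proc.start()
    while not queue.empty():      # wait until the worker has taken the filename
        time.sleep(0.1)
    pred_map_cls = queue.get()    # per-scene list of (class, box corners, score)
    proc.join()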
Example #4
        num_size_cluster=DC.num_size_cluster,
        mean_size_arr=DC.mean_size_arr).to(device)
    
    # Load checkpoint
    optimizer = optim.Adam(net.parameters(), lr=0.001)
    checkpoint = torch.load(checkpoint_path)
    net.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    print("Loaded checkpoint %s "%(checkpoint_path))


    net.eval()  # set model to eval mode (affects batch norm and dropout)
    for pc_path in os.listdir(path_to_point_clouds):
        # Load and preprocess input point cloud
        point_cloud = read_ply(os.path.join(path_to_point_clouds, pc_path))
        pc = preprocess_point_cloud(point_cloud)
        print('Loaded point cloud data: %s' % (pc_path))

        # Model inference
        inputs = {'point_clouds': torch.from_numpy(pc).to(device)}
        tic = time.time()
        with torch.no_grad():
            end_points = net(inputs)
        toc = time.time()
        print('Inference time: %f' % (toc - tic))
        end_points['point_clouds'] = inputs['point_clouds']
        pred_map_cls = parse_predictions(end_points, eval_config_dict)
        # print(pred_map_cls)
        print('Finished detection. %d objects detected.' % (len(pred_map_cls[0])))
        dataset='sunrgbd'
    mesh = trimesh.Trimesh(vertices=verts, faces=faces)
    return mesh

sdf_data = []
model_id_list = []
model_list = os.listdir(input_point_cloud_dir)
model_list.sort()
for model_name in model_list:
    #if '90c6d1df1f83329fe1181b0e584cdf9b_clean' not in model_name: continue

    model_id = model_name.split('_')[0]
    input_pc_filename = os.path.join(input_point_cloud_dir, model_name)
    gt_mesh_filename = os.path.join(mesh_dir, model_id, 'model.obj')

    # the input pc faces -z; rotate 180 degrees about y so it faces +z
    input_pc = pc_util.read_ply(input_pc_filename)
    input_pc = pc_util.rotate_point_cloud_by_axis_angle(input_pc, [0,1,0], 180)

    # the mesh faces +x; its surface samples are rotated about y below so they face +z
    gt_mesh_pymesh = pymesh.load_mesh(gt_mesh_filename)
    gt_mesh = trimesh.Trimesh(vertices=gt_mesh_pymesh.vertices.copy(), faces=gt_mesh_pymesh.faces.copy())
    gt_mesh_pts_min = np.amin(gt_mesh.vertices, axis=0)
    gt_mesh_pts_max = np.amax(gt_mesh.vertices, axis=0)
    bbox_center = (gt_mesh_pts_max + gt_mesh_pts_min) / 2.0
    trans_v = -bbox_center
    gt_mesh.apply_translation(trans_v)
    gt_mesh_points, gt_mesh_sample_fidx = trimesh.sample.sample_surface(gt_mesh, 20480)
    gt_mesh_point_normals = gt_mesh.face_normals[gt_mesh_sample_fidx]
    gt_mesh_points = pc_util.rotate_point_cloud_by_axis_angle(gt_mesh_points, [0,1,0], -90)
    gt_mesh_point_normals = pc_util.rotate_point_cloud_by_axis_angle(gt_mesh_point_normals, [0,1,0], -90)
    #pc_util.write_ply_versatile(gt_mesh_points, os.path.join(output_dir, model_id+'_gt_points_0.ply'), normals=gt_mesh_point_normals)
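
# Hedged sketch (not in the original): pc_util.rotate_point_cloud_by_axis_angle
# as used above is assumed to rotate (N, 3) points about a unit axis by an
# angle given in degrees. An illustrative stand-in via Rodrigues' formula:
import numpy as np

def rotate_point_cloud_by_axis_angle(points, axis, angle_deg):
    a = np.asarray(axis, dtype=np.float64)
    a = a / np.linalg.norm(a)                  # normalize the rotation axis
    t = np.deg2rad(angle_deg)
    K = np.array([[0.0, -a[2], a[1]],
                  [a[2], 0.0, -a[0]],
                  [-a[1], a[0], 0.0]])         # skew-symmetric cross-product matrix
    R = np.eye(3) + np.sin(t) * K + (1.0 - np.cos(t)) * (K @ K)
    return points @ R.T                        # rotate every row vector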