Example No. 1
    def calculate_angle_ranges(self, cam_angle_i):
        # camera head angle for this image and the scan point nearest to it
        cam_angle = self.omsws.image_angles[cam_angle_i:cam_angle_i+1]
        nearest_points = self.find_nearest_points(self.points, self.points_angles,
                                                  cam_angle)

        # camera position in Cartesian coordinates, padded with z = 0
        cam_positions = oms_geo.pol2cart(self.cam_offset, cam_angle)
        cam_positions = np.array(cam_positions).T
        cam_positions = np.hstack((cam_positions, np.zeros((cam_positions.shape[0], 1))))

        # vector from the camera through the image centre, projected to the xy-plane
        mid_image_vector = nearest_points - cam_positions
        mid_image_vector[:, 2] = 0

        # rotations by +/- half the field of view give the two image edge directions
        image_beg_rot = oms_geo.trans_rot_array_3d((0, 0, self.cam_angle_of_view/2),
                                                   (0, 0, 0))
        image_end_rot = oms_geo.trans_rot_array_3d((0, 0, -self.cam_angle_of_view/2),
                                                   (0, 0, 0))

        # absolute angle of the first image edge, normalized to [0, 2*pi)
        points_start = oms_geo.transform(mid_image_vector, image_beg_rot) + cam_positions
        angles_start = np.arctan2(points_start[:, 1], points_start[:, 0])
        angles_start = (angles_start + 2*math.pi) % (2*math.pi)

        # absolute angle of the second image edge, normalized to [0, 2*pi)
        points_end = oms_geo.transform(mid_image_vector, image_end_rot) + cam_positions
        angles_end = np.arctan2(points_end[:, 1], points_end[:, 0])
        angles_end = (angles_end + 2*math.pi) % (2*math.pi)

        return angles_start[0], angles_end[0]
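
The helpers oms_geo.pol2cart, trans_rot_array_3d and transform are not shown here; the standalone sketch below reproduces the same geometric idea with plain NumPy and made-up values (cam_offset, the field of view and the nearest point are assumptions, not project data), just to show how the two edge angles come out of rotating the centre vector by plus/minus half the field of view.

import math
import numpy as np

cam_offset = 100.0            # assumed camera offset from the rotation axis
cam_angle_of_view = 0.76      # assumed horizontal field of view in radians
cam_angle = math.radians(30)  # assumed head angle for this image

# camera position in the xy-plane, z = 0
cam_pos = np.array([cam_offset * math.cos(cam_angle),
                    cam_offset * math.sin(cam_angle), 0.0])
nearest = np.array([300.0, 180.0, 0.0])   # assumed nearest scan point
mid_vec = nearest - cam_pos               # vector through the image centre

def rot_z(v, a):
    # rotate a 3-vector around the z-axis by angle a
    c, s = math.cos(a), math.sin(a)
    return np.array([c * v[0] - s * v[1], s * v[0] + c * v[1], v[2]])

# rotate the centre vector by +/- half the field of view and read off the
# absolute angles of the two image edges, normalized to [0, 2*pi)
for half in (cam_angle_of_view / 2, -cam_angle_of_view / 2):
    edge = rot_z(mid_vec, half) + cam_pos
    angle = (math.atan2(edge[1], edge[0]) + 2 * math.pi) % (2 * math.pi)
    print('edge angle:', angle)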
Example No. 2
def save_as_grid(pc_path, json_path, r, resolution, cam_settings, omsws_settings):

    (cam_angle_of_view, cam_rot, cam_pos, image_flip_h, image_flip_v) = cam_settings
    dir_path, _ = split(pc_path)
    
    # read flattened point cloud
    pcd = o3d.read_point_cloud(pc_path)
    points = np.asarray(pcd.points)
    
    # read json file
    (paths, points_numbers, transformations, pprc_transformation) =\
                                    su.read_json_file(json_path, points.shape[0])
    
    # create grid and source masks - which scan each point comes from
    points, (scan_sources_h, scan_sources_l), grid_data = \
                     pu.calc_points_on_grid(points, points_numbers, resolution)
    
    (height, width, grid_step) = grid_data
    
    # create stitcher class and output image
    stitcher = Stitcher()
    stitcher.update_output_image(np.zeros((height, width, 3), dtype=np.uint8))
    
    # wrap the points onto the cylinder
    points = pol2cart_pc(points, r)

    # transform back to the coordinates from before postprocessing
    points = transform(points, inv(pprc_transformation))

    for i, path in enumerate(paths):
        print('loading omsws:', split(path)[1])
        
        points_scan = points[np.logical_or(scan_sources_l == i, 
                                           scan_sources_h == i)]

        
        # every scan after the first one carries an extra (stitching) transformation;
        # invert it to get back to the raw positions
        if i > 0:
            points_scan = transform(points_scan, inv(transformations[i-1]))
        
        print('loading image')
        image_loader = su.StitchImageLoader(path, omsws_settings, cam_settings,
                                            r, points_scan)

        points_colors = image_loader.load_images()
    
        # points to flat surface
        points_flat = cart2pol_pc(points_scan, r)
        
        output = su.create_image(points_flat, points_colors, grid_step)
        result = Image.fromarray(output, mode='RGB')
        result.save(join(split(json_path)[0], 'pictures/result%02d.png' %i))
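
A tiny self-contained illustration of the per-scan selection in the loop above, with toy data standing in for the grid points and the scan_sources_h / scan_sources_l masks (all values are made up; what _h and _l denote in the project is not shown here):

import numpy as np

points = np.arange(18, dtype=float).reshape(6, 3)   # toy grid points
scan_sources_h = np.array([0, 0, 1, 1, 2, 2])       # toy source indices
scan_sources_l = np.array([0, 1, 1, 2, 2, 0])       # toy source indices

for i in range(3):
    # a point belongs to scan i if either of its sources is scan i
    mask = np.logical_or(scan_sources_l == i, scan_sources_h == i)
    points_scan = points[mask]
    print('scan', i, '->', len(points_scan), 'points')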
Example No. 3
    def load_image(self, cam_angle_i):
        # get the image associated with cam_angle_i and all points from
        # self.points that fall inside it. Output those points and their colors,
        # plus a flag saying whether the image is split (starts on the right and
        # ends on the left) on the output stitched image
                
        angle_start, angle_end = self.calculate_angle_ranges(cam_angle_i)
        
        split = angle_start > angle_end
        if split:
            points_image = self.points[np.logical_or(self.points_angles > angle_start,
                                               self.points_angles < angle_end)]
        else:
            points_image = self.points[np.logical_and(self.points_angles > angle_start,
                                               self.points_angles < angle_end)]
                
        if len(points_image) != 0:
            im_dist = self.cam_offset + self.im_width /\
                                    (2 * math.tan(self.cam_angle_of_view / 2))
        
            points_cam = np.dot(points_image, self.cam_head_rot[cam_angle_i])
            points_cam[:, 0] -= self.cam_offset
            points_cam = oms_geo.transform(points_cam, self.cam_rot_matrix)

            d = np.linalg.norm(points_cam, axis=1)
            # elevation angle (height)
            rxy = np.arcsin(points_cam[:, 2] / d)
            # azimuth angle (width)
            rz = np.arctan2(points_cam[:, 1], points_cam[:, 0])
            
            # calculate the pixels that correspond to those angles
            y_im = np.array(im_dist * np.tan(rxy) + self.im_width / 2, dtype=int)
            x_im = np.array(im_dist * np.tan(rz) + self.im_height / 2, dtype=int)
            f = np.logical_and(np.logical_and(0 < x_im, x_im < self.im_height),
                               np.logical_and(0 < y_im, y_im < self.im_width))
            
            scan_colors = self.omsws.images[cam_angle_i][x_im[f], y_im[f], :]
            scan_colors = scan_colors.astype(np.uint8)
        else:
            # no points fall inside this image; return empty selections
            f = np.zeros(0, dtype=bool)
            scan_colors = np.zeros((0, 3), dtype=np.uint8)
            
        return points_image[f], scan_colors, split
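
The pixel mapping above is a pinhole-style projection in which im_dist acts as a focal length in pixel units, and the width/2 and height/2 terms move the optical axis to the image centre. A standalone sketch with synthetic numbers (image size, field of view, offset and points are all assumptions):

import math
import numpy as np

im_width, im_height = 1280, 960     # assumed image size in pixels
cam_angle_of_view = 0.76            # assumed field of view in radians
cam_offset = 100.0                  # assumed camera offset
im_dist = cam_offset + im_width / (2 * math.tan(cam_angle_of_view / 2))

# synthetic camera-frame points (x roughly along the optical axis)
pts = np.array([[500.0,  40.0,  25.0],
                [500.0, -60.0, -10.0]])
d = np.linalg.norm(pts, axis=1)
rxy = np.arcsin(pts[:, 2] / d)         # elevation angle
rz = np.arctan2(pts[:, 1], pts[:, 0])  # azimuth angle

# tan(angle) * focal length gives the offset from the image centre
y_im = np.array(im_dist * np.tan(rxy) + im_width / 2, dtype=int)
x_im = np.array(im_dist * np.tan(rz) + im_height / 2, dtype=int)
print(np.column_stack((x_im, y_im)))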
Example No. 4
##pcd.points = o3d.Vector3dVector(np.array(np.hstack((points[:, :2], scan_sources.reshape(-1, 1)))))
#pcd.points = o3d.Vector3dVector(points)
#o3d.draw_geometries([pcd])

# now that we have the new grid of points, we need the raw point positions and
# the camera images so that we can assign them to each other

# wrap the points onto the cylinder
points = pol2cart_pc(points, r)

#pcd.points = o3d.Vector3dVector(np.array(points))
#pcd.transform(inv(pprc_transformation))
#o3d.draw_geometries([pcd])

# transform back to the coordinates from before postprocessing
points = transform(points, inv(pprc_transformation))
#points = np.asarray(pcd.points)
rotation = 0
scan_id = 1
inverse_x = True
cam_angle_of_view = 0.76
omsws_settings = (scan_id, inverse_x, rotation)

for i, path in enumerate(paths):
    print(i)
    points_scan = points[scan_sources == i]

    # every scan after the first one carries an extra (stitching) transformation;
    # invert it to get back to the raw positions
    if i > 0:
        points_scan = transform(points_scan, inv(transformations[i - 1]))
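
The transform() helper and the stitching matrices are not shown in this fragment. The sketch below assumes transform() applies a 4x4 homogeneous matrix to Nx3 points and shows why applying inv(T) recovers the raw positions; the function name, matrix and points are illustrative only.

import numpy as np
from numpy.linalg import inv

def apply_transform(points, T):
    # assumed behaviour of transform(): rotate + translate Nx3 points with a 4x4 matrix
    homog = np.hstack((points, np.ones((points.shape[0], 1))))
    return (homog @ T.T)[:, :3]

T = np.eye(4)
T[:3, 3] = [5.0, -2.0, 1.0]          # toy translation-only stitching transform
raw = np.array([[1.0, 2.0, 3.0],
                [4.0, 5.0, 6.0]])
stitched = apply_transform(raw, T)
recovered = apply_transform(stitched, inv(T))
print(np.allclose(recovered, raw))   # True: inv(T) undoes the stitching step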
Example No. 5
    def load_images(self):
        cam_rot_matrix = oms_geo.trans_rot_array_3d(-self.cam_rot,
                                                    [0, *-self.cam_pos[1:]])
        cam_angles = self.omsws.image_angles

        if not isinstance(self, StitchImageLoader):
            scans_points = np.asarray(self.full_copy.points)
        else:
            scans_points = self.points
    
        # initialize point cloud color array
        scan_colors = np.zeros_like(np.asarray(scans_points))
        self.image_sources = np.zeros(scans_points.shape, dtype=np.uint16)
        
        im_height, im_width = self.omsws.images[0].shape[:2]
        # calculate the distance of the image plane from the camera, so we can
        # map point-cloud points to pixels, assuming pixels are millimeters
        im_dist = self.cam_offset + im_width /\
                    (2 * math.tan(self.cam_angle_of_view / 2))
                
        # create array of rotations back to position 0, where we are loading images
        real_cam_angles = np.array(cam_angles) + math.atan2(self.cam_pos[1],
                                                            self.cam_offset)
        
        cam_head_rot = [oms_geo.trans_rot_array_3d((0, 0, -cam_angle), 
                                                   (0,0,0))[:3, :3] 
                                                for cam_angle in real_cam_angles]
        
        # calculate angles of all the points and normalize them to [0, 2*pi)
        points_angles = np.arctan2(scans_points[:, 1], scans_points[:, 0])
        points_angles = (points_angles + 2*math.pi) % (2*math.pi)

        indexes = list(range(len(cam_angles)))
        ind_cam_angles = sorted(zip(indexes, real_cam_angles), key=lambda x:x[1])
        
        # pad the sorted list with the last cam angle (- 2*pi) at the start and
        # the first cam angle (+ 2*pi) at the end, so the nearest lookup wraps around
        ind_cam_angles.insert(0, (ind_cam_angles[-1][0], 
                                  ind_cam_angles[-1][1] - 2*math.pi))
        ind_cam_angles.append((ind_cam_angles[1][0], 
                                  ind_cam_angles[1][1] + 2*math.pi))


        (im_index, sorted_cam_angles) = zip(*ind_cam_angles)
        point_im_indexes = griddata(np.array(sorted_cam_angles), np.array(im_index), 
                                    points_angles, method='nearest')
        
        for image_i in range(len(cam_angles)):
            points_i, = np.where(point_im_indexes == image_i)
            points = scans_points[points_i]
            
            if len(points) != 0:
                points = np.dot(points, cam_head_rot[image_i])
                points[:, 0] -= self.cam_offset
                points = oms_geo.transform(points, cam_rot_matrix)
#                points = np.dot(points, cam_rot_matrix[:3, :3])
#                points += cam_rot_matrix[:3, 3] 
                
                x, y, z = points[:, 0], points[:, 1], points[:, 2]
                d = np.sqrt(np.power(x, 2) + np.power(y, 2) + np.power(z, 2))
                # elevation angle (height)
                rxy = np.arcsin(z / d)
                # azimuth angle (width)
                rz = np.arctan2(y, x)
                
                # calculate the pixels that correspond to those angles
                y_im = np.array(im_dist * np.tan(rxy) + im_width / 2, dtype=int)
                x_im = np.array(im_dist * np.tan(rz) + im_height / 2, dtype=int)
                ixy = np.column_stack((points_i, x_im, y_im))
                ixy = ixy[np.logical_and(np.logical_and(0 < x_im, x_im < im_height),
                                         np.logical_and(0 < y_im, y_im < im_width))]
                scan_colors[ixy[:, 0]] =\
                        self.omsws.images[image_i][ixy[:, 1], ixy[:, 2], :]
                    
        return scan_colors
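
The padding trick above (prepending the last camera angle minus 2*pi and appending the first one plus 2*pi) is what makes the 1-D nearest-neighbour lookup wrap correctly around 0 / 2*pi. A minimal sketch with toy angles (the values are made up):

import math
import numpy as np
from scipy.interpolate import griddata

cam_angles = [0.5, 2.0, 3.5, 5.0]                    # toy camera head angles
ind_cam = sorted(zip(range(len(cam_angles)), cam_angles), key=lambda x: x[1])

# pad with the last angle - 2*pi and the first angle + 2*pi so points near
# 0 or 2*pi still snap to the correct camera
ind_cam.insert(0, (ind_cam[-1][0], ind_cam[-1][1] - 2 * math.pi))
ind_cam.append((ind_cam[1][0], ind_cam[1][1] + 2 * math.pi))

im_index, sorted_angles = zip(*ind_cam)
point_angles = np.array([0.1, 6.2, 3.0])             # toy point angles in [0, 2*pi)
nearest_im = griddata(np.array(sorted_angles), np.array(im_index),
                      point_angles, method='nearest')
print(nearest_im)   # 0.1 and 6.2 both map to the camera at 0.5 rad (index 0)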