# Exemplo n.º 1
# 0
 def object_from_point_cloud(path, number_of_points):
     """Load a .ply point cloud from *path* and wrap it in a PointsObject.

     The requested point count is clamped to the number of points actually
     present in the file.  The resulting object is scaled by 0.5 before
     being returned.

     Args:
         path: location of the .ply file to download/load.
         number_of_points: how many points to activate, or None.

     Returns:
         A PointsObject holding the (scaled) cloud.
     """
     xyz, rgb = download_point_cloud.download_ply(path)
     available = xyz.shape[0]
     # Never ask for more points than the file provides; None passes through.
     if number_of_points is not None:
         number_of_points = min(number_of_points, available)
     cloud = PointsObject()
     cloud.set_points(xyz, rgb, number_of_points)
     cloud.scale(0.5)
     # visualization.visualize_object([cloud])
     return cloud
# Exemplo n.º 2
# 0
 def object_from_picture():
     """Segment one moving object from the camera frames and show its cloud.

     Obtains the motion mask plus depth/RGB images, isolates a single
     object's mask, converts the masked region to a point cloud and
     opens the visualizer on it.
     """
     motion_mask, depth_map, rgb_image = get_moving_mask()
     # Inputs are 8-bit images; normalize to [0, 1] before segmentation.
     single_object_mask = get_one_object_mask(motion_mask / 255,
                                              depth_map / 255,
                                              depth_threshold=0.05,
                                              number_of_object=1)
     points_xyz, points_rgb = image_processing.calculate_point_cloud(
         rgb_image / 255, depth_map * single_object_mask / 255)
     cloud = PointsObject()
     cloud.set_points(points_xyz, points_rgb)
     visualization.visualize_object([cloud])
def try_vrep_connection():
    """Smoke-test the V-REP pipeline: capture Kinect images, build a cloud.

    Connects to a running V-REP simulation, grabs one depth/RGB image pair
    from the simulated Kinect, stops the simulation, converts the images to
    a point cloud and prints the shapes involved.  Relies on module-level
    camera constants (cam_angle, near_clipping_plane, far_clipping_plane)
    and number_of_active_points being defined elsewhere in this module.
    """
    client = vrep_functions.vrep_connection()
    vrep_functions.vrep_start_sim(client)
    rgb_sensor = vrep_functions.get_object_id(client, 'kinect_rgb')
    depth_sensor = vrep_functions.get_object_id(client, 'kinect_depth')
    depth_image, rgb_image = vrep_functions.vrep_get_kinect_images(
        client, rgb_sensor, depth_sensor)
    print(depth_image.shape, rgb_image.shape)
    vrep_functions.vrep_stop_sim(client)

    # NOTE(review): the helper returns (xyz, colors); the original bound
    # them to names "depth"/"rgb", which was misleading.
    cloud_xyz, cloud_rgb = vrep_functions.calculate_point_cloud(
        rgb_image, depth_image, cam_angle, near_clipping_plane,
        far_clipping_plane)

    scene = PointsObject()
    scene.set_points(cloud_xyz, cloud_rgb, number_of_active_points)
    # visualization.visualize_object([scene])
    recent = np.asarray(scene.return_n_last_points(number_of_active_points))
    print(recent[0].shape)
# Exemplo n.º 4
# 0
def save_points_cloud():
    """Create a synthetic point cloud and persist it.

    Builds the test cloud via create_points_cloud(), wraps it in a
    PointsObject and saves all of its points under folder "Test" with
    name "ball".
    """
    points_cloud, points_color = create_points_cloud()
    # Fix: the original bound the instance to the name `object`, shadowing
    # the `object` builtin.
    cloud = PointsObject()
    cloud.set_points(points_cloud, points_color)
    cloud.save_all_points("Test", "ball")
# Exemplo n.º 5
# 0
    def objects_test_moving_figures_global():
        """Track moving objects across frames with global covariance descriptors.

        Builds an RGB-D mixture-of-Gaussians background model from frame 0,
        then for frames 1-4: extracts per-object motion masks, describes each
        object with a GlobalCovarianceDescriptor, matches it against objects
        seen so far, and saves a color-coded tracking mask per frame.
        """
        # Maps each first-seen object's descriptor -> its random RGB color.
        classes = {}

        rgb_im = image_processing.load_image("falling balls and cylinder",
                                             "rgb_" + str(0) + ".png")
        depth_im = image_processing.load_image("falling balls and cylinder",
                                               "depth_" + str(0) + ".png",
                                               "depth")
        # Background model trained on frame 0 only.
        mog = RGBD_MoG(rgb_im, depth_im, number_of_gaussians=3)

        for number_of_frame in range(1, 5):

            # One RGB layer per tracked object; accumulated below per mask.
            color_mask = np.zeros([rgb_im.shape[0], rgb_im.shape[1], 3])

            rgb_im = image_processing.load_image(
                "falling balls and cylinder",
                "rgb_" + str(number_of_frame) + ".png")
            depth_im = image_processing.load_image(
                "falling balls and cylinder",
                "depth_" + str(number_of_frame) + ".png", "depth")
            # Foreground (moving pixels) mask from the background model.
            mask = mog.set_mask(rgb_im, depth_im)

            # Zero out background depth, then split the foreground into
            # per-object masks by depth-continuous regions.
            depth_im = depth_im * (mask / 255).astype(int)
            masks = region_growing(mask / 255,
                                   depth_im / 255,
                                   depth_threshold=0.01,
                                   significant_number_of_points=10)
            if len(masks) == 0:
                print("No moving objects in the frame")
            else:
                # NOTE(review): this loop variable shadows the frame-level
                # `mask` above; intentional-looking but fragile.
                for mask in masks:
                    xyz_points, rgb_points = image_processing.calculate_point_cloud(
                        rgb_im / 255, depth_im * mask / 255)
                    current_object = PointsObject()
                    current_object.set_points(xyz_points, rgb_points)
                    norms = current_object.get_normals()
                    compared_object_descriptor = GlobalCovarianceDescriptor(
                        xyz_points,
                        rgb_points,
                        norms,
                        depth_im,
                        rgb_im,
                        mask,
                        use_xyz=True,
                        use_rgb=True,
                        use_normals=True)
                    match_found = False
                    # One distance per previously-registered object.
                    lengths = np.zeros([len(classes)])

                    if number_of_frame == 1:
                        # First tracked frame: everything is a new object.
                        match_found = False
                    else:
                        # NOTE(review): match_found is set True无conditionally
                        # here (no distance threshold), so an object that first
                        # appears after frame 1 can never be registered as new
                        # — TODO confirm this is intended for this test.
                        match_found = True
                        for object_number, object_class in enumerate(classes):
                            lengths[
                                object_number] = object_class.compare_descriptors(
                                    compared_object_descriptor.
                                    object_descriptor)
                        # Closest stored descriptor wins.
                        min_arg = np.argmin(lengths)
                        print(lengths)
                        for object_number, object_class in enumerate(classes):
                            if object_number == min_arg:
                                # Paint this object's pixels with its class color.
                                color_mask[:, :,
                                           0] += mask * classes[object_class][0]
                                color_mask[:, :,
                                           1] += mask * classes[object_class][1]
                                color_mask[:, :,
                                           2] += mask * classes[object_class][2]

                    if not match_found:
                        # Register a new object with a random color.
                        classes[compared_object_descriptor] = np.random.rand(3)
                        color_mask[:, :, 0] += mask * classes[
                            compared_object_descriptor][0]
                        color_mask[:, :, 1] += mask * classes[
                            compared_object_descriptor][1]
                        color_mask[:, :, 2] += mask * classes[
                            compared_object_descriptor][2]
                image_processing.save_image(color_mask,
                                            "tracking_results",
                                            frame_number=number_of_frame,
                                            image_name="global_two_same")
# Exemplo n.º 6
# 0
    def objects_test_moving_figures_local():
        """Track moving objects across frames with local covariance descriptors.

        Same pipeline as the global variant: MoG background model from
        frame 0, per-object motion masks for frames 1-4, but each object is
        matched by comparing up to 100 per-point local CovarianceDescriptors
        and letting the points vote for the best previously-seen object.
        """
        # How many per-point descriptors participate in matching/voting.
        number_of_comparing_points = 100
        # Maps each first-seen object's descriptor -> its random RGB color.
        classes = {}

        rgb_im = image_processing.load_image("falling balls and cylinder",
                                             "rgb_" + str(0) + ".png")
        depth_im = image_processing.load_image("falling balls and cylinder",
                                               "depth_" + str(0) + ".png",
                                               "depth")
        # Background model trained on frame 0 only.
        mog = RGBD_MoG(rgb_im, depth_im, number_of_gaussians=3)

        for number_of_frame in range(1, 5):

            color_mask = np.zeros([rgb_im.shape[0], rgb_im.shape[1], 3])

            rgb_im = image_processing.load_image(
                "falling balls and cylinder",
                "rgb_" + str(number_of_frame) + ".png")
            depth_im = image_processing.load_image(
                "falling balls and cylinder",
                "depth_" + str(number_of_frame) + ".png", "depth")
            # Foreground (moving pixels) mask from the background model.
            mask = mog.set_mask(rgb_im, depth_im)

            # Zero out background depth, then split the foreground into
            # per-object masks by depth-continuous regions.
            depth_im = depth_im * (mask / 255).astype(int)
            masks = region_growing(mask / 255,
                                   depth_im / 255,
                                   depth_threshold=0.01,
                                   significant_number_of_points=10)
            if len(masks) == 0:
                print("No moving objects in the frame")
            else:
                # NOTE(review): loop variable shadows the frame-level `mask`.
                for mask in masks:
                    xyz_points, rgb_points = image_processing.calculate_point_cloud(
                        rgb_im / 255, depth_im * mask / 255)
                    current_object = PointsObject()
                    current_object.set_points(xyz_points, rgb_points)
                    norms = current_object.get_normals()
                    compared_object_descriptor = CovarianceDescriptor(
                        xyz_points,
                        rgb_points,
                        norms,
                        k_nearest_neighbours=None,
                        relevant_distance=0.1,
                        use_alpha=True,
                        use_beta=True,
                        use_ro=True,
                        use_theta=True,
                        use_psi=True,
                        use_rgb=True)
                    match_found = False
                    # Distance matrix: one row per known object, one column
                    # per compared point (capped by the available points).
                    lengths = np.zeros([
                        len(classes),
                        np.amin(
                            [number_of_comparing_points, xyz_points.shape[0]])
                    ])

                    if number_of_frame == 1:
                        # First tracked frame: everything is a new object.
                        match_found = False
                    else:
                        # NOTE(review): as in the global variant, match_found
                        # is forced True after frame 1, so later-appearing
                        # objects are never registered — TODO confirm.
                        match_found = True
                        for object_number, object_class in enumerate(classes):
                            lengths[
                                object_number] = object_class.compare_descriptors(
                                    compared_object_descriptor.
                                    object_descriptor,
                                    number_of_comparing_points)
                            print(np.sum(mask))
                        # Each point votes for its nearest known object, but
                        # only votes whose best distance is < 0.1 are kept.
                        min_args = np.argmin(
                            lengths, axis=0)[np.amin(lengths, axis=0) < 0.1]
                        unique, counts = np.unique(min_args,
                                                   return_counts=True)
                        # NOTE(review): if no vote clears the 0.1 threshold,
                        # `unique` is empty and argmax raises — TODO confirm
                        # this cannot happen with the test data.
                        best_match = unique[np.argmax(counts)]
                        for object_number, object_class in enumerate(classes):
                            if object_number == best_match:
                                # Paint this object's pixels with its class color.
                                color_mask[:, :,
                                           0] += mask * classes[object_class][0]
                                color_mask[:, :,
                                           1] += mask * classes[object_class][1]
                                color_mask[:, :,
                                           2] += mask * classes[object_class][2]

                    if not match_found:
                        # Register a new object with a random color.
                        classes[compared_object_descriptor] = np.random.rand(3)
                        color_mask[:, :, 0] += mask * classes[
                            compared_object_descriptor][0]
                        color_mask[:, :, 1] += mask * classes[
                            compared_object_descriptor][1]
                        color_mask[:, :, 2] += mask * classes[
                            compared_object_descriptor][2]
                image_processing.save_image(color_mask,
                                            "tracking_results",
                                            frame_number=number_of_frame,
                                            image_name="local_two_same")