Example #1
    def generate_processed_dataset(self, src_path, dst_path, testing=False):
        src_suffix = ".mp4"
        dst_suffix = ".jpg"

        files = glob.glob(src_path + "*" + src_suffix, recursive=True)

        if not os.path.exists(dst_path):
            os.mkdir(dst_path)

        for f in tqdm(files):
            # Iterate through all the video files and export the processed frames as images
            sub_directory = dst_path + "/" + self.extract_name(
                f, src_path, src_suffix)
            if not os.path.exists(
                    sub_directory):  # Create a directory for each video file
                os.mkdir(sub_directory)

            extractor = VideoProcessor(f)
            processed_images = extractor.process_video(roi_extraction=True,
                                                       filter_enabled=True,
                                                       average_frames=True)

            for frame_no in range(processed_images.shape[0]):
                output_filename = sub_directory + "/" + str(
                    frame_no) + dst_suffix
                cv2.imwrite(output_filename, processed_images[frame_no, :, :])

            if testing:
                break
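    # A minimal sketch of the extract_name helper that several of these
    # examples call; an assumption that it simply strips the directory prefix
    # and the file suffix to recover the sample name (the real implementation
    # may differ).
    def extract_name(self, file_path, src_path=None, src_suffix=".mp4"):
        # e.g. "../../dataset/micro/100109.mp4" -> "100109"; the default
        # arguments mirror how the examples call it with one or three
        # arguments, and os is already used elsewhere in this module.
        name = os.path.basename(file_path)
        if name.endswith(src_suffix):
            name = name[:-len(src_suffix)]
        return name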
Example #2
    def test2_3_1(self):
        source_directory = "../../dataset/micro/"
        source_file_suffix = ".mp4"

        file_handler = Generator(source_directory, source_file_suffix)

        n_files = len(file_handler.files)
        depths = np.zeros(n_files)
        heights = np.zeros(n_files)
        widths = np.zeros(n_files)

        for i, f in enumerate(tqdm(file_handler.files)):
            extractor = VideoProcessor(f)
            extracted_images = extractor.process_video(roi_extraction=True, filter_enabled=False, average_frames=True)

            depths[i] = extracted_images.shape[0]
            heights[i] = extracted_images.shape[1]
            widths[i] = extracted_images.shape[2]

        print("Depth min: " + str(np.min(depths)) + " average: " + str(np.average(depths)) + " max: " + str(
            np.max(depths)))
        print(np.percentile(depths, 90), np.percentile(depths, 95), np.percentile(depths, 99))
        print("Height min: " + str(np.min(heights)) + " average: " + str(np.average(heights)) + " max: " + str(
            np.max(heights)))
        print(np.percentile(heights, 90), np.percentile(heights, 95), np.percentile(heights, 99))
        print("Width min: " + str(np.min(widths)) + " average: " + str(np.average(widths)) + " max: " + str(
            np.max(widths)))
        print(np.percentile(widths, 90), np.percentile(widths, 95), np.percentile(widths, 99))
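        # These shape statistics plausibly motivated the fixed
        # (depth, height, width) = (32, 64, 64) target used by
        # generate_tensor_dataset in Example #4.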
Example #3
    def test2_3(self):
        checkpoint_enabled = True
        checkpoint = '100109'
        flag = False

        source_directory = "../../dataset/micro/"
        source_file_suffix = ".mp4"

        # Extract all processed frames from video files
        file_handler = Generator(source_directory, source_file_suffix)

        for f in tqdm(file_handler.files):
            # Iterate through all the video files and show their point clouds
            plot_title = file_handler.extract_name(f)

            if checkpoint_enabled:
                # Skip files until the sample named by `checkpoint` is reached
                if plot_title == checkpoint:
                    flag = True
                if not flag:
                    continue

            extractor = VideoProcessor(f)
            extracted_images = extractor.process_video(roi_extraction=True, filter_enabled=False, average_frames=True)

            percentiles = [99, 95, 90]
            for percentile in percentiles:
                visualization_tools.Interactive(extracted_images).show_point_cloud(percentile=percentile, clustering=True,
                                                                                   filter_outliers=True, name=plot_title)
Example #4
    def generate_tensor_dataset(self, src_path, dst_path, testing=False):
        src_suffix = ".mp4"
        dst_suffix = ".pt"
        depth, height, width = 32, 64, 64
        percentiles = [99, 95, 90]

        files = glob.glob(src_path + "*" + src_suffix, recursive=True)

        if not os.path.exists(dst_path):
            os.mkdir(dst_path)

        for f in tqdm(files):
            # Iterate through all the video files and export one (3, depth, height, width) tensor per video

            extractor = VideoProcessor(f)
            image_collection = extractor.process_video(roi_extraction=True,
                                                       filter_enabled=True,
                                                       average_frames=True)

            in_depth = image_collection.shape[0]
            in_height = image_collection.shape[1]
            in_width = image_collection.shape[2]

            output_3D = np.zeros((3, depth, height, width))

            for i, percentile in enumerate(percentiles):
                thresh = int(
                    np.percentile(image_collection.ravel(), percentile))

                cloud, _ = ImageProcessor3D().point_cloud_from_collecton(
                    image_collection, threshold=thresh, filter_outliers=True)

                mask = np.zeros((in_depth, in_height, in_width)).astype(float)
                mask[cloud[:, 0], cloud[:, 1], cloud[:, 2]] = 1.0
                mask[mask != 1.0] = 0.2

                masked = np.multiply(image_collection.astype(float), mask)
                masked = masked / np.max(masked.ravel())

                depth_ratio = depth / in_depth
                height_ratio = height / in_height
                width_ratio = width / in_width

                output_3D[i, :, :, :] = zoom(
                    masked, (depth_ratio, height_ratio, width_ratio))

            sample_name = self.extract_name(f, src_path, src_suffix)

            output_filename = dst_path + "/" + sample_name + dst_suffix
            output_3D = torch.from_numpy(output_3D).float()
            torch.save(output_3D, output_filename)

            if testing:
                break
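The tensors written above can be read back with torch.load; the class below is a hypothetical loader sketch (not part of the original code base), assuming one <sample>.pt file per video as produced by generate_tensor_dataset.

import glob
import os

import torch
from torch.utils.data import Dataset


class TensorFolderDataset(Dataset):
    # Hypothetical companion to generate_tensor_dataset: reads back the saved
    # .pt tensors and returns them with their sample names.
    def __init__(self, dst_path):
        self.paths = sorted(glob.glob(dst_path + "/*.pt"))

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, index):
        sample = torch.load(self.paths[index])  # (3, 32, 64, 64) float tensor
        name = os.path.basename(self.paths[index])[:-len(".pt")]
        return sample, name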
Example #5
    def test2_2(self):
        filename = "../../dataset/micro/100109.mp4"
        extractor = VideoProcessor(filename)
        image_collection = extractor.process_video(roi_extraction=True, filter_enabled=True, average_frames=True)

        # visualization_tools.Interactive(extracted_images).compare_with_chunk(extracted_images_filtered)
        # visualization_tools.Interactive(extracted_images).plot_intensities()
        # plt.hist(extracted_images.ravel(), bins=256, range=(0, 255), fc='k', ec='k')
        # plt.show()
        visualization_tools.Interactive(image_collection).show_point_cloud(percentile=99, clustering=True, filter_outliers=True)
Example #6
    def test1(self):
        filename = "../../dataset/micro/187558.mp4"
        extractor = VideoProcessor(filename)
        image_collection = extractor.process_video(roi_extraction=True, filter_enabled=True, average_frames=True)

        thresh = int(np.percentile(image_collection.ravel(), 95))
        cloud, labels = ImageProcessor3D().point_cloud_from_collecton(image_collection, threshold=thresh,
                                                                      filter_outliers=True)

        def downsample(pcd):
            print("Downsampled")
            downpcd = pcd.voxel_down_sample(voxel_size=2)
            o3d.visualization.draw_geometries([downpcd])
            return downpcd

        def normals(pcd):
            print("View normals")
            pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
            o3d.visualization.draw_geometries([pcd], point_show_normal=True)
            return pcd

        def hull(pcd):
            print("Define parameters used for hidden_point_removal")
            diameter = np.linalg.norm(np.asarray(pcd.get_max_bound()) - np.asarray(pcd.get_min_bound()))
            radius = diameter * 100

            camera = [0, 0, diameter]
            _, pt_map = pcd.hidden_point_removal(camera, radius)
            pcd_hull = pcd.select_by_index(pt_map)
            o3d.visualization.draw_geometries([pcd_hull])

            return pcd_hull

        def bpa_mesh(pcd):
            pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=1, max_nn=30))

            distances = pcd.compute_nearest_neighbor_distance()
            avg_dist = np.mean(distances)
            radius = 3 * avg_dist
            bpa_mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(
                pcd, o3d.utility.DoubleVector([radius, radius * 2]))

            dec_mesh = bpa_mesh.simplify_quadric_decimation(100000)
            dec_mesh.remove_degenerate_triangles()
            dec_mesh.remove_duplicated_triangles()
            dec_mesh.remove_duplicated_vertices()
            dec_mesh.remove_non_manifold_edges()

            # Display and return the cleaned, decimated mesh
            o3d.visualization.draw_geometries([dec_mesh])
            return dec_mesh
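        # Hypothetical continuation (not in the original file): the helpers
        # above expect an open3d.geometry.PointCloud, so wrap the (N, 3)
        # coordinate array in one before exercising them.
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(cloud.astype(np.float64))

        downpcd = downsample(pcd)
        normals(downpcd)
        hull(downpcd)
        bpa_mesh(downpcd)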
Example #7
    def test2_2_1(self):
        filename = "../../dataset/micro/187558.mp4"
        extractor = VideoProcessor(filename)
        image_collection = extractor.process_video(roi_extraction=True, filter_enabled=True, average_frames=True)

        thresh = int(np.percentile(image_collection.ravel(), 95))
        cloud, labels = ImageProcessor3D().point_cloud_from_collecton(image_collection, threshold=thresh,
                                                                      filter_outliers=True)
        # voxels = ImageProcessor3D().voxel_grid_from_cloud(cloud, out_depth=50, out_width=128, out_height=128)
        # cloud2 = np.argwhere(voxels == 255)

        h5f = h5py.File('../../point_cloud_dataset/187558.h5', 'r')
        cloud2 = h5f['cloud2'][:]
        h5f.close()

        print("Points in stored cloud2:", cloud2.shape[0], "points in freshly computed cloud:", cloud.shape[0])
Example #8
    def test2(self):
        filename = "../../dataset/micro/100109.mp4"
        extractor = VideoProcessor(filename)
        image_collection = extractor.process_video(roi_extraction=True, filter_enabled=True, average_frames=True)

        thresh = int(np.percentile(image_collection.ravel(), 95))
        cloud, labels = ImageProcessor3D().point_cloud_from_collecton(image_collection, threshold=thresh,
                                                                      filter_outliers=True)

        # 3x3 grid: the original projection plus eight rotated views
        for i in range(9):
            if i == 0:
                projection = PointCloud().cloud_projection(cloud=cloud)
            else:
                cloud_rotated = PointCloud().rotate(cloud=cloud, pos=i)
                projection = PointCloud().cloud_projection(cloud=cloud_rotated)
            plt.subplot(3, 3, i+1)
            plt.imshow(projection, cmap='gray')

        plt.show()
Example #9
    def generate_point_cloud_dataset(self, src_path, dst_path, testing=False):
        src_suffix = ".mp4"
        dst_suffix = ".h5"

        files = glob.glob(src_path + "*" + src_suffix, recursive=True)

        if not os.path.exists(dst_path):
            os.mkdir(dst_path)

        for f in tqdm(files):
            # Iterate through all the video files and export three thresholded point clouds per video to an HDF5 file

            extractor = VideoProcessor(f)
            image_collection = extractor.process_video(roi_extraction=True,
                                                       filter_enabled=True,
                                                       average_frames=True)

            thresh1 = int(np.percentile(image_collection.ravel(), 99))
            thresh2 = int(np.percentile(image_collection.ravel(), 95))
            thresh3 = int(np.percentile(image_collection.ravel(), 90))

            cloud1, _ = ImageProcessor3D().point_cloud_from_collecton(
                image_collection, threshold=thresh1, filter_outliers=True)
            cloud2, _ = ImageProcessor3D().point_cloud_from_collecton(
                image_collection, threshold=thresh2, filter_outliers=True)
            cloud3, _ = ImageProcessor3D().point_cloud_from_collecton(
                image_collection, threshold=thresh3, filter_outliers=True)

            sample_name = self.extract_name(f, src_path, src_suffix)

            output_filename = dst_path + "/" + sample_name + dst_suffix
            hf = h5py.File(output_filename, 'w')
            hf.create_dataset('cloud1', data=cloud1)
            hf.create_dataset('cloud2', data=cloud2)
            hf.create_dataset('cloud3', data=cloud3)
            hf.close()

            if testing:
                break
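
            # The remaining lines configure the interactive 3D plot (axis
            # labels and limits, viewing angle, scroll callback) and appear
            # to belong to a separate plotting method, plausibly
            # Interactive.show_point_cloud, rather than to
            # generate_point_cloud_dataset.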
            self.cloud_plot.set_zlabel('Height')

            limit = max(self.height, self.width, self.depth)
            self.cloud_plot.set_xlim([0, limit])
            self.cloud_plot.set_ylim([0, limit])
            self.cloud_plot.set_zlim([0, limit])

            plt.gca().invert_zaxis()
            elevation = 30  # Up/Down
            azimuth = 300  # Left/Right
            self.cloud_plot.view_init(elevation, azimuth)

            plt.draw()

        self.fig.canvas.mpl_connect(
            'scroll_event',
            lambda event: self.callback_scroll(event, update_cloud))
        update_cloud()
        plt.show()


if __name__ == "__main__":

    filename = "../../micro/100109.mp4"
    extractor = VideoProcessor(filename)
    extracted_images = extractor.process_video(roi_extraction=False,
                                               average_frames=True)
    # os.system('find . | grep -E "(__pycache__|\.pyc|\.pyo$)" | xargs rm -rf')

    Interactive(extracted_images).plot_intensities()