Example #1
import os

# Project helpers assumed importable from the surrounding module:
# load_correction_file, strip_tokens, substitute_tokens,
# get_filename_from_path, correction_file_text_template.

def get_correction_file_data(
    correction_file_path=None,
    output_file_path=None,
    video_threshold=None,
    frame_history=None,
    droplet_similarity=None,
    distance_threshold=None,
):
    # If the correction (override) file already exists, load any droplet
    # corrections from it.
    if os.path.exists(correction_file_path):
        # Returns dict of droplet corrections and count.
        return load_correction_file(correction_file_path)
    else:
        # Otherwise, create a blank one from the template text.
        correction_file_text_string = strip_tokens(
            substitute_tokens(
                {
                    "filename": get_filename_from_path(output_file_path),
                    "threshold": video_threshold,
                    "frame_history": frame_history,
                    "droplet_similarity": droplet_similarity,
                    "distance_threshold": distance_threshold,
                },
                correction_file_text_template,
            ))
        with open(correction_file_path, "w") as correction_file:
            correction_file.write(correction_file_text_string)

        # And set the empty state variables if we created a new file.
        droplet_corrections = {}
        correction_count = None  # None signals "no corrections loaded yet".

        return droplet_corrections, correction_count
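
A minimal usage sketch follows. The paths and tuning values here are hypothetical, chosen only to show the call shape; on a first run the function writes a fresh correction file from the template and returns ({}, None).

# Hypothetical paths and tuning values, for illustration only.
droplet_corrections, correction_count = get_correction_file_data(
    correction_file_path="output/clip_01.corrections.txt",
    output_file_path="output/clip_01.mp4",
    video_threshold=30,
    frame_history=5,
    droplet_similarity=0.8,
    distance_threshold=40,
)
if correction_count:
    print("{} manual correction(s) loaded".format(correction_count))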
Example #2
    def _process(self, frame, index_frame_number):
        """"""

        droplet_data = self._video_master.frames[index_frame_number].droplets
        # print(
        #     "frame: {}, {} droplets found".format(index_frame_number, len(droplet_data))
        # )  # Debug

        self.video_total_unprocessed_droplet_count += len(droplet_data)

        # We want the grayscale frame with the border cleaned up, but
        # we don't want the droplets.
        thresholded_frame = threshold_and_find_droplets(frame,
                                                        self.image_threshold,
                                                        self.border_width,
                                                        DROPLET_SCAN=False)

        # Introduce this frame.
        if self._VERBOSE:
            areas = [droplet.area for droplet in droplet_data.values()]
            if areas:
                areas_string = "(" + " ".join(str(a) for a in areas) + ")"
            else:
                areas_string = ""

            printc(
                "----- Frame {}: {} raw droplet{}, {} pixel{} {} ---------------"
                .format(
                    self.index_frame_number + 1,
                    len(droplet_data),
                    ess(len(droplet_data)),
                    sum(areas),
                    ess(sum(areas)),
                    areas_string,
                ),
                "purple",
            )

        #
        # Tracker interlude
        #

        # Most of the shenanigans happen here. All the droplets go out, but
        # some don't come back.
        winnowed_droplets = self._droplet_tracker.update(
            new_droplet_dict=droplet_data, this_frame=self.index_frame_number)

        #
        # Beginning of pretty video frame.
        #

        # Convert frame back to color so we can write in color on it.
        self.processed_frame = cv2.cvtColor(thresholded_frame,
                                            cv2.COLOR_GRAY2RGB)

        self.frame_droplet_count = 0
        self.frame_pixel_area = 0
        frame_area_correction = 0

        #
        # Highlight and label found droplets.
        #

        labels = Labeler(
            index_frame_number,
            frame_shape=self.frame_shape,
            VERBOSE=self._VERBOSE,
            DEBUG=self._DEBUG,
        )

        for droplet_id in winnowed_droplets:

            new_droplet = False  # Our flag for dealing with counts and areas later.

            # Check whether this droplet was matched to one from a prior frame.
            if (droplet_id not in self._video_master.index_by_frame[
                    self.index_frame_number]):
                # The tracker matched this droplet to a prior frame's droplet:
                # "new droplet" here means "this droplet id isn't one we
                # expected to see in this frame."
                new_droplet = True

                # The default droplet pixel area is the area of the most recent
                # droplet sighting. We might be able to do better here, for
                # instance getting the max area from the multiple sightings,
                # but for now let's be simple, and use the original area.
                # We might need to subtract out the current contour area
                # later: save the correction.
                frame_area_correction += self._video_master.index_by_droplet[
                    droplet_id].area

            # Get the data for this droplet.
            droplet = self._video_master.index_by_droplet[droplet_id]

            label = labels.add_label(droplet)

            if not new_droplet:
                self.frame_droplet_count += 1
                if not self._reprocessing:
                    self.video_total_droplet_count += 1

            # If we need video, either for a captured file, for end-user
            # display while processing, or to capture the top-10 frame images.
            # if not HIDE_VIDEO or CAPTURE_VIDEO or TOP_10:

            if not self._HIDE_DROPLET_HISTORY:
                # Draw outlines of any prior generations.
                if droplet.generations() >= 2:
                    # Contour history to draw before the green box.
                    for contour in droplet.contour_history():
                        self.processed_frame = cv2.drawContours(
                            self.processed_frame, contour, -1, config.amber)

            # Draw red bounding box around this frame's contour.
            label.draw_contour_bounding_box(self.processed_frame,
                                            color=config.bright_red,
                                            thickness=1)

            # Mark the center with a single red pixel.
            # (This will never be seen, unless the frame is grabbed and
            # magnified. :)
            integer_droplet_center = tuple([int(n) for n in droplet.centroid])
            cv2.line(
                self.processed_frame,
                integer_droplet_center,
                integer_droplet_center,
                config.bright_red,
                1,
            )

            # The droplet area was already computed during the initial file scan.
            area = droplet.area

            # if new_droplet:
            self.frame_pixel_area += area
            self.video_total_pixel_area += area

            if self._csv_file:
                self._csv_file.update_csv_row(
                    str(self.counting_frame_number),
                    str(droplet.initial_id),
                    [
                        droplet_id,
                        area,
                        "{:.2f}".format(droplet.centroid[0]),
                        "{:.2f}".format(droplet.centroid[1]),
                    ],
                )

        # if self.index_frame_number >= 116:  # Debug breakpoint catcher
        #     debug_catcher = True

        # Draw all the labels.
        labels.draw(self.processed_frame)

        # Add some frame labeling.
        self.processed_frame = add_frame_header_text(
            self.processed_frame,
            get_filename_from_path(self.file_path),
            self.counting_frame_number,
            self.file_length_in_frames,
            self.frame_droplet_count,
            self.frame_pixel_area,
            self.video_total_droplet_count,
            self.video_total_unprocessed_droplet_count,
            self.video_total_pixel_area,
            self.image_threshold,
            self.history,
            self.similarity_threshold,
            self.distance_threshold,
        )

        # Update and draw the droplet graph.
        if self._reprocessing:
            self._tiny_graph.reset_max_y(
                max(self._video_master.droplet_counts_by_frame))

        else:
            self._tiny_graph.update(
                len(droplet_data),
                self.audio_data_by_frame[self.index_frame_number])
        self._tiny_graph.canvas = self.processed_frame
        self.processed_frame = self._tiny_graph.draw_graph()

        # Composite annotations on to original video frame.
        self.processed_frame = cv2.add(
            add_alpha_channel(frame),
            add_alpha_channel(self.processed_frame,
                              transparent_color=(0, 0, 0)),
        )
        # Capture the output frame.
        # if self._CAPTURE_VIDEO:
        #     # Not sure if this is needed...
        #     self.processed_frame = remove_alpha_channel(self.processed_frame)
        #     for _ in range(self._output_frames):
        #         self.video_output.write(self.processed_frame.astype("uint8"))

        # cv2.imwrite(
        #     '/Users/CS255/Desktop/git/python/fmva/test_output_3/test.jpg',
        #     self.processed_frame,
        # )

        # Put the finished frame back into the dispenser.
        self._frame_dispenser.processed_frame_return(self.processed_frame)

        if self._reprocessing:
            self._reprocessing = False

        return self.processed_frame
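
The compositing step near the end relies on an add_alpha_channel helper that is not shown. A minimal sketch of one plausible implementation, assuming it appends an alpha plane and optionally keys out a transparent color (an assumption, not the project's actual code):

import cv2
import numpy as np

def add_alpha_channel(frame, transparent_color=None):
    # Sketch only (assumed behavior): append a full-opacity alpha plane;
    # if transparent_color is given, zero the alpha wherever the frame
    # matches that color exactly.
    bgra = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
    if transparent_color is not None:
        mask = np.all(frame == np.array(transparent_color), axis=-1)
        bgra[mask, 3] = 0
    return bgra

Because cv2.add saturates per channel and the annotation frame is black (all zeros) wherever nothing was drawn, adding the two BGRA images leaves the original pixels untouched outside the annotations.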
Example #3
def main():

    # If running from an IDE, set TEST to True to use the test command-line
    # arguments defined in utils.cl_args.get_test_args().
    # Or, from the command line, the --test flag reads the same values,
    # ignoring the rest of the command-line flags.

    TEST = False
    # TEST = True  # Uncomment to use the test args in utils/cl_args.py.

    TEST_ARGS = get_test_args(TEST)

    # Retrieve argparse namespace and convert to dict.
    argv = vars(get_options(TEST_ARGS=TEST_ARGS))

    # Housekeeping and config set-up

    DEBUG = argv["DEBUG"]
    VERBOSE = argv["VERBOSE"]
    INTERACTIVE = argv["INTERACTIVE"]
    CAPTURE_VIDEO = argv["CAPTURE_VIDEO"]
    HIDE_VIDEO = argv["HIDE_VIDEO"]
    INCLUDE_AUDIO = argv["INCLUDE_AUDIO"]
    TOP_10 = argv["TOP_10"]
    CORRECTIONS = argv["CORRECTIONS"]
    HIDE_DROPLET_HISTORY = argv["HIDE_DROPLET_HISTORY"]
    LOG = argv["LOG"]
    CSV = argv["CSV"]
    video_threshold = argv["threshold"]

    # Print the command line string, if we're testing.
    if VERBOSE and TEST_ARGS:
        print("\n{}{}\n".format(get_filename_from_path(__file__), TEST_ARGS))

    input_directory = resolve_directory(dir=argv['input_directory'])
    output_directory = resolve_directory(dir=argv['output_directory'])

    video_files = unpack_input_files(
        argv['video_files'],
        input_directory=input_directory,
        output_directory=output_directory,
    )

    #
    # Main loop.
    # Process each video file.
    #

    for video_filename in video_files:

        #
        # File set-up.
        #

        video_file_input_path = video_filename

        # Set up all the output file names we'll need.
        output_files = set_up_output_filenames(
            video_file_input_path,
            argv_output_dir=video_files[video_filename]['output_dir'],
        )

        # Start saving a log file if requested.
        if LOG:
            transcript = Transcript(output_files["log_file_output_path"])

        # Check to see if a manual correction file for the source video data
        # file exists, load any data, and create a new file if there isn't one.
        if CORRECTIONS:
            droplet_corrections, correction_count = get_correction_file_data(
                correction_file_path=output_files["correction_file_path"],
                output_file_path=output_files["video_file_output_path"],
                video_threshold=video_threshold,
                frame_history=argv["frame_history"],
                droplet_similarity=argv["droplet_similarity"],
                distance_threshold=argv["distance_threshold"],
            )
        else:
            droplet_corrections = None
            correction_count = None

        # Do a scan of the video file for droplets, and create the
        # master droplet catalog for the file.

        video_master = VideoFilePreprocessor(
            video_file_input_path,
            video_threshold,
            argv["border"],
            VERBOSE=VERBOSE,
        )

        #
        # Start CSV data file, if requested.
        #
        if CSV:
            # Collect initial .csv file data
            csv_file = CsvFile(video_master,
                               output_files["csv_file_output_path"], VERBOSE)
        else:
            csv_file = None

        #
        # Start of top 10 frames-by-droplet-counts
        #

        # Gather frame numbers for top 10 frames by number of droplets detected.
        # We'll use this to save those frames to image files, which will be useful for
        # evaluating droplet thresholding without creating an entire video file.
        # We'll also grab the droplet count for the frame with the highest count to
        # scale our frame graph.

        frame_droplet_counts = sorted([
            (frame_id, video_master.frames[frame_id].droplet_count)
            for frame_id in video_master.frames
        ])

        top_10_frames = [
            frame_id for frame_id, _count in sorted(
                frame_droplet_counts, key=lambda x: x[1], reverse=True)[:10]
        ]

        #
        # Second video pass.
        #

        # Start up a frame processor on the video file.

        frame_processor = VideoFrameProcessor(
            file_path=video_file_input_path,
            image_capture_file_output_path=output_files[
                'image_capture_file_output_path'],
            video_file_output_path=output_files['video_file_output_path'],
            output_frames=argv['output_frames'],
            image_threshold=video_threshold,
            similarity_threshold=argv["droplet_similarity"],
            distance_threshold=argv["distance_threshold"],
            history_frames_to_consider=argv["frame_history"],
            border_width=argv["border"],
            video_master=video_master,
            corrections=droplet_corrections,
            hide_droplet_history_in_video=HIDE_DROPLET_HISTORY,
            csv_file=csv_file,
            CAPTURE_VIDEO=CAPTURE_VIDEO,
            VERBOSE=VERBOSE,
            DEBUG=DEBUG,
        )

        #
        # Set up an MPEG file to which to write analyzed frames.
        #

        # There's some implementation-dependent weirdness here, particularly specifying
        # the FourCC codec id. I'm running OSX 10.14.6 Mojave on a MacBook Pro. YMMV.
        # Oh, and this seems to fail on my system if it tries to open a file that
        # already exists. I've added a time-stamp in the file name to
        # reduce that likelihood.

        (video_frame_width, video_frame_height) = frame_processor.frame_shape
        if CAPTURE_VIDEO:
            # Open the output file.
            video_output = cv2.VideoWriter(
                output_files["video_file_output_path"],
                # Picking an output video codec in OpenCV is buggy.
                # Trying to find a FourCC it likes didn't work. The settings
                # that work are all bogus entries, and it falls back to
                # H.264 in an MP4 v2 container.
                cv2.VideoWriter_fourcc(*"mpv4"),  # works, with a "not found" message
                # cv2.VideoWriter_fourcc(*"mpg4"),  # works; "not supported" / "not found" messages
                # cv2.VideoWriter_fourcc(*"AVC1"),  # writes an unreadable file
                # cv2.VideoWriter_fourcc(*"JPEG"),  # writes an unreadable file
                # cv2.VideoWriter_fourcc(*"ACV1"),  # works, with a message
                # cv2.VideoWriter_fourcc(*"0000"),  # bogus entry; works, with a message
                30,  # output frames per second
                (int(video_frame_width), int(video_frame_height)),
                1,  # isColor
            )

        # Video frames are numbered from 00:00; 00:29 is the 30th frame in a second.
        # I'm using 0 for timecode start, and adding 1 for "frame M of N" visual, so
        # it doesn't start with 0 for a viewer of the video. I've settled on
        # "index_frame_number" and "counting_frame_number" in the code to distinguish
        # the two numbering contexts for frames.

        # index_frame_count = -1  # Vestigial, I think. Delete?

        analysis_start_time = time.time()  # Curiosity.

        #
        # Event dispatcher for actions coming back from keyboard.
        #

        dispatcher = Dispatcher(
            interactive=INTERACTIVE,
            capture_video=CAPTURE_VIDEO,
            hide_video=HIDE_VIDEO,
            top_10=TOP_10,
            csv=CSV,
        )

        # First action and frame advance count for the video frame loop.
        action = "next"
        params = {'frames_to_advance': 1}

        params['back_disabled'] = bool(
            CAPTURE_VIDEO or CSV or (TOP_10 and not HIDE_VIDEO))

        #
        # Video frame loop.
        #

        while True:

            # When the end of a video file is reached,
            # the processor knows about it.
            if frame_processor.has_no_more_frames():
                break

            next_action_function = dispatcher.dispatch(action)
            display_frame = next_action_function(frame_processor)

            # If the dispatcher function returns None instead of
            # a video frame, we're done with this file.
            if display_frame is None:

                # # Experiment in fingerprinting frame
                # print("hash_dict = {")
                # for a, b in sorted(
                #     frame_processor._frame_dispenser.hash_dict.items(),
                #     key=lambda x: x[1],
                # ):
                #     print("    '{}': {},".format(a, b))
                # print("}")

                break

            if CAPTURE_VIDEO:
                for _ in range(argv['output_frames']):
                    video_output.write(display_frame.astype("uint8"))

            # cv2.imwrite("./saved_video_frame.png", frame_processor.processed_frame)

            if not HIDE_VIDEO:
                action, params = manage_display_and_keyboard(
                    display_frame,
                    INTERACTIVE,
                    frame_processor.file_length_in_frames,
                    frame_processor.counting_frame_number,
                    params,
                )

        # We're mostly done.

        # Brags.

        analysis_end_time = time.time()

        if (not INTERACTIVE or CAPTURE_VIDEO) and VERBOSE:
            # Interactive pauses would make the timing meaningless.
            fps = calculate_fps(
                analysis_start_time,
                analysis_end_time,
                frame_processor.counting_frame_number,
            )
            print("""
    \n\n2nd pass: {} frames,\nprocessed at {:2.1f} frames per second.""".
                  format(frame_processor.counting_frame_number, fps))
            print("""
{} droplet{} found in initial scan of video file
{} unique droplet{} after duplicate discovery
            """.format(
                sum(video_master.droplet_counts_by_frame),
                ess(sum(video_master.droplet_counts_by_frame)),
                frame_processor.video_total_droplet_count,
                ess(frame_processor.video_total_droplet_count),
            ))

        if (correction_count is not None and correction_count > 0) and VERBOSE:
            print("""
{} correction{} made by hand
            """.format(
                correction_count,
                ess(correction_count),
            ))
            if frame_processor.video_total_droplet_count > 0:
                print("""
(error rate {:.2f}%, {:.2f}% correct)
                """.format(
                    (correction_count /
                     frame_processor.video_total_droplet_count) * 100,
                    100 - ((correction_count /
                            frame_processor.video_total_droplet_count) * 100),
                ))

        # Clean-up.

        if LOG:
            transcript.close()

        if CSV:
            # Write the .csv data file.
            csv_file.write()

        if CAPTURE_VIDEO:
            video_output.release()

            # This is last, mostly because it's convenient to hang the
            # audio conversion under the CAPTURE_VIDEO test, and because it's a
            # blind launch of ffmpeg with no progress indicator. But we can time it.

            if INCLUDE_AUDIO and argv['output_frames'] == 1:
                add_audio(
                    in_file=video_file_input_path,
                    out_file=output_files["video_file_output_path"],
                    combined_file=output_files["video_audio_file_output_path"],
                    VERBOSE=VERBOSE,
                    DEBUG=DEBUG,
                )
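
The examples also call two small helpers that are not shown: ess() for pluralizing console messages and calculate_fps() for the timing summary. Minimal sketches consistent with the call sites above (assumptions, not the project's actual code):

def ess(count):
    # Pluralization helper: "1 droplet" vs. "2 droplets".
    return "" if count == 1 else "s"

def calculate_fps(start_time, end_time, frame_count):
    # Frames processed per wall-clock second, guarding against a zero interval.
    elapsed = end_time - start_time
    return frame_count / elapsed if elapsed > 0 else 0.0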