Example #1
    def get_match_ranges(self):

        # Use a random sample of frames to calibrate the percent template scale.
        self.initialize_template_scale()

        # Iterate through the video to identify when percent is present.
        pct_timeline = self.get_pct_timeline()

        # Fill short gaps in the timeline, then filter out timeline
        # sections that are shorter than the minimum match length.
        clean_timeline = timeline.fill_filter(pct_timeline,
                                              self.max_tl_gap_size)
        clean_timeline = timeline.size_filter(clean_timeline, self.step_size,
                                              self.min_match_length_s)

        # Convert the cleaned timeline ranges into frame-number match ranges.
        timeline_ranges = timeline.get_ranges(clean_timeline)
        match_ranges = np.multiply(timeline_ranges, self.step_size)

        # Refine the match ranges with a precise sweep and return the result.
        new_match_ranges = self.get_precise_match_ranges(match_ranges)
        return new_match_ranges.tolist()
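
The fill/size filtering above is the core of the noise removal: short gaps in the detection timeline are closed, then any remaining run too short to be a real match is dropped. Below is a minimal, self-contained sketch of that idea on a boolean presence timeline; the real timeline.fill_filter and timeline.size_filter helpers may differ in signature and behavior.

from itertools import groupby

def fill_gaps(timeline, max_gap):
    # Close interior gaps (runs of False) no longer than max_gap samples.
    runs = [(key, len(list(grp))) for key, grp in groupby(timeline)]
    cleaned = []
    for idx, (key, length) in enumerate(runs):
        interior = 0 < idx < len(runs) - 1
        if not key and interior and length <= max_gap:
            cleaned.extend([True] * length)
        else:
            cleaned.extend([key] * length)
    return cleaned

def drop_short_runs(timeline, min_len):
    # Discard runs of True shorter than min_len samples.
    cleaned = []
    for key, grp in groupby(timeline):
        length = len(list(grp))
        cleaned.extend([key and length >= min_len] * length)
    return cleaned

raw = [True, True, False, True, True, False, False, False, True]
print(drop_short_runs(fill_gaps(raw, max_gap=1), min_len=4))
# [True, True, True, True, True, False, False, False, False]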
Example #2
    def timeline_test(self):

        # Use a random sample of frames to calibrate the percent template scale.
        start_time = time.time()
        self.initialize_template_scale()
        util.display_fps(start_time, self.num_init_frames, "Initialize")

        # Iterate through the video to identify when percent is present.
        start_time = time.time()
        pct_timeline = self.get_pct_timeline()
        frame_count = (self.stop_fnum - self.start_fnum) // self.step_size
        util.display_fps(start_time, frame_count, "Initial Sweep")

        # Fill short gaps in the timeline, then filter out timeline
        # sections that are shorter than the minimum match length.
        clean_timeline = timeline.fill_filter(pct_timeline,
                                              self.max_tl_gap_size)
        clean_timeline = timeline.size_filter(clean_timeline, self.step_size,
                                              self.min_match_length_s)
        if self.show_flag:
            timeline.show_plots(pct_timeline, clean_timeline, ["pct found"])

        # Compute the frame-number match ranges, and display their boundary
        # frames if the show flag is enabled.
        timeline_ranges = timeline.get_ranges(clean_timeline)
        match_ranges = np.multiply(timeline_ranges, self.step_size)
        if self.show_flag:
            util.show_frames(self.capture, match_ranges.flatten())

        # Compute the precise match ranges, then print them and display the
        # associated frames if the show flag is enabled.
        start_time = time.time()
        new_match_ranges = self.get_precise_match_ranges(match_ranges)
        util.display_total_time(start_time, "Cleaning Sweep")
        print("\tMatch Ranges: {:}".format(match_ranges.tolist()))
        print("\tPrecise Match Ranges: {:}".format(new_match_ranges.tolist()))
        if self.show_flag:
            util.show_frames(self.capture, new_match_ranges.flatten())
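
timeline.get_ranges is assumed here to turn the cleaned timeline into [start, end] sample-index pairs, which np.multiply then scales by step_size into frame numbers. A small stand-alone sketch of that conversion follows; the get_ranges function below is an illustration, not the project's implementation.

import numpy as np

def get_ranges(timeline):
    # Collect [start, end] index pairs for each contiguous run of True.
    ranges, start = [], None
    for i, active in enumerate(timeline):
        if active and start is None:
            start = i
        elif not active and start is not None:
            ranges.append([start, i - 1])
            start = None
    if start is not None:
        ranges.append([start, len(timeline) - 1])
    return ranges

clean = [False, True, True, True, False, False, True, True]
step_size = 30
timeline_ranges = get_ranges(clean)                 # [[1, 3], [6, 7]]
match_ranges = np.multiply(timeline_ranges, step_size)
print(match_ranges.tolist())                        # [[30, 90], [180, 210]]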
Example #3
    def standard_test(self):

        # Create a timeline of the label history, storing labels as integers
        # and frames with no result as (-1). Also create a bounding box list.
        dirty_timeline, bbox_hist = list(), list()

        # Iterate through the video and use tfnet to perform object detection.
        start_time = time.time()
        for current_frame in range(0, self.end_fnum, self.step_size):
            self.capture.set(cv2.CAP_PROP_POS_FRAMES, current_frame)
            _, frame = self.capture.read()

            # Get the tfnet result with the largest confidence and extract info.
            bbox, label, confidence = self.get_tfnet_result(frame)

            # Store label if result found, or (-1) if no result was found.
            if label:
                dirty_timeline.append(LABELS_LIST.index(label))
                bbox_hist.append(bbox)
            else:
                dirty_timeline.append(-1)
                bbox_hist.append(-1)

            # Display the frame if show_flag is enabled. Exit if q pressed.
            if self.show_flag:
                if confidence:
                    text = '{}: {:.0f}%'.format(label, confidence * 100)
                    util.show_frame(frame, bbox_list=[bbox], text=text)
                else:
                    util.show_frame(frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            # Save the frame if the save_flag is enabled.
            if self.save_flag:
                cv2.imwrite('output/frame%07d.png' % current_frame, frame)

        # Display the frame rate achieved during the initial detection sweep.
        util.display_fps(start_time, len(dirty_timeline), "Initial Sweep")

        # Fill short gaps in the history timeline list, then filter out
        # timeline sections that are shorter than the minimum match length.
        clean_timeline = timeline.fill_filter(dirty_timeline,
                                              self.timeline_empty_thresh)
        clean_timeline = timeline.size_filter(clean_timeline, self.step_size,
                                              self.min_match_length_s)
        timeline.show_plots(dirty_timeline, clean_timeline, LABELS_LIST)

        # Get a list of the matches and avg bboxes according to clean_timeline.
        match_ranges = timeline.get_ranges(clean_timeline)
        match_bboxes = self.get_match_bboxes(match_ranges, bbox_hist)

        # Show the beginning and end of each match according to the filters.
        display_frames, display_bboxes = list(), list()
        for i, match_range in enumerate(match_ranges):
            display_frames += [
                match_range[0] * self.step_size,
                match_range[1] * self.step_size
            ]
            display_bboxes += [match_bboxes[i], match_bboxes[i]]
        util.show_frames(self.capture, display_frames, display_bboxes)
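
The get_match_bboxes helper is not shown in these examples; the sketch below is only a plausible stand-in for its role, assuming each stored bbox is an (x1, y1, x2, y2) tuple and that missing detections are stored as -1 exactly as in the loop above.

import numpy as np

def get_match_bboxes(match_ranges, bbox_hist):
    # Average every detected box inside each match range, skipping the -1
    # placeholders that mark frames with no detection.
    match_bboxes = []
    for start, end in match_ranges:
        boxes = [b for b in bbox_hist[start:end + 1] if b != -1]
        match_bboxes.append(tuple(int(v) for v in np.mean(boxes, axis=0)))
    return match_bboxes

bbox_hist = [(10, 10, 50, 50), -1, (14, 12, 54, 48), (12, 10, 52, 50)]
print(get_match_bboxes([[0, 3]], bbox_hist))        # [(12, 10, 52, 49)]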