Code Example #1
File: percent_matching.py Project: xcode6/smashscan
    def calibrate_test(self):

        # Iterate through input video range. During each iteration, fetch the
        # frame and obtain the optimal calibrated template size.
        start_time = time.time()
        for fnum in range(self.start_fnum, self.stop_fnum, self.step_size):
            frame = util.get_frame(self.capture, fnum, self.gray_flag)
            bbox, opt_conf, opt_w, opt_h = self.get_calibrate_results(frame)

            # Get the percent-sign confidence for the default template size,
            # i.e. the (24, 32) template rescaled to (18, 24) by the default
            # (480, 584) to (360, 640) frame resize.
            orig_conf_list, _ = self.get_tm_results(frame, 1, 0)

            # Display frame with a confidence label if show_flag is enabled.
            if self.show_flag:
                label = "({}, {}) {:0.3f} -> {:0.3f}".format(
                    opt_w, opt_h, orig_conf_list[0], opt_conf)
                util.show_frame(frame, [bbox], label, self.save_flag,
                                "output/{:07d}.png".format(fnum))
                if cv2.waitKey(self.wait_length) & 0xFF == ord('q'):
                    break

        # Display the time taken to complete the test.
        frame_count = (self.stop_fnum - self.start_fnum) // self.step_size
        util.display_fps(start_time, frame_count, "Calibrate")
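
get_calibrate_results is not listed above, but the calibrate step presumably sweeps a range of template sizes and keeps the one with the best match score. Below is a minimal sketch of that idea, assuming grayscale inputs and OpenCV's TM_CCOEFF_NORMED matcher; the function name, scale range, and (x1, y1, x2, y2) bbox format are illustrative assumptions, not the project's actual code.

import cv2
import numpy as np

def calibrate_template_size(frame, template, scales=np.linspace(0.5, 1.5, 11)):
    # Try each scale, run normalized cross-correlation, and keep the best.
    best_bbox, best_conf, best_w, best_h = None, -1.0, 0, 0
    for s in scales:
        w = max(1, int(template.shape[1] * s))
        h = max(1, int(template.shape[0] * s))
        if w > frame.shape[1] or h > frame.shape[0]:
            continue
        resized = cv2.resize(template, (w, h))
        result = cv2.matchTemplate(frame, resized, cv2.TM_CCOEFF_NORMED)
        _, max_val, _, max_loc = cv2.minMaxLoc(result)
        if max_val > best_conf:
            best_bbox = (max_loc[0], max_loc[1], max_loc[0] + w, max_loc[1] + h)
            best_conf, best_w, best_h = max_val, w, h
    return best_bbox, best_conf, best_w, best_h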
Code Example #2
File: percent_matching.py Project: xcode6/smashscan
    def sweep_test(self):

        # Iterate through input video range. During each iteration, fetch the
        # frame and obtain the percent template confidences and bounding boxes.
        start_time = time.time()
        for fnum in range(self.start_fnum, self.stop_fnum, self.step_size):
            frame = util.get_frame(self.capture, fnum, self.gray_flag)
            confidence_list, bbox_list = self.get_tm_results(frame, 4, 0)

            # Display and save frame if the respective flags are enabled.
            if self.show_flag:
                label_list = ["{:0.3f}".format(i) for i in confidence_list]
                label = " ".join(label_list)
                util.show_frame(frame, bbox_list, label, self.save_flag,
                                "output/{:07d}.png".format(fnum))
                if cv2.waitKey(self.wait_length) & 0xFF == ord('q'):
                    break

        # Display the time taken to complete the test.
        frame_count = (self.stop_fnum - self.start_fnum) // self.step_size
        util.display_fps(start_time, frame_count, "Sweep")
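
sweep_test requests the four strongest percent-sign matches per frame from get_tm_results. One plausible way to pull several peaks out of a single cv2.matchTemplate response is repeated peak-picking with a crude suppression window around each accepted match; the helper below is a hypothetical sketch for grayscale inputs, not the project's implementation, but it returns the same (confidence_list, bbox_list) shape the loop above expects.

import cv2

def top_k_matches(frame, template, k=4):
    # Run template matching once, then repeatedly take the global maximum
    # and blank out its neighbourhood so later picks land elsewhere.
    h, w = template.shape[:2]
    result = cv2.matchTemplate(frame, template, cv2.TM_CCOEFF_NORMED)
    confidence_list, bbox_list = [], []
    for _ in range(k):
        _, max_val, _, (x, y) = cv2.minMaxLoc(result)
        confidence_list.append(max_val)
        bbox_list.append((x, y, x + w, y + h))
        result[max(0, y - h):y + h, max(0, x - w):x + w] = -1.0
    return confidence_list, bbox_list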
Code Example #3
File: percent_matching.py Project: xcode6/smashscan
    def initialize_test(self):

        # Generate random frames to search for a proper template size.
        start_time, opt_w_list, bbox_list = time.time(), list(), list()
        random_fnum_list = np.random.randint(low=self.start_fnum,
                                             high=self.stop_fnum,
                                             size=self.num_init_frames)

        # Iterate through input video range. During each iteration, fetch the
        # frame and obtain the optimal calibrated template size.
        print("(opt_w, opt_h), (bbox), random_fnum, opt_conf")
        for random_fnum in random_fnum_list:
            frame = util.get_frame(self.capture, random_fnum, self.gray_flag)
            bbox, opt_conf, opt_w, opt_h = self.get_calibrate_results(frame)

            # Store the template width if above a confidence threshold.
            if opt_conf > self.conf_thresh:
                opt_w_list.append(opt_w)
                bbox_list.append(bbox)
                print((opt_w, opt_h), bbox, random_fnum, opt_conf)

            # Display frame with a confidence label if show_flag is enabled.
            if self.show_flag:
                orig_conf_list, _ = self.get_tm_results(frame, 1, 0)
                label = "({}, {}) {:0.3f} -> {:0.3f}".format(
                    opt_w, opt_h, orig_conf_list[0], opt_conf)
                util.show_frame(frame, [bbox], label, self.save_flag,
                                "output/{:07d}.png".format(random_fnum))
                if cv2.waitKey(self.wait_length) & 0xFF == ord('q'):
                    break

        # Display the optimal dims, ROI, and time taken to complete the test.
        opt_w, opt_h = self.get_opt_template_dims(opt_w_list)
        self.template_roi = self.get_opt_template_roi(bbox_list)
        print("Optimal Template Size: ({}, {})".format(opt_w, opt_h))
        print("Optimal ROI bbox: {}".format(self.template_roi))
        util.display_fps(start_time, self.num_init_frames, "Initialize")
        if self.show_flag:
            util.show_frame(frame, [self.template_roi], wait_flag=True)
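
get_opt_template_dims and get_opt_template_roi are not shown. A reasonable reduction is to take the median of the per-frame calibrated widths, derive the height from a fixed aspect ratio, and union the accepted bounding boxes into one padded search ROI. The helpers below are hypothetical stand-ins; the aspect ratio, padding, and (x1, y1, x2, y2) bbox layout are assumptions rather than the project's code.

import numpy as np

def aggregate_template_dims(opt_w_list, h_over_w=4.0 / 3.0):
    # The median width is robust to a few miscalibrated frames; the height
    # follows from an assumed width-to-height ratio.
    opt_w = int(np.median(opt_w_list))
    return opt_w, int(round(opt_w * h_over_w))

def aggregate_roi(bbox_list, padding=4):
    # Tightest box containing every accepted per-frame bbox, plus a small
    # margin, used as the region of interest for later sweeps.
    boxes = np.array(bbox_list, dtype=int)  # rows assumed to be (x1, y1, x2, y2)
    return (int(boxes[:, 0].min()) - padding, int(boxes[:, 1].min()) - padding,
            int(boxes[:, 2].max()) + padding, int(boxes[:, 3].max()) + padding)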
Code Example #4
File: percent_matching.py Project: xcode6/smashscan
    def timeline_test(self):

        # Use a random number of frames to calibrate the percent template size.
        start_time = time.time()
        self.initialize_template_scale()
        util.display_fps(start_time, self.num_init_frames, "Initialize")

        # Iterate through the video to identify when percent is present.
        start_time = time.time()
        pct_timeline = self.get_pct_timeline()
        frame_count = (self.stop_fnum - self.start_fnum) // self.step_size
        util.display_fps(start_time, frame_count, "Initial Sweep")

        # Fill holes in the history timeline list, and filter out timeline
        # sections that are smaller than a particular size.
        clean_timeline = timeline.fill_filter(pct_timeline,
                                              self.max_tl_gap_size)
        clean_timeline = timeline.size_filter(clean_timeline, self.step_size,
                                              self.min_match_length_s)
        if self.show_flag:
            timeline.show_plots(pct_timeline, clean_timeline, ["pct found"])

        # Display the frames associated with the calculated match ranges.
        timeline_ranges = timeline.get_ranges(clean_timeline)
        match_ranges = np.multiply(timeline_ranges, self.step_size)
        if self.show_flag:
            util.show_frames(self.capture, match_ranges.flatten())

        # Display the frames associated with the precise match ranges.
        start_time = time.time()
        new_match_ranges = self.get_precise_match_ranges(match_ranges)
        util.display_total_time(start_time, "Cleaning Sweep")
        print("\tMatch Ranges: {:}".format(match_ranges.tolist()))
        print("\tPrecise Match Ranges: {:}".format(new_match_ranges.tolist()))
        if self.show_flag:
            util.show_frames(self.capture, new_match_ranges.flatten())
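
timeline.fill_filter and timeline.size_filter are not listed here; the cleaning they name amounts to closing short gaps in a boolean presence timeline and then dropping sections too short to be a match. Below is a self-contained sketch of that logic, assuming the timeline is a flat list of booleans and that both thresholds are given in timeline entries (the real module works with a step size and a minimum length in seconds).

def runs(flags):
    # Yield (value, start, end) for each maximal run of equal values.
    start = 0
    for i in range(1, len(flags) + 1):
        if i == len(flags) or flags[i] != flags[start]:
            yield flags[start], start, i
            start = i

def fill_and_size_filter(flags, max_gap, min_len):
    # Close interior gaps of at most max_gap entries, then remove any
    # remaining positive section shorter than min_len entries.
    filled = list(flags)
    for value, start, end in runs(flags):
        if not value and end - start <= max_gap and 0 < start and end < len(flags):
            filled[start:end] = [True] * (end - start)
    cleaned = list(filled)
    for value, start, end in runs(filled):
        if value and end - start < min_len:
            cleaned[start:end] = [False] * (end - start)
    return cleaned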
Code Example #5
    def standard_test(self):

        # Create a timeline of the label history, where labels are stored as
        # integers and (-1) is recorded when no result is found. Also create
        # a bounding box list.
        dirty_timeline, bbox_hist = list(), list()

        # Iterate through video and use tfnet to perform object detection.
        start_time = time.time()
        for current_frame in range(0, self.end_fnum, self.step_size):
            self.capture.set(cv2.CAP_PROP_POS_FRAMES, current_frame)
            _, frame = self.capture.read()

            # Get the tfnet result with the largest confidence and extract info.
            bbox, label, confidence = self.get_tfnet_result(frame)

            # Store label if result found, or (-1) if no result was found.
            if label:
                dirty_timeline.append(LABELS_LIST.index(label))
                bbox_hist.append(bbox)
            else:
                dirty_timeline.append(-1)
                bbox_hist.append(-1)

            # Display the frame if show_flag is enabled. Exit if q pressed.
            if self.show_flag:
                if confidence:
                    text = '{}: {:.0f}%'.format(label, confidence * 100)
                    util.show_frame(frame, bbox_list=[bbox], text=text)
                else:
                    util.show_frame(frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            # Save the frame if the save_flag is enabled.
            if self.save_flag:
                cv2.imwrite('output/frame%07d.png' % current_frame, frame)

        # End the TfNet session and display time taken to complete.
        util.display_fps(start_time, len(dirty_timeline), "Initial Sweep")

        # Fill holes in the history timeline list, and filter out timeline
        # sections that are smaller than a particular size.
        clean_timeline = timeline.fill_filter(dirty_timeline,
                                              self.timeline_empty_thresh)
        clean_timeline = timeline.size_filter(clean_timeline, self.step_size,
                                              self.min_match_length_s)
        timeline.show_plots(dirty_timeline, clean_timeline, LABELS_LIST)

        # Get a list of the matches and avg bboxes according to clean_timeline.
        match_ranges = timeline.get_ranges(clean_timeline)
        match_bboxes = self.get_match_bboxes(match_ranges, bbox_hist)

        # Show the beginning and end of each match according to the filters.
        display_frames, display_bboxes = list(), list()
        for i, match_range in enumerate(match_ranges):
            display_frames += [
                match_range[0] * self.step_size,
                match_range[1] * self.step_size
            ]
            display_bboxes += [match_bboxes[i], match_bboxes[i]]
        util.show_frames(self.capture, display_frames, display_bboxes)
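
get_match_bboxes is not shown above; a plausible implementation averages the per-frame detections inside each match range while skipping the -1 placeholders stored when nothing was found. The sketch below is a hypothetical stand-in for that method and assumes the bboxes were recorded as (x1, y1, x2, y2) tuples, which may differ from the project's actual structure.

import numpy as np

def average_match_bboxes(match_ranges, bbox_hist):
    # Average the detected boxes inside each (start, end) range, ignoring
    # the -1 entries that mark frames with no detection.
    match_bboxes = []
    for start, end in match_ranges:
        boxes = [b for b in bbox_hist[start:end] if b != -1]
        if boxes:
            mean_box = np.array(boxes, dtype=float).mean(axis=0)
            match_bboxes.append(tuple(int(round(v)) for v in mean_box))
        else:
            match_bboxes.append(None)
    return match_bboxes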