def ProcessFrame(self, frame):
        """Searches for vehicles in the provided video frame and draws their bounding boxes.

        Runs a multi-scale sliding-window search, folds the resulting hot
        windows into a rolling multi-frame heatmap queue, thresholds/labels the
        accumulated heatmap, and low-pass filters the detected regions against
        the previous frame's regions before drawing them.

        Args:
            frame: The video frame to process (assumed uint8-range pixel
                values, since it is scaled by 1/255 below — TODO confirm).

        Returns:
            The frame with the detected bounding boxes drawn, as a uint8 image
            suitable for a video stream.
        """

        # the frame needs to be converted to the format that was used during training: float [0.0, 1.0]
        frame = frame.astype(np.float32) / 255.0
        self.hotWindows = []

        # per-scale search parameters: window size, window overlap, and the
        # fractional (y, x) region of the frame to scan at that scale
        window_sizes = ((6, 6), (10, 10), (12, 12))
        overlaps = ((0.25, 0.25), (0.125, 0.125), (0.125, 0.125))
        y_ranges = ((0.55, 0.7), (0.55, 0.8), (0.5, 0.95))
        x_ranges = ((0.5, 1.0), (0.5, 1.0), (0.4, 1.0))

        # loop through all window sizes and search the image
        for window, overlap, y_range, x_range in zip(window_sizes, overlaps,
                                                     y_ranges, x_ranges):
            scene = Scene(frame,
                          window_size=window,
                          color_space=self.colorSpace,
                          hog_color_channel=self.hogColorChannel)
            # only the hot (positive) windows are used; the second return
            # value is deliberately discarded
            hot, _ = scene.SearchWindows(y_range, x_range, overlap,
                                         self.classifier)
            self.hotWindows.append(hot)

        # get the heatmap for the current frame and add it to the historical frames.
        # NOTE: `scene` is intentionally the instance from the LAST loop
        # iteration; it is reused for heatmap/region/drawing helpers below.
        heatmap = scene.DrawHeatMap(self.hotWindows)
        # push a fresh zero accumulator for the newest frame, then fold this
        # frame's heatmap into EVERY queued accumulator — so the oldest entry
        # (popped below) has accumulated heat from len(queue) frames
        self.heatmapQueue.appendleft(
            np.zeros(shape=self.heatmapSize, dtype=np.float32))
        for hmap in self.heatmapQueue:
            hmap += heatmap

        # finally, get the detected regions from the multi-frame heatmap
        detected_regions = scene.GetLabeledRegions(
            heatmap=self.heatmapQueue.pop(), threshold=self.threshold)
        detected_regions = self._sanitizeRegions(detected_regions)

        # extract the windows from the detected_regions and low-pass filter them WRT time;
        # filtering is only meaningful when the region count matches the previous frame's
        if self.previousRegions is not None and len(
                self.previousRegions) == len(detected_regions[1]):
            try:
                filtered_regions = self._filterRegions(detected_regions)
                out_frame = scene.DrawWindows([filtered_regions])
                self.previousRegions = filtered_regions[1]
            except ValueError:
                # filtering failed — fall back to drawing the raw detections
                out_frame = scene.DrawWindows([detected_regions])
                self.previousRegions = detected_regions[1]
        else:
            # there is a mis-match in number of detected regions so just ignore the previous regions
            out_frame = scene.DrawWindows([detected_regions])
            self.previousRegions = detected_regions[1]

        # format the return image for video stream
        return np.uint8(out_frame * 255)
 def DrawPositiveDetections(self, frame):
     """Draws the raw positive (hot) windows from the most recent search.

     Args:
         frame: The image on which to draw the hot windows.

     Returns:
         The frame with the last search's hot windows drawn on it.
     """
     # no per-scale search settings are needed just to draw, so a default
     # Scene suffices here
     return Scene(frame).DrawWindows(self.hotWindows)