import numpy as np

# Note: `WorkerThread` and `regions.get_overlapping_slices` are provided by
# other modules of this project and are assumed to be importable here.


class BackgroundExtractor(object):
    """ a class that averages multiple frames of a movie to do background
    extraction """

    def __init__(self, parameters, blur_function=None, object_radius=0,
                 use_threads=True):
        """ initialize the background extractor
        `parameters` is a dictionary of parameters influencing the algorithm
        `blur_function` is an optional function that, if given, supplies a
            blurred image of the background via the `blurred` property
        `object_radius` is an additional parameter that influences how the
            background extraction is done
        """
        self.image = None
        self.image_uint8 = None
        self._adaptation_rate = None
        self.params = parameters
        self._blurred = None

        if blur_function:
            self._blur_worker = WorkerThread(blur_function,
                                             use_threads=use_threads)
        else:
            self._blur_worker = None

        if object_radius > 0:
            # Create a simple template of the mouse, which will be used to
            # update the background image only away from the mouse. The
            # template consists of a core region of maximal intensity and a
            # ring region with gradually decreasing intensity.

            # determine the sizes of the different regions
            size_core = object_radius
            size_ring = 3 * object_radius
            size_total = size_core + size_ring

            # build a filter for finding the mouse position
            x, y = np.ogrid[-size_total:size_total + 1,
                            -size_total:size_total + 1]
            r = np.sqrt(x**2 + y**2)

            # build the mouse template
            object_mask = (
                # inner circle of ones
                (r <= size_core).astype(float)
                # + outer region that falls off
                + np.exp(-((r - size_core) / size_core)**2)  # smooth function from 1 to 0
                * (size_core < r)  # mask on ring region
            )

            self._object_mask = 1 - object_mask
        else:
            self._object_mask = None

    def update(self, frame, tracks=None):
        """ update the background with the current frame """
        if self.image is None:
            # initialize the background model with the first frame
            self.image = frame.astype(np.double, copy=True)
            self.image_uint8 = frame.astype(np.uint8, copy=True)
            self._adaptation_rate = np.empty_like(frame, np.double)
            if self._blur_worker:
                self._blur_worker.put(self.image)  #< initialize background worker

        # check whether there are currently objects tracked
        if tracks:
            # load some values from the cache
            adaptation_rate = self._adaptation_rate
            adaptation_rate.fill(self.params['adaptation_rate'])

            # cut out holes from the adaptation_rate for each object estimate
            for obj in tracks:
                # get the slices required for comparing the template to the image
                t_s, i_s = regions.get_overlapping_slices(
                    obj.last.pos, self._object_mask.shape, frame.shape)

                # create a mask with zeros where the object is
                object_mask = self._object_mask[t_s[0], t_s[1]]
                # mask the object in the adaptation rate
                adaptation_rate[i_s[0], i_s[1]] *= object_mask

        else:
            # use the default adaptation rate everywhere when the mouse
            # position is unknown
            adaptation_rate = self.params['adaptation_rate']

        # adapt the background to the current frame, but only inside the mask
        self.image += adaptation_rate * (frame - self.image)

        # initiate the blurring of the image if requested
        if self._blur_worker:
            self._blurred = self._blur_worker.get()
            self._blur_worker.put(self.image)

    @property
    def blurred(self):
        """ returns a blurred version of the image if `blur_function` was
        defined. This blurred image might be based on the previous background
        image and not the current one, which shouldn't make any difference
        since the background typically evolves slowly """
        if self._blurred is None:
            self._blurred = self._blur_worker.get()
        return self._blurred
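# The classes in this module rely on a `WorkerThread` helper that is defined
# elsewhere in the project. Only a small interface is assumed here: the
# constructor wraps a function, `put(*args)` schedules a call to that function
# (in a background thread if `use_threads` is True), and `get()` blocks until
# the result is available. The class below is a minimal, hypothetical sketch
# of that interface for illustration; it is not the project's actual
# implementation.
import threading
try:
    import queue            # Python 3
except ImportError:
    import Queue as queue   # Python 2


class WorkerThreadSketch(object):
    """ runs a single function in a background thread, one job at a time """

    def __init__(self, function, use_threads=True):
        self.function = function
        self.use_threads = use_threads
        if use_threads:
            # queues of length one: there is at most one pending job/result
            self._tasks = queue.Queue(maxsize=1)
            self._results = queue.Queue(maxsize=1)
            worker = threading.Thread(target=self._run)
            worker.daemon = True
            worker.start()
        else:
            self._result = None

    def _run(self):
        """ worker loop: take arguments, call the function, store the result """
        while True:
            args = self._tasks.get()
            self._results.put(self.function(*args))

    def put(self, *args):
        """ schedule the call `function(*args)` """
        if self.use_threads:
            self._tasks.put(args)
        else:
            self._result = self.function(*args)

    def get(self):
        """ wait for the scheduled call to finish and return its result """
        if self.use_threads:
            return self._results.get()
        else:
            return self._result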
class VideoPreprocessor(object):
    """ class that reads a video in a separate thread and applies additional
    functions to each frame using extra threads.

    Example:
        Given a `video` and a function `blur_frame` that takes an image and
        returns a blurred one, the class can be used as follows:

            video_processor = VideoPreprocessor(video, {'blur': blur_frame})
            for data in video_processor:
                frame_raw = data['raw']
                frame_blurred = data['blur']

    Importantly, the functions used for preprocessing should release the
    python global interpreter lock (GIL) most of the time so that multiple
    threads can run concurrently.
    """

    def __init__(self, video, functions, preprocess=None, use_threads=True):
        """ initializes the preprocessor
        `video` is the video to be iterated over
        `functions` is a dictionary of functions that should be applied while
            iterating
        `preprocess` can be a function that will be applied to each frame
            before any of the other functions are run
        """
        if 'raw' in functions:
            raise KeyError('The key `raw` is reserved for the raw frame and '
                           'may not be used for functions.')

        self.length = len(video)
        self.video_iter = iter(video)
        self.functions = functions
        self.preprocess = preprocess

        # initialize internal structures
        self._frame = None

        # initialize the background workers
        self._worker_next_frame = WorkerThread(self._get_next_frame,
                                               use_threads=use_threads)
        self._workers = {name: WorkerThread(func, use_threads=use_threads)
                         for name, func in self.functions.iteritems()}

        # fetch the first frame and start processing it
        self._init_next_processing(self._get_next_frame())

    def __len__(self):
        return self.length

    def _get_next_frame(self):
        """ get the next frame and preprocess it if necessary """
        try:
            frame = self.video_iter.next()
        except StopIteration:
            frame = None
        else:
            if self.preprocess:
                frame = self.preprocess(frame)
        return frame

    def _init_next_processing(self, frame_next):
        """ prepare the next processed frame in the background
        `frame_next` is the raw data of this frame """
        self._frame = frame_next

        # ask all workers to process this frame
        for worker in self._workers.itervalues():
            worker.put(frame_next)

        # ask for the next frame
        self._worker_next_frame.put()

    def __iter__(self):
        return self

    def next(self):
        """ grab the raw and processed data of the next frame """
        # check whether there is data available
        if self._frame is None:
            raise StopIteration

        # grab all results for the current frame
        result = {name: worker.get()
                  for name, worker in self._workers.iteritems()}
        # store the raw data of the current frame
        result['raw'] = self._frame

        # grab the next frame
        frame_next = self._worker_next_frame.get()
        if frame_next is None:
            # Stop the iteration in the next step. We still have to return
            # from this call since we have results to hand back.
            self._frame = None
        else:
            # start fetching the results for this next frame
            self._init_next_processing(frame_next)

        # while this is underway, return the current results
        return result
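# The two classes are designed to work together: `VideoPreprocessor` fetches
# and processes frames in background threads while `BackgroundExtractor`
# maintains a running estimate of the static background. The sketch below
# shows one way they could be combined. The synthetic `video`, the
# `blur_frame` function, and the parameter values are made-up placeholders;
# only the 'adaptation_rate' key is actually read by the code above.

def _example_usage():
    import numpy as np
    from scipy import ndimage

    def blur_frame(image):
        # hypothetical preprocessing function; any blur that releases the GIL
        # (scipy and OpenCV filters do) is suitable
        return ndimage.gaussian_filter(image.astype(np.double), sigma=3)

    # stand-in for a real video: a list of ten random gray-scale frames
    video = [np.random.randint(0, 256, (480, 640)).astype(np.uint8)
             for _ in range(10)]

    background = BackgroundExtractor({'adaptation_rate': 0.01},
                                     blur_function=blur_frame,
                                     object_radius=30)

    for data in VideoPreprocessor(video, {'blur': blur_frame}):
        frame = data['raw']           # the unmodified frame
        frame_blurred = data['blur']  # the frame processed in a worker thread

        # `tracks` would normally come from an object tracker; without it the
        # background adapts everywhere at the default rate
        background.update(frame, tracks=None)

    return background.image           # the current background estimate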