def get_last_positions(self, absolute=False):
    """
    The last position of the animal monitored by this `TrackingUnit`.

    :param absolute: Whether the position should be relative to the top left corner of
        the raw frame (`True`), or to the top left of the used ROI (`False`).
    :return: A container with the last variables recorded for this ROI.
    :rtype: :class:`~ethoscope.core.data_point.DataPoint`
    """
    if len(self._tracker.positions) < 1:
        return []

    last_positions = self._tracker.positions[-1]
    if not absolute:
        return last_positions

    out = []
    for last_pos in last_positions:
        tmp_out = []
        for k, i in last_pos.items():
            if isinstance(i, BaseRelativeVariable):
                tmp_out.append(i.to_absolute(self.roi))
            else:
                tmp_out.append(i)
        out.append(DataPoint(tmp_out))
    return out
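
# Minimal usage sketch (not part of the library): `unit` stands for a
# TrackingUnit that has already recorded at least one position.
rel_points = unit.get_last_positions()               # ROI-relative DataPoints
abs_points = unit.get_last_positions(absolute=True)  # raw-frame DataPoints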
def make_one_point(self):
    out = DataPoint([
        DummyBoolVariable(bool(int(random.uniform(0, 2)))),
        DummyIntVariable(random.uniform(0, 1000)),
        DummyDistVariable(random.uniform(0, 1000)),
        XVar(random.uniform(0, 1000)),
        YVar(random.uniform(0, 1000)),
    ])
    return out
def extract_features(self, hull):
    (x, y), (w, h), angle = cv2.minAreaRect(hull)
    if w < h:
        angle -= 90
        w, h = h, w
    angle = angle % 180

    h_im = min(self._buff_fg.shape)
    w_im = max(self._buff_fg.shape)
    max_h = 2 * h_im
    if w > max_h or h > max_h:
        raise NoPositionError

    self.ellipse = self.object_mask(**{"x": x, "y": y, "w": w, "h": h, "angle": angle},
                                    roi=self._buff_fg)
    # cv2.ellipse(self._buff_fg, ((x, y), (int(w * 1.5), int(h * 1.5)), angle), 255, -1)

    # TODO center mass just on the ellipse area
    cv2.bitwise_and(self._buff_fg_backup, self._buff_fg, self._buff_fg_backup)
    cv2.bitwise_and(self._buff_fg_backup, self.ellipse, self._buff_fg_backup)

    y, x = ndimage.measurements.center_of_mass(self._buff_fg_backup)
    pos = x + 1.0j * y
    pos /= w_im

    xy_dist = round(log10(1. / float(w_im) + abs(pos - self._old_pos)) * 1000)

    # cv2.bitwise_and(self._buff_fg_diff, self._buff_fg, dst=self._buff_fg_diff)
    # sum_diff = cv2.countNonZero(self._buff_fg_diff)
    # xor_dist = (sum_fg + self._old_sum_fg - 2 * sum_diff) / float(sum_fg + self._old_sum_fg)
    # xor_dist *= 1000.
    # self._old_sum_fg = sum_fg
    self._old_pos = pos

    x_var = XPosVariable(int(round(x)))
    y_var = YPosVariable(int(round(y)))
    distance = XYDistance(int(xy_dist))
    # xor_dist = XorDistance(int(xor_dist))
    w_var = WidthVariable(int(round(w)))
    h_var = HeightVariable(int(round(h)))
    phi_var = PhiVariable(int(round(angle)))
    # mlogl = mLogLik(int(distance * 1000))

    out = DataPoint([x_var, y_var, w_var, h_var, phi_var,
                     # mlogl,
                     distance,
                     # xor_dist,
                     # Label(0)
                     ])

    self._previous_shape = np.copy(hull)
    return [out]
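
# The rotated-rectangle normalisation above (force w >= h, angle in
# [0, 180)) recurs in every tracker in this module. A self-contained
# sketch of that convention; the helper name is hypothetical.
import cv2

def normalised_min_area_rect(points):
    """Fit a rotated rectangle and normalise it so that width >= height
    and the angle lies in [0, 180)."""
    (x, y), (w, h), angle = cv2.minAreaRect(points)
    if w < h:
        angle -= 90
        w, h = h, w
    return (x, y), (w, h), angle % 180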
def _run(self, cursor=None):
    try:
        for index, (t_ms, img) in self.camera_iterator:
            if t_ms > self.end_fragment:
                break
            if cursor:
                template = "SELECT x, y, w, h, phi, xy_dist_log10x1000 FROM ROI_%d WHERE t = %d"
                command = template % (self._region_id, t_ms)
                cursor.execute(command)
                try:
                    X = next(iter(cursor))
                except Exception:
                    # an exception will happen when the t queried
                    # is not available in the dbfile
                    # even though it is in the video.
                    # This happens if the dbfile is generated
                    # passing a drop-each argument != 1,
                    # i.e. the dbfile is subsampled
                    if self._all_frames:
                        self.add(img)
                    continue

                xpos, ypos, width, height, phi, xy_dist = X
                x_var = XPosVariable(xpos)
                y_var = YPosVariable(ypos)
                h_var = HeightVariable(height)
                w_var = WidthVariable(width)
                phi_var = PhiVariable(phi)
                distance = XYDistance(xy_dist)

                point = DataPoint([x_var, y_var, w_var, h_var, phi_var, distance])
                self.positions.append(point)
                abs_pos = self.get_last_positions(absolute=True)
                self._last_positions[self.roi.idx] = abs_pos
                out = self.drawer.draw(img, tracking_units=[self],
                                       positions=self._last_positions, roi=True)
            else:
                out = img
            self.add(out)
    except Exception as error:
        # traceback.print_exc() returns None; log the formatted traceback instead
        logging.error(traceback.format_exc())
        raise error
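
# A standalone sketch of the same lookup against a result dbfile,
# assuming it is SQLite; the file name and the ROI/t values are
# placeholders. A missing row is the subsampled-dbfile case handled
# above.
import sqlite3

conn = sqlite3.connect("results.db")
cur = conn.cursor()
cur.execute("SELECT x, y, w, h, phi, xy_dist_log10x1000 FROM ROI_1 WHERE t = ?", (60000,))
row = cur.fetchone()  # None if this t was dropped by a drop-each != 1 run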
def _track(self, img, grey, mask, t):
    '''
    The tracking routine.
    Runs once per ROI.
    '''
    pmts = self._haar_prmts
    flies = self.fly_cascade.detectMultiScale(
        img,
        scaleFactor=pmts['scaleFactor'],
        minNeighbors=pmts['minNeighbors'],
        flags=pmts['flags'],
        minSize=pmts['minSize'],
        maxSize=pmts['maxSize'])

    out_pos = []
    for (x, y, w, h) in flies:
        # cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)
        x = x + w / 2
        y = y + h / 2

        # store the blob info in a list
        x_var = XPosVariable(int(round(x)))
        y_var = YPosVariable(int(round(y)))
        w_var = WidthVariable(int(round(w)))
        h_var = HeightVariable(int(round(h)))
        phi_var = PhiVariable(0.0)

        out = DataPoint([x_var, y_var, w_var, h_var, phi_var])
        out_pos.append(out)

    # and show if asked
    if self._visualise:
        cv2.imshow(self._multi_fly_tracker_window, img)

    return out_pos
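
# Hypothetical setup for the cascade-based tracker above; the cascade
# file path and every parameter value are assumptions, not ethoscope
# defaults.
import cv2

fly_cascade = cv2.CascadeClassifier("fly_cascade.xml")
haar_prmts = {"scaleFactor": 1.1,    # image pyramid step between scales
              "minNeighbors": 3,     # detections required to keep a candidate
              "flags": 0,
              "minSize": (10, 10),   # smallest accepted bounding box
              "maxSize": (100, 100)} # largest accepted bounding box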
def _track(self, img, grey, mask, t):
    if self._bg_model.bg_img is None:
        self._buff_fg = np.empty_like(grey)
        self._buff_object = np.empty_like(grey)
        self._buff_fg_backup = np.empty_like(grey)
        # self._buff_fg_diff = np.empty_like(grey)
        self._old_pos = 0.0 + 0.0j
        # self._old_sum_fg = 0
        raise NoPositionError

    bg = self._bg_model.bg_img.astype(np.uint8)
    cv2.subtract(grey, bg, self._buff_fg)
    cv2.threshold(self._buff_fg, 20, 255, cv2.THRESH_TOZERO, dst=self._buff_fg)

    # cv2.bitwise_and(self._buff_fg_backup, self._buff_fg, dst=self._buff_fg_diff)
    # sum_fg = cv2.countNonZero(self._buff_fg)

    self._buff_fg_backup = np.copy(self._buff_fg)

    n_fg_pix = np.count_nonzero(self._buff_fg)
    prop_fg_pix = n_fg_pix / (1.0 * grey.shape[0] * grey.shape[1])
    is_ambiguous = False

    if prop_fg_pix > self._max_area:
        self._bg_model.increase_learning_rate()
        raise NoPositionError
    if prop_fg_pix == 0:
        self._bg_model.increase_learning_rate()
        raise NoPositionError

    if CV_VERSION == 3:
        _, contours, hierarchy = cv2.findContours(self._buff_fg,
                                                  cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierarchy = cv2.findContours(self._buff_fg,
                                               cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)

    contours = [cv2.approxPolyDP(c, 1.2, True) for c in contours]

    if len(contours) == 0:
        self._bg_model.increase_learning_rate()
        raise NoPositionError

    elif len(contours) > 1:
        if not self.fg_model.is_ready:
            raise NoPositionError
        # hulls = [cv2.convexHull(c) for c in contours]
        hulls = contours
        # hulls = merge_blobs(hulls)
        hulls = [h for h in hulls if h.shape[0] >= 3]

        if len(hulls) < 1:
            raise NoPositionError
        elif len(hulls) > 1:
            is_ambiguous = True

        cluster_features = [self.fg_model.compute_features(img, h) for h in hulls]
        all_distances = [self.fg_model.distance(cf, t) for cf in cluster_features]
        good_clust = np.argmin(all_distances)
        hull = hulls[good_clust]
        distance = all_distances[good_clust]
    else:
        hull = contours[0]
        if hull.shape[0] < 3:
            self._bg_model.increase_learning_rate()
            raise NoPositionError
        features = self.fg_model.compute_features(img, hull)
        distance = self.fg_model.distance(features, t)

    if distance > self._max_m_log_lik:
        self._bg_model.increase_learning_rate()
        raise NoPositionError

    (x, y), (w, h), angle = cv2.minAreaRect(hull)
    if w < h:
        angle -= 90
        w, h = h, w
    angle = angle % 180

    h_im = min(grey.shape)
    w_im = max(grey.shape)
    max_h = 2 * h_im
    if w > max_h or h > max_h:
        raise NoPositionError

    cv2.ellipse(self._buff_fg, ((x, y), (int(w * 1.5), int(h * 1.5)), angle), 255, -1)

    # todo center mass just on the ellipse area
    cv2.bitwise_and(self._buff_fg_backup, self._buff_fg, self._buff_fg_backup)

    y, x = ndimage.measurements.center_of_mass(self._buff_fg_backup)
    pos = x + 1.0j * y
    pos /= w_im

    xy_dist = round(log10(1. / float(w_im) + abs(pos - self._old_pos)) * 1000)

    # cv2.bitwise_and(self._buff_fg_diff, self._buff_fg, dst=self._buff_fg_diff)
    # sum_diff = cv2.countNonZero(self._buff_fg_diff)
    # xor_dist = (sum_fg + self._old_sum_fg - 2 * sum_diff) / float(sum_fg + self._old_sum_fg)
    # xor_dist *= 1000.
    # self._old_sum_fg = sum_fg

    self._old_pos = pos

    if mask is not None:
        cv2.bitwise_and(self._buff_fg, mask, self._buff_fg)

    if is_ambiguous:
        self._bg_model.increase_learning_rate()
        self._bg_model.update(grey, t)
    else:
        self._bg_model.decrease_learning_rate()
        self._bg_model.update(grey, t, self._buff_fg)

    self.fg_model.update(img, hull, t)

    x_var = XPosVariable(int(round(x)))
    y_var = YPosVariable(int(round(y)))
    distance = XYDistance(int(xy_dist))
    # xor_dist = XorDistance(int(xor_dist))
    w_var = WidthVariable(int(round(w)))
    h_var = HeightVariable(int(round(h)))
    phi_var = PhiVariable(int(round(angle)))
    # mlogl = mLogLik(int(distance * 1000))

    out = DataPoint([x_var, y_var, w_var, h_var, phi_var,
                     # mlogl,
                     distance,
                     # xor_dist,
                     # Label(0)
                     ])

    self._previous_shape = np.copy(hull)
    return [out]
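
# A self-contained sketch of the displacement metric computed above:
# positions are complex numbers already normalised by the frame width,
# and the step length is log10-compressed; the 1/w_im term keeps the
# argument strictly positive when the animal has not moved. The helper
# name is hypothetical.
from math import log10

def log_xy_distance(pos, old_pos, w_im):
    return round(log10(1.0 / float(w_im) + abs(pos - old_pos)) * 1000)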
def _track(self, img, grey, mask, t):
    if self._bg_model.bg_img is None:
        self._buff_fg = np.empty_like(grey)
        self._buff_object = np.empty_like(grey)
        self._buff_fg_backup = np.empty_like(grey)
        # self._buff_fg_diff = np.empty_like(grey)
        self._old_pos = 0.0 + 0.0j
        # self._old_sum_fg = 0
        raise NoPositionError

    bg = self._bg_model.bg_img.astype(np.uint8)
    cv2.subtract(grey, bg, self._buff_fg)
    cv2.threshold(self._buff_fg, 20, 255, cv2.THRESH_TOZERO, dst=self._buff_fg)

    # L.Zi. make a copy of extracted foreground to illustrate detection for debugging
    fg_cpy = np.copy(self._buff_fg)

    # cv2.bitwise_and(self._buff_fg_backup, self._buff_fg, dst=self._buff_fg_diff)
    # sum_fg = cv2.countNonZero(self._buff_fg)

    self._buff_fg_backup = np.copy(self._buff_fg)

    n_fg_pix = np.count_nonzero(self._buff_fg)
    prop_fg_pix = n_fg_pix / (1.0 * grey.shape[0] * grey.shape[1])
    is_ambiguous = False

    if prop_fg_pix > self._max_area:
        # the non-black pixel count in the foreground exceeds the allowed maximum
        # (five times the expected animal size)
        self._bg_model.increase_learning_rate()
        raise NoPositionError
    if prop_fg_pix == 0:
        # no non-black pixel was found in the foreground
        self._bg_model.increase_learning_rate()
        raise NoPositionError

    if CV_VERSION == 3:
        _, contours, hierarchy = cv2.findContours(self._buff_fg,
                                                  cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierarchy = cv2.findContours(self._buff_fg,
                                               cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)

    contours = [cv2.approxPolyDP(c, 1.2, True) for c in contours]

    if len(contours) == 0:
        # no contour around the non-black foreground pixels could be detected
        self._bg_model.increase_learning_rate()
        raise NoPositionError

    elif len(contours) > 1:
        # more than a single contour was found
        if not self.fg_model.is_ready:
            raise NoPositionError
        # hulls = [cv2.convexHull(c) for c in contours]
        hulls = contours
        # hulls = merge_blobs(hulls)
        hulls = [h for h in hulls if h.shape[0] >= 3]

        if len(hulls) < 1:
            raise NoPositionError
        elif len(hulls) > 1:
            is_ambiguous = True

        cluster_features = [self.fg_model.compute_features(img, h) for h in hulls]
        all_distances = [self.fg_model.distance(cf, t) for cf in cluster_features]
        good_clust = np.argmin(all_distances)
        hull = hulls[good_clust]
        distance = all_distances[good_clust]
    else:
        hull = contours[0]
        if hull.shape[0] < 3:
            self._bg_model.increase_learning_rate()
            raise NoPositionError
        features = self.fg_model.compute_features(img, hull)
        distance = self.fg_model.distance(features, t)

    if distance > self._max_m_log_lik:
        self._bg_model.increase_learning_rate()
        raise NoPositionError

    (x, y), (w, h), angle = cv2.minAreaRect(hull)
    if w < h:
        angle -= 90
        w, h = h, w
    angle = angle % 180

    h_im = min(grey.shape)
    w_im = max(grey.shape)
    max_h = 2 * h_im
    if w > max_h or h > max_h:
        raise NoPositionError

    # todo center mass just on the ellipse area
    cv2.bitwise_and(self._buff_fg_backup, self._buff_fg, self._buff_fg_backup)

    y, x = ndimage.measurements.center_of_mass(self._buff_fg_backup)
    pos = x + 1.0j * y
    # L. Zi.:
    # pos /= w_im
    # xy_dist = round(log10(1. / float(w_im) + abs(pos - self._old_pos)) * 1000)
    # L. Zi.: want linear motion distance
    xy_dist = round(abs(pos - self._old_pos))

    # cv2.bitwise_and(self._buff_fg_diff, self._buff_fg, dst=self._buff_fg_diff)
    # sum_diff = cv2.countNonZero(self._buff_fg_diff)
    # xor_dist = (sum_fg + self._old_sum_fg - 2 * sum_diff) / float(sum_fg + self._old_sum_fg)
    # xor_dist *= 1000.
    # self._old_sum_fg = sum_fg

    self._old_pos = pos

    if mask is not None:
        cv2.bitwise_and(self._buff_fg, mask, self._buff_fg)

    if is_ambiguous:
        self._bg_model.increase_learning_rate()
        self._bg_model.update(grey, t)
    else:
        self._bg_model.decrease_learning_rate()
        self._bg_model.update(grey, t, self._buff_fg)

    self.fg_model.update(img, hull, t)

    x_var = XPosVariable(int(round(x)))
    y_var = YPosVariable(int(round(y)))
    distance = XYDistance(int(xy_dist))
    # xor_dist = XorDistance(int(xor_dist))
    w_var = WidthVariable(int(round(w)))
    h_var = HeightVariable(int(round(h)))
    phi_var = PhiVariable(int(round(angle)))
    # mlogl = mLogLik(int(distance * 1000))

    # L. Zi.: produce and show adaptive background processing results
    if self._dbg_single_roi_do_rec:
        if self._roi._value == self._dbg_single_roi_value:
            animal_colour = (255, 255, 255)
            cv2.ellipse(fg_cpy, ((x, y), (int(w * 1.5), int(h * 1.5)), angle),
                        animal_colour, 1, cv2.LINE_AA)
            grey_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            h = round(self._roi.rectangle[3])
            w = round(self._roi.rectangle[2])
            # draw roi value
            cv2.putText(fg_cpy, str(self._roi._value), (5, h - 10),
                        cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255))
            # draw motion distance of detected animal since last frame
            cv2.putText(fg_cpy, str(int(xy_dist)), (5, 25),
                        cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255))
            txt = str(int(t))
            (txt_w, txt_h), _ = cv2.getTextSize(txt, cv2.FONT_HERSHEY_DUPLEX, 1, 1)
            cv2.putText(fg_cpy, txt, (w - txt_w - 5, 25),
                        cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255))
            vis = np.concatenate((grey_img, bg, fg_cpy), axis=1)
            # vis = np.concatenate((grey_img, grey, fg_cpy), axis=1)
            cv2.namedWindow("Processing", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("Processing", 1800, 600)
            cv2.imshow("Processing", vis)
            cv2.waitKey(100)
            # record a video
            if self._dbg_roi_video_writer is None:
                fourcc_string = 'DIVX'
                fourcc = cv2.VideoWriter_fourcc(*fourcc_string)
                self._dbg_roi_video_writer = cv2.VideoWriter(
                    self._dbg_single_roi_video_filename, fourcc, 2.0,
                    (vis.shape[1], vis.shape[0]), isColor=False)
            self._dbg_roi_video_writer.write(vis)

    out = DataPoint([x_var, y_var, w_var, h_var, phi_var,
                     # mlogl,
                     distance,
                     # xor_dist,
                     # Label(0)
                     ])

    self._previous_shape = np.copy(hull)
    return [out]
def _track(self, img, grey, mask, t):
    '''
    The tracking routine.
    Runs once per ROI.
    '''
    if self._bg_model.bg_img is None:
        self._buff_fg = np.empty_like(grey)
        self._buff_object = np.empty_like(grey)
        self._buff_fg_backup = np.empty_like(grey)
        raise NoPositionError

    bg = self._bg_model.bg_img.astype(np.uint8)
    cv2.subtract(grey, bg, self._buff_fg)
    cv2.threshold(self._buff_fg, 20, 255, cv2.THRESH_TOZERO, dst=self._buff_fg)

    self._buff_fg_backup = np.copy(self._buff_fg)

    n_fg_pix = np.count_nonzero(self._buff_fg)
    prop_fg_pix = n_fg_pix / (1.0 * grey.shape[0] * grey.shape[1])
    is_ambiguous = False

    if prop_fg_pix > self._max_area:
        self._bg_model.increase_learning_rate()
        raise NoPositionError
    if prop_fg_pix == 0:
        self._bg_model.increase_learning_rate()
        raise NoPositionError

    if CV_VERSION == 3:
        _, contours, hierarchy = cv2.findContours(self._buff_fg,
                                                  cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierarchy = cv2.findContours(self._buff_fg,
                                               cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)

    contours = [cv2.approxPolyDP(c, 1.2, True) for c in contours]

    valid_contours = []
    if len(contours) == 0:
        self._bg_model.increase_learning_rate()
        print("no contour detected")
        raise NoPositionError
    else:
        for c in contours:
            if self._fg_model.is_contour_valid(c, img):
                valid_contours.append(c)

    out_pos = []
    # raw_pos = []
    for n_vc, vc in enumerate(valid_contours):
        # calculate the parameters to draw the centroid
        (x, y), (w, h), angle = cv2.minAreaRect(vc)

        # adjust the orientation for consistency
        if w < h:
            angle -= 90
            w, h = h, w
        angle = angle % 180

        # ignore the blob if the ellipse would be drawn outside the actual picture
        h_im = min(grey.shape)
        w_im = max(grey.shape)
        max_h = 2 * h_im
        if w > max_h or h > max_h:
            continue

        pos = x + 1.0j * y
        pos /= w_im

        # draw the ellipse around the blob
        cv2.ellipse(self._buff_fg, ((x, y), (int(w * 1.5), int(h * 1.5)), angle), 255, 1)

        ## Some debugging info
        ## contour_area = cv2.contourArea(vc)
        ## contour_moments = cv2.moments(vc)
        ## cX = int(contour_moments["m10"] / contour_moments["m00"])
        ## cY = int(contour_moments["m01"] / contour_moments["m00"])
        ## contour_centroid = (cX, cY)  # this is actually the same as x, y - so pointless to calculate
        # # idx = 0
        # # nf = 0
        # # d = 0
        # # ox = 0
        # # oy = 0
        # # if not self._firstrun:
        # #     d, idx = self._closest_node((x, y), self.last_positions)
        # #     print(d, idx)
        # #     self.last_positions[idx] = [x, y]
        # #     if d < 10:
        # #         nf = idx
        # #         ox, oy = self.last_positions[idx]
        # #     else:
        # #         nf = "new"
        # # label = "%s: %.1f" % (nf, d)
        # # cv2.putText(self._buff_fg, label, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
        # # cv2.circle(self._buff_fg, (cX, cY), 3, (255, 0, 0), -1)
        # # cv2.drawMarker(self._buff_fg, (int(ox), int(oy)), (255, 0, 0), cv2.MARKER_CROSS, 10)

        # store the blob info in a list
        x_var = XPosVariable(int(round(x)))
        y_var = YPosVariable(int(round(y)))
        w_var = WidthVariable(int(round(w)))
        h_var = HeightVariable(int(round(h)))
        phi_var = PhiVariable(int(round(angle)))
        # raw = (x, y, w, h, angle)
        # raw_pos.append(raw)

        out = DataPoint([x_var, y_var, w_var, h_var, phi_var])
        out_pos.append(out)
    # end of the loop iterating over contours

    if self._visualise:
        cv2.imshow(self.multi_fly_tracker_window, self._buff_fg)

    if len(out_pos) == 0:
        self._bg_model.increase_learning_rate()
        raise NoPositionError

    cv2.bitwise_and(self._buff_fg_backup, self._buff_fg, self._buff_fg_backup)

    if mask is not None:
        cv2.bitwise_and(self._buff_fg, mask, self._buff_fg)

    if is_ambiguous:
        self._bg_model.increase_learning_rate()
        self._bg_model.update(grey, t)
    else:
        self._bg_model.decrease_learning_rate()
        self._bg_model.update(grey, t, self._buff_fg)

    return out_pos
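
# The criteria behind self._fg_model.is_contour_valid are not shown in
# this snippet; a plausible area-based stand-in might look like the
# following (the name and thresholds are hypothetical, not the model's
# actual logic).
import cv2

def is_contour_valid(contour, min_area=20, max_area=5000):
    area = cv2.contourArea(contour)
    return min_area <= area <= max_area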
def _track(self, img, grey, mask, t):
    if self._bg_model.bg_img is None:
        self._buff_fg = np.empty_like(grey)
        raise NoPositionError

    bg = self._bg_model.bg_img.astype(np.uint8)
    cv2.subtract(grey, bg, self._buff_fg)

    # fixme magic number
    cv2.threshold(self._buff_fg, 15, 255, cv2.THRESH_BINARY, dst=self._buff_fg)

    n_fg_pix = np.count_nonzero(self._buff_fg)
    prop_fg_pix = n_fg_pix / (1.0 * grey.shape[0] * grey.shape[1])
    is_ambiguous = False

    if prop_fg_pix > self._max_area:
        self._bg_model.increase_learning_rate()
        print("too big")
        raise NoPositionError
    if prop_fg_pix == 0:
        self._bg_model.increase_learning_rate()
        print("no pixs")
        raise NoPositionError

    # show(self._buff_fg, 100)
    if CV_VERSION == 3:
        _, contours, hierarchy = cv2.findContours(self._buff_fg,
                                                  cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierarchy = cv2.findContours(self._buff_fg,
                                               cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)

    if len(contours) == 0:
        self._bg_model.increase_learning_rate()
        print("No contours")
        raise NoPositionError

    elif len(contours) > 1:
        hulls = [cv2.convexHull(c) for c in contours]
        hulls = merge_blobs(hulls)
        hulls = [h for h in hulls if h.shape[0] >= 3]
        print("before exclusion", len(hulls))
        hulls = self._exclude_incorrect_hull(hulls)
        print("after exclusion", len(hulls))

        if len(hulls) == 0:
            raise NoPositionError
        elif len(hulls) > 1:
            raise NoPositionError
        else:
            is_ambiguous = False
            hull = hulls[0]
    else:
        hull = cv2.convexHull(contours[0])
        if hull.shape[0] < 3:
            self._bg_model.increase_learning_rate()
            raise NoPositionError

    (_, _), (w, h), angle = cv2.minAreaRect(hull)
    M = cv2.moments(hull)
    x = int(M['m10'] / M['m00'])
    y = int(M['m01'] / M['m00'])

    if w < h:
        angle -= 90
        w, h = h, w
    angle = angle % 180

    h_im = min(grey.shape)
    max_h = 2 * h_im
    if w > max_h or h > max_h:
        raise NoPositionError

    x_var = XPosVariable(int(round(x)))
    y_var = YPosVariable(int(round(y)))
    w_var = WidthVariable(int(round(w)))
    h_var = HeightVariable(int(round(h)))
    phi_var = PhiVariable(int(round(angle)))

    self._buff_fg.fill(0)
    cv2.drawContours(self._buff_fg, [hull], 0, 1, -1)

    if mask is not None:
        cv2.bitwise_and(self._buff_fg, mask, self._buff_fg)

    if is_ambiguous:
        self._bg_model.increase_learning_rate()
        self._bg_model.update(grey, t)
    else:
        self._bg_model.decrease_learning_rate()
        self._bg_model.update(grey, t, self._buff_fg)

    out = DataPoint([x_var, y_var, w_var, h_var, phi_var])
    # a single animal is tracked, so return a one-element list of
    # DataPoints, consistent with the other trackers in this module
    return [out]
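
# Centroid from image moments, as computed above; a degenerate hull with
# zero area (m00 == 0) would raise ZeroDivisionError, which the
# hull.shape[0] < 3 guard makes unlikely but not impossible. The helper
# name is hypothetical.
import cv2

def hull_centroid(hull):
    M = cv2.moments(hull)
    return int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])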
def run(self, result_writer=None, drawer=None):
    """
    Runs the monitor indefinitely.

    :param result_writer: A result writer used to control how data are saved.
        `None` means no results will be saved.
    :type result_writer: :class:`~ethoscope.utils.io.ResultWriter`
    :param drawer: A drawer to plot the data on frames, display frames and/or save videos.
        `None` means none of the aforementioned actions will be performed.
    :type drawer: :class:`~ethoscope.drawers.drawers.BaseDrawer`
    """
    try:
        logging.info("Monitor starting a run")
        self._is_running = True

        for i, (t, frame) in enumerate(self._camera):
            # logging.info("Monitor: frame: %d, time: %d" % (i, t))
            if self._force_stop:
                logging.info("Monitor object stopped from external request")
                break

            self._last_frame_idx = i
            self._last_time_stamp = t
            self._frame_buffer = frame

            # logStr = "Monitor: frame: %d, time: %d" % (i, t)
            empty_cnt = 0
            for j, track_u in enumerate(self._unit_trackers):
                data_rows = track_u.track(t, frame)
                if len(data_rows) == 0:
                    self._last_positions[track_u.roi.idx] = []
                    empty_cnt += 1
                    # L. Zi.: do not skip trackers not returning a detection,
                    # but fill in a data row with zero values
                    # continue
                    data_rows = [DataPoint([XPosVariable(0),
                                            YPosVariable(0),
                                            WidthVariable(0),
                                            HeightVariable(0),
                                            PhiVariable(0),
                                            XYDistance(0),
                                            IsInferredVariable(0),
                                            HasInteractedVariable(0)])]

                abs_pos = track_u.get_last_positions(absolute=True)
                # if abs_pos is not None:
                self._last_positions[track_u.roi.idx] = abs_pos

                if result_writer is not None:
                    # logging.info(logStr + ", Roi: %d, %s" % (track_u.roi.idx, data_rows))
                    result_writer.write(t, track_u.roi, data_rows)
            # logging.info(logStr + " empty data cnt: %d" % (empty_cnt))

            if result_writer is not None:
                result_writer.flush(t, frame)

            if drawer is not None:
                drawer.draw(frame, t, self._last_positions, self._unit_trackers)

            self._last_t = t

    except Exception as e:
        logging.error("Monitor closing with an exception: '%s'" % traceback.format_exc())
        raise e
    finally:
        self._is_running = False
        logging.info("Monitor closing")
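
# Hypothetical wiring of a monitor run. `camera`, `rois`, the Monitor
# constructor signature, and the ResultWriter argument are assumptions
# based on the docstring above, not verified API.
monitor = Monitor(camera, AdaptiveBGModel, rois)
with ResultWriter(db_credentials) as rw:
    monitor.run(result_writer=rw, drawer=None)  # save results, no drawing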
def _track(self, img, grey, mask, t):
    if self._bg_model.bg_img is None:
        self._buff_fg = np.empty_like(grey)
        self._buff_object = np.empty_like(grey)
        self._buff_fg_backup = np.empty_like(grey)
        # self._buff_fg_diff = np.empty_like(grey)
        # self._old_pos = 0.0 + 0.0j
        # self._old_sum_fg = 0
        raise NoPositionError

    bg = self._bg_model.bg_img.astype(np.uint8)
    cv2.subtract(grey, bg, self._buff_fg)
    cv2.threshold(self._buff_fg, 20, 255, cv2.THRESH_TOZERO, dst=self._buff_fg)

    # cv2.bitwise_and(self._buff_fg_backup, self._buff_fg, dst=self._buff_fg_diff)
    # sum_fg = cv2.countNonZero(self._buff_fg)

    self._buff_fg_backup = np.copy(self._buff_fg)

    n_fg_pix = np.count_nonzero(self._buff_fg)
    prop_fg_pix = n_fg_pix / (1.0 * grey.shape[0] * grey.shape[1])
    is_ambiguous = False

    if prop_fg_pix > self._max_area:
        self._bg_model.increase_learning_rate()
        raise NoPositionError
    if prop_fg_pix == 0:
        self._bg_model.increase_learning_rate()
        raise NoPositionError

    if CV_VERSION == 3:
        _, contours, hierarchy = cv2.findContours(self._buff_fg,
                                                  cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierarchy = cv2.findContours(self._buff_fg,
                                               cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)

    contours = [cv2.approxPolyDP(c, 1.2, True) for c in contours]

    valid_contours = []
    if len(contours) == 0:
        self._bg_model.increase_learning_rate()
        raise NoPositionError
    else:
        for c in contours:
            if self._fg_model.is_contour_valid(c, img):
                valid_contours.append(c)

    if len(valid_contours) == 0:
        self._bg_model.increase_learning_rate()
        raise NoPositionError

    out_pos = []
    for vc in valid_contours:
        (x, y), (w, h), angle = cv2.minAreaRect(vc)
        if w < h:
            angle -= 90
            w, h = h, w
        angle = angle % 180

        h_im = min(grey.shape)
        w_im = max(grey.shape)
        max_h = 2 * h_im
        if w > max_h or h > max_h:
            continue

        pos = x + 1.0j * y
        pos /= w_im

        # fixme some matching needed here
        # xy_dist = round(log10(1. / float(w_im) + abs(pos - self._old_pos)) * 1000)

        cv2.ellipse(self._buff_fg, ((x, y), (int(w * 1.5), int(h * 1.5)), angle), 255, -1)

        x_var = XPosVariable(int(round(x)))
        y_var = YPosVariable(int(round(y)))
        # distance = XYDistance(int(xy_dist))
        # xor_dist = XorDistance(int(xor_dist))
        w_var = WidthVariable(int(round(w)))
        h_var = HeightVariable(int(round(h)))
        phi_var = PhiVariable(int(round(angle)))
        # mlogl = mLogLik(int(distance * 1000))

        out = DataPoint([x_var, y_var, w_var, h_var, phi_var,
                         # mlogl,
                         # distance,
                         # xor_dist,
                         # Label(0)
                         ])
        out_pos.append(out)

    if len(out_pos) == 0:
        self._bg_model.increase_learning_rate()
        raise NoPositionError

    # accurate measurement for multi animal tracking:
    # cv2.ellipse(self._buff_fg, ((x, y), (int(w * 1.5), int(h * 1.5)), angle), 255, -1)
    # cv2.bitwise_and(self._buff_fg_backup, self._buff_fg, self._buff_fg_backup)
    # self._old_pos = out_points

    if mask is not None:
        cv2.bitwise_and(self._buff_fg, mask, self._buff_fg)

    if is_ambiguous:
        self._bg_model.increase_learning_rate()
        self._bg_model.update(grey, t)
    else:
        self._bg_model.decrease_learning_rate()
        self._bg_model.update(grey, t, self._buff_fg)

    return out_pos