def __init__(self, name=None, params={}, color='white', bEqualizeHist=False):
    MotionTrackedBodypartPolar.__init__(self, name, params, color, bEqualizeHist)

    self.phasecorr = imageprocessing.PhaseCorrelation()
    self.state = MsgState()          # In the bodypart frame.
    self.stateOrigin_p = MsgState()  # In the bodypart frame.
    self.stateLo_p = MsgState()      # In the bodypart frame.
    self.stateHi_p = MsgState()      # In the bodypart frame.

    self.imgComparison = None
    self.windowStabilized = ImageWindow(False, self.name+'Stable')
    self.windowComparison = ImageWindow(True, self.name+'Comparison')

    self.set_params(params)
def openFile(self):
    """Slot for the openFile action."""
    from SXM import FileIO, Data

    fname = str(QFileDialog.getOpenFileName(self.widget, self.tr("Open File"),
                                            ".", FileIO.getFilterString(types=(Data.Image,))))
    if len(fname) > 0:
        root, ext = os.path.splitext(fname)
        self.statusBar().showMessage(self.tr("Loading data: %1").arg(fname), 2000)
        image = FileIO.fromFile(fname)
        image.load()
        imwin = ImageWindow(self, image)
        self.Images.append(imwin)
        self.updateImageList()
        imwin.windowModality = False
        imwin.show()
def __init__(self, name=None, params={}, color='white', bEqualizeHist=False):
    self.name = name
    self.bEqualizeHist = bEqualizeHist

    self.bValidMask = False

    self.bgra = ui.bgra_dict[color]
    self.bgra_dim = tuple(0.5 * np.array(ui.bgra_dict[color]))
    self.bgra_state = ui.bgra_dict[color]  # ui.bgra_dict['red']
    self.pixelmax = 255.0

    self.shape = (np.inf, np.inf)
    self.ptCenter_i = np.array([0, 0])
    self.roi = None

    self.mask = Struct()
    self.mask.img = None
    self.mask.sum = 1.0

    self.dt = np.inf
    self.params = {}
    self.handles = {'center':  ui.Handle(np.array([0, 0]), self.bgra, name='center'),
                    'radius1': ui.Handle(np.array([0, 0]), self.bgra, name='radius1'),
                    'radius2': ui.Handle(np.array([0, 0]), self.bgra, name='radius2')
                    }

    # Region of interest images.
    self.imgFullBackground = None
    self.imgRoiBackground = None
    self.imgRoi = None        # Untouched roi image.
    self.imgRoiFg = None      # Background subtracted.
    self.imgRoiFgMasked = None

    # Extra windows.
    self.windowBG = ImageWindow(False, self.name + 'BG')
    self.windowFG = ImageWindow(False, self.name + 'FG')
    self.windowMask = ImageWindow(gbShowMasks, self.name + 'Mask')
class EdgeTrackerByIntensityProfile(MotionTrackedBodypartPolar):
    def __init__(self, name=None, params={}, color='white', bEqualizeHist=False):
        MotionTrackedBodypartPolar.__init__(self, name, params, color, bEqualizeHist)

        self.name = name
        self.detector = EdgeDetectorByIntensityProfile()
        self.state = MsgState()
        self.windowEdges = ImageWindow(False, self.name+'Edges')
        self.set_params(params)

        # Services, for live intensities plots via live_wing_histograms.py
        self.service_trackerdata = rospy.Service('trackerdata_'+name, SrvTrackerdata, self.serve_trackerdata_callback)

    # set_params()
    # Set the given params dict into this object.
    #
    def set_params(self, params):
        MotionTrackedBodypartPolar.set_params(self, params)

        self.imgRoiBackground = None
        self.iCount = 0
        self.state.intensity = 0.0
        self.state.angles = []
        self.state.gradients = []

        # Compute the 'handedness' of the head/abdomen and wing/wing axes.  self.sense specifies the direction of positive angles.
        matAxes = np.array([[self.params['gui']['head']['hinge']['x']-self.params['gui']['abdomen']['hinge']['x'], self.params['gui']['head']['hinge']['y']-self.params['gui']['abdomen']['hinge']['y']],
                            [self.params['gui']['right']['hinge']['x']-self.params['gui']['left']['hinge']['x'], self.params['gui']['right']['hinge']['y']-self.params['gui']['left']['hinge']['y']]])
        if (self.name in ['left','right']):
            self.senseAxes = np.sign(np.linalg.det(matAxes))
            a = -1 if (self.name=='right') else 1
            self.sense = a*self.senseAxes
        else:
            self.sense = 1

        self.detector.set_params(params[self.name]['threshold'], params['n_edges_max'], self.sense)

    # update_state()
    #
    def update_state(self):
        imgNow = self.imgRoiFgMaskedPolarCropped
        self.windowEdges.set_image(imgNow)

        # Get the rotation & expansion between images.
        if (imgNow is not None):
            # Pixel position and strength of the edges.
            (edges, gradients) = self.detector.detect(imgNow)
            anglePerPixel = (self.params['gui'][self.name]['angle_hi']-self.params['gui'][self.name]['angle_lo']) / float(imgNow.shape[1])

            # Convert pixel to angle units, and put angle into the wing frame.
            self.state.angles = []
            self.state.gradients = []
            for i in range(len(edges)):
                edge = edges[i]
                gradient = gradients[i]
                angle_b = self.params['gui'][self.name]['angle_lo'] + edge * anglePerPixel
                angle_p = (self.transform_angle_p_from_b(angle_b) + np.pi) % (2*np.pi) - np.pi
                self.state.angles.append(angle_p)
                self.state.gradients.append(gradient)

            self.state.intensity = np.mean(imgNow)/255.0

    # update()
    # Update all the internals given a foreground camera image.
    #
    def update(self, dt, image, bInvertColor):
        MotionTrackedBodypartPolar.update(self, dt, image, bInvertColor)

        if (self.params['gui'][self.name]['track']):
            self.update_state()

    # draw()
    # Draw the outline.
    #
    def draw(self, image):
        MotionTrackedBodypartPolar.draw(self, image)

        if (self.params['gui'][self.name]['track']):
            # Draw the major and minor edges alternately, until the max number has been reached.
            bgra = self.bgra
            for i in range(len(self.state.angles)):
                angle_b = self.transform_angle_b_from_p(self.state.angles[i])
                angle_i = self.transform_angle_i_from_b(angle_b)
                x0 = self.ptHinge_i[0] + self.params['gui'][self.name]['radius_inner'] * np.cos(angle_i)
                y0 = self.ptHinge_i[1] + self.params['gui'][self.name]['radius_inner'] * np.sin(angle_i)
                x1 = self.ptHinge_i[0] + self.params['gui'][self.name]['radius_outer'] * np.cos(angle_i)
                y1 = self.ptHinge_i[1] + self.params['gui'][self.name]['radius_outer'] * np.sin(angle_i)
                cv2.line(image, (int(x0),int(y0)), (int(x1),int(y1)), bgra, 1)
                bgra = tuple(0.5*np.array(bgra))

            self.windowEdges.show()

    def serve_trackerdata_callback(self, request):
        title1 = 'Intensities'
        title2 = 'Diffs'
        abscissa = np.linspace(self.params['gui'][self.name]['angle_lo'], self.params['gui'][self.name]['angle_hi'], len(self.detector.intensities))

        angles = []
        gradients = []
        for i in range(len(self.state.angles)):
            angle = self.state.angles[i]
            gradient = self.state.gradients[i]
            angle_b = ((self.transform_angle_b_from_p(angle) + np.pi) % (2*np.pi)) - np.pi
            angles.append(angle_b)
            gradients.append(gradient)

        return SrvTrackerdataResponse(self.color, title1, title2, abscissa, self.detector.intensities, self.detector.diff, angles, gradients)
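# Worked example (an illustrative sketch, not part of the original source): update_state()
# above maps a polar-image column index onto an angle in the wedge [angle_lo, angle_hi].
# The helper and numbers below are hypothetical and only restate that linear mapping.
def angle_from_polar_column(edge, nColumns, angle_lo, angle_hi):
    anglePerPixel = (angle_hi - angle_lo) / float(nColumns)  # Radians spanned by one polar-image column.
    return angle_lo + edge * anglePerPixel
# e.g. angle_from_polar_column(45, 90, -np.pi/2, np.pi/2) is ~0.0 radians, i.e. the middle column lands mid-wedge.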
class EdgeTrackerByHoughTransform(MotionTrackedBodypart): def __init__(self, name=None, params={}, color="white", bEqualizeHist=False): MotionTrackedBodypart.__init__(self, name, params, color, bEqualizeHist) self.name = name self.detector = EdgeDetectorByHoughTransform(self.name, params) self.state = MsgState() self.windowEdges = ImageWindow(False, self.name + "Edges") self.set_params(params) # Services, for live intensities plots via live_wing_histograms.py self.service_trackerdata = rospy.Service( "trackerdata_" + self.name, SrvTrackerdata, self.serve_trackerdata_callback ) # set_params() # Set the given params dict into this object. # def set_params(self, params): MotionTrackedBodypart.set_params(self, params) self.imgRoiBackground = None self.iCount = 0 self.state.intensity = 0.0 self.state.angles = [] self.state.gradients = [] # Compute the 'handedness' of the head/abdomen and wing/wing axes. self.sense specifies the direction of positive angles. matAxes = np.array( [ [ self.params["gui"]["head"]["hinge"]["x"] - self.params["gui"]["abdomen"]["hinge"]["x"], self.params["gui"]["head"]["hinge"]["y"] - self.params["gui"]["abdomen"]["hinge"]["y"], ], [ self.params["gui"]["right"]["hinge"]["x"] - self.params["gui"]["left"]["hinge"]["x"], self.params["gui"]["right"]["hinge"]["y"] - self.params["gui"]["left"]["hinge"]["y"], ], ] ) if self.name in ["left", "right"]: self.senseAxes = np.sign(np.linalg.det(matAxes)) a = -1 if (self.name == "right") else 1 self.sense = a * self.senseAxes else: self.sense = 1 self.windowEdges.set_enable(self.params["gui"]["windows"] and self.params["gui"][self.name]["track"]) self.bValidDetector = False # update_state() # def update_state(self): imgNow = self.imgRoiFgMasked if imgNow is not None: # Pixel position and strength of the edges. (angles, magnitudes) = self.detector.detect(imgNow) self.windowEdges.set_image(self.detector.imgMasked) # Put angle into the bodypart frame. self.state.angles = [] self.state.gradients = [] for i in range(len(angles)): angle = angles[i] magnitude = magnitudes[i] angle_b = self.params["gui"][self.name]["angle_lo"] + angle angle_p = (self.transform_angle_p_from_b(angle_b) + np.pi) % (2 * np.pi) - np.pi self.state.angles.append(angle) self.state.gradients.append(magnitude) self.state.intensity = np.mean(self.imgRoiFgMasked) / 255.0 # update() # Update all the internals given a foreground camera image. # def update(self, dt, image, bInvertColor): MotionTrackedBodypart.update(self, dt, image, bInvertColor) if self.params["gui"][self.name]["track"]: if not self.bValidDetector: if self.mask.xMin is not None: self.detector.set_params( self.name, self.params, self.sense, self.angleBodypart_i, self.angleBodypart_b, image.shape, self.mask, ) self.bValidDetector = True self.update_state() # draw() # Draw the outline. # def draw(self, image): MotionTrackedBodypart.draw(self, image) if self.params["gui"][self.name]["track"]: # Draw the major and minor edges alternately, until the max number has been reached. 
bgra = self.bgra for i in range(len(self.state.angles)): angle_b = self.transform_angle_b_from_p(self.state.angles[i]) angle_i = self.transform_angle_i_from_b(angle_b) x0 = self.ptHinge_i[0] + self.params["gui"][self.name]["radius_inner"] * np.cos(angle_i) y0 = self.ptHinge_i[1] + self.params["gui"][self.name]["radius_inner"] * np.sin(angle_i) x1 = self.ptHinge_i[0] + self.params["gui"][self.name]["radius_outer"] * np.cos(angle_i) y1 = self.ptHinge_i[1] + self.params["gui"][self.name]["radius_outer"] * np.sin(angle_i) cv2.line(image, (int(x0), int(y0)), (int(x1), int(y1)), bgra, 1) bgra = tuple(0.5 * np.array(bgra)) self.windowEdges.show() def serve_trackerdata_callback(self, request): if len(self.detector.intensities) > 0: title1 = "Intensities" title2 = "Diffs" diffs = self.detector.diff # np.zeros(len(self.detector.intensities)) abscissa = np.linspace( self.params["gui"][self.name]["angle_lo"], self.params["gui"][self.name]["angle_hi"], len(self.detector.intensities), ) abscissa -= self.angleBodypart_b abscissa *= self.sense markersH = self.state.angles markersV = self.state.gradients # [self.params[self.name]['threshold']] rv = SrvTrackerdataResponse( self.color, title1, title2, abscissa, self.detector.intensities, diffs, markersH, markersV ) else: rv = SrvTrackerdataResponse() return rv
class MotionTrackedBodypart(object):
    def __init__(self, name=None, params={}, color='white', bEqualizeHist=False):
        self.name = name
        self.bEqualizeHist = bEqualizeHist

        self.bValidMask = False

        self.color = color
        self.bgra = ui.bgra_dict[color]
        self.bgra_dim = tuple(0.5 * np.array(ui.bgra_dict[color]))
        self.bgra_state = ui.bgra_dict[color]  # ui.bgra_dict['red']
        self.pixelmax = 255.0

        self.shape = (np.inf, np.inf)
        self.ptHinge_i = np.array([0, 0])
        self.roi = None

        self.mask = Struct()
        self.mask.img = None
        self.mask.sum = 1.0
        self.mask.xMin = None
        self.mask.yMin = None
        self.mask.xMax = None
        self.mask.yMax = None

        self.dt = np.inf
        self.params = {}
        self.handles = {'hinge':        ui.Handle(np.array([0, 0]), self.bgra, name='hinge'),
                        'angle_hi':     ui.Handle(np.array([0, 0]), self.bgra, name='angle_hi'),
                        'angle_lo':     ui.Handle(np.array([0, 0]), self.bgra, name='angle_lo'),
                        'radius_inner': ui.Handle(np.array([0, 0]), self.bgra, name='radius_inner')
                        }

        self.lockBackground = threading.Lock()

        # Region of interest images.
        self.imgFullBackground = None
        self.imgRoiBackground = None
        self.imgRoi = None        # Untouched roi image.
        self.imgRoiFg = None      # Background subtracted.
        self.imgRoiFgMasked = None
        self.imgFinal = None      # The image to use for tracking.
        self.imgHeadroom = None

        # Extra windows.
        self.windowBG = ImageWindow(False, self.name + 'BG')
        self.windowFG = ImageWindow(False, self.name + 'FG')
        self.windowMask = ImageWindow(gbShowMasks, self.name + 'Mask')

    # set_params()
    # Set the given params dict into this object, and cache a few values.
    #
    def set_params(self, params):
        self.params = params

        self.rc_background = self.params['rc_background']
        self.angleBody_i = self.get_bodyangle_i()
        self.cosAngleBody_i = np.cos(self.angleBody_i)
        self.sinAngleBody_i = np.sin(self.angleBody_i)

        self.ptHinge_i = np.array([self.params['gui'][self.name]['hinge']['x'], self.params['gui'][self.name]['hinge']['y']])
        self.ptHingeHead_i = np.array([self.params['gui']['head']['hinge']['x'], self.params['gui']['head']['hinge']['y']])
        self.ptHingeAbdomen_i = np.array([self.params['gui']['abdomen']['hinge']['x'], self.params['gui']['abdomen']['hinge']['y']])

        # Compute the body-outward-facing angle, which is the angle from the body center to the bodypart hinge.
        # pt1 = [params['head']['hinge']['x'], params['head']['hinge']['y']]
        # pt2 = [params['abdomen']['hinge']['x'], params['abdomen']['hinge']['y']]
        # pt3 = [params['gui']['left']['hinge']['x'], params['gui']['left']['hinge']['y']]
        # pt4 = [params['right']['hinge']['x'], params['right']['hinge']['y']]
        # ptBodyCenter_i = get_intersection(pt1,pt2,pt3,pt4)
        # self.angleBodypart_i = float(np.arctan2(self.ptHinge_i[1]-ptBodyCenter_i[1], self.ptHinge_i[0]-ptBodyCenter_i[0]))

        # Compute the body-outward-facing angle, which is the angle to the current point from the forward body axis.
        if (self.name in ['head', 'abdomen']):
            nameRelative = {'head': 'abdomen', 'abdomen': 'head', 'left': 'right', 'right': 'left'}
            self.angleBodypart_i = float(np.arctan2(self.params['gui'][self.name]['hinge']['y'] - self.params['gui'][nameRelative[self.name]]['hinge']['y'],
                                                    self.params['gui'][self.name]['hinge']['x'] - self.params['gui'][nameRelative[self.name]]['hinge']['x']))
        else:
            ptBodyaxis_i = imageprocessing.get_projection_onto_axis(self.ptHinge_i, (self.ptHingeAbdomen_i, self.ptHingeHead_i))
            self.angleBodypart_i = float(np.arctan2(self.params['gui'][self.name]['hinge']['y'] - ptBodyaxis_i[1],
                                                    self.params['gui'][self.name]['hinge']['x'] - ptBodyaxis_i[0]))

        self.angleBodypart_b = self.angleBodypart_i - self.angleBody_i

        cosAngleBodypart_i = np.cos(self.angleBodypart_i)
        sinAngleBodypart_i = np.sin(self.angleBodypart_i)
        self.R = np.array([[cosAngleBodypart_i, -sinAngleBodypart_i],
                           [sinAngleBodypart_i,  cosAngleBodypart_i]])

        # Turn on/off the extra windows.
        self.windowBG.set_enable(self.params['gui']['windows'] and self.params['gui'][self.name]['track'] and self.params['gui'][self.name]['subtract_bg'])
        self.windowFG.set_enable(self.params['gui']['windows'] and self.params['gui'][self.name]['track'])

        self.angle_hi_i = self.transform_angle_i_from_b(self.params['gui'][self.name]['angle_hi'])
        self.angle_lo_i = self.transform_angle_i_from_b(self.params['gui'][self.name]['angle_lo'])

        # Refresh the handle points.
        self.update_handle_points()

    # transform_angle_b_from_p()
    # Transform an angle from the bodypart frame to the fly body frame.
    #
    def transform_angle_b_from_p(self, angle_p):
        angle_b = self.sense * angle_p + self.angleBodypart_b
        return angle_b

    # transform_angle_p_from_b()
    # Transform an angle from the fly body frame to the bodypart frame.
    #
    def transform_angle_p_from_b(self, angle_b):
        angle_p = self.sense * (angle_b - self.angleBodypart_b)
        return angle_p

    # transform_angle_i_from_b()
    # Transform an angle from the fly body frame to the camera image frame.
    #
    def transform_angle_i_from_b(self, angle_b):
        angle_i = angle_b + self.angleBody_i
        return angle_i

    # transform_angle_b_from_i()
    # Transform an angle from the camera image frame to the fly frame: longitudinal axis head is 0, CW positive.
    #
    def transform_angle_b_from_i(self, angle_i):
        angle_b = angle_i - self.angleBody_i
        angle_b = ((angle_b + np.pi) % (2.0 * np.pi)) - np.pi
        return angle_b

    def get_bodyangle_i(self):
        angle_i = imageprocessing.get_angle_from_points_i(np.array([self.params['gui']['abdomen']['hinge']['x'], self.params['gui']['abdomen']['hinge']['y']]),
                                                          np.array([self.params['gui']['head']['hinge']['x'], self.params['gui']['head']['hinge']['y']]))
        #angleBody_i = (angle_i + np.pi) % (2.0*np.pi) - np.pi
        angleBody_i = float(angle_i)
        return angleBody_i

    # create_mask()
    # Create elliptical wedge masks, and window functions.
    #
    def create_mask(self, shape):
        # Create the mask.
        img = np.zeros(shape, dtype=np.uint8)

        # Args for the ellipse calls.
        x = int(self.params['gui'][self.name]['hinge']['x'])
        y = int(self.params['gui'][self.name]['hinge']['y'])
        r_outer = int(np.ceil(self.params['gui'][self.name]['radius_outer']))
        r_inner = int(np.floor(self.params['gui'][self.name]['radius_inner'])) - 1
        hi = int(np.ceil(np.rad2deg(self.angle_hi_i)))
        lo = int(np.floor(np.rad2deg(self.angle_lo_i)))

        # Draw the mask.
        cv_filled = -1
        cv2.ellipse(img, (x, y), (r_outer, r_outer), 0, hi, lo, ui.bgra_dict['white'], cv_filled)
        cv2.ellipse(img, (x, y), (r_inner, r_inner), 0, 0, 360, ui.bgra_dict['black'], cv_filled)
        img = cv2.dilate(img, np.ones([3, 3]))  # Make the mask one pixel bigger to account for pixel aliasing.
        self.windowMask.set_image(img)

        # Find the ROI of the mask.
        xSum = np.sum(img, 0)
        ySum = np.sum(img, 1)
        x_list = np.where(xSum > 0)[0]
        y_list = np.where(ySum > 0)[0]

        if (len(x_list) > 0) and (len(y_list) > 0):
            # Get the extents.
            xMin = np.where(xSum > 0)[0][0]
            xMax = np.where(xSum > 0)[0][-1] + 1
            yMin = np.where(ySum > 0)[0][0]
            yMax = np.where(ySum > 0)[0][-1] + 1

            # Clip border to image edges.
            self.mask.xMin = np.max([0, xMin])
            self.mask.yMin = np.max([0, yMin])
            self.mask.xMax = np.min([xMax, shape[1] - 1])
            self.mask.yMax = np.min([yMax, shape[0] - 1])

            self.roi = np.array([self.mask.xMin, self.mask.yMin, self.mask.xMax, self.mask.yMax])
            self.mask.img = img[self.mask.yMin:self.mask.yMax, self.mask.xMin:self.mask.xMax]
            self.mask.sum = np.sum(self.mask.img).astype(np.float32)

            self.bValidMask = True
        else:
            self.mask.xMin = None
            self.mask.yMin = None
            self.mask.xMax = None
            self.mask.yMax = None
            rospy.logwarn('%s: Empty mask.' % self.name)

    # set_background()
    # Set the given image as the background image.
    #
    def set_background(self, image):
        with self.lockBackground:
            self.imgFullBackground = image.astype(np.float32)
            self.imgRoiBackground = None

    # invert_background()
    # Invert the color of the background image.
    #
    def invert_background(self):
        with self.lockBackground:
            if (self.imgRoiBackground is not None):
                self.imgRoiBackground = 255 - self.imgRoiBackground

    def update_background(self):
        alphaBackground = 1.0 - np.exp(-self.dt / self.rc_background)

        with self.lockBackground:
            if (self.imgRoiBackground is not None):
                if (self.imgRoi is not None):
                    if (self.imgRoiBackground.size == self.imgRoi.size):
                        cv2.accumulateWeighted(self.imgRoi.astype(np.float32), self.imgRoiBackground, alphaBackground)
                    else:
                        self.imgRoiBackground = None
                        self.imgRoi = None
            else:
                if (self.imgFullBackground is not None) and (self.roi is not None):
                    self.imgRoiBackground = copy.deepcopy(self.imgFullBackground[self.roi[1]:self.roi[3], self.roi[0]:self.roi[2]])

            if (self.imgRoiBackground is not None):
                self.imgHeadroom = (255.0 - self.imgRoiBackground)
            else:
                self.imgHeadroom = None

        self.windowBG.set_image(self.imgRoiBackground)

    def update_roi(self, image, bInvertColor):
        self.image = image
        self.shape = image.shape

        # Extract the ROI images.
        if (self.roi is not None):
            self.imgRoi = copy.deepcopy(image[self.roi[1]:self.roi[3], self.roi[0]:self.roi[2]])

            # Color inversion.
            if (bInvertColor):
                self.imgRoiFg = 255 - self.imgRoi
            else:
                self.imgRoiFg = self.imgRoi

            # Background Subtraction.
            if (self.params['gui'][self.name]['subtract_bg']):
                with self.lockBackground:
                    if (self.imgRoiBackground is not None):
                        if (self.imgRoiBackground.shape == self.imgRoiFg.shape):
                            if (bInvertColor):
                                self.imgRoiFg = cv2.absdiff(self.imgRoiFg, 255 - self.imgRoiBackground.astype(np.uint8))
                            else:
                                self.imgRoiFg = cv2.absdiff(self.imgRoiFg, self.imgRoiBackground.astype(np.uint8))

            # Equalize the brightness/contrast.
            if (self.bEqualizeHist):
                if (self.imgRoiFg is not None):
                    self.imgRoiFg -= np.min(self.imgRoiFg)
                    max2 = np.max([1.0, np.max(self.imgRoiFg)])
                    self.imgRoiFg *= (255.0 / float(max2))

    # update_handle_points()
    # Update the dictionary of handle point names and locations.
    # Compute the various handle points.
    #
    def update_handle_points(self):
        x = self.params['gui'][self.name]['hinge']['x']
        y = self.params['gui'][self.name]['hinge']['y']
        radius_outer = self.params['gui'][self.name]['radius_outer']
        radius_inner = self.params['gui'][self.name]['radius_inner']
        angle = (self.angle_hi_i + self.angle_lo_i) / 2.0

        self.handles['hinge'].pt = np.array([x, y])
        self.handles['radius_inner'].pt = np.array([x, y]) + ((radius_inner) * np.array([np.cos(angle), np.sin(angle)]))
        self.handles['angle_hi'].pt = np.array([x, y]) + np.array([(radius_outer) * np.cos(self.angle_hi_i), (radius_outer) * np.sin(self.angle_hi_i)])
        self.handles['angle_lo'].pt = np.array([x, y]) + np.array([(radius_outer) * np.cos(self.angle_lo_i), (radius_outer) * np.sin(self.angle_lo_i)])

        self.ptWedgeHi_outer = tuple((np.array([x, y]) + np.array([radius_outer * np.cos(self.angle_hi_i), (radius_outer) * np.sin(self.angle_hi_i)])).astype(int))
        self.ptWedgeHi_inner = tuple((np.array([x, y]) + np.array([radius_inner * np.cos(self.angle_hi_i), (radius_inner) * np.sin(self.angle_hi_i)])).astype(int))
        self.ptWedgeLo_outer = tuple((np.array([x, y]) + np.array([radius_outer * np.cos(self.angle_lo_i), (radius_outer) * np.sin(self.angle_lo_i)])).astype(int))
        self.ptWedgeLo_inner = tuple((np.array([x, y]) + np.array([radius_inner * np.cos(self.angle_lo_i), (radius_inner) * np.sin(self.angle_lo_i)])).astype(int))

    # update()
    # Update all the internals given a foreground camera image.
    #
    def update(self, dt, image, bInvertColor):
        self.iCount += 1
        self.dt = dt

        if (self.params['gui'][self.name]['track']):
            if (not self.bValidMask):
                self.create_mask(image.shape)
                self.bValidMask = True

            if (self.params['gui'][self.name]['subtract_bg']):
                self.update_background()

            self.update_roi(image, bInvertColor)
            self.windowFG.set_image(self.imgRoiFg)

            # Apply the mask.
            if (self.imgRoiFg is not None) and (self.mask.img is not None):
                self.imgRoiFgMasked = cv2.bitwise_and(self.imgRoiFg.astype(self.mask.img.dtype), self.mask.img)

            self.imgFinal = self.imgRoiFgMasked
        else:
            self.imgFinal = None

    # hit_object()
    # Get the UI object, if any, that the mouse is on.
    def hit_object(self, ptMouse):
        tag = None

        # Check for handle hits.
        if (self.params['gui'][self.name]['track']):
            for tagHandle, handle in self.handles.iteritems():
                if (handle.hit_test(ptMouse)):
                    tag = tagHandle
                    break
        else:
            tagHandle, handle = ('hinge', self.handles['hinge'])
            if (handle.hit_test(ptMouse)):
                tag = tagHandle

        return (self.name, tag)

    def draw_handles(self, image):
        # Draw all handle points, or only just the hinge handle.
        if (self.params['gui'][self.name]['track']):
            for tagHandle, handle in self.handles.iteritems():
                handle.draw(image)
        else:
            tagHandle, handle = ('hinge', self.handles['hinge'])
            handle.draw(image)

    # draw()
    # Draw the outline.
    #
    def draw(self, image):
        self.draw_handles(image)

        if (self.params['gui'][self.name]['track']):
            x = int(self.params['gui'][self.name]['hinge']['x'])
            y = int(self.params['gui'][self.name]['hinge']['y'])
            radius_outer = int(self.params['gui'][self.name]['radius_outer'])
            radius_inner = int(self.params['gui'][self.name]['radius_inner'])
            radius_mid = int((self.params['gui'][self.name]['radius_outer'] + self.params['gui'][self.name]['radius_inner']) / 2.0)

            # if ()
            # angle1 = self.angle_lo_i
            # angle2 = self.angle_hi_i

            # Draw the mid arc.
            cv2.ellipse(image, (x, y), (radius_mid, radius_mid), np.rad2deg(0.0), np.rad2deg(self.angle_hi_i), np.rad2deg(self.angle_lo_i), self.bgra_dim, 1)

            # Draw the outer arc.
            cv2.ellipse(image, (x, y), (radius_outer, radius_outer), np.rad2deg(0.0), np.rad2deg(self.angle_hi_i), np.rad2deg(self.angle_lo_i), self.bgra_dim, 1)

            # Draw the inner arc.
            cv2.ellipse(image, (x, y), (radius_inner, radius_inner), np.rad2deg(0.0), np.rad2deg(self.angle_hi_i), np.rad2deg(self.angle_lo_i), self.bgra_dim, 1)

            # Draw wedge lines.
            cv2.line(image, self.ptWedgeHi_inner, self.ptWedgeHi_outer, self.bgra_dim, 1)
            cv2.line(image, self.ptWedgeLo_inner, self.ptWedgeLo_outer, self.bgra_dim, 1)

        # Show the extra windows.
        self.windowBG.show()
        self.windowFG.show()
        self.windowMask.show()
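# Usage sketch (assumption, not part of the original source): the bodypart trackers are
# meant to be driven once per camera frame.  set_params() caches the wedge geometry,
# set_background() seeds background subtraction, update() runs the mask/ROI/foreground
# pipeline, and draw() overlays the wedge, handles, and tracked state.  The 'camera'
# object below is a hypothetical placeholder for whatever supplies (dt, image).
def run_tracker_demo(tracker, camera, imgBackground, imgDisplay):
    tracker.set_background(imgBackground)              # Full camera frame used for bg subtraction.
    while not rospy.is_shutdown():
        (dt, image) = camera.get_frame()               # Hypothetical frame source.
        tracker.update(dt, image, bInvertColor=False)  # Mask, ROI, bg-subtract, then track.
        tracker.draw(imgDisplay)                       # Overlay wedge, handles, and edges/tip.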
class EdgeTrackerByIntensityProfile(MotionTrackedBodypartPolar): def __init__(self, name=None, params={}, color="white", bEqualizeHist=False): MotionTrackedBodypartPolar.__init__(self, name, params, color, bEqualizeHist) self.name = name self.detector = EdgeDetectorByIntensityProfile() self.state = MsgState() self.windowEdges = ImageWindow(False, self.name + "Edges") self.set_params(params) # Services, for live intensities plots via live_wing_histograms.py self.service_trackerdata = rospy.Service("trackerdata_" + name, SrvTrackerdata, self.serve_trackerdata_callback) # set_params() # Set the given params dict into this object. # def set_params(self, params): MotionTrackedBodypartPolar.set_params(self, params) self.imgRoiBackground = None self.iCount = 0 self.state.intensity = 0.0 self.state.angles = [] self.state.gradients = [] # Compute the 'handedness' of the head/abdomen and wing/wing axes. self.sense specifies the direction of positive angles. matAxes = np.array( [ [ self.params["gui"]["head"]["hinge"]["x"] - self.params["gui"]["abdomen"]["hinge"]["x"], self.params["gui"]["head"]["hinge"]["y"] - self.params["gui"]["abdomen"]["hinge"]["y"], ], [ self.params["gui"]["right"]["hinge"]["x"] - self.params["gui"]["left"]["hinge"]["x"], self.params["gui"]["right"]["hinge"]["y"] - self.params["gui"]["left"]["hinge"]["y"], ], ] ) if self.name in ["left", "right"]: self.senseAxes = np.sign(np.linalg.det(matAxes)) a = -1 if (self.name == "right") else 1 self.sense = a * self.senseAxes else: self.sense = 1 self.detector.set_params(params[self.name]["threshold"], params["n_edges_max"], self.sense) # update_state() # def update_state(self): imgNow = self.imgRoiFgMaskedPolarCropped self.windowEdges.set_image(imgNow) # Get the rotation & expansion between images. if imgNow is not None: # Pixel position and strength of the edges. (edges, gradients) = self.detector.detect(imgNow) anglePerPixel = ( self.params["gui"][self.name]["angle_hi"] - self.params["gui"][self.name]["angle_lo"] ) / float(imgNow.shape[1]) # Convert pixel to angle units, and put angle into the wing frame. self.state.angles = [] self.state.gradients = [] for i in range(len(edges)): edge = edges[i] gradient = gradients[i] angle_b = self.params["gui"][self.name]["angle_lo"] + edge * anglePerPixel angle_p = (self.transform_angle_p_from_b(angle_b) + np.pi) % (2 * np.pi) - np.pi self.state.angles.append(angle_p) self.state.gradients.append(gradient) self.state.intensity = np.mean(imgNow) / 255.0 # update() # Update all the internals given a foreground camera image. # def update(self, dt, image, bInvertColor): MotionTrackedBodypartPolar.update(self, dt, image, bInvertColor) if self.params["gui"][self.name]["track"]: self.update_state() # draw() # Draw the outline. # def draw(self, image): MotionTrackedBodypartPolar.draw(self, image) if self.params["gui"][self.name]["track"]: # Draw the major and minor edges alternately, until the max number has been reached. 
bgra = self.bgra for i in range(len(self.state.angles)): angle_b = self.transform_angle_b_from_p(self.state.angles[i]) angle_i = self.transform_angle_i_from_b(angle_b) x0 = self.ptHinge_i[0] + self.params["gui"][self.name]["radius_inner"] * np.cos(angle_i) y0 = self.ptHinge_i[1] + self.params["gui"][self.name]["radius_inner"] * np.sin(angle_i) x1 = self.ptHinge_i[0] + self.params["gui"][self.name]["radius_outer"] * np.cos(angle_i) y1 = self.ptHinge_i[1] + self.params["gui"][self.name]["radius_outer"] * np.sin(angle_i) cv2.line(image, (int(x0), int(y0)), (int(x1), int(y1)), bgra, 1) bgra = tuple(0.5 * np.array(bgra)) self.windowEdges.show() def serve_trackerdata_callback(self, request): title1 = "Intensities" title2 = "Diffs" abscissa = np.linspace( self.params["gui"][self.name]["angle_lo"], self.params["gui"][self.name]["angle_hi"], len(self.detector.intensities), ) angles = [] gradients = [] for i in range(len(self.state.angles)): angle = self.state.angles[i] gradient = self.state.gradients[i] angle_b = ((self.transform_angle_b_from_p(angle) + np.pi) % (2 * np.pi)) - np.pi angles.append(angle_b) gradients.append(gradient) return SrvTrackerdataResponse( self.color, title1, title2, abscissa, self.detector.intensities, self.detector.diff, angles, gradients )
class MotionTrackedBodypart(object): def __init__(self, name=None, params={}, color='white', bEqualizeHist=False): self.name = name self.bEqualizeHist = bEqualizeHist self.bValidMask = False self.color = color self.bgra = ui.bgra_dict[color] self.bgra_dim = tuple(0.5*np.array(ui.bgra_dict[color])) self.bgra_state = ui.bgra_dict[color]#ui.bgra_dict['red'] self.pixelmax = 255.0 self.shape = (np.inf, np.inf) self.ptHinge_i = np.array([0,0]) self.roi = None self.mask = Struct() self.mask.img = None self.mask.sum = 1.0 self.mask.xMin = None self.mask.yMin = None self.mask.xMax = None self.mask.yMax = None self.dt = np.inf self.params = {} self.handles = {'hinge':ui.Handle(np.array([0,0]), self.bgra, name='hinge'), 'angle_hi':ui.Handle(np.array([0,0]), self.bgra, name='angle_hi'), 'angle_lo':ui.Handle(np.array([0,0]), self.bgra, name='angle_lo'), 'radius_inner':ui.Handle(np.array([0,0]), self.bgra, name='radius_inner') } self.lockBackground = threading.Lock() # Region of interest images. self.imgFullBackground = None self.imgRoiBackground = None self.imgRoi = None # Untouched roi image. self.imgRoiFg = None # Background subtracted. self.imgRoiFgMasked = None self.imgFinal = None # The image to use for tracking. self.imgHeadroom = None # Extra windows. self.windowBG = ImageWindow(False, self.name+'BG') self.windowFG = ImageWindow(False, self.name+'FG') self.windowMask = ImageWindow(gbShowMasks, self.name+'Mask') # set_params() # Set the given params dict into this object, and cache a few values. # def set_params(self, params): self.params = params self.rc_background = self.params['rc_background'] self.angleBody_i = self.get_bodyangle_i() self.cosAngleBody_i = np.cos(self.angleBody_i) self.sinAngleBody_i = np.sin(self.angleBody_i) self.ptHinge_i = np.array([self.params['gui'][self.name]['hinge']['x'], self.params['gui'][self.name]['hinge']['y']]) self.ptHingeHead_i = np.array([self.params['gui']['head']['hinge']['x'], self.params['gui']['head']['hinge']['y']]) self.ptHingeAbdomen_i = np.array([self.params['gui']['abdomen']['hinge']['x'], self.params['gui']['abdomen']['hinge']['y']]) # Compute the body-outward-facing angle, which is the angle from the body center to the bodypart hinge. # pt1 = [params['head']['hinge']['x'], params['head']['hinge']['y']] # pt2 = [params['abdomen']['hinge']['x'], params['abdomen']['hinge']['y']] # pt3 = [params['gui']['left']['hinge']['x'], params['gui']['left']['hinge']['y']] # pt4 = [params['right']['hinge']['x'], params['right']['hinge']['y']] # ptBodyCenter_i = get_intersection(pt1,pt2,pt3,pt4) # self.angleBodypart_i = float(np.arctan2(self.ptHinge_i[1]-ptBodyCenter_i[1], self.ptHinge_i[0]-ptBodyCenter_i[0])) # Compute the body-outward-facing angle, which is the angle to the current point from the forward body axis. 
if (self.name in ['head','abdomen']): nameRelative = {'head':'abdomen', 'abdomen':'head', 'left':'right', 'right':'left'} self.angleBodypart_i = float(np.arctan2(self.params['gui'][self.name]['hinge']['y']-self.params['gui'][nameRelative[self.name]]['hinge']['y'], self.params['gui'][self.name]['hinge']['x']-self.params['gui'][nameRelative[self.name]]['hinge']['x'])) else: ptBodyaxis_i = imageprocessing.get_projection_onto_axis(self.ptHinge_i, (self.ptHingeAbdomen_i, self.ptHingeHead_i)) self.angleBodypart_i = float(np.arctan2(self.params['gui'][self.name]['hinge']['y']-ptBodyaxis_i[1], self.params['gui'][self.name]['hinge']['x']-ptBodyaxis_i[0])) self.angleBodypart_b = self.angleBodypart_i - self.angleBody_i cosAngleBodypart_i = np.cos(self.angleBodypart_i) sinAngleBodypart_i = np.sin(self.angleBodypart_i) self.R = np.array([[cosAngleBodypart_i, -sinAngleBodypart_i], [sinAngleBodypart_i, cosAngleBodypart_i]]) # Turn on/off the extra windows. self.windowBG.set_enable(self.params['gui']['windows'] and self.params['gui'][self.name]['track'] and self.params['gui'][self.name]['subtract_bg']) self.windowFG.set_enable(self.params['gui']['windows'] and self.params['gui'][self.name]['track']) self.angle_hi_i = self.transform_angle_i_from_b(self.params['gui'][self.name]['angle_hi']) self.angle_lo_i = self.transform_angle_i_from_b(self.params['gui'][self.name]['angle_lo']) # Refresh the handle points. self.update_handle_points() # transform_angle_b_from_p() # Transform an angle from the bodypart frame to the fly body frame. # def transform_angle_b_from_p(self, angle_p): angle_b = self.sense * angle_p + self.angleBodypart_b return angle_b # transform_angle_p_from_b() # Transform an angle from the fly body frame to the bodypart frame. # def transform_angle_p_from_b(self, angle_b): angle_p = self.sense * (angle_b - self.angleBodypart_b) return angle_p # transform_angle_i_from_b() # Transform an angle from the fly body frame to the camera image frame. # def transform_angle_i_from_b(self, angle_b): angle_i = angle_b + self.angleBody_i return angle_i # transform_angle_b_from_i() # Transform an angle from the camera image frame to the fly frame: longitudinal axis head is 0, CW positive. # def transform_angle_b_from_i(self, angle_i): angle_b = angle_i - self.angleBody_i angle_b = ((angle_b+np.pi) % (2.0*np.pi)) - np.pi return angle_b def get_bodyangle_i(self): angle_i = imageprocessing.get_angle_from_points_i(np.array([self.params['gui']['abdomen']['hinge']['x'], self.params['gui']['abdomen']['hinge']['y']]), np.array([self.params['gui']['head']['hinge']['x'], self.params['gui']['head']['hinge']['y']])) #angleBody_i = (angle_i + np.pi) % (2.0*np.pi) - np.pi angleBody_i = float(angle_i) return angleBody_i # create_mask() # Create elliptical wedge masks, and window functions. # def create_mask(self, shape): # Create the mask. img = np.zeros(shape, dtype=np.uint8) # Args for the ellipse calls. x = int(self.params['gui'][self.name]['hinge']['x']) y = int(self.params['gui'][self.name]['hinge']['y']) r_outer = int(np.ceil(self.params['gui'][self.name]['radius_outer'])) r_inner = int(np.floor(self.params['gui'][self.name]['radius_inner']))-1 hi = int(np.ceil(np.rad2deg(self.angle_hi_i))) lo = int(np.floor(np.rad2deg(self.angle_lo_i))) # Draw the mask. 
cv2.ellipse(img, (x, y), (r_outer, r_outer), 0, hi, lo, ui.bgra_dict['white'], cv.CV_FILLED) cv2.ellipse(img, (x, y), (r_inner, r_inner), 0, 0, 360, ui.bgra_dict['black'], cv.CV_FILLED) img = cv2.dilate(img, np.ones([3,3])) # Make the mask one pixel bigger to account for pixel aliasing. self.windowMask.set_image(img) # Find the ROI of the mask. xSum = np.sum(img, 0) ySum = np.sum(img, 1) x_list = np.where(xSum>0)[0] y_list = np.where(ySum>0)[0] if (len(x_list)>0) and (len(y_list)>0): # Get the extents. xMin = np.where(xSum>0)[0][0] xMax = np.where(xSum>0)[0][-1] + 1 yMin = np.where(ySum>0)[0][0] yMax = np.where(ySum>0)[0][-1] + 1 # Clip border to image edges. self.mask.xMin = np.max([0,xMin]) self.mask.yMin = np.max([0,yMin]) self.mask.xMax = np.min([xMax, shape[1]-1]) self.mask.yMax = np.min([yMax, shape[0]-1]) self.roi = np.array([self.mask.xMin, self.mask.yMin, self.mask.xMax, self.mask.yMax]) self.mask.img = img[self.mask.yMin:self.mask.yMax, self.mask.xMin:self.mask.xMax] self.mask.sum = np.sum(self.mask.img).astype(np.float32) self.bValidMask = True else: self.mask.xMin = None self.mask.yMin = None self.mask.xMax = None self.mask.yMax = None rospy.logwarn('%s: Empty mask.' % self.name) # set_background() # Set the given image as the background image. # def set_background(self, image): with self.lockBackground: self.imgFullBackground = image.astype(np.float32) self.imgRoiBackground = None # invert_background() # Invert the color of the background image. # def invert_background(self): with self.lockBackground: if (self.imgRoiBackground is not None): self.imgRoiBackground = 255-self.imgRoiBackground def update_background(self): alphaBackground = 1.0 - np.exp(-self.dt / self.rc_background) with self.lockBackground: if (self.imgRoiBackground is not None): if (self.imgRoi is not None): if (self.imgRoiBackground.size==self.imgRoi.size): cv2.accumulateWeighted(self.imgRoi.astype(np.float32), self.imgRoiBackground, alphaBackground) else: self.imgRoiBackground = None self.imgRoi = None else: if (self.imgFullBackground is not None) and (self.roi is not None): self.imgRoiBackground = copy.deepcopy(self.imgFullBackground[self.roi[1]:self.roi[3], self.roi[0]:self.roi[2]]) if (self.imgRoiBackground is not None): self.imgHeadroom = (255.0 - self.imgRoiBackground) else: self.imgHeadroom = None self.windowBG.set_image(self.imgRoiBackground) def update_roi(self, image, bInvertColor): self.image = image self.shape = image.shape # Extract the ROI images. if (self.roi is not None): self.imgRoi = copy.deepcopy(image[self.roi[1]:self.roi[3], self.roi[0]:self.roi[2]]) # Color inversion. if (bInvertColor): self.imgRoiFg = 255-self.imgRoi else: self.imgRoiFg = self.imgRoi # Background Subtraction. if (self.params['gui'][self.name]['subtract_bg']): with self.lockBackground: if (self.imgRoiBackground is not None): if (self.imgRoiBackground.shape==self.imgRoiFg.shape): if (bInvertColor): self.imgRoiFg = cv2.absdiff(self.imgRoiFg, 255-self.imgRoiBackground.astype(np.uint8)) else: self.imgRoiFg = cv2.absdiff(self.imgRoiFg, self.imgRoiBackground.astype(np.uint8)) # Equalize the brightness/contrast. if (self.bEqualizeHist): if (self.imgRoiFg is not None): self.imgRoiFg -= np.min(self.imgRoiFg) max2 = np.max([1.0, np.max(self.imgRoiFg)]) self.imgRoiFg *= (255.0/float(max2)) # update_handle_points() # Update the dictionary of handle point names and locations. # Compute the various handle points. 
# def update_handle_points (self): x = self.params['gui'][self.name]['hinge']['x'] y = self.params['gui'][self.name]['hinge']['y'] radius_outer = self.params['gui'][self.name]['radius_outer'] radius_inner = self.params['gui'][self.name]['radius_inner'] angle = (self.angle_hi_i+self.angle_lo_i)/2.0 self.handles['hinge'].pt = np.array([x, y]) self.handles['radius_inner'].pt = np.array([x, y]) + ((radius_inner) * np.array([np.cos(angle),np.sin(angle)])) self.handles['angle_hi'].pt = np.array([x, y]) + np.array([(radius_outer)*np.cos(self.angle_hi_i), (radius_outer)*np.sin(self.angle_hi_i)]) self.handles['angle_lo'].pt = np.array([x, y]) + np.array([(radius_outer)*np.cos(self.angle_lo_i), (radius_outer)*np.sin(self.angle_lo_i)]) self.ptWedgeHi_outer = tuple((np.array([x, y]) + np.array([radius_outer*np.cos(self.angle_hi_i), (radius_outer)*np.sin(self.angle_hi_i)])).astype(int)) self.ptWedgeHi_inner = tuple((np.array([x, y]) + np.array([radius_inner*np.cos(self.angle_hi_i), (radius_inner)*np.sin(self.angle_hi_i)])).astype(int)) self.ptWedgeLo_outer = tuple((np.array([x, y]) + np.array([radius_outer*np.cos(self.angle_lo_i), (radius_outer)*np.sin(self.angle_lo_i)])).astype(int)) self.ptWedgeLo_inner = tuple((np.array([x, y]) + np.array([radius_inner*np.cos(self.angle_lo_i), (radius_inner)*np.sin(self.angle_lo_i)])).astype(int)) # update() # Update all the internals given a foreground camera image. # def update(self, dt, image, bInvertColor): self.iCount += 1 self.dt = dt if (self.params['gui'][self.name]['track']): if (not self.bValidMask): self.create_mask(image.shape) self.bValidMask = True if (self.params['gui'][self.name]['subtract_bg']): self.update_background() self.update_roi(image, bInvertColor) self.windowFG.set_image(self.imgRoiFg) # Apply the mask. if (self.imgRoiFg is not None) and (self.mask.img is not None): self.imgRoiFgMasked = cv2.bitwise_and(self.imgRoiFg.astype(self.mask.img.dtype), self.mask.img) self.imgFinal = self.imgRoiFgMasked else: self.imgFinal = None # hit_object() # Get the UI object, if any, that the mouse is on. def hit_object(self, ptMouse): tag = None # Check for handle hits. if (self.params['gui'][self.name]['track']): for tagHandle,handle in self.handles.iteritems(): if (handle.hit_test(ptMouse)): tag = tagHandle break else: tagHandle,handle = ('hinge',self.handles['hinge']) if (handle.hit_test(ptMouse)): tag = tagHandle return (self.name, tag) def draw_handles(self, image): # Draw all handle points, or only just the hinge handle. if (self.params['gui'][self.name]['track']): for tagHandle,handle in self.handles.iteritems(): handle.draw(image) else: tagHandle,handle = ('hinge',self.handles['hinge']) handle.draw(image) # draw() # Draw the outline. # def draw(self, image): self.draw_handles(image) if (self.params['gui'][self.name]['track']): x = int(self.params['gui'][self.name]['hinge']['x']) y = int(self.params['gui'][self.name]['hinge']['y']) radius_outer = int(self.params['gui'][self.name]['radius_outer']) radius_inner = int(self.params['gui'][self.name]['radius_inner']) radius_mid = int((self.params['gui'][self.name]['radius_outer']+self.params['gui'][self.name]['radius_inner'])/2.0) # if () # angle1 = self.angle_lo_i # angle2 = self.angle_hi_i # Draw the mid arc. cv2.ellipse(image, (x, y), (radius_mid, radius_mid), np.rad2deg(0.0), np.rad2deg(self.angle_hi_i), np.rad2deg(self.angle_lo_i), self.bgra_dim, 1) # Draw the outer arc. 
cv2.ellipse(image, (x, y), (radius_outer, radius_outer), np.rad2deg(0.0), np.rad2deg(self.angle_hi_i), np.rad2deg(self.angle_lo_i), self.bgra_dim, 1) # Draw the inner arc. cv2.ellipse(image, (x, y), (radius_inner, radius_inner), np.rad2deg(0.0), np.rad2deg(self.angle_hi_i), np.rad2deg(self.angle_lo_i), self.bgra_dim, 1) # Draw wedge lines. cv2.line(image, self.ptWedgeHi_inner, self.ptWedgeHi_outer, self.bgra_dim, 1) cv2.line(image, self.ptWedgeLo_inner, self.ptWedgeLo_outer, self.bgra_dim, 1) # Show the extra windows. self.windowBG.show() self.windowFG.show() self.windowMask.show()
class TipTracker(MotionTrackedBodypartPolar):
    def __init__(self, name=None, params={}, color='white', bEqualizeHist=False):
        MotionTrackedBodypartPolar.__init__(self, name, params, color, bEqualizeHist)

        self.name = name
        self.detector = TipDetector()
        self.state = MsgState()
        self.set_params(params)
        self.windowTip = ImageWindow(False, self.name+'Tip')
        self.iAngle = 0
        self.iRadius = 0

        # Services, for live intensities plots via live_wing_histograms.py
        self.service_trackerdata = rospy.Service('trackerdata_'+self.name, SrvTrackerdata, self.serve_trackerdata_callback)

    # set_params()
    # Set the given params dict into this object.
    #
    def set_params(self, params):
        MotionTrackedBodypartPolar.set_params(self, params)

        self.imgRoiBackground = None
        self.iCount = 0
        self.state.intensity = 0.0
        self.state.angles = []
        self.state.gradients = []

        # Compute the 'handedness' of the head/abdomen and wing/wing axes.  self.sense specifies the direction of positive angles.
        matAxes = np.array([[self.params['gui']['head']['hinge']['x']-self.params['gui']['abdomen']['hinge']['x'], self.params['gui']['head']['hinge']['y']-self.params['gui']['abdomen']['hinge']['y']],
                            [self.params['gui']['right']['hinge']['x']-self.params['gui']['left']['hinge']['x'], self.params['gui']['right']['hinge']['y']-self.params['gui']['left']['hinge']['y']]])
        if (self.name in ['left','right']):
            self.senseAxes = np.sign(np.linalg.det(matAxes))
            a = -1 if (self.name=='right') else 1
            self.sense = a*self.senseAxes
        else:
            self.sense = 1

        self.detector.set_params(params[self.name]['threshold'], self.sense)

    # update_state()
    #
    def update_state(self):
        imgNow = self.imgRoiFgMaskedPolarCropped

        if (imgNow is not None):
            # Pixel position and strength of the edges.
            (self.iAngle, self.iRadius) = self.detector.detect(imgNow)

            if (self.iAngle is not None) and (self.iRadius is not None):
                # Convert pixel to angle units, and put angle into the wing frame.
                anglePerPixel = (self.params['gui'][self.name]['angle_hi']-self.params['gui'][self.name]['angle_lo']) / float(imgNow.shape[1])
                angle_b = self.params['gui'][self.name]['angle_lo'] + self.iAngle * anglePerPixel
                angle_p = (self.transform_angle_p_from_b(angle_b) + np.pi) % (2*np.pi) - np.pi
                self.state.angles = [angle_p]
                self.state.gradients = self.detector.diffs

                radius = self.params['gui'][self.name]['radius_inner'] + self.iRadius
                self.state.radii = [radius]
                self.state.intensity = np.mean(imgNow)/255.0
            else:
                self.state.angles = []
                self.state.radii = []
                self.state.gradients = self.detector.diffs
                self.state.intensity = np.mean(imgNow)/255.0

    # update()
    # Update all the internals given a foreground camera image.
    #
    def update(self, dt, image, bInvertColor):
        MotionTrackedBodypartPolar.update(self, dt, image, bInvertColor)

        if (self.params['gui'][self.name]['track']):
            self.update_state()
            self.windowTip.set_image(self.imgRoiFgMaskedPolarCropped)

    # draw()
    # Draw the state.
    #
    def draw(self, image):
        MotionTrackedBodypartPolar.draw(self, image)

        if (self.params['gui'][self.name]['track']) and (len(self.state.angles) > 0):
            # Draw the bodypart state position.
            angle = self.state.angles[0] * self.sense
            pt = self.R.dot([self.state.radii[0]*np.cos(angle),
                             self.state.radii[0]*np.sin(angle)])
            ptState_i = imageprocessing.clip_pt((int(pt[0]+self.params['gui'][self.name]['hinge']['x']),
                                                 int(pt[1]+self.params['gui'][self.name]['hinge']['y'])), image.shape)

            cv2.ellipse(image, ptState_i, (2,2), 0, 0, 360, self.bgra_state, 1)

            # Draw line from hinge to state point.
            ptHinge_i = imageprocessing.clip_pt((int(self.ptHinge_i[0]), int(self.ptHinge_i[1])), image.shape)
            cv2.line(image, ptHinge_i, ptState_i, self.bgra_state, 1)

        self.windowTip.show()

    def serve_trackerdata_callback(self, request):
        title1 = 'Intensities'
        title2 = 'Diffs'
        intensities = list(self.detector.intensities)
        diffs = list(np.abs(self.detector.diffs))
        abscissa = range(len(intensities))  # range(self.params['gui'][self.name]['radius_inner'], self.params['gui'][self.name]['radius_inner'] + len(intensities))
        markersH = [self.iRadius]  # self.state.radii
        markersV = [self.params[self.name]['threshold']]

        return SrvTrackerdataResponse(self.color, title1, title2, abscissa, intensities, diffs, markersH, markersV)
class MotionTrackedBodypartPolar(MotionTrackedBodypart):
    def __init__(self, name=None, params={}, color='white', bEqualizeHist=False):
        MotionTrackedBodypart.__init__(self, name, params, color, bEqualizeHist)

        self.polartransforms = imageprocessing.PolarTransforms()
        self.create_wfn = imageprocessing.WindowFunctions().create_tukey
        self.wfnRoiMaskedPolarCropped = None
        self.imgRoiFgMaskedPolar = None
        self.imgRoiFgMaskedPolarCropped = None
        self.imgRoiFgMaskedPolarCroppedWindowed = None
        self.imgFinal = None  # The image to use for tracking.
        self.imgHeadroomPolar = None
        self.windowPolar = ImageWindow(False, self.name+'Polar')

    def set_params(self, params):
        MotionTrackedBodypart.set_params(self, params)
        self.windowPolar.set_enable(self.params['gui']['windows'] and self.params['gui'][self.name]['track'])

    def create_mask(self, shape):
        MotionTrackedBodypart.create_mask(self, shape)
        self.wfnRoiMaskedPolarCropped = None

        if (self.mask.xMin is not None):
            self.i_0 = self.params['gui'][self.name]['hinge']['y'] - self.roi[1]
            self.j_0 = self.params['gui'][self.name]['hinge']['x'] - self.roi[0]

            # Args for the ellipse calls.
            x = int(self.params['gui'][self.name]['hinge']['x'])
            y = int(self.params['gui'][self.name]['hinge']['y'])
            r_outer = int(np.ceil(self.params['gui'][self.name]['radius_outer']))
            r_inner = int(np.floor(self.params['gui'][self.name]['radius_inner'])) - 1
            hi = int(np.ceil(np.rad2deg(self.angle_hi_i)))
            lo = int(np.floor(np.rad2deg(self.angle_lo_i)))

            # Find where the mask might be clipped.  First, draw an unclipped ellipse.
            delta = 1
            ptsUnclipped = cv2.ellipse2Poly((x, y), (r_outer, r_outer), 0, hi, lo, delta)
            ptsUnclipped = np.append(ptsUnclipped, cv2.ellipse2Poly((x, y), (r_inner, r_inner), 0, hi, lo, delta), 0)
            #ptsUnclipped = np.append(ptsUnclipped, [list of line1 pixels connecting the two arcs], 0)
            #ptsUnclipped = np.append(ptsUnclipped, [list of line2 pixels connecting the two arcs], 0)

            # These are the unclipped locations.
            minUnclipped = ptsUnclipped.min(0)
            maxUnclipped = ptsUnclipped.max(0)
            xMinUnclipped = minUnclipped[0]
            yMinUnclipped = minUnclipped[1]
            xMaxUnclipped = maxUnclipped[0] + 1
            yMaxUnclipped = maxUnclipped[1] + 1

            # Compare unclipped with the as-drawn locations.
            xClip0 = self.mask.xMin - xMinUnclipped
            yClip0 = self.mask.yMin - yMinUnclipped
            xClip1 = xMaxUnclipped - self.mask.xMax
            yClip1 = yMaxUnclipped - self.mask.yMax
            roiClipped = np.array([xClip0, yClip0, xClip1, yClip1])

            (i_n, j_n) = shape[:2]

            # Determine how much of the bottom of the polar image to trim off (i.e. rClip),
            # based on whether the ellipse is partially off-image.
            (rClip0, rClip1, rClip2, rClip3) = (1.0, 1.0, 1.0, 1.0)
            if (roiClipped[0] > 0):  # Left
                rClip0 = 1.0 - (float(roiClipped[0]) / float(r_outer - r_inner))
            if (roiClipped[1] > 0):  # Top
                rClip1 = 1.0 - (float(roiClipped[1]) / float(r_outer - r_inner))
            if (roiClipped[2] > 0):  # Right
                rClip2 = 1.0 - (float(roiClipped[2]) / float(r_outer - r_inner))
            if (roiClipped[3] > 0):  # Bottom
                rClip3 = 1.0 - (float(roiClipped[3]) / float(r_outer - r_inner))

            self.rClip = np.min([rClip0, rClip1, rClip2, rClip3])
    # End create_mask()

    def update_polarimage(self):
        if (self.imgRoiFgMasked is not None):
            theta_0a = self.angle_lo_i - self.angleBodypart_i
            theta_1a = self.angle_hi_i - self.angleBodypart_i

            radius_mid = (self.params['gui'][self.name]['radius_outer'] + self.params['gui'][self.name]['radius_inner'])/2.0
            dr = (self.params['gui'][self.name]['radius_outer'] - self.params['gui'][self.name]['radius_inner'])/2.0

            self.imgRoiFgMasked[self.imgRoiFgMasked == 255] = 254  # Make room for the +1, next.
            try:
                # +1 so we find cropped pixels, next, rather than merely black pixels.
                self.imgRoiFgMaskedPolar = self.polartransforms.transform_polar_elliptical(self.imgRoiFgMasked + 1,
                                                                                           self.i_0, self.j_0,
                                                                                           raxial=radius_mid, rortho=radius_mid,
                                                                                           dradiusStrip=int(dr),
                                                                                           amplifyRho=1.0, rClip=self.rClip,
                                                                                           angleEllipse=self.angleBodypart_i,
                                                                                           theta_0=min(theta_0a, theta_1a),
                                                                                           theta_1=max(theta_0a, theta_1a),
                                                                                           amplifyTheta=1.0)
            except imageprocessing.TransformException:
                rospy.logwarn('%s: Mask is outside the image, no points transformed.' % self.name)

            # Find the y value where the black band should be cropped out (but leave at least one raster if the image is all-black).
            sumY = np.sum(self.imgRoiFgMaskedPolar, 1)
            iSumY = np.where(sumY == 0)[0]
            if (len(iSumY) > 0):
                iMinY = np.max([1, np.min(iSumY)])
            else:
                iMinY = self.imgRoiFgMaskedPolar.shape[0]

            # Crop the bottom of the images.
            self.imgRoiFgMaskedPolarCropped = self.imgRoiFgMaskedPolar[0:iMinY]

            # Push each pixel toward the column mean, depending on the amount of headroom.
            if (self.imgHeadroom is not None) and (self.params['gui'][self.name]['subtract_bg']) and (self.params[self.name]['saturation_correction']):
                try:
                    self.imgHeadroomPolar = self.polartransforms.transform_polar_elliptical(self.imgHeadroom,
                                                                                            self.i_0, self.j_0,
                                                                                            raxial=radius_mid, rortho=radius_mid,
                                                                                            dradiusStrip=int(dr),
                                                                                            amplifyRho=1.0, rClip=self.rClip,
                                                                                            angleEllipse=self.angleBodypart_i,
                                                                                            theta_0=min(theta_0a, theta_1a),
                                                                                            theta_1=max(theta_0a, theta_1a),
                                                                                            amplifyTheta=1.0)
                except imageprocessing.TransformException:
                    pass

                self.imgHeadroomPolarCropped = self.imgHeadroomPolar[0:iMinY]

                # Perform the correction.
                TH = self.imgHeadroomPolarCropped / 255.0
                M = np.mean(self.imgRoiFgMaskedPolarCropped, 0).astype(np.float32)
                TF = self.imgRoiFgMaskedPolarCropped.astype(np.float32)
                self.imgRoiFgMaskedPolarCropped = M + cv2.multiply(TH, TF - M)

            if (self.bValidMask):
                if (self.wfnRoiMaskedPolarCropped is None) or (self.imgRoiFgMaskedPolarCropped.shape != self.wfnRoiMaskedPolarCropped.shape):
                    self.wfnRoiMaskedPolarCropped = self.create_wfn(self.imgRoiFgMaskedPolarCropped.shape, self.params[self.name]['feathering'])

                self.imgRoiFgMaskedPolarCroppedWindowed = cv2.multiply(self.imgRoiFgMaskedPolarCropped.astype(np.float32), self.wfnRoiMaskedPolarCropped)

            # Show the image.
            img = self.imgRoiFgMaskedPolarCroppedWindowed
            #img = self.imgRoiFgMaskedPolarCropped
            self.windowPolar.set_image(img)
    # End update_polarimage()

    def update(self, dt, image, bInvertColor):
        MotionTrackedBodypart.update(self, dt, image, bInvertColor)

        if (self.params['gui'][self.name]['track']):
            self.update_polarimage()
            self.imgFinal = self.imgRoiFgMaskedPolarCroppedWindowed

    def draw(self, image):
        MotionTrackedBodypart.draw(self, image)
        self.windowPolar.show()
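# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the tracker code above): the saturation
# correction in update_polarimage() pushes each polar pixel toward its column
# mean, weighted by the normalized headroom, i.e. corrected = M + TH*(TF - M).
# Written here with plain numpy; the function name and demo values are invented.
# ----------------------------------------------------------------------------
import numpy as np

def saturation_correction_sketch(imgPolar, imgHeadroomPolar):
    TH = imgHeadroomPolar.astype(np.float32) / 255.0      # 1.0 = far from saturation, 0.0 = saturated.
    M = np.mean(imgPolar, 0).astype(np.float32)            # One mean per angle column.
    TF = imgPolar.astype(np.float32)
    # Saturated pixels collapse toward the column mean; unsaturated pixels pass through unchanged.
    return M + TH * (TF - M)

# e.g. a pixel with zero headroom is replaced by its column mean, while a pixel
# with full headroom (255) keeps its original value.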
class EdgeTrackerByHoughTransform(MotionTrackedBodypart): def __init__(self, name=None, params={}, color='white', bEqualizeHist=False): MotionTrackedBodypart.__init__(self, name, params, color, bEqualizeHist) self.name = name self.detector = EdgeDetectorByHoughTransform(self.name, params) self.state = MsgState() self.windowEdges = ImageWindow(False, self.name+'Edges') self.set_params(params) # Services, for live intensities plots via live_wing_histograms.py self.service_trackerdata = rospy.Service('trackerdata_'+self.name, SrvTrackerdata, self.serve_trackerdata_callback) # set_params() # Set the given params dict into this object. # def set_params(self, params): MotionTrackedBodypart.set_params(self, params) self.imgRoiBackground = None self.iCount = 0 self.state.intensity = 0.0 self.state.angles = [] self.state.gradients = [] # Compute the 'handedness' of the head/abdomen and wing/wing axes. self.sense specifies the direction of positive angles. matAxes = np.array([[self.params['gui']['head']['hinge']['x']-self.params['gui']['abdomen']['hinge']['x'], self.params['gui']['head']['hinge']['y']-self.params['gui']['abdomen']['hinge']['y']], [self.params['gui']['right']['hinge']['x']-self.params['gui']['left']['hinge']['x'], self.params['gui']['right']['hinge']['y']-self.params['gui']['left']['hinge']['y']]]) if (self.name in ['left','right']): self.senseAxes = np.sign(np.linalg.det(matAxes)) a = -1 if (self.name=='right') else 1 self.sense = a*self.senseAxes else: self.sense = 1 self.windowEdges.set_enable(self.params['gui']['windows'] and self.params['gui'][self.name]['track']) self.bValidDetector = False # update_state() # def update_state(self): imgNow = self.imgRoiFgMasked if (imgNow is not None): # Pixel position and strength of the edges. (angles, magnitudes) = self.detector.detect(imgNow) self.windowEdges.set_image(self.detector.imgMasked) # Put angle into the bodypart frame. self.state.angles = [] self.state.gradients = [] for i in range(len(angles)): angle = angles[i] magnitude = magnitudes[i] angle_b = self.params['gui'][self.name]['angle_lo'] + angle angle_p = (self.transform_angle_p_from_b(angle_b) + np.pi) % (2*np.pi) - np.pi self.state.angles.append(angle) self.state.gradients.append(magnitude) self.state.intensity = np.mean(self.imgRoiFgMasked)/255.0 # update() # Update all the internals given a foreground camera image. # def update(self, dt, image, bInvertColor): MotionTrackedBodypart.update(self, dt, image, bInvertColor) if (self.params['gui'][self.name]['track']): if (not self.bValidDetector): if (self.mask.xMin is not None): self.detector.set_params(self.name, self.params, self.sense, self.angleBodypart_i, self.angleBodypart_b, image.shape, self.mask) self.bValidDetector = True self.update_state() # draw() # Draw the outline. # def draw(self, image): MotionTrackedBodypart.draw(self, image) if (self.params['gui'][self.name]['track']): # Draw the major and minor edges alternately, until the max number has been reached. 
bgra = self.bgra for i in range(len(self.state.angles)): angle_b = self.transform_angle_b_from_p(self.state.angles[i]) angle_i = self.transform_angle_i_from_b(angle_b) x0 = self.ptHinge_i[0] + self.params['gui'][self.name]['radius_inner'] * np.cos(angle_i) y0 = self.ptHinge_i[1] + self.params['gui'][self.name]['radius_inner'] * np.sin(angle_i) x1 = self.ptHinge_i[0] + self.params['gui'][self.name]['radius_outer'] * np.cos(angle_i) y1 = self.ptHinge_i[1] + self.params['gui'][self.name]['radius_outer'] * np.sin(angle_i) cv2.line(image, (int(x0),int(y0)), (int(x1),int(y1)), bgra, 1) bgra = tuple(0.5*np.array(bgra)) self.windowEdges.show() def serve_trackerdata_callback(self, request): if (len(self.detector.intensities)>0): title1 = 'Intensities' title2 = 'Diffs' diffs = self.detector.diff #np.zeros(len(self.detector.intensities)) abscissa = np.linspace(self.params['gui'][self.name]['angle_lo'], self.params['gui'][self.name]['angle_hi'], len(self.detector.intensities)) abscissa -= self.angleBodypart_b abscissa *= self.sense markersH = self.state.angles markersV = self.state.gradients#[self.params[self.name]['threshold']] rv = SrvTrackerdataResponse(self.color, title1, title2, abscissa, self.detector.intensities, diffs, markersH, markersV) else: rv = SrvTrackerdataResponse() return rv
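# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the tracker code above): the 'handedness'
# computation in set_params() reduces to the sign of a 2x2 determinant built
# from the head-abdomen and right-left hinge vectors.  The coordinates in the
# usage comment are invented.
# ----------------------------------------------------------------------------
import numpy as np

def axis_sense_sketch(ptHead, ptAbdomen, ptRight, ptLeft, name):
    # Rows are the body axis and the wing axis, in image coordinates.
    matAxes = np.array([[ptHead[0]-ptAbdomen[0], ptHead[1]-ptAbdomen[1]],
                        [ptRight[0]-ptLeft[0],   ptRight[1]-ptLeft[1]]])
    senseAxes = np.sign(np.linalg.det(matAxes))
    a = -1 if (name == 'right') else 1
    return a * senseAxes   # Direction of positive angles for this bodypart.

# e.g. axis_sense_sketch((100,50), (100,150), (150,100), (50,100), 'left') -> 1.0,
# and the same geometry with name='right' -> -1.0.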
class IntensityTrackedBodypart(object): def __init__(self, name=None, params={}, color='white', bEqualizeHist=False): self.name = name self.bEqualizeHist = bEqualizeHist self.bValidMask = False self.bgra = ui.bgra_dict[color] self.bgra_dim = tuple(0.5*np.array(ui.bgra_dict[color])) self.bgra_state = ui.bgra_dict[color]#ui.bgra_dict['red'] self.pixelmax = 255.0 self.shape = (np.inf, np.inf) self.ptCenter_i = np.array([0,0]) self.roi = None self.mask = Struct() self.mask.img = None self.mask.sum = 1.0 self.dt = np.inf self.params = {} self.handles = {'center':ui.Handle(np.array([0,0]), self.bgra, name='center'), 'radius1':ui.Handle(np.array([0,0]), self.bgra, name='radius1'), 'radius2':ui.Handle(np.array([0,0]), self.bgra, name='radius2') } # Region of interest images. self.imgFullBackground = None self.imgRoiBackground = None self.imgRoi = None # Untouched roi image. self.imgRoiFg = None # Background subtracted. self.imgRoiFgMasked = None # Extra windows. self.windowBG = ImageWindow(False, self.name+'BG') self.windowFG = ImageWindow(False, self.name+'FG') self.windowMask = ImageWindow(gbShowMasks, self.name+'Mask') # set_params() # Set the given params dict into this object, and cache a few values. # def set_params(self, params): self.params = params self.rc_background = self.params['rc_background'] self.ptCenter_i = np.array([self.params['gui'][self.name]['center']['x'], self.params['gui'][self.name]['center']['y']]) self.cosAngle = np.cos(self.params['gui'][self.name]['angle']) self.sinAngle = np.sin(self.params['gui'][self.name]['angle']) # Turn on/off the extra windows. self.windowBG.set_enable(self.params['gui']['windows'] and self.params['gui'][self.name]['track'] and self.params['gui'][self.name]['subtract_bg']) self.windowFG.set_enable(self.params['gui']['windows'] and self.params['gui'][self.name]['track']) # Refresh the handle points. self.update_handle_points() # create_mask() # Create elliptical wedge masks, and window functions. # def create_mask(self, shape): x = int(self.params['gui'][self.name]['center']['x']) y = int(self.params['gui'][self.name]['center']['y']) r1 = int(self.params['gui'][self.name]['radius1']) r2 = int(self.params['gui'][self.name]['radius2']) angle = self.params['gui'][self.name]['angle'] bgra = ui.bgra_dict['white'] # Create the mask. img = np.zeros(shape, dtype=np.uint8) cv2.ellipse(img, (x, y), (r1, r2), int(np.rad2deg(angle)), 0, 360, bgra, cv.CV_FILLED) self.windowMask.set_image(img) # Find the ROI of the mask. xSum = np.sum(img, 0) ySum = np.sum(img, 1) xMask = np.where(xSum>0)[0] yMask = np.where(ySum>0)[0] if (len(xMask)>0) and (len(yMask)>0): # Dilate with a border. xMin = np.where(xSum>0)[0][0] xMax = np.where(xSum>0)[0][-1] + 1 yMin = np.where(ySum>0)[0][0] yMax = np.where(ySum>0)[0][-1] + 1 # Clip border to image edges. self.mask.xMin = np.max([0,xMin]) self.mask.yMin = np.max([0,yMin]) self.mask.xMax = np.min([xMax, shape[1]-1]) self.mask.yMax = np.min([yMax, shape[0]-1]) self.roi = np.array([self.mask.xMin, self.mask.yMin, self.mask.xMax, self.mask.yMax]) self.mask.img = img[self.mask.yMin:self.mask.yMax, self.mask.xMin:self.mask.xMax] self.mask.sum = np.sum(self.mask.img).astype(np.float32) self.bValidMask = True else: self.mask.xMin = None self.mask.yMin = None self.mask.xMax = None self.mask.yMax = None rospy.logwarn('%s: Empty mask.' % self.name) # set_background() # Set the given image as the background image. 
# def set_background(self, image): self.imgFullBackground = image.astype(np.float32) self.imgRoiBackground = None # invert_background() # Invert the color of the background image. # def invert_background(self): if (self.imgRoiBackground is not None): self.imgRoiBackground = 255-self.imgRoiBackground def update_background(self): alphaBackground = 1.0 - np.exp(-self.dt / self.rc_background) if (self.imgRoiBackground is not None): if (self.imgRoi is not None): if (self.imgRoiBackground.size==self.imgRoi.size): cv2.accumulateWeighted(self.imgRoi.astype(np.float32), self.imgRoiBackground, alphaBackground) else: self.imgRoiBackground = None self.imgRoi = None else: if (self.imgFullBackground is not None) and (self.roi is not None): self.imgRoiBackground = copy.deepcopy(self.imgFullBackground[self.roi[1]:self.roi[3], self.roi[0]:self.roi[2]]) self.windowBG.set_image(self.imgRoiBackground) def update_roi(self, image, bInvertColor): self.shape = image.shape # Extract the ROI images. if (self.roi is not None): self.imgRoi = copy.deepcopy(image[self.roi[1]:self.roi[3], self.roi[0]:self.roi[2]]) # Background Subtraction. if (bInvertColor): self.imgRoiFg = 255-self.imgRoi else: self.imgRoiFg = self.imgRoi if (self.params['gui'][self.name]['subtract_bg']): if (self.imgRoiBackground is not None): if (self.imgRoiBackground.shape==self.imgRoiFg.shape): if (bInvertColor): self.imgRoiFg = cv2.absdiff(self.imgRoiFg, 255-self.imgRoiBackground.astype(np.uint8)) else: self.imgRoiFg = cv2.absdiff(self.imgRoiFg, self.imgRoiBackground.astype(np.uint8)) # Equalize the brightness/contrast. if (self.bEqualizeHist): if (self.imgRoiFg is not None): self.imgRoiFg -= np.min(self.imgRoiFg) max2 = np.max(self.imgRoiFg) self.imgRoiFg *= (255.0/float(max2)) # Apply the mask. if (self.mask.img is not None): self.imgRoiFgMasked = cv2.bitwise_and(self.imgRoiFg, self.mask.img) self.windowFG.set_image(self.imgRoiFgMasked) # update_handle_points() # Update the dictionary of handle point names and locations. # Compute the various handle points. # def update_handle_points (self): x = self.params['gui'][self.name]['center']['x'] y = self.params['gui'][self.name]['center']['y'] radius1 = self.params['gui'][self.name]['radius1'] radius2 = self.params['gui'][self.name]['radius2'] angle = self.params['gui'][self.name]['angle'] self.handles['center'].pt = np.array([x, y]) self.handles['radius1'].pt = np.array([x, y]) + radius1 * np.array([self.cosAngle, self.sinAngle]) self.handles['radius2'].pt = np.array([x, y]) + radius2 * np.array([self.sinAngle,-self.cosAngle]) # update() # Update all the internals given a foreground camera image. # def update(self, dt, image, bInvertColor): self.iCount += 1 self.dt = dt if (self.params['gui'][self.name]['track']): if (not self.bValidMask): self.create_mask(image.shape) self.bValidMask = True self.update_background() self.update_roi(image, bInvertColor) # hit_object() # Get the UI object, if any, that the mouse is on. def hit_object(self, ptMouse): tag = None # Check for handle hits. if (self.params['gui'][self.name]['track']): for tagHandle,handle in self.handles.iteritems(): if (handle.hit_test(ptMouse)): tag = tagHandle break return (self.name, tag) def draw_handles(self, image): # Draw all handle points, or only just the hinge handle. if (self.params['gui'][self.name]['track']): for tagHandle,handle in self.handles.iteritems(): handle.draw(image) # draw() # Draw the outline. 
# def draw(self, image): self.draw_handles(image) if (self.params['gui'][self.name]['track']): x = int(self.params['gui'][self.name]['center']['x']) y = int(self.params['gui'][self.name]['center']['y']) radius1 = int(self.params['gui'][self.name]['radius1']) radius2 = int(self.params['gui'][self.name]['radius2']) # Draw the outer arc. cv2.ellipse(image, (x, y), (radius1, radius2), np.rad2deg(self.params['gui'][self.name]['angle']), 0, 360, self.bgra, 1) # Show the extra windows. self.windowBG.show() self.windowFG.show() self.windowMask.show()
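# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the tracker code above): update_background()
# is a first-order low-pass filter.  cv2.accumulateWeighted(src, dst, alpha)
# computes dst = (1-alpha)*dst + alpha*src, and alpha = 1 - exp(-dt/rc_background)
# makes the time constant rc_background seconds regardless of frame rate.
# Plain numpy version for clarity; the function name is invented.
# ----------------------------------------------------------------------------
import numpy as np

def update_background_sketch(imgBackground, imgFrame, dt, rc_background):
    alpha = 1.0 - np.exp(-dt / rc_background)
    return (1.0 - alpha) * imgBackground + alpha * imgFrame.astype(np.float32)

# e.g. with dt=0.01 s and rc_background=1000 s, alpha is about 1e-5, so the
# background adapts very slowly to changes in the scene.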
class Fly(object): def __init__(self, params={}): self.nodename = rospy.get_name().rstrip('/') EdgeTracker = EdgeTrackerByIntensityProfile #EdgeTracker = EdgeTrackerByHoughTransform # Create the body axis tracker. self.axis = AxisTracker(name='axis', params=params, color='yellow') # Create the head tracker. if (params['head']['tracker'] == 'area'): self.head = AreaTracker(name='head', params=params, color='cyan', bEqualizeHist=False) elif (params['head']['tracker'] == 'edge'): self.head = EdgeTracker(name='head', params=params, color='cyan', bEqualizeHist=False) elif (params['head']['tracker'] == 'tip'): self.head = TipTracker(name='head', params=params, color='cyan', bEqualizeHist=False) elif (params['head']['tracker'] == 'intensity'): self.head = IntensityTracker(name='head', params=params, color='cyan', bEqualizeHist=False) else: rospy.logwarn('Head tracker parameter must be one of [' 'area' ', ' 'edge' ', ' 'tip' ', ' 'intensity' ']') # Create the abdomen tracker. if (params['abdomen']['tracker'] == 'area'): self.abdomen = AreaTracker(name='abdomen', params=params, color='magenta', bEqualizeHist=False) elif (params['abdomen']['tracker'] == 'edge'): self.abdomen = EdgeTracker(name='abdomen', params=params, color='magenta', bEqualizeHist=False) elif (params['abdomen']['tracker'] == 'tip'): self.abdomen = TipTracker(name='abdomen', params=params, color='magenta', bEqualizeHist=False) elif (params['abdomen']['tracker'] == 'intensity'): self.abdomen = IntensityTracker(name='abdomen', params=params, color='magenta', bEqualizeHist=False) else: rospy.logwarn('Abdomen tracker parameter must be one of [' 'area' ', ' 'edge' ', ' 'tip' ', ' 'intensity' ']') # Create the right wing tracker. if (params['right']['tracker'] == 'area'): self.right = AreaTracker(name='right', params=params, color='red', bEqualizeHist=False) elif (params['right']['tracker'] == 'edge'): self.right = EdgeTracker(name='right', params=params, color='red', bEqualizeHist=False) elif (params['right']['tracker'] == 'tip'): self.right = TipTracker(name='right', params=params, color='red', bEqualizeHist=False) elif (params['right']['tracker'] == 'intensity'): self.right = IntensityTracker(name='right', params=params, color='red', bEqualizeHist=False) else: rospy.logwarn('Right wing tracker parameter must be one of [' 'area' ', ' 'edge' ', ' 'tip' ', ' 'intensity' ']') # Create the left wing tracker. if (params['left']['tracker'] == 'area'): self.left = AreaTracker(name='left', params=params, color='green', bEqualizeHist=False) elif (params['left']['tracker'] == 'edge'): self.left = EdgeTracker(name='left', params=params, color='green', bEqualizeHist=False) elif (params['left']['tracker'] == 'tip'): self.left = TipTracker(name='left', params=params, color='green', bEqualizeHist=False) elif (params['left']['tracker'] == 'intensity'): self.left = IntensityTracker(name='left', params=params, color='green', bEqualizeHist=False) else: rospy.logwarn('Left wing tracker parameter must be one of [' 'area' ', ' 'edge' ', ' 'tip' ', ' 'intensity' ']') # Create the aux tracker. 
self.aux = IntensityTracker(name='aux', params=params, color='yellow', bEqualizeHist=False) self.windowInvertColorArea = ImageWindow(False, 'InvertColorArea') self.bgra_body = ui.bgra_dict['light_gray'] self.ptBodyIndicator1 = None self.ptBodyIndicator2 = None self.bInvertColor = False self.bInvertColorAuto = True self.iCount = 0 self.stampPrev = None self.stampPrevAlt = None self.stamp = rospy.Time(0) self.pubFlystate = rospy.Publisher(self.nodename + '/flystate', MsgFlystate, queue_size=100) def set_params(self, params): self.params = params self.axis.set_params(params) self.head.set_params(params) self.abdomen.set_params(params) self.left.set_params(params) self.right.set_params(params) self.aux.set_params(params) pt1 = [ params['gui']['head']['hinge']['x'], params['gui']['head']['hinge']['y'] ] pt2 = [ params['gui']['abdomen']['hinge']['x'], params['gui']['abdomen']['hinge']['y'] ] pt3 = [ params['gui']['left']['hinge']['x'], params['gui']['left']['hinge']['y'] ] pt4 = [ params['gui']['right']['hinge']['x'], params['gui']['right']['hinge']['y'] ] self.ptBodyCenter_i = imageprocessing.get_intersection( pt1, pt2, pt3, pt4) r = max(params['gui']['left']['radius_outer'], params['gui']['right']['radius_outer']) self.angleBody_i = self.get_bodyangle_i() self.ptBodyIndicator1 = tuple( (self.ptBodyCenter_i + r * np.array([np.cos(self.angleBody_i), np.sin(self.angleBody_i)])).astype(int)) self.ptBodyIndicator2 = tuple( (self.ptBodyCenter_i - r * np.array([np.cos(self.angleBody_i), np.sin(self.angleBody_i)])).astype(int)) # Radius of an area approximately where the thorax would be. self.rInvertColorArea = np.linalg.norm( np.array([ params['gui']['head']['hinge']['x'], params['gui']['head'] ['hinge']['y'] ]) - np.array([ params['gui']['abdomen']['hinge']['x'], params['gui'] ['abdomen']['hinge']['y'] ])) / 2.0 self.bInvertColorValid = False def create_masks(self, shapeImage): if (self.params['gui']['axis']['track']): if (not self.axis.bValidMask): self.axis.create_mask(shapeImage) self.axis.bValidMask = True if (self.params['gui']['head']['track']): if (not self.head.bValidMask): self.head.create_mask(shapeImage) self.head.bValidMask = True if (self.params['gui']['abdomen']['track']): if (not self.abdomen.bValidMask): self.abdomen.create_mask(shapeImage) self.abdomen.bValidMask = True if (self.params['gui']['right']['track']): if (not self.right.bValidMask): self.right.create_mask(shapeImage) self.right.bValidMask = True if (self.params['gui']['left']['track']): if (not self.left.bValidMask): self.left.create_mask(shapeImage) self.left.bValidMask = True if (self.params['gui']['aux']['track']): if (not self.aux.bValidMask): self.aux.create_mask(shapeImage) self.aux.bValidMask = True def get_bodyangle_i(self): angle_i = imageprocessing.get_angle_from_points_i( self.abdomen.ptHinge_i, self.head.ptHinge_i) #angleBody_i = (angle_i + np.pi) % (2.0*np.pi) - np.pi angleBody_i = angle_i return angleBody_i # Calculate what we think the bInvertColor flag should be to make white-on-black. def get_invertcolor(self, image): # Get a roi around the body center. 
xMin = int( max(0, self.ptBodyCenter_i[0] - int(0.75 * self.rInvertColorArea))) yMin = int( max(0, self.ptBodyCenter_i[1] - int(0.75 * self.rInvertColorArea))) xMax = int( min(self.ptBodyCenter_i[0] + int(0.75 * self.rInvertColorArea), image.shape[1] - 1)) yMax = int( min(self.ptBodyCenter_i[1] + int(0.75 * self.rInvertColorArea), image.shape[0] - 1)) imgInvertColorArea = image[yMin:yMax, xMin:xMax] self.windowInvertColorArea.set_image(imgInvertColorArea) # Midpoint between darkest & lightest colors. threshold = np.mean(image) #rospy.logwarn((np.min(image), np.median(image), np.mean(image), np.max(image), np.mean(imgInvertColorArea))) # If the roi is too dark, then set bInvertColor. if (np.mean(imgInvertColorArea) <= threshold): bInvertColor = True else: bInvertColor = False return bInvertColor def set_background(self, image): self.head.set_background(image) self.abdomen.set_background(image) self.left.set_background(image) self.right.set_background(image) self.aux.set_background(image) self.axis.set_background(image) def update_handle_points(self): self.head.update_handle_points() self.abdomen.update_handle_points() self.left.update_handle_points() self.right.update_handle_points() self.aux.update_handle_points() self.axis.update_handle_points() def update(self, header=None, image=None): if (image is not None): self.header = header if (not self.bInvertColorValid) and (self.bInvertColorAuto): self.bInvertColor = self.get_invertcolor(image) self.bInvertColorValid = True # Get the dt. Keep track of both the camera timestamp, and the now() timestamp, # and use the now() timestamp only if the camera timestamp isn't changing. self.stamp = self.header.stamp stampAlt = rospy.Time.now() if (self.stampPrev is not None): dt = max(0.0, (self.header.stamp - self.stampPrev).to_sec()) # If the camera is not giving good timestamps, then use our own clock. if (dt == 0.0): dt = max(0.0, (stampAlt - self.stampPrevAlt).to_sec()) self.stamp = stampAlt # If time wrapped, then just assume a value. if (dt == 0.0): dt = 1.0 else: dt = np.inf self.stampPrev = self.header.stamp self.stampPrevAlt = stampAlt self.head.update(dt, image, self.bInvertColor) self.abdomen.update(dt, image, self.bInvertColor) self.left.update(dt, image, self.bInvertColor) self.right.update(dt, image, self.bInvertColor) self.aux.update(dt, image, self.bInvertColor) self.axis.update(dt, image, self.bInvertColor) def draw(self, image): # Draw line to indicate the body axis. cv2.line(image, self.ptBodyIndicator1, self.ptBodyIndicator2, self.bgra_body, 1) # Draw a line longer than just head-to-abdomen. self.axis.draw(image) self.head.draw(image) self.abdomen.draw(image) self.left.draw(image) self.right.draw(image) self.aux.draw(image) self.windowInvertColorArea.show() def publish(self): flystate = MsgFlystate() flystate.header = Header(seq=self.iCount, stamp=self.stamp, frame_id='Fly') if (self.params['gui']['head']['track']): flystate.head = self.head.state else: flystate.head = MsgState() if (self.params['gui']['abdomen']['track']): flystate.abdomen = self.abdomen.state else: flystate.abdomen = MsgState() if (self.params['gui']['left']['track']): flystate.left = self.left.state else: flystate.left = MsgState() if (self.params['gui']['right']['track']): flystate.right = self.right.state else: flystate.right = MsgState() if (self.params['gui']['aux']['track']): flystate.aux = self.aux.state else: flystate.aux = MsgState() self.iCount += 1 self.pubFlystate.publish(flystate)
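# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the tracker code above): the dt bookkeeping
# in Fly.update(), rewritten with plain float seconds instead of rospy.Time.
# If the camera timestamp is not advancing, fall back to the local clock; if
# both differences are zero (e.g. after a time wrap), assume 1.0 second.
# The function name and example values are invented.
# ----------------------------------------------------------------------------
def compute_dt_sketch(stampCamera, stampCameraPrev, stampLocal, stampLocalPrev):
    if (stampCameraPrev is None):
        return float('inf')                          # First frame: no dt yet.
    dt = max(0.0, stampCamera - stampCameraPrev)     # Prefer the camera timestamp.
    if (dt == 0.0):
        dt = max(0.0, stampLocal - stampLocalPrev)   # Fall back to the local clock.
    if (dt == 0.0):
        dt = 1.0                                     # Last resort if time appears to have wrapped.
    return dt

# e.g. compute_dt_sketch(5.00, 5.00, 100.02, 100.00) -> ~0.02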
class AreaTracker(MotionTrackedBodypartPolar):
    def __init__(self, name=None, params={}, color='white', bEqualizeHist=False):
        MotionTrackedBodypartPolar.__init__(self, name, params, color, bEqualizeHist)
        self.phasecorr = imageprocessing.PhaseCorrelation()
        self.state = MsgState()          # In the bodypart frame.
        self.stateOrigin_p = MsgState()  # In the bodypart frame.
        self.stateLo_p = MsgState()      # In the bodypart frame.
        self.stateHi_p = MsgState()      # In the bodypart frame.
        self.imgComparison = None
        self.windowStabilized = ImageWindow(False, self.name+'Stable')
        self.windowComparison = ImageWindow(True, self.name+'Comparison')
        self.set_params(params)

    # set_params()
    # Set the given params dict into this object.
    #
    def set_params(self, params):
        MotionTrackedBodypartPolar.set_params(self, params)

        self.imgRoiBackground = None
        self.imgComparison = None
        self.windowComparison.set_image(self.imgComparison)
        self.iCount = 0

        # Compute the 'handedness' of the head/abdomen and wing/wing axes.  self.sense specifies the direction of positive angles.
        matAxes = np.array([[self.params['gui']['head']['hinge']['x'] - self.params['gui']['abdomen']['hinge']['x'],
                             self.params['gui']['head']['hinge']['y'] - self.params['gui']['abdomen']['hinge']['y']],
                            [self.params['gui']['right']['hinge']['x'] - self.params['gui']['left']['hinge']['x'],
                             self.params['gui']['right']['hinge']['y'] - self.params['gui']['left']['hinge']['y']]])
        if (self.name in ['left', 'right']):
            self.senseAxes = np.sign(np.linalg.det(matAxes))
            a = -1 if (self.name == 'right') else 1
            self.sense = a * self.senseAxes
        else:
            self.sense = 1

        self.stateOrigin_p.intensity = 0.0
        self.stateOrigin_p.angles = [self.transform_angle_p_from_b((self.params['gui'][self.name]['angle_hi'] + self.params['gui'][self.name]['angle_lo'])/2.0)]  # In the bodypart frame.
        self.stateOrigin_p.angles[0] = ((self.stateOrigin_p.angles[0] + np.pi) % (2*np.pi)) - np.pi
        self.stateOrigin_p.radii = [(self.params['gui'][self.name]['radius_outer'] + self.params['gui'][self.name]['radius_inner'])/2.0]

        self.state.intensity = 0.0
        self.state.angles = [0.0]
        self.state.radii = [0.0]

        self.stateLo_p.intensity = np.inf
        self.stateLo_p.angles = [4.0*np.pi]
        self.stateLo_p.radii = [np.inf]

        self.stateHi_p.intensity = -np.inf
        self.stateHi_p.angles = [-4.0*np.pi]
        self.stateHi_p.radii = [-np.inf]

        self.windowStabilized.set_enable(self.params['gui']['windows'] and self.params['gui'][self.name]['track'] and self.params['gui'][self.name]['stabilize'])
        self.windowComparison.set_enable(self.params['gui']['windows'] and self.params['gui'][self.name]['track'])

    # update_state()
    # Update the bodypart translation & rotation.
    #
    def update_state(self):
        imgNow = self.imgRoiFgMaskedPolarCroppedWindowed

        if (imgNow is not None) and (self.imgComparison is not None):
            # Get the rotation & expansion between images.
            (rShift, aShift) = self.phasecorr.get_shift(imgNow, self.imgComparison)

            # Convert polar pixel shifts to radians rotation & pixel expansion.
            angleOffset = self.sense * aShift * (self.params['gui'][self.name]['angle_hi'] - self.params['gui'][self.name]['angle_lo']) / float(imgNow.shape[1])
            radiusOffset = rShift
            self.state.angles = [(self.stateOrigin_p.angles[0] + angleOffset)]
            self.state.angles = [((self.state.angles[0] + np.pi) % (2*np.pi)) - np.pi]
            self.state.radii = [self.stateOrigin_p.radii[0] + radiusOffset]

            # Track the min/max values seen so far.
            self.stateLo_p.angles = [min(self.stateLo_p.angles[0], self.state.angles[0])]
            self.stateHi_p.angles = [max(self.stateHi_p.angles[0], self.state.angles[0])]
            self.stateLo_p.radii = [min(self.stateLo_p.radii[0], self.state.radii[0])]
            self.stateHi_p.radii = [max(self.stateHi_p.radii[0], self.state.radii[0])]

            # Control the (angle,radius) origin to be at the midpoint of the lo and hi angles.
            # Whenever an image appears that is closer to the midpoint, take that image as the
            # new origin image, thus driving the origin image toward the midpoint image over time.
            if (self.params[self.name]['autozero']) and (self.iCount > 100):
                # If we see a current angle that is closer to the mask midpoint than the ref angle
                # (defined as the midpoint of the movement range), then take a new comparison image,
                # and shift the movement range (so that the ref angle becomes the current angle).
                angleRef_p = (self.stateHi_p.angles[0] + self.stateLo_p.angles[0])/2.0
                if (angleRef_p < self.state.angles[0] < self.stateOrigin_p.angles[0]) or (self.stateOrigin_p.angles[0] < self.state.angles[0] < angleRef_p):
                    self.imgComparison = imgNow
                    self.windowComparison.set_image(self.imgComparison)

                    # Converge the origin to zero.
                    self.stateLo_p.angles[0] -= angleOffset
                    self.stateHi_p.angles[0] -= angleOffset
                    self.state.angles[0] = self.stateOrigin_p.angles[0]

            # Stabilized image.
            if (self.params['gui'][self.name]['stabilize']):
                # Stabilize the polar image.
                #size = (self.imgRoiFgMaskedPolar.shape[1], self.imgRoiFgMaskedPolar.shape[0])
                #center = (self.imgRoiFgMaskedPolar.shape[1]/2.0+aShift, self.imgRoiFgMaskedPolar.shape[0]/2.0+rShift)
                #self.imgStabilized = cv2.getRectSubPix(self.imgRoiFgMaskedPolar, size, center)

                # Stabilize the bodypart in the entire camera image.
                center = (self.params['gui'][self.name]['hinge']['x'], self.params['gui'][self.name]['hinge']['y'])
                size = (self.image.shape[1], self.image.shape[0])

                # Stabilize the rotation.
                T = cv2.getRotationMatrix2D(center, np.rad2deg(self.state.angles[0]), 1.0)

                # Stabilize the expansion.
                T[0,2] -= rShift * self.cosAngleBody_i
                T[1,2] -= rShift * self.sinAngleBody_i

                self.imgStabilized = cv2.warpAffine(self.image, T, size)
                self.windowStabilized.set_image(self.imgStabilized)

        if (self.imgRoiFgMasked is not None):
            self.state.intensity = float(np.sum(self.imgRoiFgMasked) / self.mask.sum)
        else:
            self.state.intensity = 0.0

    # update()
    # Update all the internals given a foreground camera image.
    #
    def update(self, dt, image, bInvertColor):
        MotionTrackedBodypartPolar.update(self, dt, image, bInvertColor)

        if (self.imgComparison is None) and (self.iCount > 1):
            self.imgComparison = self.imgRoiFgMaskedPolarCroppedWindowed
            self.windowComparison.set_image(self.imgComparison)

        if (self.params['gui'][self.name]['track']):
            self.update_state()

    # draw()
    # Draw the outline.
    #
    def draw(self, image):
        MotionTrackedBodypartPolar.draw(self, image)

        if (self.params['gui'][self.name]['track']):
            # Draw the bodypart state position.
            angle = self.state.angles[0] * self.sense
            pt = self.R.dot([self.state.radii[0]*np.cos(angle), self.state.radii[0]*np.sin(angle)])
            ptState_i = imageprocessing.clip_pt((int(pt[0] + self.params['gui'][self.name]['hinge']['x']),
                                                 int(pt[1] + self.params['gui'][self.name]['hinge']['y'])), image.shape)

            cv2.ellipse(image, ptState_i, (2,2), 0, 0, 360, self.bgra_state, 1)

            # Set a pixel at the min/max state positions.
            try:
                angle = self.stateLo_p.angles[0] * self.sense
                pt = self.R.dot([self.state.radii[0]*np.cos(angle), self.state.radii[0]*np.sin(angle)])
                ptStateLo_i = imageprocessing.clip_pt((int(pt[0] + self.params['gui'][self.name]['hinge']['x']),
                                                       int(pt[1] + self.params['gui'][self.name]['hinge']['y'])), image.shape)

                angle = self.stateHi_p.angles[0] * self.sense
                pt = self.R.dot([self.state.radii[0]*np.cos(angle), self.state.radii[0]*np.sin(angle)])
                ptStateHi_i = imageprocessing.clip_pt((int(pt[0] + self.params['gui'][self.name]['hinge']['x']),
                                                       int(pt[1] + self.params['gui'][self.name]['hinge']['y'])), image.shape)

                # Set the pixels (color-inverted so they remain visible).
                image[ptStateLo_i[1]][ptStateLo_i[0]] = np.array([255,255,255]) - image[ptStateLo_i[1]][ptStateLo_i[0]]
                image[ptStateHi_i[1]][ptStateHi_i[0]] = np.array([255,255,255]) - image[ptStateHi_i[1]][ptStateHi_i[0]]
            except ValueError:
                pass

            # Draw a line from the hinge to the state point.
            ptHinge_i = imageprocessing.clip_pt((int(self.ptHinge_i[0]), int(self.ptHinge_i[1])), image.shape)
            cv2.line(image, ptHinge_i, ptState_i, self.bgra_state, 1)

        self.windowStabilized.show()
        self.windowComparison.show()
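# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the tracker code above): AreaTracker.update_state()
# converts the horizontal (column) shift reported by phase correlation on the polar
# image into a rotation, because the polar image's width spans angle_lo..angle_hi.
# The function name and the numbers in the usage comment are invented.
# ----------------------------------------------------------------------------
def polar_shift_to_angle_sketch(aShift, angle_lo, angle_hi, widthPolar, sense=1):
    # One column of the polar image corresponds to (angle_hi-angle_lo)/widthPolar radians.
    return sense * aShift * (angle_hi - angle_lo) / float(widthPolar)

# e.g. a 5-pixel shift on a 100-pixel-wide polar strip spanning 1.0 rad corresponds
# to a 0.05 rad rotation of the bodypart.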
def __init__(self, params={}): self.nodename = rospy.get_name().rstrip('/') EdgeTracker = EdgeTrackerByIntensityProfile #EdgeTracker = EdgeTrackerByHoughTransform # Create the body axis tracker. self.axis = AxisTracker(name='axis', params=params, color='yellow') # Create the head tracker. if (params['head']['tracker']=='area'): self.head = AreaTracker(name='head', params=params, color='cyan', bEqualizeHist=False) elif (params['head']['tracker']=='edge'): self.head = EdgeTracker(name='head', params=params, color='cyan', bEqualizeHist=False) elif (params['head']['tracker']=='tip'): self.head = TipTracker(name='head', params=params, color='cyan', bEqualizeHist=False) elif (params['head']['tracker']=='intensity'): self.head = IntensityTracker(name='head', params=params, color='cyan', bEqualizeHist=False) else: rospy.logwarn('Head tracker parameter must be one of [''area'', ''edge'', ''tip'', ''intensity'']') # Create the abdomen tracker. if (params['abdomen']['tracker']=='area'): self.abdomen = AreaTracker(name='abdomen', params=params, color='magenta', bEqualizeHist=False) elif (params['abdomen']['tracker']=='edge'): self.abdomen = EdgeTracker(name='abdomen', params=params, color='magenta', bEqualizeHist=False) elif (params['abdomen']['tracker']=='tip'): self.abdomen = TipTracker(name='abdomen', params=params, color='magenta', bEqualizeHist=False) elif (params['abdomen']['tracker']=='intensity'): self.abdomen = IntensityTracker(name='abdomen', params=params, color='magenta', bEqualizeHist=False) else: rospy.logwarn('Abdomen tracker parameter must be one of [''area'', ''edge'', ''tip'', ''intensity'']') # Create the right wing tracker. if (params['right']['tracker']=='area'): self.right = AreaTracker(name='right', params=params, color='red', bEqualizeHist=False) elif (params['right']['tracker']=='edge'): self.right = EdgeTracker(name='right', params=params, color='red', bEqualizeHist=False) elif (params['right']['tracker']=='tip'): self.right = TipTracker(name='right', params=params, color='red', bEqualizeHist=False) elif (params['right']['tracker']=='intensity'): self.right = IntensityTracker(name='right', params=params, color='red', bEqualizeHist=False) else: rospy.logwarn('Right wing tracker parameter must be one of [''area'', ''edge'', ''tip'', ''intensity'']') # Create the left wing tracker. if (params['left']['tracker']=='area'): self.left = AreaTracker(name='left', params=params, color='green', bEqualizeHist=False) elif (params['left']['tracker']=='edge'): self.left = EdgeTracker(name='left', params=params, color='green', bEqualizeHist=False) elif (params['left']['tracker']=='tip'): self.left = TipTracker(name='left', params=params, color='green', bEqualizeHist=False) elif (params['left']['tracker']=='intensity'): self.left = IntensityTracker(name='left', params=params, color='green', bEqualizeHist=False) else: rospy.logwarn('Left wing tracker parameter must be one of [''area'', ''edge'', ''tip'', ''intensity'']') # Create the aux tracker. self.aux = IntensityTracker(name='aux', params=params, color='yellow', bEqualizeHist=False) self.windowInvertColorArea = ImageWindow(False, 'InvertColorArea') self.bgra_body = ui.bgra_dict['light_gray'] self.ptBodyIndicator1 = None self.ptBodyIndicator2 = None self.bInvertColor = False self.bInvertColorAuto = True self.iCount = 0 self.stampPrev = None self.stampPrevAlt = None self.stamp = rospy.Time(0) self.pubFlystate = rospy.Publisher(self.nodename+'/flystate', MsgFlystate, queue_size=100)
class MotionTrackedBodypartPolar(MotionTrackedBodypart):
    def __init__(self, name=None, params={}, color='white', bEqualizeHist=False):
        MotionTrackedBodypart.__init__(self, name, params, color, bEqualizeHist)

        self.polartransforms = imageprocessing.PolarTransforms()
        self.create_wfn = imageprocessing.WindowFunctions().create_tukey
        self.wfnRoiMaskedPolarCropped = None

        self.imgRoiFgMaskedPolar = None
        self.imgRoiFgMaskedPolarCropped = None
        self.imgRoiFgMaskedPolarCroppedWindowed = None
        self.imgFinal = None  # The image to use for tracking.
        self.imgHeadroomPolar = None

        self.windowPolar = ImageWindow(False, self.name + 'Polar')


    def set_params(self, params):
        MotionTrackedBodypart.set_params(self, params)
        self.windowPolar.set_enable(self.params['gui']['windows'] and self.params['gui'][self.name]['track'])


    def create_mask(self, shape):
        MotionTrackedBodypart.create_mask(self, shape)
        self.wfnRoiMaskedPolarCropped = None

        if (self.mask.xMin is not None):
            # Hinge location in the roi frame.
            self.i_0 = self.params['gui'][self.name]['hinge']['y'] - self.roi[1]
            self.j_0 = self.params['gui'][self.name]['hinge']['x'] - self.roi[0]

            # Args for the ellipse calls.
            x = int(self.params['gui'][self.name]['hinge']['x'])
            y = int(self.params['gui'][self.name]['hinge']['y'])
            r_outer = int(np.ceil(self.params['gui'][self.name]['radius_outer']))
            r_inner = int(np.floor(self.params['gui'][self.name]['radius_inner'])) - 1
            hi = int(np.ceil(np.rad2deg(self.angle_hi_i)))
            lo = int(np.floor(np.rad2deg(self.angle_lo_i)))

            # Find where the mask might be clipped.  First, draw an unclipped ellipse.
            delta = 1
            ptsUnclipped = cv2.ellipse2Poly((x, y), (r_outer, r_outer), 0, hi, lo, delta)
            ptsUnclipped = np.append(ptsUnclipped, cv2.ellipse2Poly((x, y), (r_inner, r_inner), 0, hi, lo, delta), 0)
            #ptsUnclipped = np.append(ptsUnclipped, [list of line1 pixels connecting the two arcs], 0)
            #ptsUnclipped = np.append(ptsUnclipped, [list of line2 pixels connecting the two arcs], 0)

            # These are the unclipped locations.
            minUnclipped = ptsUnclipped.min(0)
            maxUnclipped = ptsUnclipped.max(0)
            xMinUnclipped = minUnclipped[0]
            yMinUnclipped = minUnclipped[1]
            xMaxUnclipped = maxUnclipped[0] + 1
            yMaxUnclipped = maxUnclipped[1] + 1

            # Compare unclipped with the as-drawn locations.
            xClip0 = self.mask.xMin - xMinUnclipped
            yClip0 = self.mask.yMin - yMinUnclipped
            xClip1 = xMaxUnclipped - self.mask.xMax
            yClip1 = yMaxUnclipped - self.mask.yMax
            roiClipped = np.array([xClip0, yClip0, xClip1, yClip1])

            (i_n, j_n) = shape[:2]

            # Determine how much of the bottom of the polar image to trim off (i.e. rClip),
            # based on whether the ellipse is partially off-image.
            (rClip0, rClip1, rClip2, rClip3) = (1.0, 1.0, 1.0, 1.0)
            if (roiClipped[0] > 0):  # Left
                rClip0 = 1.0 - (float(roiClipped[0]) / float(r_outer - r_inner))  # self.j_0
            if (roiClipped[1] > 0):  # Top
                rClip1 = 1.0 - (float(roiClipped[1]) / float(r_outer - r_inner))  # self.i_0
            if (roiClipped[2] > 0):  # Right
                rClip2 = 1.0 - (float(roiClipped[2]) / float(r_outer - r_inner))  # j_n - self.j_0
            if (roiClipped[3] > 0):  # Bottom
                rClip3 = 1.0 - (float(roiClipped[3]) / float(r_outer - r_inner))  # i_n - self.i_0

            self.rClip = np.min([rClip0, rClip1, rClip2, rClip3])
    # End create_mask()


    def update_polarimage(self):
        if (self.imgRoiFgMasked is not None):
            theta_0a = self.angle_lo_i - self.angleBodypart_i
            theta_1a = self.angle_hi_i - self.angleBodypart_i

            radius_mid = (self.params['gui'][self.name]['radius_outer'] + self.params['gui'][self.name]['radius_inner']) / 2.0
            dr = (self.params['gui'][self.name]['radius_outer'] - self.params['gui'][self.name]['radius_inner']) / 2.0

            self.imgRoiFgMasked[self.imgRoiFgMasked == 255] = 254  # Make room for the +1, next.
            try:
                self.imgRoiFgMaskedPolar = self.polartransforms.transform_polar_elliptical(
                    self.imgRoiFgMasked + 1,  # +1 so we find cropped pixels, next, rather than merely black pixels.
                    self.i_0,
                    self.j_0,
                    raxial=radius_mid,
                    rortho=radius_mid,
                    dradiusStrip=int(dr),
                    amplifyRho=1.0,
                    rClip=self.rClip,
                    angleEllipse=self.angleBodypart_i,
                    theta_0=min(theta_0a, theta_1a),
                    theta_1=max(theta_0a, theta_1a),
                    amplifyTheta=1.0)
            except imageprocessing.TransformException:
                rospy.logwarn('%s: Mask is outside the image, no points transformed.' % self.name)

            # Find the y value where the black band should be cropped out
            # (but leave at least one raster if the image is all black).
            sumY = np.sum(self.imgRoiFgMaskedPolar, 1)
            iSumY = np.where(sumY == 0)[0]
            if (len(iSumY) > 0):
                iMinY = np.max([1, np.min(iSumY)])
            else:
                iMinY = self.imgRoiFgMaskedPolar.shape[0]

            # Crop the bottom of the images.
            self.imgRoiFgMaskedPolarCropped = self.imgRoiFgMaskedPolar[0:iMinY]

            # Push each pixel toward the column mean, depending on the amount of headroom.
            if (self.imgHeadroom is not None) and (self.params['gui'][self.name]['subtract_bg']) and (self.params[self.name]['saturation_correction']):
                try:
                    self.imgHeadroomPolar = self.polartransforms.transform_polar_elliptical(
                        self.imgHeadroom,
                        self.i_0,
                        self.j_0,
                        raxial=radius_mid,
                        rortho=radius_mid,
                        dradiusStrip=int(dr),
                        amplifyRho=1.0,
                        rClip=self.rClip,
                        angleEllipse=self.angleBodypart_i,
                        theta_0=min(theta_0a, theta_1a),
                        theta_1=max(theta_0a, theta_1a),
                        amplifyTheta=1.0)
                except imageprocessing.TransformException:
                    pass

                self.imgHeadroomPolarCropped = self.imgHeadroomPolar[0:iMinY]

                # Perform the correction:  corrected = M + TH*(TF - M), i.e. the less
                # headroom a pixel has, the more it is pulled toward its column mean.
                TH = self.imgHeadroomPolarCropped / 255.0
                M = np.mean(self.imgRoiFgMaskedPolarCropped, 0).astype(np.float32)
                TF = self.imgRoiFgMaskedPolarCropped.astype(np.float32)
                self.imgRoiFgMaskedPolarCropped = M + cv2.multiply(TH, TF - M)

            # Apply the feathering window so the cropped polar image fades to zero at its edges.
            if (self.bValidMask):
                if (self.wfnRoiMaskedPolarCropped is None) or (self.imgRoiFgMaskedPolarCropped.shape != self.wfnRoiMaskedPolarCropped.shape):
                    self.wfnRoiMaskedPolarCropped = self.create_wfn(self.imgRoiFgMaskedPolarCropped.shape, self.params[self.name]['feathering'])

                self.imgRoiFgMaskedPolarCroppedWindowed = cv2.multiply(self.imgRoiFgMaskedPolarCropped.astype(np.float32), self.wfnRoiMaskedPolarCropped)

            # Show the image.
            img = self.imgRoiFgMaskedPolarCroppedWindowed
            #img = self.imgRoiFgMaskedPolarCropped
            self.windowPolar.set_image(img)
    # End update_polarimage()


    def update(self, dt, image, bInvertColor):
        MotionTrackedBodypart.update(self, dt, image, bInvertColor)

        if (self.params['gui'][self.name]['track']):
            self.update_polarimage()
            self.imgFinal = self.imgRoiFgMaskedPolarCroppedWindowed


    def draw(self, image):
        MotionTrackedBodypart.draw(self, image)
        self.windowPolar.show()
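
# Illustrative sketch only: the feathering window used above comes from
# imageprocessing.WindowFunctions().create_tukey(), whose implementation is not
# shown in this file.  The standalone helper below is a hypothetical
# approximation of what such a 2-D Tukey (tapered-cosine) window looks like,
# built as the outer product of two 1-D Tukey windows.  The function name and
# the meaning of 'alpha' (taper fraction, 0..1) are assumptions here, not the
# package's actual API; it is not called by the tracker.
def example_tukey_window_2d(shape, alpha):
    def tukey_1d(n, a):
        # 1-D Tukey window: cosine tapers over a fraction a/2 at each end, flat in the middle.
        if (a <= 0.0):
            return np.ones(n)
        x = np.linspace(0.0, 1.0, n)
        w = np.ones(n)
        left = x < a / 2.0
        right = x > 1.0 - a / 2.0
        w[left] = 0.5 * (1.0 + np.cos(np.pi * (2.0 * x[left] / a - 1.0)))
        w[right] = 0.5 * (1.0 + np.cos(np.pi * (2.0 * x[right] / a - 2.0 / a + 1.0)))
        return w

    # The outer product of the row and column tapers gives a window that fades
    # an image to zero at its borders, which is the role of wfnRoiMaskedPolarCropped.
    return np.outer(tukey_1d(shape[0], alpha), tukey_1d(shape[1], alpha)).astype(np.float32)
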
class Fly(object):
    def __init__(self, params={}):
        self.nodename = rospy.get_name().rstrip('/')

        EdgeTracker = EdgeTrackerByIntensityProfile
        #EdgeTracker = EdgeTrackerByHoughTransform

        # Create the body axis tracker.
        self.axis = AxisTracker(name='axis', params=params, color='yellow')

        # Create the head tracker.
        if (params['head']['tracker'] == 'area'):
            self.head = AreaTracker(name='head', params=params, color='cyan', bEqualizeHist=False)
        elif (params['head']['tracker'] == 'edge'):
            self.head = EdgeTracker(name='head', params=params, color='cyan', bEqualizeHist=False)
        elif (params['head']['tracker'] == 'tip'):
            self.head = TipTracker(name='head', params=params, color='cyan', bEqualizeHist=False)
        elif (params['head']['tracker'] == 'intensity'):
            self.head = IntensityTracker(name='head', params=params, color='cyan', bEqualizeHist=False)
        else:
            rospy.logwarn("Head tracker parameter must be one of ['area', 'edge', 'tip', 'intensity']")

        # Create the abdomen tracker.
        if (params['abdomen']['tracker'] == 'area'):
            self.abdomen = AreaTracker(name='abdomen', params=params, color='magenta', bEqualizeHist=False)
        elif (params['abdomen']['tracker'] == 'edge'):
            self.abdomen = EdgeTracker(name='abdomen', params=params, color='magenta', bEqualizeHist=False)
        elif (params['abdomen']['tracker'] == 'tip'):
            self.abdomen = TipTracker(name='abdomen', params=params, color='magenta', bEqualizeHist=False)
        elif (params['abdomen']['tracker'] == 'intensity'):
            self.abdomen = IntensityTracker(name='abdomen', params=params, color='magenta', bEqualizeHist=False)
        else:
            rospy.logwarn("Abdomen tracker parameter must be one of ['area', 'edge', 'tip', 'intensity']")

        # Create the right wing tracker.
        if (params['right']['tracker'] == 'area'):
            self.right = AreaTracker(name='right', params=params, color='red', bEqualizeHist=False)
        elif (params['right']['tracker'] == 'edge'):
            self.right = EdgeTracker(name='right', params=params, color='red', bEqualizeHist=False)
        elif (params['right']['tracker'] == 'tip'):
            self.right = TipTracker(name='right', params=params, color='red', bEqualizeHist=False)
        elif (params['right']['tracker'] == 'intensity'):
            self.right = IntensityTracker(name='right', params=params, color='red', bEqualizeHist=False)
        else:
            rospy.logwarn("Right wing tracker parameter must be one of ['area', 'edge', 'tip', 'intensity']")

        # Create the left wing tracker.
        if (params['left']['tracker'] == 'area'):
            self.left = AreaTracker(name='left', params=params, color='green', bEqualizeHist=False)
        elif (params['left']['tracker'] == 'edge'):
            self.left = EdgeTracker(name='left', params=params, color='green', bEqualizeHist=False)
        elif (params['left']['tracker'] == 'tip'):
            self.left = TipTracker(name='left', params=params, color='green', bEqualizeHist=False)
        elif (params['left']['tracker'] == 'intensity'):
            self.left = IntensityTracker(name='left', params=params, color='green', bEqualizeHist=False)
        else:
            rospy.logwarn("Left wing tracker parameter must be one of ['area', 'edge', 'tip', 'intensity']")

        # Create the aux tracker.
        self.aux = IntensityTracker(name='aux', params=params, color='yellow', bEqualizeHist=False)

        self.windowInvertColorArea = ImageWindow(False, 'InvertColorArea')

        self.bgra_body = ui.bgra_dict['light_gray']
        self.ptBodyIndicator1 = None
        self.ptBodyIndicator2 = None
        self.bInvertColor = False
        self.bInvertColorAuto = True
        self.iCount = 0
        self.stampPrev = None
        self.stampPrevAlt = None
        self.stamp = rospy.Time(0)

        self.pubFlystate = rospy.Publisher(self.nodename + '/flystate', MsgFlystate, queue_size=100)


    def set_params(self, params):
        self.params = params

        self.axis.set_params(params)
        self.head.set_params(params)
        self.abdomen.set_params(params)
        self.left.set_params(params)
        self.right.set_params(params)
        self.aux.set_params(params)

        pt1 = [params['gui']['head']['hinge']['x'], params['gui']['head']['hinge']['y']]
        pt2 = [params['gui']['abdomen']['hinge']['x'], params['gui']['abdomen']['hinge']['y']]
        pt3 = [params['gui']['left']['hinge']['x'], params['gui']['left']['hinge']['y']]
        pt4 = [params['gui']['right']['hinge']['x'], params['gui']['right']['hinge']['y']]
        self.ptBodyCenter_i = imageprocessing.get_intersection(pt1, pt2, pt3, pt4)

        r = max(params['gui']['left']['radius_outer'], params['gui']['right']['radius_outer'])
        self.angleBody_i = self.get_bodyangle_i()
        self.ptBodyIndicator1 = tuple((self.ptBodyCenter_i + r * np.array([np.cos(self.angleBody_i), np.sin(self.angleBody_i)])).astype(int))
        self.ptBodyIndicator2 = tuple((self.ptBodyCenter_i - r * np.array([np.cos(self.angleBody_i), np.sin(self.angleBody_i)])).astype(int))

        # Radius of an area approximately where the thorax would be.
        self.rInvertColorArea = np.linalg.norm(np.array([params['gui']['head']['hinge']['x'], params['gui']['head']['hinge']['y']])
                                               - np.array([params['gui']['abdomen']['hinge']['x'], params['gui']['abdomen']['hinge']['y']])) / 2.0
        self.bInvertColorValid = False


    def create_masks(self, shapeImage):
        if (self.params['gui']['axis']['track']):
            if (not self.axis.bValidMask):
                self.axis.create_mask(shapeImage)
                self.axis.bValidMask = True

        if (self.params['gui']['head']['track']):
            if (not self.head.bValidMask):
                self.head.create_mask(shapeImage)
                self.head.bValidMask = True

        if (self.params['gui']['abdomen']['track']):
            if (not self.abdomen.bValidMask):
                self.abdomen.create_mask(shapeImage)
                self.abdomen.bValidMask = True

        if (self.params['gui']['right']['track']):
            if (not self.right.bValidMask):
                self.right.create_mask(shapeImage)
                self.right.bValidMask = True

        if (self.params['gui']['left']['track']):
            if (not self.left.bValidMask):
                self.left.create_mask(shapeImage)
                self.left.bValidMask = True

        if (self.params['gui']['aux']['track']):
            if (not self.aux.bValidMask):
                self.aux.create_mask(shapeImage)
                self.aux.bValidMask = True


    def get_bodyangle_i(self):
        angle_i = imageprocessing.get_angle_from_points_i(self.abdomen.ptHinge_i, self.head.ptHinge_i)
        #angleBody_i = (angle_i + np.pi) % (2.0*np.pi) - np.pi
        angleBody_i = angle_i
        return angleBody_i


    # Calculate what we think the bInvertColor flag should be to make white-on-black.
    def get_invertcolor(self, image):
        # Get a roi around the body center.
        xMin = max(0, self.ptBodyCenter_i[0] - int(0.75 * self.rInvertColorArea))
        yMin = max(0, self.ptBodyCenter_i[1] - int(0.75 * self.rInvertColorArea))
        xMax = min(self.ptBodyCenter_i[0] + int(0.75 * self.rInvertColorArea), image.shape[1] - 1)
        yMax = min(self.ptBodyCenter_i[1] + int(0.75 * self.rInvertColorArea), image.shape[0] - 1)
        imgInvertColorArea = image[yMin:yMax, xMin:xMax]
        self.windowInvertColorArea.set_image(imgInvertColorArea)

        # Use the mean of the whole image as the threshold between dark and light.
        threshold = np.mean(image)
        #rospy.logwarn((np.min(image), np.median(image), np.mean(image), np.max(image), np.mean(imgInvertColorArea)))

        # If the roi around the body is darker than the image as a whole, then set bInvertColor.
        if (np.mean(imgInvertColorArea) <= threshold):
            bInvertColor = True
        else:
            bInvertColor = False

        return bInvertColor


    def set_background(self, image):
        self.head.set_background(image)
        self.abdomen.set_background(image)
        self.left.set_background(image)
        self.right.set_background(image)
        self.aux.set_background(image)
        self.axis.set_background(image)


    def update_handle_points(self):
        self.head.update_handle_points()
        self.abdomen.update_handle_points()
        self.left.update_handle_points()
        self.right.update_handle_points()
        self.aux.update_handle_points()
        self.axis.update_handle_points()


    def update(self, header=None, image=None):
        if (image is not None):
            self.header = header

            if (not self.bInvertColorValid) and (self.bInvertColorAuto):
                self.bInvertColor = self.get_invertcolor(image)
                self.bInvertColorValid = True

            # Get the dt.  Keep track of both the camera timestamp and the now() timestamp,
            # and use the now() timestamp only if the camera timestamp isn't changing.
            self.stamp = self.header.stamp
            stampAlt = rospy.Time.now()
            if (self.stampPrev is not None):
                dt = max(0.0, (self.header.stamp - self.stampPrev).to_sec())

                # If the camera is not giving good timestamps, then use our own clock.
                if (dt == 0.0):
                    dt = max(0.0, (stampAlt - self.stampPrevAlt).to_sec())
                    self.stamp = stampAlt

                # If time wrapped, then just assume a value.
                if (dt == 0.0):
                    dt = 1.0
            else:
                dt = np.inf

            self.stampPrev = self.header.stamp
            self.stampPrevAlt = stampAlt

            self.head.update(dt, image, self.bInvertColor)
            self.abdomen.update(dt, image, self.bInvertColor)
            self.left.update(dt, image, self.bInvertColor)
            self.right.update(dt, image, self.bInvertColor)
            self.aux.update(dt, image, self.bInvertColor)
            self.axis.update(dt, image, self.bInvertColor)


    def draw(self, image):
        # Draw a line to indicate the body axis (longer than just head-to-abdomen).
        cv2.line(image, self.ptBodyIndicator1, self.ptBodyIndicator2, self.bgra_body, 1)

        self.axis.draw(image)
        self.head.draw(image)
        self.abdomen.draw(image)
        self.left.draw(image)
        self.right.draw(image)
        self.aux.draw(image)

        self.windowInvertColorArea.show()


    def publish(self):
        flystate = MsgFlystate()
        flystate.header = Header(seq=self.iCount, stamp=self.stamp, frame_id='Fly')

        if (self.params['gui']['head']['track']):
            flystate.head = self.head.state
        else:
            flystate.head = MsgState()

        if (self.params['gui']['abdomen']['track']):
            flystate.abdomen = self.abdomen.state
        else:
            flystate.abdomen = MsgState()

        if (self.params['gui']['left']['track']):
            flystate.left = self.left.state
        else:
            flystate.left = MsgState()

        if (self.params['gui']['right']['track']):
            flystate.right = self.right.state
        else:
            flystate.right = MsgState()

        if (self.params['gui']['aux']['track']):
            flystate.aux = self.aux.state
        else:
            flystate.aux = MsgState()

        self.iCount += 1

        self.pubFlystate.publish(flystate)
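
# Illustrative sketch only: a minimal way for a separate node to consume the
# MsgFlystate messages published by Fly.publish() above.  The default topic
# 'kinefly/flystate' and the function/callback names here are assumptions
# (the real topic is '<nodename>/flystate' for whatever name the tracker node
# runs under); this helper is not called anywhere in the tracker itself.
def example_flystate_listener(topic='kinefly/flystate'):
    # Log each incoming flystate.  Each bodypart field (head, abdomen, left,
    # right, aux) is a MsgState; untracked parts arrive as empty MsgState instances.
    def callback(flystate):
        rospy.loginfo('flystate @ %s: left=%s right=%s' % (flystate.header.stamp.to_sec(), flystate.left, flystate.right))

    rospy.Subscriber(topic, MsgFlystate, callback)
    rospy.spin()
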