Example #1
 def on_enter_tile(self, *args):
     # approximate a circle of radius 50 centered on the fly's current position
     segs = lineseg_circle(self._flyx, self._flyy, 50)
     # convert the first two coordinates of each segment into a Point32 and publish the list
     pts = []
     for seg in segs:
         pts.append(geometry_msgs.msg.Point32(seg[0], seg[1], 0))
     self._pubpts.publish(pts)
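Both examples call a lineseg_circle(x, y, radius) helper that is not shown on this page. Judging from how its result is consumed (each element is indexed as seg[0], seg[1], and the trackem.py example below collects the same output into a draw_linesegs list documented as (x0, y0, x1, y1) tuples), a minimal sketch of such a helper could look like the following; the segment count n and the body are assumptions, not the original motmot implementation:

import math

def lineseg_circle(x, y, radius, n=32):
    # Hypothetical sketch: approximate a circle centered at (x, y) with n
    # chords, returned as (x0, y0, x1, y1) tuples.
    segs = []
    for i in range(n):
        t0 = 2.0 * math.pi * i / n
        t1 = 2.0 * math.pi * (i + 1) / n
        segs.append((x + radius * math.cos(t0),
                     y + radius * math.sin(t0),
                     x + radius * math.cos(t1),
                     y + radius * math.sin(t1)))
    return segs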
Example #2
File: trackem.py  Project: motmot/trackem
    def process_frame(self, cam_id, buf, buf_offset, timestamp, framenumber):
        """do work on each frame

        This function gets called on every single frame capture. It is
        called within the realtime thread, NOT the wxPython
        application mainloop's thread. Therefore, be extremely careful
        (use threading locks) when sharing data with the rest of the
        class.

        """
        assert self.pixel_format == "MONO8"

        now = time.time()
        if self.pub_image is not None:
            if (now - self.pub_last_image) > 30.0:
                msg = self.pub_image_class()
                msg.header.seq = framenumber
                # XXX TODO: once camera trigger is ROS node, get accurate timestamp
                msg.header.stamp = self._rospy_time_from_sec(now)
                msg.header.frame_id = "0"

                npbuf = np.array(buf)
                (height, width) = npbuf.shape

                msg.height = height
                msg.width = width
                msg.encoding = "mono8"
                msg.step = width
                msg.data = npbuf.tostring()  # let numpy convert to string

                self.pub_image.publish(msg)
                self.pub_last_image = now

        if self.pub_mean_luminance is not None:
            if (now - self.pub_last_mean_luminance) > 0.5:
                mean_lum = np.mean(buf)
                self.pub_mean_luminance.publish(float(mean_lum))
                self.pub_last_mean_luminance = now

        buf = FastImage.asfastimage(buf)

        ros_list = []
        point_list = []
        draw_linesegs = []  # [ (x0,y0,x1,y1) ]
        if self.enabled.isSet():

            light_on_dark = self.light_on_dark.isSet()
            offset_x, offset_y = buf_offset
            # work on a copy of the image so the original is displayed unaltered
            copyview = self.copybuf.roi(offset_x, offset_y, buf.size)
            buf.get_8u_copy_put(copyview, buf.size)
            buf = copyview
            buf_view = np.asarray(buf)  # get numpy view of data

            # step 1. extract points
            analysis_radius = self.analysis_radius.get()
            luminance_threshold = self.luminance_threshold.get()
            if light_on_dark:
                clearval = 0
            else:
                clearval = 255

            # outside mask, set values to clearval
            mask = self.get_mask()
            buf_view[mask] = clearval

            for pt_num in range(self.num_points.get()):
                if light_on_dark:
                    # find brightest point
                    max_val, x, y = buf.max_index(buf.size)
                    if max_val < luminance_threshold:
                        break
                else:
                    # find darkest point
                    min_val, x, y = buf.min_index(buf.size)
                    if min_val > (255 - luminance_threshold):
                        break

                # calculate region around found point
                clearxmin = max(0, x - analysis_radius)
                clearxmax = min(buf.size.w - 1, x + analysis_radius)
                clearymin = max(0, y - analysis_radius)
                clearymax = min(buf.size.h - 1, y + analysis_radius)

                # extract a view of this region
                this_region = buf_view[clearymin:clearymax, clearxmin:clearxmax]
                if light_on_dark:
                    binary_region = this_region > (max_val / 1.1)
                else:
                    binary_region = this_region < (min_val * 1.1)
                num_pixels_classified = np.sum(binary_region.ravel())
                # print '%d, (%d, %d)'%(num_pixels_classified,x,y)

                if num_pixels_classified > self.max_area.get():
                    # we don't want this point
                    pass
                else:
                    # compute luminance center of mass
                    if not light_on_dark:
                        # make a light-on-dark image
                        this_region2 = 255 - this_region
                    else:
                        this_region2 = this_region
                    fibuf = FastImage.asfastimage(this_region2)
                    try:
                        results = realtime_image_analysis.py_fit_params(fibuf)
                    except Exception as err:
                        print "%s: error extracting image data. ignoring." % (err,)
                    else:
                        (x0, y0, area, slope, eccentricity) = results
                        theta = np.arctan(slope)

                        x1 = x0 + clearxmin
                        y1 = y0 + clearymin

                        # save values
                        ros_list.append((offset_x + x1, offset_y + y1, theta))
                        point_list.append((offset_x + x1, offset_y + y1))

                # clear the region near the detected point
                buf_view[clearymin:clearymax, clearxmin:clearxmax] = clearval

            # send data over ROS
            if self.num_points.get():
                if self.pub_position is not None:
                    msg = self.pub_position_class()

                    msg.header.stamp.secs = int(np.floor(timestamp))
                    msg.header.stamp.nsecs = int((timestamp % 1.0) * 1e9)
                    msg.header.frame_id = "pixels"

                    msg.framenumber = framenumber

                    for (x, y, theta) in ros_list:
                        pose = self.pub_pose_class()
                        pose.x = x
                        pose.y = y
                        pose.theta = theta
                        msg.points.append(pose)
                    self.pub_position.publish(msg)

        if self.view_mask_mode.isSet():

            w, h = self.image_size
            x = self.mask_center_x.get()
            y = self.mask_center_y.get()
            radius = self.mask_radius.get()

            draw_linesegs.extend(lineseg_circle(x, y, radius))

        return point_list, draw_linesegs
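The docstring above stresses that process_frame runs on the camera's realtime thread, not in the wxPython mainloop, so any state it shares with GUI code has to be guarded with threading locks. A minimal sketch of that pattern, using hypothetical names (SharedPointStore, _latest_points) rather than anything from the actual trackem plugin:

import threading

class SharedPointStore(object):
    """Hypothetical container for results produced in the realtime thread."""

    def __init__(self):
        self._lock = threading.Lock()
        self._latest_points = []

    def set_points(self, point_list):
        # writer side: called from the realtime thread,
        # e.g. with the point_list returned by process_frame()
        with self._lock:
            self._latest_points = list(point_list)

    def get_points(self):
        # reader side: called from the wxPython mainloop when redrawing
        with self._lock:
            return list(self._latest_points)

Copying the list inside the lock keeps the critical section short and lets each thread work on its own snapshot afterwards.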