class KinectDisplay(gtk.DrawingArea):

    def __init__(self, kinect):
        gtk.DrawingArea.__init__(self)
        self.set_size_request(1280, 480)

        self._found = False
        self._rgb_surface = None
        self._depth_surface = None

        self._kinect = kinect
        self._observers = []
        self._analyzer = None

        self._x = -1
        self._y = -1
        self._left_stick, self._right_stick = None, None
        self._detection_zone = None
        self._feet = None

        self.refresh_data()

        # Register for pointer events (event masks, not event types).
        self.add_events(gtk.gdk.POINTER_MOTION_MASK
                | gtk.gdk.BUTTON_PRESS_MASK
                | gtk.gdk.LEAVE_NOTIFY_MASK)
        self.connect("motion_notify_event", self.motion_notify)
        self.connect("leave_notify_event", self.leave_notify)
        self.connect("expose_event", self.expose)

    def add_observer(self, observer):
        self._observers.append(observer)

    def _notify_observers(self):
        data = {}
        data['cursor'] = self._x, self._y, \
                self._analyzer._distance[self._y, self._x]
        data['feet'] = self._feet
        data['stick'] = self._left_stick, self._right_stick
        for observer in self._observers:
            observer.observable_changed(data)

    def leave_notify(self, widget, event):
        self._x, self._y = -1, -1
        self._notify_observers()
        self.queue_draw()

    def motion_notify(self, widget, event):
        x, y = int(event.x), int(event.y)
        # RGB and depth images share the same coordinate system: fold the
        # depth-map half of the widget back onto the RGB half.
        if x >= 640:
            x -= 640
        self._x, self._y = x, y
        self._notify_observers()
        self.queue_draw()

    def expose(self, widget, event):
        self.context = widget.window.cairo_create()
        self.draw(self.context)
        return False

    def refresh_data(self):
        # Get raw data.
        self._found_kinect, rgb, depth = self._kinect.get_frames()

        # Perform basic data extraction.
        self._analyzer = DepthAnalyser(depth)
        l, r = self._analyzer.find_sticks()
        self._left_stick, self._right_stick = l, r
        dz = self._analyzer.extract_detection_band(l, r)
        self._detection_zone = dz
        lb = self._analyzer.extract_borders(dz)
        f = self._analyzer.analyze_borders(lb)
        self._feet = f

        # Convert numpy arrays to cairo surfaces.
        alpha_channel = numpy.ones((480, 640, 1), dtype=numpy.uint8) * 255

        # 1. RGB bitmap.
        rgb32 = numpy.concatenate((alpha_channel, rgb), axis=2)
        self._rgb_surface = cairo.ImageSurface.create_for_data(
                rgb32[:, :, ::-1].astype(numpy.uint8),
                cairo.FORMAT_ARGB32, 640, 480)

        # 2. Depth map, take care of special NaN value.
        i = numpy.amin(depth)
        depth_clean = numpy.where(depth == Kinect.UNDEF_DEPTH, 0, depth)
        a = numpy.amax(depth_clean)
        depth = numpy.where(
                depth == Kinect.UNDEF_DEPTH,
                0,
                255 - (depth - i) * 254.0 / (a - i))
        depth32 = numpy.dstack((
                alpha_channel,
                depth,
                numpy.where(depth == 0, 128, depth),
                depth))
        self._depth_surface = cairo.ImageSurface.create_for_data(
                depth32[:, :, ::-1].astype(numpy.uint8),
                cairo.FORMAT_ARGB32, 640, 480)

        self._notify_observers()

    def draw(self, ctx):
        # Draw surfaces.
        ctx.save()
        ctx.move_to(0, 0)
        ctx.set_source_surface(self._rgb_surface)
        ctx.paint()
        ctx.translate(640, 0)
        ctx.set_source_surface(self._depth_surface)
        ctx.paint()
        ctx.restore()

        # Coordinate system.
        ctx.set_line_width(1)
        ctx.set_source_rgb(1.0, 1.0, 1.0)
        ctx.move_to(640 + 30, 470)
        ctx.line_to(640 + 10, 470)
        ctx.line_to(640 + 10, 450)
        ctx.stroke()
        ctx.select_font_face('Sans')
        ctx.set_font_size(12)
        ctx.move_to(640 + 3, 450)
        ctx.show_text('y')
        ctx.stroke()
        ctx.move_to(640 + 30, 477)
        ctx.show_text('x')
        ctx.stroke()

        # Trace lines.
        if self._x >= 0 and self._y >= 0:
            ctx.set_source_rgb(1.0, 0.0, 0.0)
            ctx.set_line_width(1)
            ctx.move_to(0, self._y)
            ctx.line_to(1280, self._y)
            ctx.stroke()
            ctx.move_to(self._x, 0)
            ctx.line_to(self._x, 480)
            ctx.stroke()
            ctx.move_to(self._x + 640, 0)
            ctx.line_to(self._x + 640, 480)
            ctx.stroke()

            # Tell about depth and distance under the cursor.
            depth = self._kinect.latest_depth[self._y, self._x]
            distance = self._kinect.depth_to_cm(depth)
            if distance != Kinect.UNDEF_DISTANCE:
                text = "(%d, %d) - distance: %0.0f cm (depth = %d)" \
                        % (self._x, self._y, distance, depth)
            else:
                text = "(%d, %d)" % (self._x, self._y)
            ctx.set_font_size(16)
            ctx.move_to(950, 475)
            ctx.set_source_rgb(1, 1, 1)
            ctx.show_text(text)
            ctx.stroke()

        # Draw sticks rectangles and detection zone.
        ctx.set_line_width(1)
        ctx.set_source_rgb(1, 1, 0)
        x, y, w, h, _ = self._left_stick
        ctx.rectangle(x + 640, y, w, h)
        ctx.stroke()
        x, y, w, h, _ = self._right_stick
        ctx.rectangle(x + 640, y, w, h)
        ctx.stroke()
        ctx.set_source_rgb(1, 0, 1)
        x, y, w, h = self._detection_zone
        ctx.rectangle(x + 640, y, w, h)
        ctx.stroke()

        # Draw detected feet in detection zone.
        ctx.set_line_width(2)
        ctx.set_source_rgb(1, 0, 0)
        for foot in self._feet:
            x, y, _ = foot[0]
            ctx.move_to(640 + x, y)
            for x, y, _ in foot[1:]:
                ctx.line_to(640 + x, y)
            ctx.stroke()

        # Tell if images are not from a present device.
        if not self._found_kinect:
            ctx.set_font_size(20)
            ctx.move_to(20, 20)
            ctx.set_source_rgb(0.0, 0.0, 1.0)
            ctx.show_text("No Kinect detected, using static picture from disk")
            ctx.stroke()
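

# The block below is not part of the original listing: it is a minimal usage
# sketch showing how KinectDisplay and its observer protocol could be wired
# into a top-level window. It assumes the project's Kinect wrapper can be
# instantiated with no arguments (hypothetical here) and that gtk and Kinect
# are already imported earlier in this module; only an observable_changed()
# method is required of an observer.
class ConsoleObserver(object):
    """Example observer: prints the cursor data published by KinectDisplay."""

    def observable_changed(self, data):
        x, y, distance = data['cursor']
        if x >= 0 and y >= 0:
            print "cursor (%d, %d), distance %s" % (x, y, distance)


if __name__ == '__main__':
    kinect = Kinect()                       # assumed no-argument constructor
    display = KinectDisplay(kinect)
    display.add_observer(ConsoleObserver())

    window = gtk.Window(gtk.WINDOW_TOPLEVEL)
    window.set_title("Kinect display")
    window.connect("destroy", gtk.main_quit)
    window.add(display)
    window.show_all()
    gtk.main()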