def expose_c(self, widget, event):
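    # Refresh the Cartesian spaces describing drawing areas 'a' and 'c'
    # from their current widget allocations.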
    x, y, w, h = self.area_a.get_allocation()
    self.space_a = CartesianSpace(w, h)
    x, y, w, h = self.area_c.get_allocation()
    self.space_c = CartesianSpace(w, h)

    cr = self.area_c.window.cairo_create()

    cr.set_source_rgb(0.1, 0.1, 0.8)
    cr.rectangle(0, 0, w, h)
    cr.fill()

    cr.set_line_width(9)
    cr.set_source_rgb(0.7, 0.2, 0.0)

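    # Draw a crosshair at the last recorded cursor position, if any.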
    if self.cursor_position_c is not None:
        cursor_dims = (40, 40)
        cr.move_to(self.cursor_position_c[0] - cursor_dims[0] / 2,
                   self.cursor_position_c[1])
        cr.line_to(self.cursor_position_c[0] + cursor_dims[0] / 2,
                   self.cursor_position_c[1])
        cr.move_to(self.cursor_position_c[0],
                   self.cursor_position_c[1] - cursor_dims[1] / 2)
        cr.line_to(self.cursor_position_c[0],
                   self.cursor_position_c[1] + cursor_dims[1] / 2)
        cr.stroke()

def draw_b(self, cairo_context):
    x, y, w, h = self.area_a.get_allocation()
    padding = (self.padding * w, self.padding * h)
    self.space_b = CartesianSpace(w - 2 * padding[0],
                                  h - 2 * padding[1],
                                  offset=(padding[0], padding[1]))
    cairo_context.rectangle(*(self.space_b._offset + self.space_b.dims))
    cairo_context.stroke()

class DmfDeviceView(GtkVideoView):
    '''
    Slave view for DMF device view.

    This view contains a canvas where video is overlaid with a
    graphical rendering of the device.  The video can optionally be
    registered to align it with the device rendering.  The signal
    'transform-changed' is emitted whenever a video registration has
    been completed.

    The signal 'channel-state-changed' is emitted whenever the state of
    a channel has changed as a result of interaction with the device
    view.

    A short usage sketch showing how a parent component might connect
    to these signals follows the class definition.
    '''
    builder_path = base_path().joinpath('gui', 'glade',
                                        'dmf_device_view.glade')

    gsignal('channel-state-changed', object)
    gsignal('transform-changed', object)

    def __init__(self, dmf_device_controller, name):
        self.controller = dmf_device_controller
        self.last_frame_time = datetime.now()
        self.last_frame = None
        self.video_offset = (0, 0)
        self.display_offset = (0, 0)
        self.electrode_color = {}
        self.background = None
        self.overlay_opacity = None
        self.pixmap = None
        self._proxy = None
        self._set_window_title = False

        self.svg_space = None
        self.view_space = None
        self.drawing_space = None

        self.popup = ElectrodeContextMenu(self)
        self.popup.connect('registration-request', self.on_register)
        self.force_aspect_ratio = False
        self.sink = None
        self.window_xid = None
        SlaveView.__init__(self)

    def create_ui(self, *args, **kwargs):
        self.widget = self.device_area

    def grab_frame(self):
        #return self.play_bin.grab_frame()
        return None

    def update_draw_queue(self):
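        # Rebuild the draw queue for the current widget size and push it to
        # the remote video window.  The device overlay uses the configured
        # opacity when video is enabled and is fully opaque otherwise.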
        if self.window_xid and self._proxy:
            if self.controller.video_enabled:
                overlay_opacity = self.overlay_opacity / 100.
            else:
                overlay_opacity = 1.
            x, y, width, height = self.device_area.get_allocation()
            draw_queue = self.get_draw_queue(width, height, overlay_opacity)
            self._proxy.set_draw_queue(draw_queue)

    def get_draw_queue(self, width, height, alpha=1.0):
        app = get_app()
        if app.dmf_device:
            d = DrawQueue()
            x, y, device_width, device_height = app.dmf_device.get_bounding_box()
            self.svg_space = CartesianSpace(device_width, device_height,
                    offset=(x, y))
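            # Fit the device bounding box inside the widget while preserving
            # its aspect ratio, leaving `padding` pixels on the limiting axis
            # and centring the drawing along the other axis.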
            padding = 20
            if width/device_width < height/device_height:
                drawing_width = width - 2 * padding
                drawing_height = drawing_width * (device_height / device_width)
                drawing_x = padding
                drawing_y = (height - drawing_height) / 2
            else:
                drawing_height = height - 2 * padding
                drawing_width = drawing_height * (device_width / device_height)
                drawing_x = (width - drawing_width) / 2
                drawing_y = padding
            self.drawing_space = CartesianSpace(drawing_width, drawing_height,
                offset=(drawing_x, drawing_y))
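            # Compose the SVG-to-widget transform: translate to the drawing
            # offset, scale from SVG units to pixels, then remove the SVG
            # bounding-box offset.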
            scale = np.array(self.drawing_space.dims) / np.array(
                    self.svg_space.dims)
            d.translate(*self.drawing_space._offset)
            d.scale(*scale)
            d.translate(*(-np.array(self.svg_space._offset)))
            for id, electrode in app.dmf_device.electrodes.iteritems():
                if id in self.electrode_color:
                    r, g, b = self.electrode_color[id]
                    self.draw_electrode(electrode, d, (b, g, r, alpha))
            return d

    def draw_electrode(self, electrode, cr, color=None):
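        # Fill each closed loop of the electrode's path.  If no colour is
        # given, the path's own colour is used, scaled to the [0, 1] range.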
        p = electrode.path
        cr.save()
        if color is None:
            color = [v / 255. for v in p.color]
        if len(color) < 4:
            # Pad missing components (e.g. alpha) up to RGBA with 1.
            color = list(color) + [1.] * (4 - len(color))
        cr.set_source_rgba(*color)
        for loop in p.loops:
            cr.move_to(*loop.verts[0])
            for v in loop.verts[1:]:
                cr.line_to(*v)
            cr.close_path()
            cr.fill()
        cr.restore()

    def _initialize_video(self, device, caps_str, bitrate=None,
                          record_path=None):
        # Connect to JSON-RPC server and request to run the pipeline
        self._proxy = WindowServiceProxy(59000)
        self._proxy.window_xid(self.window_xid)

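        # Build an initial draw queue for the current widget size, then
        # create, scale and start the remote pipeline with the device
        # overlay attached.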
        x, y, width, height = self.widget.get_allocation()
        draw_queue = self.get_draw_queue(width, height)
        self._proxy.create(device, caps_str, bitrate=bitrate,
                           record_path=record_path, draw_queue=draw_queue,
                           with_scale=True, with_warp=True)
        self._proxy.scale(width, height)
        self._proxy.start()
        self.update_draw_queue()

    def destroy_video_proxy(self):
        if self._proxy is not None:
            print '[destroy_video_proxy]'
            try:
                self._proxy.stop()
                print '  \->SUCCESS'
            except:
                print '  \->ERROR'
                import traceback
                traceback.print_exc()
            finally:
                self._proxy.close()
                self._proxy = None
                print '  --- CLOSED ---'

    def on_device_area__realize(self, widget, *args):
        self.on_realize(widget)

    def on_device_area__size_allocate(self, *args):
        '''
        Called when the device DrawingArea widget is allocated a new size.

        Here, we need to reset the CartesianSpace instance representing
        the drawing area.
        '''
        x, y, width, height = self.device_area.get_allocation()
        self.view_space = CartesianSpace(width, height)

    def on_device_area__destroy(self, *args):
        self.destroy_video_proxy()

    def get_clicked_electrode(self, event):
        app = get_app()
        if self.svg_space and self.drawing_space:
            # Get the click coordinates, normalized to the bounding box of the
            # DMF device drawing (NOT the entire device drawing area)
            normalized_coords = self.drawing_space.normalized_coords(
                    *event.get_coords())
            # Conduct a point query in the SVG space to see which electrode (if
            # any) was clicked.  Note that the normalized coordinates are
            # translated to get the coordinates relative to the SVG space.
            shape = app.dmf_device.body_group.space.point_query_first(
                    self.svg_space.translate_normalized(*normalized_coords))
            if shape:
                return app.dmf_device.get_electrode_from_body(shape.body)
        return None

    def on_device_area__button_press_event(self, widget, event):
        '''
        Modifies state of channel based on mouse-click.
        '''
        self.widget.grab_focus()
        # Determine which electrode was clicked (if any)
        electrode = self.get_clicked_electrode(event)
        if electrode:
            self.on_electrode_click(electrode, event)
        return True

    def on_electrode_click(self, electrode, event):
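        # Left-click toggles every channel assigned to the clicked electrode
        # and emits 'channel-state-changed'; right-click opens the electrode
        # context menu.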
        options = self.controller.get_step_options()
        state = options.state_of_channels
        if event.button == 1:
            if len(electrode.channels):
                for channel in electrode.channels:
                    if state[channel] > 0:
                        state[channel] = 0
                    else:
                        state[channel] = 1
                self.emit('channel-state-changed', electrode.channels[:])
            else:
                logger.error("No channel assigned to electrode.")
        elif event.button == 3:
            self.popup.popup(state, electrode, event.button, event.time,
                    register_enabled=self.controller.video_enabled)
        return True

    def on_register(self, *args, **kwargs):
        if self._proxy is not None:
            self._proxy.request_frame()
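            # Poll on a GTK timeout until the requested frame is available,
            # then convert and scale it with OpenCV and hand it to the
            # registration step.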
            def process_frame(self):
                #draw_queue = self.get_draw_queue(*self.view_space.dims)
                frame = self._proxy.get_frame()
                if frame is not None:
                    cv_im = cv.CreateMat(frame.shape[0], frame.shape[1], cv.CV_8UC3)
                    cv.SetData(cv_im, frame.tostring(), frame.shape[1] * frame.shape[2])
                    cv_scaled = cv.CreateMat(500, 600, cv.CV_8UC3)
                    cv.Resize(cv_im, cv_scaled)
                    self._on_register_frame_grabbed(cv_scaled)
                    return False
                return True
            gtk.timeout_add(10, process_frame, self)

    def _on_register_frame_grabbed(self, cv_img):
        x, y, width, height = self.device_area.get_allocation()
        # Create a cairo surface to draw device on
        surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
        cr = cairo.Context(surface)
        draw_queue = self.get_draw_queue(width, height)
        draw_queue.render(cr)

        size = (width, height)
        # Write cairo surface to cv image in RGBA format
        alpha_image = cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, 4)
        cv.SetData(alpha_image, surface.get_data(), 4 * width)

        # Convert RGBA image (alpha_image) to RGB image (device_image)
        device_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(alpha_image, device_image, cv.CV_RGBA2RGB)

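        # Scale the grabbed camera frame to the same size as the rendered
        # device image.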
        video_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)

        cv.Resize(cv_img, video_image)

        def do_device_registration():
            # Since this function may have been called from outside the main
            # thread, we need to surround GTK code with threads_enter/leave()
            dialog = DeviceRegistrationDialog(device_image, video_image)
            results = dialog.run()
            if results:
                array = np.fromstring(results.tostring(), dtype='float32',
                        count=results.width * results.height)
                # If the transform matrix is the default, set it to the
                # identity matrix.  This will simply reset the transform.
                if array.flatten()[-1] == 1 and array.sum() == 1:
                    array = np.identity(results.width, dtype=np.float32)
                array.shape = (results.width, results.height)
                self.emit('transform-changed', array)
            return False
        gtk.threads_enter()
        do_device_registration()
        gtk.threads_leave()

    def on_device_area__key_press_event(self, widget, data=None):
        pass
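

# --- Usage sketch (not part of the original source) ---
# A minimal example of wiring up DmfDeviceView from a parent component.
# It assumes `controller` is whatever object the application passes as the
# `dmf_device_controller` argument (hypothetical here), and that the
# gsignal() declarations above expose ordinary GObject signals, so handlers
# can be attached with connect().  Python 2 syntax is used to match the
# code above, and the view name 'device_view' is illustrative.

def on_channel_state_changed(view, channels):
    # `channels` is the list of channel indices toggled by the click.
    print 'channel state changed:', channels

def on_transform_changed(view, transform):
    # `transform` is the registration matrix produced by video registration.
    print 'video transform changed:', transform

view = DmfDeviceView(controller, 'device_view')
view.connect('channel-state-changed', on_channel_state_changed)
view.connect('transform-changed', on_transform_changed)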