class GTKGStreamerWindow(object):
    # Query the GStreamer window service once, at class-definition time, for
    # the available video modes and capture-source configurations.
    # NOTE(review): this opens a JSON-RPC connection (port 59000) as a side
    # effect of importing the module that defines this class.
    with WindowServiceProxy(port=59000) as w:
        video_mode_map = w.get_video_mode_map()
        video_mode_keys = sorted(video_mode_map.keys())
        device_key, devices = w.get_video_source_configs()

    # Without at least one available video mode the window cannot work, so
    # fail class creation outright.
    if not video_mode_keys:
        raise DeviceNotFound

    def __init__(self):
        """Build the GTK window: a form of capture settings, a Start/Stop
        button, and a 4:3 aspect-constrained video view."""
        window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        window.set_title("Mpeg2-Player")
        window.set_default_size(640, 500)
        window.connect("destroy", self.on_destroy)
        vbox = gtk.VBox()
        window.add(vbox)
        hbox = gtk.HBox()
        vbox.pack_start(hbox, expand=False)

        # Declarative form describing the capture configuration.
        video_mode_enum = Enum.named('video_mode').valued(*self.video_mode_keys)
        form = Form.of(
            video_mode_enum.using(default=self.video_mode_keys[0]),
            Filepath.named('output_path').using(default=''),
            Integer.named('bitrate').using(default=150, validators=[ValueAtLeast(
                    minimum=25)], properties={'step': 25,
                            'label': 'Bitrate (KB/s)', }),
            String.named('transform_string').using(default='1,0,0,0,1,0,0,0,1'),
            Boolean.named('draw_cairo').using(default=False),
        )
        self.video_mode_form_view = create_form_view(form)
        # Expose each form field as an attribute, e.g. ``self.bitrate_field``.
        for field in ['video_mode', 'output_path', 'bitrate',
                'transform_string', 'draw_cairo']:
            setattr(self, '%s_field' % field, self.video_mode_form_view.form\
                    .fields[field])
        self.video_mode_field.proxy.connect('changed', self._on_mode_changed)
        self.video_source = None
        hbox.add(self.video_mode_form_view.widget)
        self.button = gtk.Button("Start")
        hbox.pack_start(self.button, False)
        self.button.connect("clicked", self.start_stop)
        # Aspect frame keeps the video view at 4:3 when the window resizes.
        self.aframe = gtk.AspectFrame(xalign=0.5, yalign=1.0, ratio=4.0 / 3.0,
                obey_child=False)

        self.pipeline = None
        self._proxy = None  # JSON-RPC proxy, created in start()

        vbox.pack_start(self.aframe, expand=True)
        self.movie_view = GtkVideoView()
        self.movie_window = self.movie_view.widget
        self.aframe.add(self.movie_window)
        window.show_all()
        self.window = window

    @property
    def transform_str(self):
        transform_string = self.video_mode_form_view.form\
                .fields['transform_string'].element.value
        data = [float(v) for v in transform_string.split(',')]
        if len(data) != 9:
            print '''
                Transform string must be 9 comma-separated floats'''.strip()
            return '1,0,0,0,1,0,0,0,1'
        return ','.join(['{}'.format(v) for v in data])

    @property
    def draw_cairo(self):
        """Whether the 'draw_cairo' checkbox in the settings form is ticked."""
        field = self.video_mode_form_view.form.fields['draw_cairo']
        return field.element.value

    @property
    def bitrate(self):
        """Recording bitrate converted from the form's KB/s to bits/s.

        ``<< 13`` multiplies by 8192 (= 8 bits * 1024).
        """
        kb_per_sec = self.video_mode_form_view.form.fields['bitrate']\
                .element.value
        return kb_per_sec << 13

    @property
    def output_path(self):
        """Recording output filepath as entered in the settings form."""
        field = self.video_mode_form_view.form.fields['output_path']
        return field.element.value

    @property
    def video_settings(self):
        """Currently selected video-mode key, read from the form.

        NOTE(review): the matching setter stores to ``_video_settings``,
        which this getter never reads -- confirm that is intended.
        """
        element = self.video_mode_form_view.form.fields['video_mode'].element
        return element.value

    def _on_mode_changed(self, *args):
        """Handler for the form's 'changed' signal: push the current
        video-mode selection through the ``video_settings`` setter."""
        selected = self.video_mode_form_view.form.fields['video_mode']\
                .element.value
        self.video_settings = selected

    @video_settings.setter
    def video_settings(self, value):
        # Cache the selected mode key.  Note the property getter reads the
        # form element directly rather than this attribute.
        self._video_settings = value

    def get_video_device_and_caps_str(self):
        """Return ``(device, caps_string)`` for the selected video mode."""
        mode = self.video_mode_map[self.video_settings]
        caps = GstVideoSourceManager.get_caps_string(mode)
        return str(mode['device']), caps

    def start_stop(self, w):
        """Button handler: toggle between starting and stopping capture,
        based on the button's current label."""
        if self.button.get_label() != "Start":
            self.stop()
        else:
            self.start()

    def start(self):
        if not self.output_path:
            error('Please select a valid output filepath.')
            return
        self.movie_window.set_size_request(640, 480)
        self.aframe.show_all()

        # Use GStreamer WindowServiceProxy to control GStreamer video
        # pipeline.  Behind the scenes, it runs GStreamer in a separate
        # process (subprocess.Popen), exposed through a JSON-RPC
        # interface.
        # There are issues with the GTK gui freezing when the
        # GStreamer pipeline is started here directly.
        from pygst_utils.elements.draw_queue import get_example_draw_queue
        if self.draw_cairo:
            print 'using draw_queue'
            x, y, width, height = self.movie_window.get_allocation()
            draw_queue = get_example_draw_queue(width, height)
        else:
            print 'NOT using draw_queue'
            draw_queue = None
        self._proxy = WindowServiceProxy(port=59000)

        try:
            self._proxy.window_xid(self.movie_view.window_xid)
            device, caps_str = self.get_video_device_and_caps_str()
            self._proxy.create(device, caps_str, record_path=self.output_path,
                    bitrate=self.bitrate, draw_queue=draw_queue, with_warp=True,
                    with_scale=True)
            self._proxy.set_warp_transform(self.transform_str)
            self._proxy.start()
            self._proxy.scale(width, height)
        except (Exception, ), why:
            print why
            self.stop()
            return

        self.video_mode_field.proxy.widget.set_button_sensitivity(gtk.SENSITIVITY_OFF)
        self.transform_string_field.widget.set_sensitive(False)
        self.output_path_field.widget.set_sensitive(False)
        self.bitrate_field.widget.set_sensitive(False)

        self.button.set_label("Stop")
# --- Example #2 ---
class DmfDeviceView(GtkVideoView):
    '''
    Slave view for DMF device view.

    This view contains a canvas where video is overlayed with a
    graphical rendering of the device.  The video can optionally be
    registered to align it to the device rendering.  The signal
    'transform-changed' is emitted whenever a video registration has
    been completed.

    The signal 'channel-state-changed' is emitted whenever the state of
    a channel has changed as a result of interaction with the device
    view.
    '''
    # Glade UI definition used to build this view.
    builder_path = base_path().joinpath('gui', 'glade',
                                        'dmf_device_view.glade')

    gsignal('channel-state-changed', object)
    gsignal('transform-changed', object)

    def __init__(self, dmf_device_controller, name):
        """Initialize view state; the actual UI is built by SlaveView via
        ``create_ui()``."""
        self.controller = dmf_device_controller
        self.last_frame_time = datetime.now()
        self.last_frame = None
        self.video_offset = (0, 0)
        self.display_offset = (0, 0)
        # Maps electrode id -> (r, g, b) overlay color.
        self.electrode_color = {}
        self.background = None
        self.overlay_opacity = None
        self.pixmap = None
        self._proxy = None  # JSON-RPC proxy for the video pipeline process
        self._set_window_title = False

        # Coordinate spaces: SVG (device), view (widget), drawing (device
        # scaled/offset to fit the widget).  Set lazily by get_draw_queue()
        # and the size-allocate handler.
        self.svg_space = None
        self.view_space = None
        self.drawing_space = None

        self.popup = ElectrodeContextMenu(self)
        self.popup.connect('registration-request', self.on_register)
        self.force_aspect_ratio = False
        self.sink = None
        self.window_xid = None
        SlaveView.__init__(self)

    def create_ui(self, *args, **kwargs):
        """Expose the device drawing area as this slave view's root widget."""
        self.widget = self.device_area

    def grab_frame(self):
        """Frame grabbing is currently disabled; always returns ``None``.

        (Previously delegated to ``self.play_bin.grab_frame()``.)
        """
        return None

    def update_draw_queue(self):
        """Regenerate the overlay draw queue for the current widget size
        and push it to the video pipeline process (no-op until both the
        window XID and the proxy exist)."""
        if not (self.window_xid and self._proxy):
            return
        # Overlay is fully opaque when video is disabled; otherwise scale
        # the percentage setting down to 0.0-1.0.
        if self.controller.video_enabled:
            opacity = self.overlay_opacity / 100.
        else:
            opacity = 1.
        x, y, width, height = self.device_area.get_allocation()
        self._proxy.set_draw_queue(self.get_draw_queue(width, height, opacity))

    def get_draw_queue(self, width, height, alpha=1.0):
        """Build a DrawQueue that renders the DMF device scaled to fit a
        ``width`` x ``height`` viewport, or ``None`` when no device is
        loaded.

        The device is letterboxed with a fixed margin so its aspect ratio
        is preserved, SVG coordinates are mapped onto the drawing area,
        and every electrode with an assigned color is drawn with the given
        ``alpha``.
        """
        app = get_app()
        if app.dmf_device:
            d = DrawQueue()
            x, y, device_width, device_height = app.dmf_device.get_bounding_box()
            self.svg_space = CartesianSpace(device_width, device_height,
                    offset=(x, y))
            padding = 20
            # Fit along whichever axis is the limiting one.
            if width/device_width < height/device_height:
                drawing_width = width - 2 * padding
                drawing_height = drawing_width * (device_height / device_width)
                drawing_x = padding
                drawing_y = (height - drawing_height) / 2
            else:
                drawing_height = height - 2 * padding
                drawing_width = drawing_height * (device_width / device_height)
                drawing_x = (width - drawing_width) / 2
                drawing_y = padding
            self.drawing_space = CartesianSpace(drawing_width, drawing_height,
                offset=(drawing_x, drawing_y))
            scale = np.array(self.drawing_space.dims) / np.array(
                    self.svg_space.dims)
            # Map SVG space onto the drawing space: offset, scale, then
            # cancel the SVG space's own origin offset.
            d.translate(*self.drawing_space._offset)
            d.scale(*scale)
            d.translate(*(-np.array(self.svg_space._offset)))
            for id, electrode in app.dmf_device.electrodes.iteritems():
                # O(1) membership test (was ``.keys().count(id)``, an O(n)
                # list scan per electrode).
                if id in self.electrode_color:
                    r, g, b = self.electrode_color[id]
                    # Color is stored as (r, g, b) but drawn as (b, g, r).
                    self.draw_electrode(electrode, d, (b, g, r, alpha))
            return d

    def draw_electrode(self, electrode, cr, color=None):
        """Fill the electrode's path loop(s) on the cairo(-like) context
        ``cr``.

        ``color`` is an RGB(A) sequence with components in 0.0-1.0; when
        ``None``, the path's own 0-255 color is scaled down.  A missing
        alpha component defaults to fully opaque.
        """
        p = electrode.path
        cr.save()
        if color is None:
            color = [v / 255. for v in p.color]
        if len(color) < 4:
            # BUGFIX: pad missing alpha component(s) with 1.0 (opaque).
            # Previously the pad count was ``len(color) - 4`` (negative),
            # so nothing was appended and set_source_rgba() received too
            # few components.
            color = list(color) + [1.] * (4 - len(color))
        cr.set_source_rgba(*color)
        for loop in p.loops:
            cr.move_to(*loop.verts[0])
            for v in loop.verts[1:]:
                cr.line_to(*v)
            cr.close_path()
            cr.fill()
        cr.restore()

    def _initialize_video(self, device, caps_str, bitrate=None,
                          record_path=None):
        # Connect to JSON-RPC server and request to run the pipeline
        self._proxy = WindowServiceProxy(59000)
        # Tell the pipeline process which X window to render video into.
        self._proxy.window_xid(self.window_xid)

        # Size the pipeline's scaler and initial overlay to the widget.
        x, y, width, height = self.widget.get_allocation()
        draw_queue = self.get_draw_queue(width, height)
        self._proxy.create(device, caps_str, bitrate=bitrate,
                           record_path=record_path, draw_queue=draw_queue,
                           with_scale=True, with_warp=True)
        self._proxy.scale(width, height)
        self._proxy.start()
        # Push a fresh overlay now that the pipeline is running.
        self.update_draw_queue()

    def destroy_video_proxy(self):
        if self._proxy is not None:
            print '[destroy_video_proxy]'
            try:
                self._proxy.stop()
                print '  \->SUCCESS'
            except:
                print '  \->ERROR'
                import traceback
                traceback.print_exc()
            finally:
                self._proxy.close()
                self._proxy = None
                print '  --- CLOSED ---'

    def on_device_area__realize(self, widget, *args):
        # Forward the drawing area's 'realize' signal to the generic handler.
        self.on_realize(widget)

    def on_device_area__size_allocate(self, *args):
        '''
        Called when the device DrawingArea widget is (re)allocated a size.

        Resets the CartesianSpace instance representing the drawing area
        to the new allocation.
        '''
        _, _, width, height = self.device_area.get_allocation()
        self.view_space = CartesianSpace(width, height)

    def on_device_area__destroy(self, *args):
        # Tear down the remote video pipeline along with the widget.
        self.destroy_video_proxy()

    def get_clicked_electrode(self, event):
        """Return the electrode under a mouse ``event``, or ``None``.

        Requires both coordinate spaces to have been initialized (i.e. a
        draw queue has been built at least once).
        """
        if not (self.svg_space and self.drawing_space):
            return None
        app = get_app()
        # Click coordinates, normalized to the bounding box of the DMF
        # device drawing (NOT the entire device drawing area).
        normalized = self.drawing_space.normalized_coords(*event.get_coords())
        # Point query in SVG space to find which electrode (if any) was
        # clicked; the normalized coordinates are first translated into
        # coordinates relative to the SVG space.
        shape = app.dmf_device.body_group.space.point_query_first(
                self.svg_space.translate_normalized(*normalized))
        if shape:
            return app.dmf_device.get_electrode_from_body(shape.body)
        return None

    def on_device_area__button_press_event(self, widget, event):
        '''
        Modifies state of channel based on mouse-click.
        '''
        self.widget.grab_focus()
        # Dispatch to the click handler only when an electrode was hit.
        clicked = self.get_clicked_electrode(event)
        if clicked:
            self.on_electrode_click(clicked, event)
        return True

    def on_electrode_click(self, electrode, event):
        """Handle a mouse click on an electrode.

        Left-click (button 1) toggles every channel assigned to the
        electrode and emits 'channel-state-changed'; right-click (button 3)
        opens the electrode context menu.
        """
        options = self.controller.get_step_options()
        state = options.state_of_channels
        if event.button == 1:
            channels = electrode.channels
            if len(channels):
                for channel in channels:
                    state[channel] = 0 if state[channel] > 0 else 1
                self.emit('channel-state-changed', channels[:])
            else:
                logger.error("No channel assigned to electrode.")
        elif event.button == 3:
            self.popup.popup(state, electrode, event.button, event.time,
                    register_enabled=self.controller.video_enabled)
        return True

    def on_register(self, *args, **kwargs):
        # Request a frame from the pipeline process, then poll every 10 ms
        # until it becomes available.
        if self._proxy is not None:
            self._proxy.request_frame()
            def process_frame(self):
                # NOTE: ``self`` here is the extra argument passed to
                # gtk.timeout_add below, shadowing the enclosing ``self``.
                #draw_queue = self.get_draw_queue(*self.view_space.dims)
                frame = self._proxy.get_frame()
                if frame is not None:
                    # Wrap the frame (uses .shape/.tostring -- array-like)
                    # in an OpenCV matrix and scale it to 600x500 for the
                    # registration dialog.
                    cv_im = cv.CreateMat(frame.shape[0], frame.shape[1], cv.CV_8UC3)
                    cv.SetData(cv_im, frame.tostring(), frame.shape[1] * frame.shape[2])
                    cv_scaled = cv.CreateMat(500, 600, cv.CV_8UC3)
                    cv.Resize(cv_im, cv_scaled)
                    self._on_register_frame_grabbed(cv_scaled)
                    return False  # frame handled -- cancel the timeout
                return True  # keep polling
            gtk.timeout_add(10, process_frame, self)

    def _on_register_frame_grabbed(self, cv_img):
        # Runs once a video frame has been grabbed for registration:
        # render the device overlay to an image, convert both images to
        # RGB, and launch the registration dialog.
        x, y, width, height = self.device_area.get_allocation()
        # Create a cairo surface to draw device on
        surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
        cr = cairo.Context(surface)
        draw_queue = self.get_draw_queue(width, height)
        draw_queue.render(cr)

        size = (width, height)
        # Write cairo surface to cv image in RGBA format
        alpha_image = cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, 4)
        cv.SetData(alpha_image, surface.get_data(), 4 * width)

        # Convert RGBA image (alpha_image) to RGB image (device_image)
        device_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(alpha_image, device_image, cv.CV_RGBA2RGB)

        # Scale the grabbed video frame to the same size as the overlay.
        video_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)

        cv.Resize(cv_img, video_image)

        def do_device_registration():
            # Since this function may have been called from outside the main
            # thread, we need to surround GTK code with threads_enter/leave()
            dialog = DeviceRegistrationDialog(device_image, video_image)
            results = dialog.run()
            if results:
                array = np.fromstring(results.tostring(), dtype='float32',
                        count=results.width * results.height)
                # If the transform matrix is the default, set it to the
                # identity matrix.  This will simply reset the transform.
                if array.flatten()[-1] == 1 and array.sum() == 1:
                    array = np.identity(results.width, dtype=np.float32)
                # NOTE(review): shape is (width, height); row-major numpy
                # convention would be (height, width) -- confirm intended.
                array.shape = (results.width, results.height)
                self.emit('transform-changed', array)
            return False
        gtk.threads_enter()
        do_device_registration()
        gtk.threads_leave()

    def on_device_area__key_press_event(self, widget, data=None):
        # No keyboard handling is implemented yet.
        pass