Esempio n. 1
0
 def _consume_done(self, config, ghostPad, recv_rtp_socket,
                   recv_rtcp_socket):
     """Build and start the consumer sub-pipeline once mediasoup has
     accepted the consume request.

     config -- dict describing the consumer (must contain 'transportId'
               and 'consumerId'); also drives getConsumerPipelineDesc().
     ghostPad -- ghost pad whose target is retargeted to the new bin.
     recv_rtp_socket / recv_rtcp_socket -- pre-bound Gio sockets handed
               to the bin's udpsrc elements.
     """
     Gst.info('%s _consume_done %s' % (self.name, config))
     desc = getConsumerPipelineDesc(config)
     # BUG FIX: this debug line previously (mis)reported '_produce_done'.
     Gst.debug('%s _consume_done desc=%s' % (self.name, desc))
     # renamed from 'bin' (shadowed the builtin); also dropped the unused
     # 'rtpbin' lookup that was never referenced afterwards
     consumer_bin = Gst.parse_bin_from_description(desc, False)
     self.add(consumer_bin)
     # hand the pre-bound sockets to the udpsrc elements
     consumer_bin.get_by_name('rtp_udpsrc').set_property(
         'socket', recv_rtp_socket)
     consumer_bin.get_by_name('rtcp_udpsrc').set_property(
         'socket', recv_rtcp_socket)
     # retarget the ghost pad onto the new bin's output and drop the
     # previous (placeholder) target pad
     src_pad = consumer_bin.get_by_name('sink').get_static_pad('src')
     tmp_pad = ghostPad.get_target()
     ghostPad.set_target(src_pad)
     self.remove_pad(tmp_pad)
     #
     consumer_bin.set_state(Gst.State.PLAYING)
     #
     self.emit('consumer-added', config['consumerId'])
     # tell the server we are ready to receive media
     self.mediasoup.resumeConsumer(config['transportId'],
                                   config['consumerId'],
                                   self._resume_consumer_done,
                                   self._on_error, config)
Esempio n. 2
0
    def do_start(self) -> bool:
        """Initialize the camera device and apply configured properties.

        Returns True on success; on failure an error message is posted
        on the bus and False is returned.
        """
        Gst.info("Starting")

        def _fail(error_string: str) -> bool:
            # Shared error path (dedupes the repeated error-reporting
            # pattern): post a bus error and signal start failure.
            self.post_message(
                Gst.Message.new_error(self, GLib.Error(error_string),
                                      error_string))
            return False

        try:
            self.image_acquirer = ImageAcquirer()

            # Select by serial when one is configured, otherwise fall
            # back to the first enumerated device.
            if not self.image_acquirer.init_device(
                    device_serial=self.serial,
                    device_index=(0 if self.serial is None else None),
            ):
                return _fail("Camera not found")

            if not self.apply_properties_to_cam():
                return _fail("Camera settings could not be applied")

            self.camera_caps = self.get_camera_caps()

        except Exception as ex:
            Gst.error(f"Error: {ex}")
            return _fail(str(ex))
        return True
Esempio n. 3
0
 def execute_cam_node(self, node_name: str, log_execution: bool = True):
     """Execute a GenICam command node; failures are downgraded to warnings."""
     try:
         self.image_acquirer.execute_node(node_name)
     except (ValueError, NotImplementedError) as err:
         Gst.warning(f"Warning: {err}")
     else:
         if log_execution:
             Gst.info(f"{node_name} executed")
Esempio n. 4
0
    def do_transform_ip(self, buffer_out):
        """In-place transform: clears the red channel (for BGRx layout)
        and inverts the colors of the top quarter of the frame.

        Works around PyGI's copying Gst.Buffer.map() by calling
        gst_buffer_map() directly via ctypes; a writable map requires an
        exclusive reference, so the mini-object refcount is temporarily
        forced to 1 and restored afterwards.
        """
        Gst.info("timestamp(buffer):%s" % (Gst.TIME_ARGS(buffer_out.pts)))

        # Get frame dimensions from the pool's negotiated caps
        config = buffer_out.pool.get_config()
        caps = config['caps']
        struct = caps.get_structure(0)
        (ok, width) = struct.get_int('width')
        if not ok:
            raise RuntimeError("Failed to get width")

        (ok, height) = struct.get_int('height')
        if not ok:
            raise RuntimeError("Failed to get height")

        # Fake an exclusive reference so the WRITE map below succeeds;
        # restored at the end of the method.
        mo = buffer_out.mini_object
        saved_refcount = mo.refcount
        mo.refcount = 1

        # for GObject instances, hash() returns the pointer to the C struct
        pbuffer = hash(buffer_out)
        mapping = _GstMapInfo()
        success = _libgst.gst_buffer_map(pbuffer, mapping, Gst.MapFlags.WRITE)
        if not success:
            raise RuntimeError("Could not map buffer")
        else:
            ctypes_region = ctypes.cast(mapping.data,
                    ctypes.POINTER(ctypes.c_byte * mapping.size))
            raw_ptr = ctypes_region.contents

            # create ctypes array from the raw pointer
            ptr = (ctypes.c_byte * mapping.size).from_address(ctypes.addressof(raw_ptr))

            # cast array to uint32 to work with RGBA/BGRx data
            # FIXME: hardcode caps to only allow 4-byte modes?
            ptr = ctypes.cast(ptr, ctypes.POINTER(ctypes.c_uint32))
            np_arr = np.ctypeslib.as_array(ptr, shape=(height, width))

            # this is fast, just one call into numpy and probably vectorized
            # in C; if the image is BGRx, this erases the red channel
            np_arr &= 0xff00ffff

            # invert colors for the top quarter of the rows
            for i in range(height // 4):
                np_arr[i] ^= 0x00ffffff

            # this is SLOOOOW, multiple calls into numpy for each element
            # however, if performance is not a concern
            # (for example, processing video offline), you can add
            # sync=false to the last pipeline element (sink)
            # to avoid the "gst_base_sink_is_too_late" error
            #for i in range(height):
            #    for j in range(width):
            #        np_arr[i][j] //= 2

            _libgst.gst_buffer_unmap(pbuffer, mapping)

        mo.refcount = saved_refcount
        return Gst.FlowReturn.OK
    def do_transform_ip(self, buffer: Gst.Buffer) -> Gst.FlowReturn:
        """Log the buffer timestamp plus the current property values;
        the buffer itself is passed through untouched."""
        parts = (
            f"{Gst.TIME_ARGS(buffer.pts)}: int-prop: {self.int_prop}, float-prop: {self.float_prop} ",
            f"bool-prop: {self.bool_prop}, str-prop: {self.str_prop}, pyobject-prop: {self.pyobject_prop}",
        )
        Gst.info("".join(parts))

        return Gst.FlowReturn.OK
Esempio n. 6
0
 def do_stop(self) -> bool:
     """Stop acquisition and release the camera; always reports success."""
     Gst.info("Stopping")
     try:
         self.image_acquirer.end_acquisition()
     except Exception as ex:
         Gst.error(f"Error: {ex}")
     else:
         # only drop the acquirer once acquisition ended cleanly
         self.image_acquirer = None
     return True
Esempio n. 7
0
	def _sinkl_event(self, pad, parent, event):
		"""Event handler for the left sink pad: intercept CAPS events to
		(re)negotiate the video source pad, forward everything else."""
		Gst.debug("event %s" % event)
		Gst.debug("event type %s" % event.type)
		if event.type != Gst.EventType.CAPS:
			# non-CAPS events go straight downstream
			return self.srcvpad.push_event(event)
		new_caps = event.parse_caps()
		Gst.info("event caps %s" % new_caps)
		return self.setcaps_srcv(parent, new_caps)
Esempio n. 8
0
	def _srcv_event(self, pad, parent, event):
		"""Handle events arriving on the video source pad.

		QOS events are consumed here (logged only); every other event is
		fanned out upstream to both sink pads.
		"""
		Gst.debug("event %s" % event)
		Gst.debug("event type %s" % event.type)
		if event.type == Gst.EventType.QOS:
			info = event.parse_qos()
			Gst.info("QOS %s" % (str(info)))
			# BUG FIX: previously fell through with no return value, so
			# None (falsy) was reported even though the QOS event was
			# deliberately consumed; pad event handlers must return bool.
			return True
		return self.sinklpad.push_event(event) and self.sinkrpad.push_event(event)
Esempio n. 9
0
    def apply_properties_to_cam(self) -> bool:
        """Push all configured element properties to the camera.

        Loads the selected user set first, then configures stream
        buffering, binning, exposure, gain and white balance.  Returns
        False if any node access raises; set_cam_node_val itself already
        downgrades per-node errors to warnings.
        """
        Gst.info("Applying properties")
        try:
            # Start from a known camera state.
            self.set_cam_node_val("UserSetSelector", self.user_set)
            self.execute_cam_node("UserSetLoad")

            # Host-side stream buffering configuration.
            self.set_cam_node_val("StreamBufferHandlingMode", "OldestFirst")
            self.set_cam_node_val("StreamBufferCountMode", "Manual")
            self.set_cam_node_val("StreamBufferCountManual",
                                  self.num_cam_buffers)

            # Configure Camera Properties
            if self.h_binning > 1:
                self.set_cam_node_val("BinningHorizontal", self.h_binning)

            if self.v_binning > 1:
                self.set_cam_node_val("BinningVertical", self.v_binning)

            # An explicit exposure time wins over auto-exposure.
            if self.exposure_time >= 0:
                self.set_cam_node_val("ExposureAuto", "Off")
                self.set_cam_node_val("ExposureTime", self.exposure_time)
            elif self.auto_exposure:
                self.set_cam_node_val("ExposureAuto", "Continuous")
            else:
                self.set_cam_node_val("ExposureAuto", "Off")

            # An explicit gain wins over auto-gain.
            if self.gain >= 0:
                self.set_cam_node_val("GainAuto", "Off")
                self.set_cam_node_val("Gain", self.gain)
            elif self.auto_gain:
                self.set_cam_node_val("GainAuto", "Continuous")
            else:
                self.set_cam_node_val("GainAuto", "Off")

            # White balance: any manually-set ratio disables auto mode.
            if self.cam_node_available("BalanceWhiteAuto"):
                manual_wb = False
                if self.wb_blue >= 0:
                    self.set_cam_node_val("BalanceWhiteAuto", "Off")
                    self.set_cam_node_val("BalanceRatioSelector", "Blue")
                    self.set_cam_node_val("BalanceRatio", self.wb_blue)
                    manual_wb = True

                if self.wb_red >= 0:
                    self.set_cam_node_val("BalanceWhiteAuto", "Off")
                    self.set_cam_node_val("BalanceRatioSelector", "Red")
                    self.set_cam_node_val("BalanceRatio", self.wb_red)
                    manual_wb = True

                if self.auto_wb and not manual_wb:
                    self.set_cam_node_val("BalanceWhiteAuto", "Continuous")
                else:
                    self.set_cam_node_val("BalanceWhiteAuto", "Off")

        except Exception as ex:
            Gst.error(f"Error: {ex}")
            return False

        return True
Esempio n. 10
0
    def _produce_done(self, config, ghostPad):
        """Build and start the producer sub-pipeline once mediasoup has
        accepted the produce request.

        config -- producer description (must contain 'producerId'); also
                  drives getProducerPipelineDesc() and simulcast wiring.
        ghostPad -- ghost pad retargeted onto the new bin's input.
        """
        Gst.info('%s _produce_done %s' % (self.name, config))
        #
        desc = getProducerPipelineDesc(config)
        Gst.info('%s _produce_done desc=%s' % (self.name, desc))
        bin = Gst.parse_bin_from_description(desc, False)
        self.add(bin)
        #
        # handle time display: refresh the overlay text on every buffer
        if self.clock_overlay:
            clock_overlay = bin.get_by_name('clock_overlay')

            def on_v_encoder_buffer(pad, info):
                # pad probe: stamp the overlay with the current UTC time
                clock_overlay.set_property(
                    'text',
                    datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f UTC'))
                return Gst.PadProbeReturn.OK

            clock_overlay.get_static_pad('video_sink').add_probe(
                Gst.PadProbeType.BUFFER, on_v_encoder_buffer)
        #
        bin.set_state(Gst.State.PAUSED)
        # create a udpsrc element with the same rtcp_udpsink socket, so
        # RTCP can be received on the very port we send from
        rtcp_socket = bin.get_by_name('rtcp_udpsink').get_property(
            'used-socket')
        rtcp_udpsrc = Gst.ElementFactory.make('udpsrc')
        rtcp_udpsrc.set_property('name', 'rtcp_udpsrc')
        rtcp_udpsrc.set_property('socket', rtcp_socket)
        bin.add(rtcp_udpsrc)
        #recv_rtcp_sink_0 = rtpbin.get_request_pad('recv_rtcp_sink_0')
        #rtcp_udpsrc.get_static_pad('src').link(recv_rtcp_sink_0)
        #
        # fan incoming RTCP out to every rtpbin session:
        # rtcp_udpsrc -> tee --recv_rtcp_sink_0
        #                    `-recv_rtcp_sink_1
        #                    `-recv_rtcp_sink_2
        rtcp_udpsrc_tee = Gst.ElementFactory.make('tee')
        bin.add(rtcp_udpsrc_tee)
        rtcp_udpsrc.link(rtcp_udpsrc_tee)
        #
        rtpbin = bin.get_by_name('rtpbin')
        recv_rtcp_sink_0 = rtpbin.get_request_pad('recv_rtcp_sink_0')
        rtcp_udpsrc_tee.get_request_pad('src_0').link(recv_rtcp_sink_0)
        # simulcast uses three RTP sessions, so feed RTCP to all of them
        if config.get('simulcast', False):
            recv_rtcp_sink_1 = rtpbin.get_request_pad('recv_rtcp_sink_1')
            rtcp_udpsrc_tee.get_request_pad('src_1').link(recv_rtcp_sink_1)
            recv_rtcp_sink_2 = rtpbin.get_request_pad('recv_rtcp_sink_2')
            rtcp_udpsrc_tee.get_request_pad('src_2').link(recv_rtcp_sink_2)
        # link source ghost pad, dropping the previous placeholder target
        sink_pad = bin.get_by_name('src').get_static_pad('sink')
        tmp_pad = ghostPad.get_target()
        ghostPad.set_target(sink_pad)
        self.remove_pad(tmp_pad)
        #
        bin.set_state(Gst.State.PLAYING)
        #
        self.emit('producer-added', config['producerId'])
Esempio n. 11
0
    def do_get_caps(self, filter: Gst.Caps) -> Gst.Caps:
        """Return the caps this source can produce.

        Uses the caps probed from the camera when available, otherwise
        advertises ANY.  NOTE(review): the ``filter`` caps are currently
        not intersected with the result — confirm whether the base class
        handles that.
        """
        Gst.info("Get Caps")

        if self.camera_caps is not None:
            caps = Gst.Caps.copy(self.camera_caps)
        else:
            caps = Gst.Caps.new_any()

        # typo fix: previously logged "Avaliable"
        Gst.info(f"Available caps: {caps.to_string()}")
        return caps
Esempio n. 12
0
 def set_cam_node_val(self, node_name: str, value, log_value: bool = True):
     """Write a value to a GenICam node.

     When log_value is True the value is read back and logged, flagging
     any mismatch between the requested and the accepted value.  Node
     errors are downgraded to warnings.
     """
     try:
         self.image_acquirer.set_node_val(node_name, value)
         if not log_value:
             return
         actual = self.image_acquirer.get_node_val(node_name)
         if actual == value:
             Gst.info(f"{node_name}: {actual}")
         else:
             Gst.info(f"{node_name}: requested={value}, actual={actual}")
     except (ValueError, NotImplementedError) as err:
         Gst.warning(f"Warning: {err}")
Esempio n. 13
0
    def do_transform_ip(self, buffer_out):
        """In-place transform that maps the buffer as a writable
        height x width x 4 uint8 array and runs the MXNet detector on it.

        Uses the same ctypes/refcount workaround as the other transforms
        to obtain a writable mapping from PyGI.
        """
        Gst.info("timestamp(buffer):%s" % (Gst.TIME_ARGS(buffer_out.pts)))
        # lazily create the detector on the first buffer
        if self.__mxnet_detector__ is None:
            self.__mxnet_detector__ = self.initializeDetector()

        # Get frame dimensions from the pool's negotiated caps
        config = buffer_out.pool.get_config()
        caps = config['caps']
        struct = caps.get_structure(0)
        (ok, width) = struct.get_int('width')
        if not ok:
            raise RuntimeError("Failed to get width")

        (ok, height) = struct.get_int('height')
        if not ok:
            raise RuntimeError("Failed to get height")

        # Fake an exclusive reference so the WRITE map below succeeds;
        # restored at the end of the method.
        mo = buffer_out.mini_object
        saved_refcount = mo.refcount
        mo.refcount = 1

        # for GObject instances, hash() returns the pointer to the C struct
        pbuffer = hash(buffer_out)
        mapping = _GstMapInfo()
        success = _libgst.gst_buffer_map(pbuffer, mapping, Gst.MapFlags.WRITE)
        if not success:
            raise RuntimeError("Could not map buffer")
        else:
            ctypes_region = ctypes.cast(
                mapping.data, ctypes.POINTER(ctypes.c_byte * mapping.size))
            raw_ptr = ctypes_region.contents

            # create ctypes array from the raw pointer
            ptr = (ctypes.c_byte * mapping.size).from_address(
                ctypes.addressof(raw_ptr))

            # view the data as uint8 so each RGBA/BGRx channel byte is
            # addressable (height x width x 4)
            # FIXME: hardcode caps to only allow 4-byte modes?
            ptr = ctypes.cast(ptr, ctypes.POINTER(ctypes.c_uint8))
            np_arr = np.ctypeslib.as_array(ptr, shape=(height, width, 4))

            # NOTE(review): imdecode expects an *encoded* byte stream, but
            # np_arr holds raw pixels, and cv_img is never used — this
            # looks like leftover code; confirm before removing.
            cv_img = cv2.imdecode(np_arr, cv2.IMREAD_UNCHANGED)
            #self.__mxnet_detector__.process(cv_img, np_arr)
            # detector reads the frame and writes annotations in place
            self.__mxnet_detector__.process(np_arr, np_arr)

            # NOTE(review): dead branch, apparently kept for debugging
            if False:
                cv2.rectangle(np_arr, (10, 10), (100, 100), (0, 255, 0), 4)

            _libgst.gst_buffer_unmap(pbuffer, mapping)

        mo.refcount = saved_refcount
        return Gst.FlowReturn.OK
Esempio n. 14
0
    def chainfunc(self, pad: Gst.Pad, parent,
                  buffer: Gst.Buffer) -> Gst.FlowReturn:
        """Log the buffer timestamp and current property values, then
        forward the buffer downstream.

        :param parent: GstPluginPy
        """
        parts = (
            f"{Gst.TIME_ARGS(buffer.pts)}: int-prop: {self.int_prop}, float-prop: {self.float_prop} ",
            f"bool-prop: {self.bool_prop}, str-prop: {self.str_prop}, pyobject-prop: {self.pyobject_prop}",
        )
        Gst.info("".join(parts))

        return self.srcpad.push(buffer)
Esempio n. 15
0
 def do_set_property(self, prop: GObject.GParamSpec, value):
     """Standard GObject property setter.

     Stores the value on the matching attribute; exposure/gain related
     properties are additionally pushed to the camera when it is already
     initialized.  Raises AttributeError for unknown property names.
     """
     Gst.info(f"Setting {prop.name} = {value}")

     def cam_ready() -> bool:
         # Evaluated lazily so properties that never touch the camera do
         # not query the acquirer (dedupes the repeated readiness check).
         return (self.image_acquirer is not None
                 and self.image_acquirer.is_initialized())

     # Properties that only update an attribute (no camera round-trip).
     simple_attrs = {
         "auto-wb": "auto_wb",
         "wb-blue-ratio": "wb_blue",
         "wb-red-ratio": "wb_red",
         "h-binning": "h_binning",
         "v-binning": "v_binning",
         "offset-x": "offset_x",
         "offset-y": "offset_y",
         "center-x": "center_x",
         "center-y": "center_y",
         "num-image-buffers": "num_cam_buffers",
         "serial": "serial",
         "user-set": "user_set",
     }

     if prop.name == "auto-exposure":
         self.auto_exposure = value
         if cam_ready():
             self.set_cam_node_val(
                 "ExposureAuto",
                 "Continuous" if self.auto_exposure else "Off")
     elif prop.name == "auto-gain":
         self.auto_gain = value
         if cam_ready():
             self.set_cam_node_val(
                 "GainAuto", "Continuous" if self.auto_gain else "Off")
     elif prop.name == "exposure":
         self.exposure_time = value
         if cam_ready():
             self.set_cam_node_val("ExposureTime", self.exposure_time)
     elif prop.name == "gain":
         self.gain = value
         if cam_ready():
             self.set_cam_node_val("Gain", self.gain)
     elif prop.name in simple_attrs:
         setattr(self, simple_attrs[prop.name], value)
     else:
         raise AttributeError("unknown property %s" % prop.name)
    def do_can_load_uri(self, uri):
        """Return True if *uri* points at a local file that the OTIO
        adapters can load (GES .xges files are explicitly excluded)."""
        try:
            is_local_file = (Gst.uri_is_valid(uri)
                             and Gst.uri_get_protocol(uri) == "file")
        except GLib.Error as e:
            Gst.error(str(e))
            return False

        if not is_local_file:
            return False

        # .xges is handled by GES itself, not by this formatter
        if uri.endswith(".xges"):
            return False

        try:
            return otio.adapters.from_filepath(Gst.uri_get_location(uri)) is not None
        except Exception as e:
            Gst.info("Could not load %s -> %s" % (uri, e))
            return False
Esempio n. 17
0
 def chain_function(pad, parent, buf):
     """Chain function installed on the proxied pad (closure over
     `self` and `ghostPad`).

     Once caps are negotiated, derives a producer config from the caps
     structure and asks mediasoup to create the producer.  The buffer
     itself is always accepted.
     """
     Gst.debug('%s chain_function caps: %s pts: %f' %
               (pad.name, pad.get_current_caps(), buf.pts * 1e-9))
     caps = pad.get_current_caps()
     if caps:
         structure = caps.get_structure(0)
         Gst.info('%s event_function caps=%s' % (pad.name, caps))
         kind = structure.get_name().split('/')[0]
         appData = json.loads(self.app_data)
         # base producer config, refined per media kind below
         config = {
             'kind': kind,
             'producerId': structure.get_string('producer-id'),
             'text_overlay': self.text_overlay,
             'time_overlay': self.time_overlay,
             'clock_overlay': self.clock_overlay,
         }
         if kind == 'audio':
             config.update(DEFAULT_AUDIO_CONFIG)
             config['codec'] = self.audio_codec
             config['bitrate'] = self.audio_bitrate
         else:
             config.update(DEFAULT_VIDEO_CONFIG)
             config.update({
                 'server_ip': self.server_ip,
                 'codec': self.video_codec,
                 'bitrate': self.video_bitrate,
                 'hw': self.hw,
                 'gop': self.gop,
                 'simulcast': self.simulcast,
                 'width': structure['width'],
                 'height': structure['height'],
                 'framerate': structure['framerate'].num or 30,
             })
             # mirror the geometry into the application data
             for key in ('width', 'height', 'framerate'):
                 appData[key] = config[key]
         appData['maxBitrate'] = config['bitrate']
         self.mediasoup.produce(config, appData, self._produce_done,
                                self._on_error,
                                self._on_producer_removed, ghostPad)
     return Gst.FlowReturn.OK
Esempio n. 18
0
    def do_fixate(self, caps: Gst.Caps) -> Gst.Caps:
        """Fixate *caps* against what the camera can actually deliver.

        Each field (format, height, width, framerate) is first fixated
        towards the camera's current node value, then the fixated value
        is written back to the camera, so the statement order matters.
        ROI offsets are re-centered when center-x/center-y are set.
        """
        Gst.info("Fixating caps")

        structure = caps.get_structure(0).copy()

        Gst.info(f"Incoming caps: {structure}")

        # Pixel format: fixate towards the camera's current format, then
        # push the negotiated format back to the camera.
        genicam_format = self.get_cam_node_val("PixelFormat")
        structure.fixate_field_string(
            "format",
            self.get_format_from_genicam(genicam_format).gst)
        self.set_cam_node_val(
            "PixelFormat",
            self.get_format_from_gst(structure.get_value("format")).genicam,
        )

        height = self.get_cam_node_val("Height")
        structure.fixate_field_nearest_int("height", height)
        self.set_cam_node_val("Height", structure.get_value("height"))

        # Vertical offset: either center the ROI or apply offset-y.
        if self.center_y:
            self.set_cam_node_val(
                "OffsetY",
                (self.get_cam_node_range("Height")[1] -
                 self.get_cam_node_val("Height")) // 2,
            )
        else:
            self.set_cam_node_val("OffsetY", self.offset_y)

        width = self.get_cam_node_val("Width")
        structure.fixate_field_nearest_int("width", width)
        self.set_cam_node_val("Width", structure.get_value("width"))

        # Horizontal offset: either center the ROI or apply offset-x.
        if self.center_x:
            self.set_cam_node_val(
                "OffsetX",
                (self.get_cam_node_range("Width")[1] -
                 self.get_cam_node_val("Width")) // 2,
            )
        else:
            self.set_cam_node_val("OffsetX", self.offset_x)

        # Different camera models expose the frame-rate enable switch
        # under different node names; try the newer one first.
        if self.cam_node_available("AcquisitionFrameRateEnable"):
            self.set_cam_node_val("AcquisitionFrameRateEnable", True)
        else:
            self.set_cam_node_val("AcquisitionFrameRateAuto", "Off")
            self.set_cam_node_val("AcquisitionFrameRateEnabled", True)

        frame_rate = self.get_cam_node_val("AcquisitionFrameRate")
        structure.fixate_field_nearest_fraction("framerate", frame_rate, 1)
        self.set_cam_node_val("AcquisitionFrameRate",
                              float(structure.get_value("framerate")), True)

        Gst.info(f"Fixated caps: {structure}")

        new_caps = Gst.Caps.new_empty()
        new_caps.append_structure(structure)

        return new_caps.fixate()
Esempio n. 19
0
 def on_pad_linked(pad, peer):
     """Pad-linked callback (closure over `self` and `ghostPad`).

     Reads the producer id from the peer caps, binds fresh local RTP
     and RTCP sockets, and asks mediasoup to create a consumer for
     that producer.
     """
     caps = pad.peer_query_caps()
     structure = caps.get_structure(0)
     producerId = structure.get_string('producer-id')
     if not producerId:
         Gst.error('producerId not found')
         return
     Gst.info('%s on_pad_linked %s' % (pad.name, caps))
     # create listening rtp/rtcp sockets (port 0 = let the OS pick one)
     recv_rtp_socket = Gio.Socket.new(Gio.SocketFamily.IPV4,
                                      Gio.SocketType.DATAGRAM,
                                      Gio.SocketProtocol.UDP)
     rtp_socket_address = Gio.InetSocketAddress.new_from_string(
         self.local_ip, 0)
     recv_rtp_socket.bind(rtp_socket_address, False)
     #
     recv_rtcp_socket = Gio.Socket.new(Gio.SocketFamily.IPV4,
                                       Gio.SocketType.DATAGRAM,
                                       Gio.SocketProtocol.UDP)
     rtcp_socket_address = Gio.InetSocketAddress.new_from_string(
         self.local_ip, 0)
     recv_rtcp_socket.bind(rtcp_socket_address, False)
     #
     # advertise the OS-assigned ports to the server
     config = {
         'producerId':
         producerId,
         'local_ip':
         self.local_ip,
         'local_rtpPort':
         recv_rtp_socket.get_local_address().get_port(),
         'local_rtcpPort':
         recv_rtcp_socket.get_local_address().get_port(),
     }
     appData = json.loads(self.app_data)
     self.mediasoup.consume(config, appData, self._consume_done,
                            self._on_error,
                            self._on_producer_removed, ghostPad,
                            recv_rtp_socket, recv_rtcp_socket)
Esempio n. 20
0
	def setcaps_srcv(self, parent, caps):
		"""Derive and push output caps for the video source pad from the
		incoming sink caps, copying the framerate across.

		Also records source/destination frame geometry on self for the
		processing loop.  The heavy Gst.debug output is development
		scaffolding.
		"""
		othercaps = self.srcvpad.get_allowed_caps()
		Gst.debug("other caps %s" % othercaps)

		# remember the input geometry; depth 4 = 4 bytes per pixel
		# (assumes a 32-bit raw format — TODO confirm against template caps)
		self._srcw = caps[0]["width"]
		self._srch = caps[0]["height"]
		self._srcd = 4

		if 1:
			outcaps = Gst.Caps('%s' % othercaps)

			out_s = outcaps[0]
			Gst.debug("outcasp 0 %s" % (out_s))
			Gst.debug(" %s" % dir(out_s))

			# propagate the input framerate to the output caps
			fr = caps[0]["framerate"]
			out_s.set_value("framerate", fr)

			Gst.debug("out_s %s" % out_s.to_string())

			Gst.debug(" dir %s" % dir(outcaps))
			#outcaps.set_structure(0, out_s)
			# rebuild the caps from the modified structure
			outcaps = Gst.Caps.from_string(out_s.to_string())


			Gst.debug("pad is %s" % self.srcvpad.__class__)
			Gst.debug("pad can %s" % dir(self.srcvpad))

		self._dstw = outcaps[0]["width"]
		self._dsth = outcaps[0]["height"]

		res = self.srcvpad.push_event(Gst.Event.new_caps(outcaps))

		Gst.info("srcv caps %s" % outcaps)

		self.srcvpad.use_fixed_caps()

		return res
Esempio n. 21
0
    def do_start(self) -> bool:
        """Initialize the camera, apply the configured properties and
        the model-specific tweaks.

        Returns True on success; any failure posts an error message on
        the bus and returns False.
        """
        Gst.info("Starting")

        def _fail(error_string: str) -> bool:
            # Shared error path (dedupes the repeated error-reporting
            # pattern): post a bus error and signal start failure.
            self.post_message(
                Gst.Message.new_error(self, GLib.Error(error_string),
                                      error_string))
            return False

        try:
            self.image_acquirer = ImageAcquirer()

            # Select by serial when one is configured, otherwise take
            # the first enumerated device.
            if not self.image_acquirer.init_device(
                    device_serial=self.serial,
                    device_index=(0 if self.serial is None else None),
            ):
                return _fail("Camera not found")

            if not self.apply_properties_to_cam():
                return _fail("Camera settings could not be applied")

            self.camera_caps = self.get_camera_caps()

            # self._current_device: PySpin.Camera
            # Apply custom properties
            self.configure_BFS_PGE_200S6()
            self.configure_bandwidth_multicam(num_cameras=2,
                                              total_bandwidth_gbps=1.0)

        except Exception as ex:
            Gst.error(f"Error: {ex}")
            return _fail(str(ex))
        return True
    def __init__(self):
        """Create the element, add its sink pad and install the
        chain/event handlers."""
        Gst.Element.__init__(self)
        Gst.info('creating sinkpad')
        self.sinkpad = Gst.Pad.new_from_template(self.__gsttemplates__, "sink")
        Gst.info('adding sinkpad to self')
        self.add_pad(self.sinkpad)

        Gst.info('setting chain/event functions')
        self.sinkpad.set_chain_function(self.chainfunc)
        self.sinkpad.set_event_function(self.eventfunc)
        # NOTE(review): `st` is never used in this method — looks like
        # leftover experimentation; confirm before removing.
        st = Gst.Structure.from_string("yes,fps=1/2")[0]
Esempio n. 23
0
    def do_set_caps(self, caps: Gst.Caps) -> bool:
        """Adopt the negotiated caps, size the blocksize accordingly and
        kick off camera acquisition."""
        Gst.info("Setting caps")

        self.info.from_caps(caps)
        # prefer the size reported by the video info; fall back to a
        # width*height estimate when it is not filled in
        blocksize = self.info.size
        if blocksize <= 0:
            blocksize = self.info.width * self.info.height
        self.set_blocksize(blocksize)

        Gst.info(f"Blocksize: {self.get_blocksize()} bytes")

        try:
            self.image_acquirer.start_acquisition()
        except ValueError as ex:
            Gst.error(f"Error: {ex}")
            return False

        Gst.info("Acquisition Started")

        return True
Esempio n. 24
0
 def do_transform_ip(self, buffer):
     """In-place transform stub: log the buffer PTS, pass it through."""
     timestamp = Gst.TIME_ARGS(buffer.pts)
     Gst.info("timestamp(buffer):%s" % timestamp)
     return Gst.FlowReturn.OK
Esempio n. 25
0
 def do_transform_ip(self, buffer):
     """No-op in-place transform that only logs the buffer timestamp."""
     pts_str = Gst.TIME_ARGS(buffer.pts)
     Gst.info("timestamp(buffer):%s" % pts_str)
     return Gst.FlowReturn.OK
Esempio n. 26
0
 def do_render(self, buffer):
     """Render stub: log the buffer timestamp and report success."""
     Gst.info("timestamp(buffer):%s" % (Gst.TIME_ARGS(buffer.pts),))
     return Gst.FlowReturn.OK
Esempio n. 27
0
 def _resume_consumer_done(self, config):
     """Callback invoked once mediasoup acknowledges resumeConsumer."""
     Gst.info('%s _resume_consumer_done' % self.name)
 def chainfunc(self, pad, buffer):
     """Minimal chain function: log the pad and buffer PTS, accept it."""
     message = "%s timestamp(buffer):%d" % (pad, buffer.pts)
     Gst.info(message)
     return Gst.FlowReturn.OK
 def eventfunc(self, pad, event):
     """Minimal event handler: log the event type and claim success."""
     details = "%s event:%r" % (pad, event.type)
     Gst.info(details)
     return True
Esempio n. 30
0
	def process_video(self, parent):
		"""Pair left/right buffers by timestamp, stack them side by side
		and push the combined frame on the video source pad.

		Runs until one side has no pairable buffer; unconsumed buffers
		are pushed back onto the front of their deque so no data is lost.
		"""

		# helpers hoisted out of the loop (they are loop-invariant)
		def popleft_or_none(x):
			# non-raising popleft(): None signals an empty deque
			try:
				return x.popleft()
			except IndexError:
				return

		def app_if(x, y):
			# push y back onto the front of deque x, unless it is None
			if y is not None:
				x.appendleft(y)

		while True:

			Gst.info("Buffers outstanding % 3d / % 3d" % (len(self._bufs_l), len(self._bufs_r)))

			# take up to two buffers per side so consecutive timestamps
			# can be compared
			buf_l0 = popleft_or_none(self._bufs_l)
			buf_l1 = popleft_or_none(self._bufs_l)
			buf_r0 = popleft_or_none(self._bufs_r)
			buf_r1 = popleft_or_none(self._bufs_r)

			if buf_l0 is None:
				Gst.info("No L buffers")
				app_if(self._bufs_r, buf_r1)
				app_if(self._bufs_r, buf_r0)
				return

			if buf_r0 is None:
				Gst.info("No R buffers")
				app_if(self._bufs_l, buf_l1)
				app_if(self._bufs_l, buf_l0)
				return

			# need a successor on each side before pairing; otherwise
			# put everything back and wait for more data
			if buf_l1 is None:
				app_if(self._bufs_l, buf_l0)
				app_if(self._bufs_r, buf_r1)
				app_if(self._bufs_r, buf_r0)
				return

			if buf_r1 is None:
				app_if(self._bufs_r, buf_r0)
				app_if(self._bufs_l, buf_l1)
				app_if(self._bufs_l, buf_l0)
				return

			Gst.debug("Buffer L0 %s ts=%s sz=%s" \
			 % (buf_l0, Gst.TIME_ARGS(buf_l0.pts), buf_l0.get_size()))
			Gst.debug("Buffer L1 %s ts=%s sz=%s" \
			 % (buf_l1, Gst.TIME_ARGS(buf_l1.pts), buf_l1.get_size()))
			Gst.debug("Buffer R0 %s ts=%s sz=%s" \
			 % (buf_r0, Gst.TIME_ARGS(buf_r0.pts), buf_r0.get_size()))
			Gst.debug("Buffer R1 %s ts=%s sz=%s" \
			 % (buf_r1, Gst.TIME_ARGS(buf_r1.pts), buf_r1.get_size()))


			# pair the two heads when their time intervals overlap;
			# otherwise drop back the newer side and retry
			if buf_l0.pts >= buf_r0.pts and buf_l0.pts <= buf_r0.pts + buf_r0.duration:
				Gst.debug("a")
				buf_l = buf_l0
				buf_r = buf_r0
				app_if(self._bufs_l, buf_l1)
				app_if(self._bufs_r, buf_r1)
			elif buf_r0.pts >= buf_l0.pts and buf_r0.pts <= buf_l0.pts + buf_l0.duration:
				Gst.debug("b")
				buf_l = buf_l0
				buf_r = buf_r0
				app_if(self._bufs_l, buf_l1)
				app_if(self._bufs_r, buf_r1)
			elif buf_r0.pts + buf_r0.duration < buf_l0.pts:
				Gst.debug("c")
				app_if(self._bufs_l, buf_l1)
				app_if(self._bufs_l, buf_l0)
				app_if(self._bufs_r, buf_r1)
				Gst.info("Next time for R")
				continue
			elif buf_l0.pts + buf_l0.duration < buf_r0.pts:
				Gst.debug("d")
				app_if(self._bufs_r, buf_r1)
				app_if(self._bufs_r, buf_r0)
				app_if(self._bufs_l, buf_l1)
				Gst.info("Next time for L")
				continue
			else:
				Gst.info("Not implemented!")

			Gst.debug("Pushing new buffer for t=%s" % Gst.TIME_ARGS(buf_l.pts))

			srch, srcw, srcd = self._srch, self._srcw, self._srcd
			dsth, dstw = self._dsth, self._dstw

			tz = list()

			res, mil = buf_l.map(Gst.MapFlags.READ)
			assert res
			res, mir = buf_r.map(Gst.MapFlags.READ)
			assert res

			src0_d = mil.data
			src1_d = mir.data

			# BUG FIX: np.fromstring is deprecated (and removed in newer
			# numpy) for binary input; np.frombuffer is the replacement.
			# frombuffer does NOT copy, and the buffers are unmapped right
			# below, so an explicit .copy() preserves fromstring's
			# copying semantics.
			img_l = np.frombuffer(src0_d, dtype=np.uint8)[:srch*srcw*srcd].reshape((srch, srcw, srcd)).copy()
			img_r = np.frombuffer(src1_d, dtype=np.uint8)[:srch*srcw*srcd].reshape((srch, srcw, srcd)).copy()

			buf_r.unmap(mir)
			buf_l.unmap(mil)

			# side-by-side composition
			v = np.hstack((img_l, img_r))

			tz.append(time.time())
			buf_out = Gst.Buffer.new_allocate(None, dsth*dstw*4, None)
			buf_out.dts = buf_l.dts
			buf_out.pts = buf_l.pts
			buf_out.duration = buf_l.duration
			tz.append(time.time())

			buf_out.fill(0, v.tobytes())

			#time.sleep(0.01) # artificial computation time

			#buf_out = Gst.Buffer.new_wrapped(v.data)#, 1280*480*4)

			#res, mio = buf_out.map(Gst.MapFlags.WRITE)
			#print(dir(mio))
			#dst_d = mio.data
			#dst_d[:] = v
			#buf_out.unmap(mio)
			tz.append(time.time())

			tz.append(time.time())
			self.srcvpad.push(buf_out)
			tz.append(time.time())

			Gst.debug("t_alloc %.3f  t_cp %.3f t_push %.3f" \
			 % (tz[1] - tz[0], tz[2] - tz[1], tz[-1] - tz[-2]))
Esempio n. 31
0
def editContainer(scenario, action):
    """GstValidate "edit-container" action: drag a clip in the timeline UI.

    Simulates the pointer interaction (button press, motion, optional
    release) a user would perform to move or trim the container named in
    the action structure.

    Args:
        scenario: the running GstValidate scenario.
        action: validate action carrying "container-name", "position",
            "new-layer-priority" and the edge to drag.

    Returns:
        1 when the action was handled (including the reported-error case),
        0 when no "position" argument could be read from the action.
    """
    timeline = get_pipeline(scenario).props.timeline
    container = timeline.get_element(action.structure["container-name"])

    if container is None:
        # Log every clip that does exist so the failure is debuggable.
        # (Typo fix: was "Exisiting clip".)
        for layer in timeline.get_layers():
            for clip in layer.get_clips():
                Gst.info("Existing clip: %s" % clip.get_name())

        scenario.report_simple(GLib.quark_from_string("scenario::execution-error"),
                               "Could not find container: %s"
                               % action.structure["container-name"])

        return 1

    res, position = GstValidate.action_get_clocktime(scenario, action, "position")
    layer_prio = action.structure["new-layer-priority"]

    if res is False:
        return 0

    edge = get_edge(action.structure)
    container_ui = container.ui

    setEditingMode(timeline, scenario, action)

    # Default vertical target: stay on the current layer; the 21px offset
    # keeps the synthesized pointer inside the clip row.
    y = 21 - container_ui.translate_coordinates(timeline.ui, 0, 0)[1]

    if container.get_layer().get_priority() != layer_prio and layer_prio != -1:
        try:
            layer = timeline.get_layers()[layer_prio]
            Gst.info("Y is: %s Realized?? %s Priori: %s layer prio: %s"
                     % (layer.ui.get_allocation().y,
                        container_ui.get_realized(),
                        container.get_layer().get_priority(),
                        layer_prio))
            y = layer.ui.get_allocation().y - container_ui.translate_coordinates(timeline.ui, 0, 0)[1]
            # Nudge the pointer just inside the target layer's row.
            if y < 0:
                y += 21
            elif y > 0:
                y -= 21
        except IndexError:
            if layer_prio == -1:
                # Above the first layer: drop zone that creates a new top layer.
                y = -5
            else:
                # Below the last layer: drop zone that creates a new bottom layer.
                layer = timeline.get_layers()[-1]
                alloc = layer.ui.get_allocation()
                y = alloc.y + alloc.height + 10 - container_ui.translate_coordinates(timeline.ui, 0, 0)[1]

    if not hasattr(scenario, "last_edge"):
        scenario.last_edge = edge

    # Press the button only when not already dragging, or when the edge
    # changed (a new grab is needed on the other trim handle).
    if not hasattr(scenario, "dragging") or scenario.dragging is False \
            or scenario.last_edge != edge:
        event_widget = container.ui
        if isinstance(container, GES.SourceClip):
            if edge == GES.Edge.EDGE_START:
                event_widget = container.ui.leftHandle
            elif edge == GES.Edge.EDGE_END:
                event_widget = container.ui.rightHandle

        scenario.dragging = True
        event = Event(Gdk.EventType.BUTTON_PRESS, button=1, y=y)
        with mock.patch.object(Gtk, "get_event_widget") as get_event_widget:
            get_event_widget.return_value = event_widget
            timeline.ui._button_press_event_cb(event_widget, event)

    # Move the pointer to the pixel x matching the requested clock time.
    event = Event(Gdk.EventType.MOTION_NOTIFY, button=1,
                  x=Zoomable.nsToPixelAccurate(position) -
                  container_ui.translate_coordinates(timeline.ui.layout.layers_vbox, 0, 0)[0],
                  y=y, state=Gdk.ModifierType.BUTTON1_MASK)
    with mock.patch.object(Gtk, "get_event_widget") as get_event_widget:
        get_event_widget.return_value = container.ui
        timeline.ui._motion_notify_event_cb(None, event)

    GstValidate.print_action(action, "Editing %s to %s in %s mode, edge: %s "
                             "with new layer prio: %d\n" % (action.structure["container-name"],
                                                            Gst.TIME_ARGS(position),
                                                            scenario.last_mode,
                                                            edge,
                                                            layer_prio))

    _releaseButtonIfNeeded(scenario, action, timeline, container, edge, layer_prio,
                           position, y)
    scenario.last_edge = edge

    return 1
 def do_set_caps(self, incaps, outcaps):
     """Cache the negotiated frame dimensions from the first sink-caps structure."""
     caps_struct = incaps.get_structure(0)
     self.width, self.height = (
         caps_struct.get_int("width").value,
         caps_struct.get_int("height").value,
     )
     Gst.info("width=%d, height=%d" % (self.width, self.height))
     return True
Esempio n. 33
0
 def do_render(self, buffer):
     """Log the presentation timestamp of each rendered buffer; always accept."""
     stamp = Gst.TIME_ARGS(buffer.pts)
     Gst.info("timestamp(buffer):%s" % (stamp))
     return Gst.FlowReturn.OK
Esempio n. 34
0
def init():
    """Register Pitivi's GstValidate action-type overrides and custom issues.

    Imports GstValidate, registers every Pitivi-specific scenario action and
    the "pitivi::wrong-window-creation" issue, and records availability in
    the module-level ``has_validate`` flag.

    Returns:
        True when GstValidate was initialised and all types registered,
        False when GstValidate cannot be imported.
    """
    global has_validate
    try:
        from gi.repository import GstValidate
        GstValidate.init()
        has_validate = GES.validate_register_action_types()
        GstValidate.register_action_type("stop", "pitivi",
                                         stop, None,
                                         "Pitivi override for the stop action",
                                         GstValidate.ActionTypeFlags.NONE)

        GstValidate.register_action_type("seek", "pitivi",
                                         seek, None,
                                         "Pitivi override for the seek action",
                                         GstValidate.ActionTypeFlags.NONE)

        GstValidate.register_action_type("pause", "pitivi",
                                         set_state, None,
                                         "Pitivi override for the pause action",
                                         GstValidate.ActionTypeFlags.NONE)

        # Fixed copy-pasted description: this registers the *play* action.
        GstValidate.register_action_type("play", "pitivi",
                                         set_state, None,
                                         "Pitivi override for the play action",
                                         GstValidate.ActionTypeFlags.NONE)

        GstValidate.register_action_type("set-state", "pitivi",
                                         set_state, None,
                                         "Pitivi override for the set-state action",
                                         GstValidate.ActionTypeFlags.NONE)

        GstValidate.register_action_type("edit-container", "pitivi",
                                         editContainer, None,
                                         "Start dragging a clip in the timeline",
                                         GstValidate.ActionTypeFlags.NONE)

        GstValidate.register_action_type("split-clip", "pitivi",
                                         split_clip, None,
                                         "Split a clip",
                                         GstValidate.ActionTypeFlags.NONE)

        GstValidate.register_action_type("add-layer", "pitivi",
                                         add_layer, None,
                                         "Add layer",
                                         GstValidate.ActionTypeFlags.NONE)

        GstValidate.register_action_type("remove-clip", "pitivi",
                                         remove_clip, None,
                                         "Remove clip",
                                         GstValidate.ActionTypeFlags.NONE)
        GstValidate.register_action_type("select-clips", "pitivi",
                                         select_clips, [Parametter("clip-name",
                                                                   "The name of the clip to select",
                                                                   True, None, "str")],
                                         "Select clips",
                                         GstValidate.ActionTypeFlags.NONE)

        for z in ["zoom-fit", "zoom-out", "zoom-in"]:
            GstValidate.register_action_type(z, "pitivi", zoom, None, z,
                                             GstValidate.ActionTypeFlags.NO_EXECUTION_NOT_FATAL)
        # Fixed: the description previously reused the leftover loop
        # variable `z` ("zoom-in") instead of naming this action.
        GstValidate.register_action_type('set-zoom-level', "pitivi", setZoomLevel, None,
                                         "set-zoom-level",
                                         GstValidate.ActionTypeFlags.NO_EXECUTION_NOT_FATAL)

        Gst.info("Adding pitivi::wrong-window-creation")
        GstValidate.Issue.register(GstValidate.Issue.new(
                                   GLib.quark_from_string("pitivi::wrong-window-creation"),
                                   "A new window for the sink has wrongly been created",
                                   "All sink should display their images in an embedded "
                                   "widget and thus not create a new window",
                                   GstValidate.ReportLevel.CRITICAL))
        return True
    except ImportError:
        has_validate = False
        return False
Esempio n. 35
0
    def do_request_new_pad(self, templ, name, caps):
        """Create a request pad that bridges GStreamer and mediasoup.

        A temporary concrete pad is created from *templ* and wrapped in the
        ghost pad that is returned.  For SINK templates the element acts as
        a producer: the tmp pad's chain function derives a producer config
        from the incoming caps and starts producing.  For SRC templates it
        acts as a consumer: once the ghost pad is linked, RTP/RTCP listen
        sockets are bound and mediasoup is asked to consume the remote
        producer advertised in the peer caps.

        Args:
            templ: the Gst.PadTemplate the pad was requested from.
            name: requested pad name (only logged).
            caps: requested caps (only logged).

        Returns:
            The activated Gst.GhostPad added to this element.
        """
        Gst.info(
            '%s do_request_new_pad name_template=%s direction=%d name=%s caps=%s'
            % (self.name, templ.name_template, templ.direction, name, caps))
        # Lazily create a default signaling channel shared by all pads; a
        # failure here is treated as fatal for the whole process.
        if not self.signaling:
            try:
                self.signaling = DefaultSignaling(self.server_url)
            except Exception as e:
                Gst.error('DefaultSignaling error: %s' % e)
                sys.exit(-1)
        # Lazily create the MediaSoup session on top of the signaling.
        if not self.mediasoup:
            self.mediasoup = MediaSoup(self.signaling)
        # Temporary pad that initially backs the ghost pad; it is swapped
        # out for the real pipeline pad later (see _consume_done, which
        # retargets the ghost pad and removes this one).
        pad = Gst.Pad.new_from_template(templ, templ.name_template + '_tmp')
        self.add_pad(pad)
        # Ghost pad handed back to the application.
        ghostPad = Gst.GhostPad.new_from_template(templ.name_template, pad,
                                                  templ)
        ghostPad.set_active(True)
        self.add_pad(ghostPad)
        # SINK direction: this element produces media to the server.
        if templ.direction == Gst.PadDirection.SINK:
            # Chain function on the tmp pad: runs per buffer; once the pad
            # has caps, build the producer config from them.
            def chain_function(pad, parent, buf):
                Gst.debug('%s chain_function caps: %s pts: %f' %
                          (pad.name, pad.get_current_caps(), buf.pts * 1e-9))
                caps = pad.get_current_caps()
                if caps:
                    structure = caps.get_structure(0)
                    Gst.info('%s event_function caps=%s' % (pad.name, caps))
                    # Media kind is the caps-name prefix ("audio"/"video").
                    kind = structure.get_name().split('/')[0]
                    appData = json.loads(self.app_data)
                    # Base producer config from element properties.
                    config = {
                        'kind': kind,
                        'producerId': structure.get_string('producer-id'),
                        'text_overlay': self.text_overlay,
                        'time_overlay': self.time_overlay,
                        'clock_overlay': self.clock_overlay,
                    }
                    if kind == 'audio':
                        config.update(DEFAULT_AUDIO_CONFIG)
                        config['codec'] = self.audio_codec
                        config['bitrate'] = self.audio_bitrate
                    else:
                        config.update(DEFAULT_VIDEO_CONFIG)
                        config['server_ip'] = self.server_ip
                        config['codec'] = self.video_codec
                        config['bitrate'] = self.video_bitrate
                        config['hw'] = self.hw
                        config['gop'] = self.gop
                        config['simulcast'] = self.simulcast
                        config['width'] = structure['width']
                        config['height'] = structure['height']
                        # Fall back to 30 fps when the caps framerate
                        # numerator is 0.
                        config['framerate'] = structure['framerate'].num or 30
                        appData['width'] = config['width']
                        appData['height'] = config['height']
                        appData['framerate'] = config['framerate']
                    appData['maxBitrate'] = config['bitrate']
                    # NOTE(review): this fires every buffer once caps are
                    # set; presumably mediasoup.produce() deduplicates
                    # repeated calls for the same producer — confirm.
                    self.mediasoup.produce(config, appData, self._produce_done,
                                           self._on_error,
                                           self._on_producer_removed, ghostPad)
                return Gst.FlowReturn.OK

            pad.set_chain_function_full(chain_function)
        # SRC direction: this element consumes a remote producer.
        elif templ.direction == Gst.PadDirection.SRC:
            # Defer setup until link time, when the peer caps (carrying
            # 'producer-id') are available.
            def on_pad_linked(pad, peer):
                caps = pad.peer_query_caps()
                structure = caps.get_structure(0)
                producerId = structure.get_string('producer-id')
                if not producerId:
                    Gst.error('producerId not found')
                    return
                Gst.info('%s on_pad_linked %s' % (pad.name, caps))
                # Bind RTP and RTCP UDP listen sockets on ephemeral ports
                # (port 0) at the local address.
                recv_rtp_socket = Gio.Socket.new(Gio.SocketFamily.IPV4,
                                                 Gio.SocketType.DATAGRAM,
                                                 Gio.SocketProtocol.UDP)
                rtp_socket_address = Gio.InetSocketAddress.new_from_string(
                    self.local_ip, 0)
                recv_rtp_socket.bind(rtp_socket_address, False)
                #
                recv_rtcp_socket = Gio.Socket.new(Gio.SocketFamily.IPV4,
                                                  Gio.SocketType.DATAGRAM,
                                                  Gio.SocketProtocol.UDP)
                rtcp_socket_address = Gio.InetSocketAddress.new_from_string(
                    self.local_ip, 0)
                recv_rtcp_socket.bind(rtcp_socket_address, False)
                # Advertise the bound ports so the server knows where to
                # send RTP/RTCP.
                config = {
                    'producerId':
                    producerId,
                    'local_ip':
                    self.local_ip,
                    'local_rtpPort':
                    recv_rtp_socket.get_local_address().get_port(),
                    'local_rtcpPort':
                    recv_rtcp_socket.get_local_address().get_port(),
                }
                appData = json.loads(self.app_data)
                self.mediasoup.consume(config, appData, self._consume_done,
                                       self._on_error,
                                       self._on_producer_removed, ghostPad,
                                       recv_rtp_socket, recv_rtcp_socket)

            ghostPad.connect('linked', on_pad_linked)
        #
        return ghostPad