Example #1
    def do_start(self) -> bool:
        Gst.info("Starting")
        try:
            self.image_acquirer = ImageAcquirer()

            if not self.image_acquirer.init_device(
                    device_serial=self.serial,
                    device_index=(0 if self.serial is None else None),
            ):
                error_string = "Camera not found"
                self.post_message(
                    Gst.Message.new_error(self, GLib.Error(error_string),
                                          error_string))
                return False

            if not self.apply_properties_to_cam():
                error_string = "Camera settings could not be applied"
                self.post_message(
                    Gst.Message.new_error(self, GLib.Error(error_string),
                                          error_string))
                return False

            self.camera_caps = self.get_camera_caps()

        except Exception as ex:
            Gst.error(f"Error: {ex}")
            self.post_message(
                Gst.Message.new_error(self, GLib.Error(str(ex)), str(ex)))
            return False
        return True
Example #2
 def do_stop(self) -> bool:
     Gst.info("Stopping")
     try:
         self.image_acquirer.end_acquisition()
         self.image_acquirer = None
     except Exception as ex:
         Gst.error(f"Error: {ex}")
     return True
Example #3
 def do_transform_ip(self, buf):
     try:
         with buf.map(Gst.MapFlags.READ | Gst.MapFlags.WRITE) as info:
             print(info)
             return Gst.FlowReturn.OK
     except Gst.MapError as e:
         Gst.error("Mapping error: %s" % e)
         return Gst.FlowReturn.ERROR
Example #4
    def apply_properties_to_cam(self) -> bool:
        Gst.info("Applying properties")
        try:
            self.set_cam_node_val("UserSetSelector", self.user_set)
            self.execute_cam_node("UserSetLoad")

            self.set_cam_node_val("StreamBufferHandlingMode", "OldestFirst")
            self.set_cam_node_val("StreamBufferCountMode", "Manual")
            self.set_cam_node_val("StreamBufferCountManual",
                                  self.num_cam_buffers)

            # Configure Camera Properties
            if self.h_binning > 1:
                self.set_cam_node_val("BinningHorizontal", self.h_binning)

            if self.v_binning > 1:
                self.set_cam_node_val("BinningVertical", self.v_binning)

            if self.exposure_time >= 0:
                self.set_cam_node_val("ExposureAuto", "Off")
                self.set_cam_node_val("ExposureTime", self.exposure_time)
            elif self.auto_exposure:
                self.set_cam_node_val("ExposureAuto", "Continuous")
            else:
                self.set_cam_node_val("ExposureAuto", "Off")

            if self.gain >= 0:
                self.set_cam_node_val("GainAuto", "Off")
                self.set_cam_node_val("Gain", self.gain)
            elif self.auto_gain:
                self.set_cam_node_val("GainAuto", "Continuous")
            else:
                self.set_cam_node_val("GainAuto", "Off")

            if self.cam_node_available("BalanceWhiteAuto"):
                manual_wb = False
                if self.wb_blue >= 0:
                    self.set_cam_node_val("BalanceWhiteAuto", "Off")
                    self.set_cam_node_val("BalanceRatioSelector", "Blue")
                    self.set_cam_node_val("BalanceRatio", self.wb_blue)
                    manual_wb = True

                if self.wb_red >= 0:
                    self.set_cam_node_val("BalanceWhiteAuto", "Off")
                    self.set_cam_node_val("BalanceRatioSelector", "Red")
                    self.set_cam_node_val("BalanceRatio", self.wb_red)
                    manual_wb = True

                if self.auto_wb and not manual_wb:
                    self.set_cam_node_val("BalanceWhiteAuto", "Continuous")
                else:
                    self.set_cam_node_val("BalanceWhiteAuto", "Off")

        except Exception as ex:
            Gst.error(f"Error: {ex}")
            return False

        return True
Example #5
    def do_transform_ip(self, buffer: Gst.Buffer):
        try:
            caps = self.sinkpad.get_current_caps()
            image = utils.gst_buffer_with_caps_to_ndarray(buffer, caps)
            detections = self.process(image)
            gst_meta_write(buffer, detections)

        except Exception as err:
            Gst.error(f'Error {self}: {traceback.format_exc()}')

        return Gst.FlowReturn.OK
Example #6
 def do_transform_ip(self, buf):
     try:
         # Map the buffer for in-place access; in the Python bindings
         # buf.map() returns a (success, map_info) tuple.
         (ok, info) = buf.map(Gst.MapFlags.READ | Gst.MapFlags.WRITE)
         if not ok:
             Gst.error("Failed to map buffer")
             return Gst.FlowReturn.ERROR
         A = np.ndarray(buf.get_size(), dtype=np.uint8, buffer=info.data)
         A = A.reshape(self.height, self.width, 3).squeeze()
         limg = A[:, :self.width // 2 - 1]
         rimg = A[:, self.width // 2:]
         img = A
         lpose = self.pose(limg)
         rpose = self.pose(rimg)
         if lpose[0] is not None:
             print(lpose)
             ([ly1, lx1], [ly2, lx2]) = lpose
         else:
             #print(lpose)
             #print('lpose')
             ly1 = lx1 = ly2 = lx2 = 0
         if rpose[0] is not None:
             ([ry1, rx1], [ry2, rx2]) = rpose
         else:
             ry1 = ry2 = rx1 = rx2 = 0
         lpoints = np.array([[lx1 / 2, ly1 * 2], [lx2 / 2, ly2 * 2]],
                            dtype=np.float32)
         rpoints = np.array([[rx1 / 2, ry1 * 2], [rx2 / 2, ry2 * 2]],
                            dtype=np.float32)
         #print(self.lcameramtx)
         #print(self.ldist)
         #print(self.lrectification)
         #print(self.lprojection)
         #print(lpoints)
         lpoints = cv2.undistortPoints(lpoints, self.lcameramtx, self.ldist,
                                       None, self.lrectification,
                                       self.lprojection)
         rpoints = cv2.undistortPoints(rpoints, self.rcameramtx, self.rdist,
                                       None, self.rrectification,
                                       self.rprojection)
         lx1 = lpoints[0][0][0]
         ly1 = lpoints[0][0][1]
         lx2 = lpoints[1][0][0]
         ly2 = lpoints[1][0][1]
         rx1 = rpoints[0][0][0]
         ry1 = rpoints[0][0][1]
         rx2 = rpoints[1][0][0]
         ry2 = rpoints[1][0][1]
         print(self.calculate_height(lx1, ly1, lx2, ly2, rx1, ry1, rx2,
                                     ry2))
         buf.unmap(info)
         return Gst.FlowReturn.OK
     except Gst.MapError as e:
         Gst.error("Mapping error: %s" % e)
         return Gst.FlowReturn.ERROR
Example #7
    def do_transform_ip(self, buf):
        try:
            with buf.map(Gst.MapFlags.READ | Gst.MapFlags.WRITE) as info:
                # Create a NumPy ndarray from the memoryview and modify it in place:
                A = np.ndarray(shape=(self.height, self.width),
                               dtype=np.uint8,
                               buffer=info.data)
                A[:] = np.invert(A)

                return Gst.FlowReturn.OK
        except Gst.MapError as e:
            Gst.error("Mapping error: %s" % e)
            return Gst.FlowReturn.ERROR
Example #8
    def do_save_to_uri(self, timeline, uri, overwrite):
        if not Gst.uri_is_valid(uri) or Gst.uri_get_protocol(uri) != "file":
            Gst.error("Protocol not supported for file: %s" % uri)
            return False

        with tempfile.NamedTemporaryFile(suffix=".xges") as tmpxges:
            timeline.get_asset().save(timeline, "file://" + tmpxges.name, None, overwrite)

            linker = otio.media_linker.MediaLinkingPolicy.ForceDefaultLinker
            otio_timeline = otio.adapters.read_from_file(tmpxges.name, "xges", media_linker_name=linker)
            location = Gst.uri_get_location(uri)
            out_adapter = otio.adapters.from_filepath(location)
            otio.adapters.write_to_file(otio_timeline, Gst.uri_get_location(uri), out_adapter.name)

        return True
Example #9
    def do_can_load_uri(self, uri):
        try:
            if not Gst.uri_is_valid(uri) or Gst.uri_get_protocol(uri) != "file":
                return False
        except GLib.Error as e:
            Gst.error(str(e))
            return False

        if uri.endswith(".xges"):
            return False

        try:
            return otio.adapters.from_filepath(Gst.uri_get_location(uri)) is not None
        except Exception as e:
            Gst.info("Could not load %s -> %s" % (uri, e))
            return False
Example #10
    def _reset_cam(self):
        if self._current_device is not None and self._current_device.IsValid():
            try:
                if self._current_device.IsStreaming():
                    self.end_acquisition()
                if self._current_device.IsInitialized():
                    self._current_device.DeInit()
            except Exception as ex:
                Gst.error(f"Error: {ex}")

        self._device_node_map = None
        self._tl_device_node_map = None
        self._tl_stream_node_map = None

        del self._current_device
        self._current_device = None
Example #11
    def seek(self, location):
        """
        @param location: time to seek to, in nanoseconds
        """
        Gst.debug("seeking to %r" % location)
        event = Gst.Event.new_seek(1.0, Gst.Format.TIME,
            Gst.SeekFlags.FLUSH | Gst.SeekFlags.ACCURATE,
            Gst.SeekType.SET, location,
            Gst.SeekType.NONE, 0)

        res = self.player.send_event(event)
        if res:
            # set_new_stream_time() no longer exists in GStreamer 1.0; a
            # flushing seek already restarts the running time.
            Gst.info("seek to %r succeeded" % location)
        else:
            Gst.error("seek to %r failed" % location)
Example #12
    def do_fill(self, offset, length, buf):
        if length == -1:
            samples = SAMPLESPERBUFFER
        else:
            samples = int(length / self.info.bpf)

        self.generate_samples_per_buffer = samples

        bytes_ = samples * self.info.bpf

        next_sample = self.next_sample + samples
        next_byte = self.next_byte + bytes_
        next_time = Gst.util_uint64_scale_int(next_sample, Gst.SECOND,
                                              self.info.rate)

        try:
            with buf.map(Gst.MapFlags.WRITE) as info:
                array = np.ndarray(shape=self.info.channels * samples,
                                   dtype=np.float32,
                                   buffer=info.data)
                if not self.mute:
                    r = np.repeat(
                        np.arange(self.accumulator,
                                  self.accumulator + samples),
                        self.info.channels)
                    np.sin(2 * np.pi * r * self.freq / self.info.rate,
                           out=array)
                    array *= self.volume
                else:
                    array[:] = 0
        except Exception as e:
            Gst.error("Mapping error: %s" % e)
            return Gst.FlowReturn.ERROR

        buf.offset = self.next_sample
        buf.offset_end = next_sample
        buf.pts = self.next_time
        buf.duration = next_time - self.next_time

        self.next_time = next_time
        self.next_sample = next_sample
        self.next_byte = next_byte
        self.accumulator += samples
        self.accumulator %= self.info.rate / self.freq

        return (Gst.FlowReturn.OK, buf)
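The counters used above (next_sample, next_byte, next_time, accumulator) have to start at zero before the first do_fill call. A minimal companion do_start, assuming the same attribute names as in the snippet, might look like this sketch:

    def do_start(self):
        # Reset the bookkeeping counters before the source produces buffers.
        self.next_sample = 0
        self.next_byte = 0
        self.next_time = 0
        self.accumulator = 0
        self.generate_samples_per_buffer = SAMPLESPERBUFFER
        return True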
Example #13
    def do_set_caps(self, caps: Gst.Caps) -> bool:
        Gst.info("Setting caps")

        self.info.from_caps(caps)
        self.set_blocksize(
            self.info.size if self.info.size > 0 else self.info.width *
            self.info.height)

        Gst.info(f"Blocksize: {self.get_blocksize()} bytes")

        try:
            self.image_acquirer.start_acquisition()
        except ValueError as ex:
            Gst.error(f"Error: {ex}")
            return False

        Gst.info("Acquisition Started")

        return True
Example #14
 def do_transform_ip(self, buf):
     try:
         with buf.map(Gst.MapFlags.READ | Gst.MapFlags.WRITE) as info:
             Gst.trace('info=%s, size=%d' % (str(info), info.size))
             # Create a NumPy ndarray from the memoryview and modify it in place.
             buf_np = np.ndarray(shape=(self.height, self.width),
                                 dtype=np.uint8,
                                 buffer=info.data)
             Gst.trace("buf_np=%s" % (str(buf_np)))
             # Create tensors.
             t1 = tf.constant(buf_np)
             Gst.trace("t1=%s" % (str(t1)))
             t2 = t1 / 4
             Gst.trace("t2=%s" % (str(t2)))
             # Copy tensor to overwrite input/output buffer.
             buf_np[:] = t2
             return Gst.FlowReturn.OK
     except Gst.MapError as e:
         Gst.error("Mapping error: %s" % e)
         return Gst.FlowReturn.ERROR
Example #15
    def do_transform_ip(self, buf):
        try:
            with buf.map(Gst.MapFlags.READ | Gst.MapFlags.WRITE) as info:

                A = np.ndarray(buf.get_size(),
                               dtype=np.uint8,
                               buffer=info.data)
                A = A.reshape(self.height, self.width, 3).squeeze()
                limg = A[:, :self.width // 2 - 1]
                rimg = A[:, self.width // 2:]
                img = A
                print(self.height)
                cv2.imwrite('asdf.jpg', rimg)
                (lx, ly, lw, lh) = self.object_detection(limg)
                (rx, ry, rw, rh) = self.object_detection(rimg)
                print(self.calculate_height(lx, ly, lw, lh, rx, ry, rw, rh))
                return Gst.FlowReturn.OK
        except Gst.MapError as e:
            Gst.error("Mapping error: %s" % e)
            return Gst.FlowReturn.ERROR
Example #16
 def on_pad_linked(pad, peer):
     caps = pad.peer_query_caps()
     structure = caps.get_structure(0)
     producerId = structure.get_string('producer-id')
     if not producerId:
         Gst.error('producerId not found')
         return
     Gst.info('%s on_pad_linked %s' % (pad.name, caps))
     # create listen rtp/rtcp sockets
     recv_rtp_socket = Gio.Socket.new(Gio.SocketFamily.IPV4,
                                      Gio.SocketType.DATAGRAM,
                                      Gio.SocketProtocol.UDP)
     rtp_socket_address = Gio.InetSocketAddress.new_from_string(
         self.local_ip, 0)
     recv_rtp_socket.bind(rtp_socket_address, False)
     #
     recv_rtcp_socket = Gio.Socket.new(Gio.SocketFamily.IPV4,
                                       Gio.SocketType.DATAGRAM,
                                       Gio.SocketProtocol.UDP)
     rtcp_socket_address = Gio.InetSocketAddress.new_from_string(
         self.local_ip, 0)
     recv_rtcp_socket.bind(rtcp_socket_address, False)
     #
      config = {
          'producerId': producerId,
          'local_ip': self.local_ip,
          'local_rtpPort': recv_rtp_socket.get_local_address().get_port(),
          'local_rtcpPort': recv_rtcp_socket.get_local_address().get_port(),
      }
     appData = json.loads(self.app_data)
     self.mediasoup.consume(config, appData, self._consume_done,
                            self._on_error,
                            self._on_producer_removed, ghostPad,
                            recv_rtp_socket, recv_rtcp_socket)
Example #17
    def do_gst_push_src_fill(self, buffer: Gst.Buffer) -> Gst.FlowReturn:
        try:

            (
                image_array,
                image_frame_id,
                image_timestamp_ns,
            ) = self.image_acquirer.get_next_image(logger=Gst.warning)

            with map_gst_buffer(buffer, Gst.MapFlags.WRITE) as mapped:
                mapped_array = np.ndarray(image_array.shape,
                                          buffer=mapped,
                                          dtype=image_array.dtype)
                mapped_array[:] = image_array

            if self.timestamp_offset == 0:
                self.timestamp_offset = image_timestamp_ns
                self.previous_timestamp = image_timestamp_ns

            buffer.pts = image_timestamp_ns - self.timestamp_offset
            buffer.offset = image_frame_id
            buffer.offset_end = image_frame_id + 1
            buffer.duration = image_timestamp_ns - self.previous_timestamp

            self.previous_timestamp = image_timestamp_ns

            Gst.log(
                f"Sending buffer of size: {image_array.nbytes} bytes, "
                f"type: {image_array.dtype}, "
                f"offset: {image_frame_id}"
                f"timestamp offset: {buffer.pts // self.MILLISECONDS_PER_NANOSECOND}ms"
            )

        except Exception as ex:
            Gst.error(f"Error: {ex}")
            return Gst.FlowReturn.ERROR

        return Gst.FlowReturn.OK
Example #18
    def data(self, flag: Gst.MapFlags = Gst.MapFlags.WRITE) -> numpy.ndarray:
        with gst_buffer_data(self.__buffer, flag) as data:
            # Pixel stride of the first plane; works for single-plane formats
            # such as BGR, BGRA and BGRx.
            bytes_per_pix = self.__video_info.finfo.pixel_stride[0]
            w = self.__video_info.width
            if self.__video_info.finfo.format == GstVideo.VideoFormat.NV12:
                h = int(self.__video_info.height * 1.5)
            elif self.__video_info.finfo.format == GstVideo.VideoFormat.BGR or \
                 self.__video_info.finfo.format == GstVideo.VideoFormat.BGRA or \
                 self.__video_info.finfo.format == GstVideo.VideoFormat.BGRX:
                h = self.__video_info.height
            else:
                raise RuntimeError("VideoFrame.data: Unsupported format")

            if len(data) != h * w * bytes_per_pix:
                Gst.warning(
                    "Size of buffer's data: {}, and requested size: {}".format(
                        len(data), h * w * bytes_per_pix))
                Gst.warning("Let to get shape from video meta...")
                meta = self.video_meta()
                if meta:
                    h, w = meta.height, meta.width
                else:
                    Gst.warning(
                        "Video meta is {}. Can't get shape.".format(meta))

            try:
                yield numpy.ndarray((h, w, bytes_per_pix),
                                    buffer=data,
                                    dtype=numpy.uint8)
            except TypeError as e:
                Gst.error(str(e))
                Gst.error(
                    "Size of buffer's data: {}, and requested size: {}".format(
                        len(data), h * w * bytes_per_pix))
                raise e
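Because data() yields the ndarray from inside the buffer mapping, it reads as a context manager (presumably wrapped with contextlib.contextmanager in the full class). A hypothetical call site, with frame standing in for an instance of this class, would be:

with frame.data(Gst.MapFlags.READ) as image:
    Gst.info("frame shape: %s" % str(image.shape))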
Example #19
    def do_start(self) -> bool:
        Gst.info("Starting")
        try:
            self.image_acquirer = ImageAcquirer()

            if not self.image_acquirer.init_device(
                    device_serial=self.serial,
                    device_index=(0 if self.serial is None else None),
            ):
                error_string = "Camera not found"
                self.post_message(
                    Gst.Message.new_error(self, GLib.Error(error_string),
                                          error_string))
                return False

            if not self.apply_properties_to_cam():
                error_string = "Camera settings could not be applied"
                self.post_message(
                    Gst.Message.new_error(self, GLib.Error(error_string),
                                          error_string))
                return False

            self.camera_caps = self.get_camera_caps()

            # self._current_device: PySpin.Camera
            # Apply custom properties
            self.configure_BFS_PGE_200S6()
            self.configure_bandwidth_multicam(num_cameras=2,
                                              total_bandwidth_gbps=1.0)

        except Exception as ex:
            Gst.error(f"Error: {ex}")
            self.post_message(
                Gst.Message.new_error(self, GLib.Error(str(ex)), str(ex)))
            return False
        return True
Example #20
    def assertTimelineTopology(self, topology, groups=[]):
        res = []
        for layer in self.timeline.get_layers():
            layer_timings = []
            for clip in layer.get_clips():
                layer_timings.append(
                    (type(clip), clip.props.start, clip.props.duration))
                for child in clip.get_children(True):
                    self.assertEqual(child.props.start, clip.props.start)
                    self.assertEqual(child.props.duration, clip.props.duration)

            res.append(layer_timings)
        if topology != res:
            Gst.error(self.timeline_as_str())
            self.assertEqual(topology, res)

        timeline_groups = self.timeline.get_groups()
        if groups and timeline_groups:
            for i, group in enumerate(groups):
                self.assertEqual(set(group),
                                 set(timeline_groups[i].get_children(False)))
            self.assertEqual(len(timeline_groups), i + 1)

        return res
Example #21
 def post_error(self, string, debug=''):
     Gst.error(string)
     gerror = GLib.Error.new_literal(Gst.ResourceError.quark(), string,
                                     Gst.CoreError.FAILED)
     message = Gst.Message.new_error(self, gerror, debug)
     return self.post_message(message)
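A hypothetical call site inside an element vfunc (the flag is made up for illustration):

 if acquisition_failed:  # hypothetical condition
     self.post_error("Failed to start acquisition", debug="do_start")
     return False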
Example #22
 def _on_producer_removed(self, removedProducer):
     Gst.error('%s producer %s removed' % (self.name, removedProducer))
     err = 'Producer %s removed' % (removedProducer['id'])
     self.bus.post(Gst.Message.new_error(self, GLib.Error(err), err))
Example #23
 def _on_error(self, err, *args):
     Gst.error('%s error %s' % (self.name, err))
     self.bus.post(Gst.Message.new_error(self, GLib.Error(err), err))
Example #24
import gi

gi.require_version("Gst", "1.0")
gi.require_version("GstBase", "1.0")
gi.require_version("GObject", "2.0")
gi.require_version("GstVideo", "1.0")

from gi.repository import Gst, GObject, GLib, GstBase, GstVideo

from gstreamer.gst_hacks import map_gst_buffer

try:
    import numpy as np
except ImportError:
    Gst.error("pyspinsrc requires numpy")
    raise

try:
    import PySpin
except ImportError:
    Gst.error("pyspinsrc requires PySpin")
    raise


class ImageAcquirer:

    TIMEOUT_MS = 2000

    def __init__(self):
        self._system = PySpin.System.GetInstance()
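The snippet stops after __init__; as a rough sketch (not part of the original code), the Spinnaker system handle obtained above is normally released explicitly once the acquirer is done with the camera:

    def release(self):
        # Tear down any open device first (see _reset_cam in Example #10),
        # then release the Spinnaker system singleton.
        self._reset_cam()
        self._system.ReleaseInstance()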
Example #25
'''

import gi

gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GObject', '2.0')

from gi.repository import Gst, GObject, GstBase

Gst.init(None)

try:
    from PIL import Image
except ImportError:
    Gst.error('py_videomixer requires PIL')
    raise

# Completely fixed input / output
ICAPS = Gst.Caps(
    Gst.Structure('video/x-raw',
                  format='RGBA',
                  width=320,
                  height=240,
                  framerate=Gst.Fraction(30, 1)))

OCAPS = Gst.Caps(
    Gst.Structure('video/x-raw',
                  format='RGBA',
                  width=320,
                  height=240,
Example #26
gst-launch-1.0 py_audiotestsrc ! autoaudiosink
'''

import gi

gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GstAudio', '1.0')

from gi.repository import Gst, GLib, GObject, GstBase, GstAudio

try:
    import numpy as np
except ImportError:
    Gst.error('py_audiotestsrc requires numpy')
    raise

OCAPS = Gst.Caps.from_string(
    'audio/x-raw, format=F32LE, layout=interleaved, rate=44100, channels=2')

SAMPLESPERBUFFER = 1024

DEFAULT_FREQ = 440
DEFAULT_VOLUME = 0.8
DEFAULT_MUTE = False
DEFAULT_IS_LIVE = False


class AudioTestSrc(GstBase.BaseSrc):
    __gstmetadata__ = ('CustomSrc','Src', \
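The class body is cut off above; for reference, gst-python elements like this one are usually made visible to gst-launch-1.0 by registering the type at module level (a sketch using the class and element names from this snippet):

GObject.type_register(AudioTestSrc)
__gstelementfactory__ = ("py_audiotestsrc", Gst.Rank.NONE, AudioTestSrc)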
Example #27
    def do_request_new_pad(self, templ, name, caps):
        Gst.info(
            '%s do_request_new_pad name_template=%s direction=%d name=%s caps=%s'
            % (self.name, templ.name_template, templ.direction, name, caps))
        # create a default signaling
        if not self.signaling:
            try:
                self.signaling = DefaultSignaling(self.server_url)
            except Exception as e:
                Gst.error('DefaultSignaling error: %s' % e)
                sys.exit(-1)
        # create MediaSoup instance
        if not self.mediasoup:
            self.mediasoup = MediaSoup(self.signaling)
        # create tmp pad
        pad = Gst.Pad.new_from_template(templ, templ.name_template + '_tmp')
        self.add_pad(pad)
        # create ghost pad
        ghostPad = Gst.GhostPad.new_from_template(templ.name_template, pad,
                                                  templ)
        ghostPad.set_active(True)
        self.add_pad(ghostPad)
        # producer
        if templ.direction == Gst.PadDirection.SINK:
            # connect tmp pad chain
            def chain_function(pad, parent, buf):
                Gst.debug('%s chain_function caps: %s pts: %f' %
                          (pad.name, pad.get_current_caps(), buf.pts * 1e-9))
                caps = pad.get_current_caps()
                if caps:
                    structure = caps.get_structure(0)
                    Gst.info('%s event_function caps=%s' % (pad.name, caps))
                    kind = structure.get_name().split('/')[0]
                    appData = json.loads(self.app_data)
                    # producer
                    config = {
                        'kind': kind,
                        'producerId': structure.get_string('producer-id'),
                        'text_overlay': self.text_overlay,
                        'time_overlay': self.time_overlay,
                        'clock_overlay': self.clock_overlay,
                    }
                    if kind == 'audio':
                        config.update(DEFAULT_AUDIO_CONFIG)
                        config['codec'] = self.audio_codec
                        config['bitrate'] = self.audio_bitrate
                    else:
                        config.update(DEFAULT_VIDEO_CONFIG)
                        config['server_ip'] = self.server_ip
                        config['codec'] = self.video_codec
                        config['bitrate'] = self.video_bitrate
                        config['hw'] = self.hw
                        config['gop'] = self.gop
                        config['simulcast'] = self.simulcast
                        config['width'] = structure['width']
                        config['height'] = structure['height']
                        config['framerate'] = structure['framerate'].num or 30
                        appData['width'] = config['width']
                        appData['height'] = config['height']
                        appData['framerate'] = config['framerate']
                    appData['maxBitrate'] = config['bitrate']
                    self.mediasoup.produce(config, appData, self._produce_done,
                                           self._on_error,
                                           self._on_producer_removed, ghostPad)
                return Gst.FlowReturn.OK

            pad.set_chain_function_full(chain_function)
        # consumer
        elif templ.direction == Gst.PadDirection.SRC:
            # use the peer caps
            def on_pad_linked(pad, peer):
                caps = pad.peer_query_caps()
                structure = caps.get_structure(0)
                producerId = structure.get_string('producer-id')
                if not producerId:
                    Gst.error('producerId not found')
                    return
                Gst.info('%s on_pad_linked %s' % (pad.name, caps))
                # create listen rtp/rtcp sockets
                recv_rtp_socket = Gio.Socket.new(Gio.SocketFamily.IPV4,
                                                 Gio.SocketType.DATAGRAM,
                                                 Gio.SocketProtocol.UDP)
                rtp_socket_address = Gio.InetSocketAddress.new_from_string(
                    self.local_ip, 0)
                recv_rtp_socket.bind(rtp_socket_address, False)
                #
                recv_rtcp_socket = Gio.Socket.new(Gio.SocketFamily.IPV4,
                                                  Gio.SocketType.DATAGRAM,
                                                  Gio.SocketProtocol.UDP)
                rtcp_socket_address = Gio.InetSocketAddress.new_from_string(
                    self.local_ip, 0)
                recv_rtcp_socket.bind(rtcp_socket_address, False)
                #
                config = {
                    'producerId': producerId,
                    'local_ip': self.local_ip,
                    'local_rtpPort': recv_rtp_socket.get_local_address().get_port(),
                    'local_rtcpPort': recv_rtcp_socket.get_local_address().get_port(),
                }
                appData = json.loads(self.app_data)
                self.mediasoup.consume(config, appData, self._consume_done,
                                       self._on_error,
                                       self._on_producer_removed, ghostPad,
                                       recv_rtp_socket, recv_rtcp_socket)

            ghostPad.connect('linked', on_pad_linked)
        #
        return ghostPad
Example #28
import gi

gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GstAudio', '1.0')
gi.require_version('GstVideo', '1.0')

from gi.repository import Gst, GLib, GObject, GstBase, GstAudio, GstVideo

try:
    import numpy as np
    import matplotlib.patheffects as pe
    from numpy_ringbuffer import RingBuffer
    from matplotlib import pyplot as plt
    from matplotlib.backends.backend_agg import FigureCanvasAgg
except ImportError:
    Gst.error('audioplot requires numpy, numpy_ringbuffer and matplotlib')
    raise


Gst.init(None)

AUDIO_FORMATS = [f.strip() for f in
                 GstAudio.AUDIO_FORMATS_ALL.strip('{ }').split(',')]

ICAPS = Gst.Caps(Gst.Structure('audio/x-raw',
                               format=Gst.ValueList(AUDIO_FORMATS),
                               layout='interleaved',
                               rate = Gst.IntRange(range(1, GLib.MAXINT)),
                               channels = Gst.IntRange(range(1, GLib.MAXINT))))

OCAPS = Gst.Caps(Gst.Structure('video/x-raw',
Example #29
def decodebin_pad_added_cb(decodebin, srcpad, sinkpad):
    Gst.error("Yay linking %s and %s" %
              (srcpad.props.name, sinkpad.props.name))
    srcpad.link(sinkpad)
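For context (the element names are hypothetical), such a callback is attached to decodebin's "pad-added" signal with the target sink pad passed as user data:

decodebin.connect("pad-added", decodebin_pad_added_cb,
                  audioconvert.get_static_pad("sink"))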
Example #30
 def snapping_started_cb(timeline, element1, element2, dist, self):
     Gst.error(
         "Here %s %s" %
         (Gst.TIME_ARGS(element1.props.start + element1.props.duration),
          Gst.TIME_ARGS(element2.props.start)))
     not_called.append("No snapping should happen")