def test_new(self):
    Gst.init(None)
    test = Gst.Structure('test', test=1)
    self.assertEqual(test['test'], 1)
    test = Gst.Structure('test,test=1')
    self.assertEqual(test['test'], 1)
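# A minimal standalone sketch of the two construction styles the test above
# exercises: keyword arguments and the serialized-string form. to_string()
# round-trips the structure back to a serialized form (exact output is an
# assumption here, not taken from the source).
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)

s = Gst.Structure('test', test=1)
assert s.get_name() == 'test'
assert s.has_field('test')
print(s.to_string())  # serialized form, parseable again by Gst.Structure(...)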
def start_pipeline(self):
    """Starts the gstreamer pipeline"""

    # Construct the webrtcbin pipeline with video and audio.
    self.build_webrtcbin_pipeline()
    self.build_video_pipeline()

    if self.audio:
        self.build_audio_pipeline()

    # Advance the state of the pipeline to PLAYING.
    res = self.pipeline.set_state(Gst.State.PLAYING)
    if res.value_name != 'GST_STATE_CHANGE_SUCCESS':
        raise GSTWebRTCAppError(
            "Failed to transition pipeline to PLAYING: %s" % res)

    # Create the data channel; this has to be done after the pipeline is PLAYING.
    options = Gst.Structure("application/data-channel")
    options.set_value("ordered", True)
    options.set_value("max-retransmits", 0)
    self.data_channel = self.webrtcbin.emit(
        'create-data-channel', "input", options)
    self.data_channel.connect('on-open', lambda _: self.on_data_open())
    self.data_channel.connect('on-close', lambda _: self.on_data_close())
    self.data_channel.connect('on-error', lambda _: self.on_data_error())
    self.data_channel.connect(
        'on-message-string', lambda _, msg: self.on_data_message(msg))

    logger.info("pipeline started")
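# A hedged sketch of the handlers wired up above (methods on the same class);
# the bodies are illustrative only. 'send-string' is the data channel's
# action signal for sending text back to the peer.
def on_data_open(self):
    logger.info("data channel open")

def on_data_close(self):
    logger.info("data channel closed")

def on_data_error(self):
    logger.warning("data channel error")

def on_data_message(self, msg):
    # Echo the message back over the channel as a simple liveness check.
    logger.debug("data channel message: %s" % msg)
    self.data_channel.emit('send-string', msg)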
def get_camera_caps(self) -> Gst.Caps:
    width_min, width_max = self.get_cam_node_range("Width")
    height_min, height_max = self.get_cam_node_range("Height")
    genicam_formats = self.get_cam_node_entries("PixelFormat")

    supported_pixel_formats = [
        self.get_format_from_genicam(pf) for pf in genicam_formats
    ]
    supported_pixel_formats = [
        pf for pf in supported_pixel_formats if pf is not None
    ]

    camera_caps = Gst.Caps.new_empty()
    for pixel_format in supported_pixel_formats:
        camera_caps.append_structure(
            Gst.Structure(
                pixel_format.cap_type,
                format=pixel_format.gst,
                width=Gst.IntRange(range(width_min, width_max)),
                height=Gst.IntRange(range(height_min, height_max)),
            ))
    return camera_caps
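# Illustrative only: `app` is a hypothetical instance of the class above.
# get_size()/get_structure()/to_string() are standard Gst.Caps introspection
# calls for dumping each structure the method appended.
caps = app.get_camera_caps()
for i in range(caps.get_size()):
    print(caps.get_structure(i).to_string())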
def force_keyframes(self):
    if self.running:
        threading.Timer(1, self.force_keyframes).start()
    if self.ready:
        struct = Gst.Structure("GstForceKeyUnit")
        event = Gst.Event.new_custom(Gst.EventType.CUSTOM_UPSTREAM, struct)
        self.webrtc.send_event(event)
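# The hand-rolled "GstForceKeyUnit" structure above uses the same structure
# name as the event GstVideo provides a constructor for. A sketch of the
# equivalent call, which also fills in the standard fields:
import gi
gi.require_version('GstVideo', '1.0')
from gi.repository import Gst, GstVideo

event = GstVideo.video_event_new_upstream_force_key_unit(
    Gst.CLOCK_TIME_NONE,  # running_time: no scheduled time, fire immediately
    True,                 # all_headers: resend stream headers with the keyframe
    0)                    # count of force-key-unit requests so far
# self.webrtc.send_event(event)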
def intersect(v1, v2):
    structure = Gst.Structure('t', t=v1).intersect(Gst.Structure('t', t=v2))
    if structure:
        return structure['t']
    return None
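# Behaviour of the helper above (Gst.init(None) must have run): intersecting
# two fixed values yields a non-empty structure only when they are equal.
assert intersect(1, 1) == 1
assert intersect(1, 2) is None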
gi.require_version("GstBase", "1.0") import os import cv2 import numpy as np import msgpack import struct from gi.repository import Gst, GObject, GstBase, GLib from typing import List Gst.init(None) ICAPS = Gst.Caps( Gst.Structure( "application/msgpack-predicts" ) ) OCAPS = Gst.Caps( Gst.Structure( "application/meter", ) ) def read_mask(): mask = cv2.imread(os.path.join( os.path.dirname(os.path.realpath(__file__)), "..", "resource",
import gi
gi.require_version("GstBase", "1.0")

import numpy as np
import msgpack
import struct
from gi.repository import Gst, GObject, GstBase, GLib

Gst.init(None)

ICAPS = Gst.Caps(Gst.Structure("application/msgpack-predicts"))
OCAPS = Gst.Caps(Gst.Structure("application/meter"))


class AverageMeter(GstBase.BaseTransform):
    __gstmetadata__ = ("AverageMeter", "Transform", "Meter", "UM")

    __gsttemplates__ = (
        Gst.PadTemplate.new("src",
                            Gst.PadDirection.SRC,
                            Gst.PadPresence.ALWAYS,
                            OCAPS),
        Gst.PadTemplate.new("sink",
                            Gst.PadDirection.SINK,
                            Gst.PadPresence.ALWAYS,
                            ICAPS),
    )

    max_age = GObject.Property(
        type=int,
        nick="Max Age",
        blurb="Number of frames influencing the current count.",
        minimum=0,
try:
    from numpy_ringbuffer import RingBuffer
    from matplotlib import pyplot as plt
    from matplotlib.backends.backend_agg import FigureCanvasAgg
except ImportError:
    Gst.error('audioplot requires numpy, numpy_ringbuffer and matplotlib')
    raise

Gst.init(None)

AUDIO_FORMATS = [f.strip() for f in
                 GstAudio.AUDIO_FORMATS_ALL.strip('{ }').split(',')]

ICAPS = Gst.Caps(Gst.Structure('audio/x-raw',
                               format=Gst.ValueList(AUDIO_FORMATS),
                               layout='interleaved',
                               rate=Gst.IntRange(range(1, GLib.MAXINT)),
                               channels=Gst.IntRange(range(1, GLib.MAXINT))))

OCAPS = Gst.Caps(Gst.Structure('video/x-raw',
                               format='ARGB',
                               width=Gst.IntRange(range(1, GLib.MAXINT)),
                               height=Gst.IntRange(range(1, GLib.MAXINT)),
                               framerate=Gst.FractionRange(Gst.Fraction(1, 1),
                                                           Gst.Fraction(GLib.MAXINT, 1))))

DEFAULT_WINDOW_DURATION = 1.0
DEFAULT_WIDTH = 640
DEFAULT_HEIGHT = 480
DEFAULT_FRAMERATE_NUM = 25
DEFAULT_FRAMERATE_DENOM = 1
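# Illustration with hypothetical values: a fully specified caps coming from
# an upstream audio element should be a subset of the range/list template
# caps built above.
fixed = Gst.Caps.from_string(
    'audio/x-raw,format=S16LE,layout=interleaved,rate=44100,channels=2')
assert fixed.is_subset(ICAPS)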
def testGetValue(self):
    Gst.init(None)
    self.assertEqual(
        Gst.Structure('test,test=(bitmask)0x20')['test'], 1 << 5)
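# The (bitmask) annotation maps to a plain Python int on access, so
# individual flags can be tested with bitwise operators; 0x25 sets
# bits 0, 2 and 5.
flags = Gst.Structure('test,flags=(bitmask)0x25')['flags']
assert flags & (1 << 5)
assert flags & (1 << 2)
assert not flags & (1 << 1)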
def asr_result(self, asr, text, uttid):
    struct = Gst.Structure("result")
    struct.set_value("hyp", text)
    struct.set_value("uttid", uttid)
    self.debugMsg("asr result: " + text, 0)
    asr.post_message(Gst.Message.new_application(asr, struct))
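# A sketch of the receiving side, assuming `pipeline` is a running pipeline
# containing the asr element: application messages posted above surface on
# the bus.
def on_bus_message(bus, message):
    if message.type == Gst.MessageType.APPLICATION:
        struct = message.get_structure()
        if struct is not None and struct.get_name() == 'result':
            print(struct.get_string('hyp'), struct.get_string('uttid'))

bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect('message', on_bus_message)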
import os
import threadsched

import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GstVideo', '1.0')
from gi.repository import Gst, GLib, GObject, GstBase, GstVideo

Gst.init(None)

VIDEOCAPS = Gst.Caps(Gst.Structure("video/x-bayer",
                                   width=Gst.IntRange(range(1, GLib.MAXINT)),
                                   height=Gst.IntRange(range(1, GLib.MAXINT)),
                                   framerate=Gst.FractionRange(
                                       Gst.Fraction(1, 1),
                                       Gst.Fraction(GLib.MAXINT, 1))))


class ImgProc(GstBase.BaseTransform):
    __gstmetadata__ = ("ImgProc",
                       "Filter",
                       "Process image data",
                       "Arne Caspari")

    __gsttemplates__ = (
        Gst.PadTemplate.new("src",
                            Gst.PadDirection.SRC,
                            Gst.PadPresence.ALWAYS,
                            VIDEOCAPS),
        Gst.PadTemplate.new("sink",
                            Gst.PadDirection.SINK,
                            Gst.PadPresence.ALWAYS,
                            VIDEOCAPS),
    )
from gi.repository import Gst, GObject, GstBase, GLib

from detector.inference.tensort import TRTDetector, load_cfg
from detector.inference.tensort.common import init_pycuda

Gst.init(None)

# TODO move these to the element's properties
WIDTH = 800
HEIGHT = 800

ICAPS = Gst.Caps(
    Gst.Structure(
        "video/x-raw",
        format="BGR",
        width=WIDTH,
        height=HEIGHT,
    )
)
OCAPS = Gst.Caps(
    Gst.Structure(
        "application/msgpack-predicts",
    )
)

DEFAULT_MODEL_CFG = "ssd_resnet18_train_mix_800"
DEFAULT_MODEL_CHECKPOINT = "ssd_resnet18_train_min_int.trt"
DEFAULT_GPU_DEVICE_ID = 0
from gi.repository import Gst, GObject, GstBase

Gst.init(None)

try:
    from PIL import Image
except ImportError:
    Gst.error('py_videomixer requires PIL')
    raise

# Completely fixed input / output
ICAPS = Gst.Caps(
    Gst.Structure('video/x-raw',
                  format='RGBA',
                  width=320,
                  height=240,
                  framerate=Gst.Fraction(30, 1)))

OCAPS = Gst.Caps(
    Gst.Structure('video/x-raw',
                  format='RGBA',
                  width=320,
                  height=240,
                  framerate=Gst.Fraction(30, 1)))


class BlendData:
    def __init__(self, outimg):
        self.outimg = outimg
        self.pts = 0
def tags_cb(playbin, stream, data):
    # We are possibly in a GStreamer working thread, so we notify the main
    # thread of this event through a message on the bus.
    playbin.post_message(
        Gst.Message.new_application(playbin, Gst.Structure("tags-changed")))
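# The matching main-thread side, sketched after the GStreamer playback
# tutorials: filter for the "tags-changed" application message on the bus.
# `bus` is assumed to be the playbin's bus and `analyze_streams` a
# hypothetical UI-refresh callback.
def application_cb(bus, msg, data):
    if msg.get_structure().get_name() == 'tags-changed':
        analyze_streams(data)

bus.add_signal_watch()
bus.connect('message::application', application_cb, data)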
import gi
gi.require_version('GstBase', '1.0')
gi.require_version('GstVideo', '1.0')
from gi.repository import Gst, GObject, GLib, GstBase, GstVideo  # noqa:F401,F402

from gstreamer.utils import gst_buffer_with_caps_to_ndarray  # noqa:F401,F402

# formats taken from existing videoconvert plugins
# gst-inspect-1.0 videoconvert
FORMATS = [f.strip() for f in
           "RGBx,xRGB,BGRx,xBGR,RGBA,ARGB,BGRA,ABGR,RGB,BGR,RGB16,RGB15,GRAY8,GRAY16_LE,GRAY16_BE".split(',')]

# Input caps
IN_CAPS = Gst.Caps(Gst.Structure('video/x-raw',
                                 format=Gst.ValueList(FORMATS),
                                 width=Gst.IntRange(range(1, GLib.MAXINT)),
                                 height=Gst.IntRange(range(1, GLib.MAXINT))))

# Output caps
OUT_CAPS = Gst.Caps(Gst.Structure('video/x-raw',
                                  format=Gst.ValueList(FORMATS),
                                  width=Gst.IntRange(range(1, GLib.MAXINT)),
                                  height=Gst.IntRange(range(1, GLib.MAXINT))))


def clip(value, min_value, max_value):
    """Clip value to range [min_value, max_value]"""
    return min(max(value, min_value), max_value)


class GstVideoCrop(GstBase.BaseTransform):