def _add_source_bin(self, pipeline):
    """Build the overlay source bin and add it to the given pipeline.

    Prefers 'appsrc' (available in gst-plugins-base >= 0.10.22) so that
    image buffers can be pushed from the 'need-data' callback; otherwise
    registers and uses the custom 'fluoverlaysrc' element as a fallback.
    The bin is left in locked state until caps are known (see below).
    """
    if gstreamer.element_factory_exists("appsrc") and \
            gstreamer.get_plugin_version("app") >= (0, 10, 22, 0):
        self.source = gst.element_factory_make('appsrc', 'source')
        self.source.set_property('do-timestamp', True)
        self.source.connect('need-data', self.push_buffer)
    else:
        #FIXME: fluoverlaysrc only needed on gst-plugins-base < 0.10.22
        gobject.type_register(OverlayImageSource)
        gst.element_register(OverlayImageSource, "fluoverlaysrc",
                             gst.RANK_MARGINAL)
        self.source = gst.element_factory_make('fluoverlaysrc', 'source')
    # create the source bin
    self.sourceBin = gst.Bin()
    # create the alphacolor element
    alphacolor = gst.element_factory_make('alphacolor')
    # add the elements to the source bin and link them
    self.sourceBin.add_many(self.source, alphacolor)
    self.source.link(alphacolor)
    pipeline.add(self.sourceBin)
    # create the source ghost pad
    self.sourceBin.add_pad(gst.GhostPad('src', alphacolor.get_pad('src')))
    # set the locked state and wait until we get the first caps change
    # and we know the width and height of the input stream
    self.sourceBin.set_locked_state(True)
def _add_source_bin(self, pipeline):
    """Create the overlay image source bin and attach it to *pipeline*.

    Uses 'appsrc' with do-timestamp when the 'app' plugin is recent
    enough (>= 0.10.22), wiring self.push_buffer to 'need-data';
    otherwise registers the in-tree OverlayImageSource as
    'fluoverlaysrc' and uses that instead.
    """
    if gstreamer.element_factory_exists("appsrc") and \
            gstreamer.get_plugin_version("app") >= (0, 10, 22, 0):
        self.source = gst.element_factory_make('appsrc', 'source')
        self.source.set_property('do-timestamp', True)
        self.source.connect('need-data', self.push_buffer)
    else:
        #FIXME: fluoverlaysrc only needed on gst-plugins-base < 0.10.22
        gobject.type_register(OverlayImageSource)
        gst.element_register(OverlayImageSource, "fluoverlaysrc",
                             gst.RANK_MARGINAL)
        self.source = gst.element_factory_make('fluoverlaysrc', 'source')
    # create the source bin
    self.sourceBin = gst.Bin()
    # create the alphacolor element
    alphacolor = gst.element_factory_make('alphacolor')
    # add the elements to the source bin and link them
    self.sourceBin.add_many(self.source, alphacolor)
    self.source.link(alphacolor)
    pipeline.add(self.sourceBin)
    # create the source ghost pad
    self.sourceBin.add_pad(gst.GhostPad('src', alphacolor.get_pad('src')))
    # set the locked state and wait until we get the first caps change
    # and we know the width and height of the input stream
    self.sourceBin.set_locked_state(True)
def get_pipeline_string(self, properties):
    """Assemble the ximagesrc screen-capture pipeline description.

    Reads width/height/offsets/framerate from *properties* (with
    defaults) and raises errors.MissingElementError if ximagesrc is
    not installed.
    """
    defaults = (("width", 320), ("height", 240), ("x-offset", 0),
                ("y-offset", 0), ("framerate", (5, 1)))
    width, height, x_offset, y_offset, framerate = [
        properties.get(key, value) for key, value in defaults]
    src = "ximagesrc"
    if not gstreamer.element_factory_exists(src):
        raise errors.MissingElementError(src)
    fps = "%d/%d" % framerate
    return (
        "%s startx=%d starty=%d endx=%d endy=%d use-damage=false"
        " ! ffmpegcolorspace"
        " ! video/x-raw-yuv,framerate=(fraction)%s,format=(fourcc)I420"
        % (src, x_offset, y_offset, width + x_offset, height + y_offset,
           fps))
def get_kuscheduler(self, interval):
    """Return a bin wrapping a named 'keyunitsscheduler' element.

    Registers the element factory on first use and remembers the
    scheduler instance on self._kuscheduler.
    """
    if not gstreamer.element_factory_exists('keyunitsscheduler'):
        register()
    description = ('keyunitsscheduler interval=%s '
                   'name=scheduler' % interval)
    kubin = Gst.parse_bin_from_description(description, True)
    self._kuscheduler = kubin.get_by_name('scheduler')
    return kubin
def get_pipeline_string(self, properties):
    """Return the vorbis encoding pipeline and stash encoder settings.

    Stores bitrate/quality/channels on self for later configuration of
    the encoder and capsfilter.
    """
    self.bitrate = properties.get("bitrate", -1)
    self.quality = properties.get("quality", 0.3)
    self.channels = properties.get("channels", 2)
    # prefer legacyresample when present (renamed audioresample)
    if gstreamer.element_factory_exists("legacyresample"):
        resampler = "legacyresample"
    else:
        resampler = "audioresample"
    return ("%s name=ar ! audioconvert ! capsfilter name=cf "
            "! vorbisenc name=encoder" % resampler)
def get_pipeline_string(self, properties):
    """Return the mulaw encoding pipeline description."""
    if gstreamer.element_factory_exists('legacyresample'):
        resampler = 'legacyresample'
    else:
        resampler = 'audioresample'
    # we only support mulaw in multipart, and multipart mandates
    # the audio/basic content-type to be 8000 Hz mono, c.f. RFC2046
    return ('%s ! audioconvert ! audio/x-raw-int,rate=8000,channels=1 '
            '! mulawenc name=encoder' % resampler)
def get_pipeline_string(self, properties):
    """Build the mulaw audio pipeline string (8000 Hz mono).

    Multipart mandates the audio/basic content-type to be 8000 Hz
    mono, c.f. RFC2046, which is why the caps are fixed here.
    """
    resampler = ('legacyresample'
                 if gstreamer.element_factory_exists('legacyresample')
                 else 'audioresample')
    return ('%s ! audioconvert ! audio/x-raw-int,rate=8000,channels=1 '
            '! mulawenc name=encoder' % resampler)
def get_pipeline_string(self, properties):
    """Return the vorbis encoder pipeline; record settings on self."""
    self.bitrate = properties.get('bitrate', -1)
    self.quality = properties.get('quality', 0.3)
    self.channels = properties.get('channels', 2)
    if gstreamer.element_factory_exists('legacyresample'):
        resampler = 'legacyresample'
    else:
        resampler = 'audioresample'
    return ('%s name=ar ! audioconvert ! capsfilter name=cf '
            '! vorbisenc name=enc' % resampler)
def get_pipeline_string(self, properties):
    """Build the vorbis encoding pipeline, caching encoder parameters.

    bitrate/quality/channels are kept on self so they can be applied to
    the named 'enc' and 'cf' elements after parsing.
    """
    self.bitrate = properties.get('bitrate', -1)
    self.quality = properties.get('quality', 0.3)
    self.channels = properties.get('channels', 2)
    resampler = ('legacyresample'
                 if gstreamer.element_factory_exists('legacyresample')
                 else 'audioresample')
    return ('%s name=ar ! audioconvert ! capsfilter name=cf '
            '! vorbisenc name=enc' % resampler)
def check_gnl(element):
    # Closure: reports a missing gnonlin element through the
    # component's message interface ('self' is captured from the
    # enclosing scope).
    if gstreamer.element_factory_exists(element):
        return
    m = messages.Error(T_(N_(
        "%s is missing. Make sure your gnonlin "
        "installation is complete."), element))
    documentation.messageAddGStreamerInstall(m)
    self.debug(m)
    self.addMessage(m)
def get_kuscheduler(self, interval):
    """Return a bin containing a named 'keyunitsscheduler' element,
    registering the element factory on first use."""
    if not gstreamer.element_factory_exists('keyunitsscheduler'):
        register()
    kubin = gst.parse_bin_from_description(
        'keyunitsscheduler interval=%s name=scheduler' % interval, True)
    self._kuscheduler = kubin.get_by_name('scheduler')
    return kubin
def check_gnl(element):
    """Report a missing gnonlin element ('self' comes from the
    enclosing scope)."""
    if not gstreamer.element_factory_exists(element):
        msg = messages.Error(
            T_(N_("%s is missing. Make sure your gnonlin "
                  "installation is complete."), element))
        documentation.messageAddGStreamerInstall(msg)
        self.debug(msg)
        self.addMessage(msg)
def do_check(self):
    """Probe for a usable Windows Media Video encoder.

    For WMV2 only the pitfdll-provided 'dmoenc_wmvdmoe2v2' works; for
    other versions the Fluendo 'fluwmvenc' is preferred, with
    'dmoenc_wmvdmoe2v3' as fallback.  The chosen encoder name (or None)
    and its plugin version are published on the UI state.
    """
    self.debug('running Windows Media Video encoder check.')
    version = self.config['properties'].get('version', 3)
    # For WMV2 we need dmoenc_wmvdmoe2v2.
    if version == 2:
        if gstreamer.element_factory_exists('dmoenc_wmvdmoe2v2'):
            self.wmvEncoder = 'dmoenc_wmvdmoe2v2'
            self.wmvVersion = gstreamer.get_plugin_version('pitfdll')
        else:
            # NOTE(review): 'version' is reused here for the pitfdll
            # plugin version, shadowing the WMV version read above.
            version = gstreamer.get_plugin_version('pitfdll')
            if not version:
                self.warning('could not find pitfdll.')
                m = Error(T_(
                    N_("This host is missing the 'gst-pitfdll' GStreamer plug-in.\n")))
            else:
                self.warning('could not find dmoenc_wmvdmoe2v2, probably missing DLL, or old registry.')
                m = Error(T_(
                    N_("This host is missing the Windows encoder DLL.\n")))
            self.wmvEncoder = None
            self.addMessage(m)
    else:
        # First look for Fluendo WMV encoder.
        if gstreamer.element_factory_exists('fluwmvenc'):
            self.debug('found fluwmvenc, using it.')
            self.wmvEncoder = 'fluwmvenc'
            self.wmvVersion = gstreamer.get_plugin_version('fluwmvenc')
        elif gstreamer.element_factory_exists('dmoenc_wmvdmoe2v3'):
            self.debug('could not find fluwmvenc, found dmoenc_wmvdmoe2v3.')
            self.wmvEncoder = 'dmoenc_wmvdmoe2v3'
            self.wmvVersion = gstreamer.get_plugin_version('pitfdll')
        else:
            self.warning('could not find any WMV encoder.')
            m = Error(T_(
                N_("This host is missing the WMV encoder plug-ins.\n")))
            self.wmvEncoder = None
            self.addMessage(m)
    # NOTE(review): when no encoder was found, self.wmvVersion is not
    # assigned in this method — presumably initialised elsewhere; verify.
    self.uiState.set('encoder', self.wmvEncoder)
    self.uiState.set('version', self.wmvVersion)
    return defer.succeed(None)
def get_pipeline_string(self, properties):
    """Return a live audiotestsrc pipeline with rate/volume applied."""
    self.samplerate = properties.get('samplerate', 44100)
    volume = properties.get('volume', 1.0)
    source = 'audiotestsrc'
    if not gstreamer.element_factory_exists(source):
        raise errors.MissingElementError(source)
    template = ('%s name=source %s ! identity name=identity silent=TRUE ! '
                'audio/x-raw-int,rate=%d ! '
                'volume name=volume volume=%f ! level name=level')
    return template % (source, 'is-live=true', self.samplerate, volume)
def __init__(self, channels=None, samplerate=None,
             tolerance=DEFAULT_TOLERANCE):
    """Build the audio-adaptation bin.

    Chain: audiorate (or a silent identity when rate control is
    disabled by _use_audiorate()) -> audioconvert -> resample ->
    capsfilter -> identity, exposed through 'sink'/'src' ghost pads.
    """
    gst.Bin.__init__(self)
    self._samplerate = samplerate
    self._samplerate_caps = ''
    self._channels = channels
    self._channels_caps = ''
    if self._use_audiorate():
        self._audiorate = gst.element_factory_make("audiorate")
        self._audiorate.set_property("skip-to-first", True)
    else:
        # silent identity acts as a pass-through placeholder so the
        # rest of the chain is identical either way
        self._audiorate = gst.element_factory_make("identity")
        self._audiorate.set_property("silent", True)
    self._audioconv = gst.element_factory_make("audioconvert")
    # legacyresample is the pre-0.10.22 audioresample under a new name
    resampler = 'audioresample'
    if gstreamer.element_factory_exists('legacyresample'):
        resampler = 'legacyresample'
    self._audioresample = gst.element_factory_make(resampler)
    self._capsfilter = gst.element_factory_make("capsfilter")
    self._identity = gst.parse_launch("identity silent=true")
    self.add(self._audiorate)
    self.add(self._audioconv)
    self.add(self._audioresample)
    self.add(self._capsfilter)
    self.add(self._identity)
    self._audiorate.link(self._audioconv)
    self._audioconv.link(self._audioresample)
    self._audioresample.link(self._capsfilter)
    self._capsfilter.link(self._identity)
    # Create source and sink pads
    self._sinkPad = gst.GhostPad('sink', self._audiorate.get_pad('sink'))
    self._srcPad = gst.GhostPad('src', self._identity.get_pad('src'))
    self.add_pad(self._sinkPad)
    self.add_pad(self._srcPad)
    # intercept sink events so caps changes can be observed
    self._sinkPad.set_event_function(self.eventfunc)
    self._setSamplerate(samplerate)
    self._setChannels(channels)
    self._setTolerance(tolerance)
def __init__(self, channels=None, samplerate=None,
             tolerance=DEFAULT_TOLERANCE):
    """Construct the audio conversion/resampling bin.

    Elements are linked in creation order (rate -> convert -> resample
    -> capsfilter -> identity); the bin's ghost pads wrap the first and
    last element pads.
    """
    gst.Bin.__init__(self)
    self._samplerate = samplerate
    self._samplerate_caps = ''
    self._channels = channels
    self._channels_caps = ''
    if self._use_audiorate():
        self._audiorate = gst.element_factory_make("audiorate")
        self._audiorate.set_property("skip-to-first", True)
    else:
        # rate control disabled: use a no-op identity in its place
        self._audiorate = gst.element_factory_make("identity")
        self._audiorate.set_property("silent", True)
    self._audioconv = gst.element_factory_make("audioconvert")
    resampler = 'audioresample'
    if gstreamer.element_factory_exists('legacyresample'):
        resampler = 'legacyresample'
    self._audioresample = gst.element_factory_make(resampler)
    self._capsfilter = gst.element_factory_make("capsfilter")
    self._identity = gst.parse_launch("identity silent=true")
    self.add(self._audiorate)
    self.add(self._audioconv)
    self.add(self._audioresample)
    self.add(self._capsfilter)
    self.add(self._identity)
    self._audiorate.link(self._audioconv)
    self._audioconv.link(self._audioresample)
    self._audioresample.link(self._capsfilter)
    self._capsfilter.link(self._identity)
    # Create source and sink pads
    self._sinkPad = gst.GhostPad('sink', self._audiorate.get_pad('sink'))
    self._srcPad = gst.GhostPad('src', self._identity.get_pad('src'))
    self.add_pad(self._sinkPad)
    self.add_pad(self._srcPad)
    self._sinkPad.set_event_function(self.eventfunc)
    self._setSamplerate(samplerate)
    self._setChannels(channels)
    self._setTolerance(tolerance)
def setUp(self):
    """Register the key-units scheduler if needed and build the
    smooth-streaming producer pipeline for the test."""
    # BUG FIX: the factory is registered as 'keyunitsscheduler' (see
    # kuscheduler.register()), but this check looked up
    # 'keyunitscheduler' (missing an 's'), so the existence test could
    # never succeed and register() ran unconditionally.
    if not gstreamer.element_factory_exists('keyunitsscheduler'):
        from flumotion.component.effects.kuscheduler \
            import kuscheduler
        kuscheduler.register()
    self.tp = comptest.ComponentTestHelper()
    # NOTE(review): 'interval = 1000000000' has spaces around '=';
    # confirm gst's parser accepts this, otherwise it should be
    # 'interval=1000000000'.
    prod = ('videotestsrc is-live=1 ! '
            'video/x-raw-yuv,width=(int)320,height=(int)240, '
            'framerate=(fraction)30/1 ! '
            'keyunitsscheduler interval = 1000000000 !'
            'flumch264enc ! ismlmux '
            'trak-timescale=10000000 movie-timescale=10000000')
    self.s = \
        'flumotion.component.consumers.smoothstreamer.'\
        'SmoothHTTPLiveStreamer'
    self.prod = comptest.pipeline_src(prod)
def get_pipeline_string(self, properties):
    """Return an audiotestsrc pipeline with configurable wave shape,
    sample rate and volume."""
    self.samplerate = properties.get('samplerate', 44100)
    wave = properties.get('wave', 0)
    volume = properties.get('volume', 1.0)
    source = 'audiotestsrc'
    if not gstreamer.element_factory_exists(source):
        raise errors.MissingElementError(source)
    template = ('%s name=source wave=%s %s ! '
                'identity name=identity silent=TRUE ! '
                'audio/x-raw-int,rate=%d ! '
                'volume name=volume volume=%f ! level name=level')
    return template % (source, wave, 'is-live=true', self.samplerate,
                       volume)
def get_pipeline_string(self, properties):
    """Return the overlay converter pipeline description.

    The order here is important: to have our eater be the reference
    stream for videomixer it needs to be specified last.
    """
    if gstreamer.element_factory_exists("appsrc") and \
            gstreamer.get_plugin_version("app") >= (0, 10, 22, 0):
        source_element = "appsrc name=source do-timestamp=true"
    else:
        #FIXME: fluoverlaysrc only needed on gst-plugins-base < 0.10.22
        gobject.type_register(OverlayImageSource)
        # return value deliberately ignored (was bound to an unused
        # local 'ret' before)
        gst.element_register(OverlayImageSource, "fluoverlaysrc",
                             gst.RANK_MARGINAL)
        source_element = "fluoverlaysrc name=source "
    return ('%s ! alphacolor ! '
            'videomixer name=mix ! @feeder:default@ '
            '@eater:default@ ! ffmpegcolorspace ! mix.' % source_element)
def get_pipeline_string(self, properties):
    """Build the fluwmaenc pipeline, optionally inserting a resampler
    (when 'samplerate' is given) and a drop-probability identity."""
    channels = properties.get('channels', 2)
    parts = ['audioconvert', 'fluwmaenc name=encoder']
    if 'samplerate' in properties:
        if gstreamer.element_factory_exists('legacyresample'):
            resampler = 'legacyresample'
        else:
            resampler = 'audioresample'
        # resampler followed by the fixed-rate caps, between
        # audioconvert and the encoder
        parts[1:1] = [resampler,
                      'audio/x-raw-int,rate=%d,channels=%d'
                      % (properties['samplerate'], channels)]
    if 'drop-probability' in properties:
        parts.insert(0, 'identity drop-probability=%f silent=TRUE'
                     % properties['drop-probability'])
    return " ! ".join(parts)
def get_pipeline_string(self, properties):
    """Return the flumcaacenc AAC encoding pipeline description."""
    # v2 supports only 16000, 22050, 24000, 32000, 44100, 48000 KHz
    samplerate = properties.get('samplerate', 44100)
    ht = 1 if properties.get('headers', False) else 0
    he = properties.get('high-efficiency-version', 2)
    channels = properties.get('channels', 2)
    if gstreamer.element_factory_exists('legacyresample'):
        resampler = 'legacyresample'
    else:
        resampler = 'audioresample'
    return ("audioconvert ! %s "
            "! audio/x-raw-int,rate=%d,channels=%d "
            "! flumcaacenc header-type=%d name=encoder he=%d "
            "! audio/mpeg,rate=%d"
            % (resampler, samplerate, channels, ht, he, samplerate))
def do_check(self):
    """Verify ismlmux is present and new enough for smooth streaming.

    Adds an Error message when the element is missing, and a Warning
    when the isomp4 plugin is <= 0.10.23.
    """
    if not gstreamer.element_factory_exists('ismlmux'):
        m = messages.Error(T_(N_(
            "%s is missing. Make sure your %s "
            "installation is complete."), 'ismlmux', 'ismlmux'))
        documentation.messageAddGStreamerInstall(m)
        self.debug(m)
        self.addMessage(m)
        return
    v = gstreamer.get_plugin_version('isomp4')
    if v < (0, 10, 23, 0):
        m = messages.Warning(
            T_(N_("Versions up to and including %s of the '%s' "
                  "GStreamer plug-in are not suitable for "
                  "smooth streaming.\n"), '0.10.23', 'isomp4'))
        self.addMessage(m)
def get_pipeline_string(self, properties):
    """Return the ximagesrc capture pipeline for the configured region."""
    width = properties.get('width', 320)
    height = properties.get('height', 240)
    x_offset = properties.get('x-offset', 0)
    y_offset = properties.get('y-offset', 0)
    framerate = properties.get('framerate', (5, 1))
    src = 'ximagesrc'
    if not gstreamer.element_factory_exists(src):
        raise errors.MissingElementError(src)
    return (
        '%s startx=%d starty=%d endx=%d endy=%d use-damage=false'
        ' ! ffmpegcolorspace'
        ' ! video/x-raw-yuv,framerate=(fraction)%s,format=(fourcc)I420'
        % (src, x_offset, y_offset, width + x_offset, height + y_offset,
           '%d/%d' % framerate))
def _buildAudioPipeline(self, pipeline, src):
    """Append audioconvert -> resample -> audiorate -> capsfilter after
    *src* inside *pipeline* and return the final source pad."""
    if gstreamer.element_factory_exists('legacyresample'):
        resampler_name = 'legacyresample'
    else:
        resampler_name = 'audioresample'
    rate = gst.element_factory_make("audiorate")
    convert = gst.element_factory_make('audioconvert')
    resample = gst.element_factory_make(resampler_name)
    capsfilter = gst.element_factory_make("capsfilter")
    capsfilter.props.caps = gst.Caps(
        "audio/x-raw-int,channels=%d,rate=%d,width=16,depth=16"
        % (self._channels, self._samplerate))
    pipeline.add(rate, convert, resample, capsfilter)
    src.link(convert)
    convert.link(resample)
    resample.link(rate)
    rate.link(capsfilter)
    return capsfilter.get_pad('src')
def _buildAudioPipeline(self, pipeline, src):
    """Attach the audio adaptation chain after *src* in *pipeline*.

    Links src -> audioconvert -> audioresample -> audiorate ->
    capsfilter (int16 audio at self._channels/self._samplerate) and
    returns the capsfilter's source pad for further linking.
    """
    audiorate = gst.element_factory_make("audiorate")
    audioconvert = gst.element_factory_make('audioconvert')
    # legacyresample is the pre-0.10.22 audioresample under a new name
    resampler = 'audioresample'
    if gstreamer.element_factory_exists('legacyresample'):
        resampler = 'legacyresample'
    audioresample = gst.element_factory_make(resampler)
    outcaps = gst.Caps(
        "audio/x-raw-int,channels=%d,rate=%d,width=16,depth=16"
        % (self._channels, self._samplerate))
    capsfilter = gst.element_factory_make("capsfilter")
    capsfilter.props.caps = outcaps
    pipeline.add(audiorate, audioconvert, audioresample, capsfilter)
    src.link(audioconvert)
    audioconvert.link(audioresample)
    audioresample.link(audiorate)
    audiorate.link(capsfilter)
    return capsfilter.get_pad('src')
def get_pipeline_string(self, properties):
    """Build the ximagesrc screen-grab pipeline from the component
    properties (falling back to built-in defaults)."""
    props = {}
    for key, default in (('width', 320), ('height', 240),
                         ('x-offset', 0), ('y-offset', 0),
                         ('framerate', (5, 1))):
        props[key] = properties.get(key, default)
    src = 'ximagesrc'
    if not gstreamer.element_factory_exists(src):
        raise errors.MissingElementError(src)
    x0 = props['x-offset']
    y0 = props['y-offset']
    return (
        '%s startx=%d starty=%d endx=%d endy=%d use-damage=false'
        ' ! ffmpegcolorspace'
        ' ! video/x-raw-yuv,framerate=(fraction)%s,format=(fourcc)I420'
        % (src, x0, y0, props['width'] + x0, props['height'] + y0,
           '%d/%d' % props['framerate']))
from twisted.python import failure
from twisted.internet import defer, reactor, interfaces, gtk3reactor
from twisted.web import client, error

from flumotion.common import testsuite
from flumotion.common import log, errors
from flumotion.common.planet import moods
from flumotion.component.converters.video import video
from flumotion.common import gstreamer
from flumotion.test import comptest

attr = testsuite.attr

# Module-level 'skip': Trial skips the whole module unless a
# sufficiently recent 'deinterlace' element (one exposing a 'method'
# property) is available.
if not gstreamer.element_factory_exists('deinterlace')\
        or not gstreamer.element_factory_has_property('deinterlace',
                                                      'method'):
    skip = "GStreamer element 'deinterlace' is too old or doesn't exists"


class TestVideoConverter(comptest.CompTestTestCase, log.Loggable):

    def setUp(self):
        # Producer: live interlaced RGB test video with non-square
        # pixels, feeding the video Converter component under test.
        self.tp = comptest.ComponentTestHelper()
        prod = ('videotestsrc is-live=true ! '
                'video/x-raw-rgb,framerate=(fraction)1/2,width=320,height=240,'
                'pixel-aspect-ratio=1/2,interlaced=true')
        self.s = 'flumotion.component.converters.video.video.Converter'
        self.prod = comptest.pipeline_src(prod)
def get_pipeline_string(self, properties):
    """Return the HLS sink pipeline, registering the Python fallback
    sink when the native 'hlssink' element is unavailable."""
    if not gstreamer.element_factory_exists('hlssink'):
        hlssink.register()
    return "hlssink name=sink sync=false"
def makeAudioEncodeBin(config, analysis, tag, withRateControl=True,
                       pipelineInfo=None, logger=None):
    """Build a gst.Bin that queues, rate-controls, converts, resamples,
    caps-filters and encodes audio.

    :param config: transcoding configuration (audioEncoder description,
        optional audioRate/audioChannels/audioResampler)
    :param analysis: source analysis used as fallback for rate/channels
        (not fully trusted, see comment below)
    :param tag: suffix used to make element names unique
    :param withRateControl: insert an audiorate element when True
    :param pipelineInfo: optional dict; receives the textual pipeline
        description under the 'audio' key
    :param logger: optional logger, defaults to the module log
    :returns: the assembled gst.Bin with 'sink'/'src' ghost pads
    """
    logger = logger or log
    pipelineParts = list()
    bin = gst.Bin()
    # input queue element
    inqueue = gst.element_factory_make("queue", "audioinqueue-%s" % tag)
    # Cannot specify max_size_time property because of some buggy buffers
    # with invalid time that make the queue lock
    inqueue.props.max_size_time = 0
    inqueue.props.max_size_buffers = 200
    pipelineParts.append("queue")
    # audiorate element
    if withRateControl:
        rate = gst.element_factory_make("audiorate", "audiorate-%s" % tag)
        # Add a tolerance of 20ms to audiorate to fix cracking audio
        if gstreamer.element_has_property(rate, "tolerance"):
            rate.set_property("tolerance", DEFAULT_TOLERANCE)
        pipelineParts.append("audiorate")
    else:
        rate = None
    # audioconvert element
    convert = gst.element_factory_make("audioconvert",
                                       "audioconvert-%s" % tag)
    pipelineParts.append("audioconvert")
    # audioresample element
    # Use legacyresample if available because after 0.10.22 the old
    # audioresample element got renamed to legacyresample and replaced
    # by speexresample that cause audio/video synchronization issues.
    resamplerName = "audioresample"
    if gstreamer.element_factory_exists("legacyresample"):
        resamplerName = "legacyresample"
    if config.audioResampler:
        if gstreamer.element_factory_exists(config.audioResampler):
            resamplerName = config.audioResampler
        else:
            logger.warning("Audio resampler %s doesn't exist, "
                           "defaulting to %s",
                           config.audioResampler, resamplerName)
    resample = gst.element_factory_make(resamplerName,
                                        "%s-%s" % (resamplerName, tag))
    pipelineParts.append(resamplerName)
    # capsfilter element
    capsfilter = gst.element_factory_make("capsfilter",
                                          "audiocapsfilter-%s" % tag)
    # Because the analysis does not reliably give channel
    # and rate info, do not rely on it.
    if config.audioRate or config.audioChannels:
        capsList = []
        if config.audioRate:
            capsList.append("rate=%d" % config.audioRate)
        elif analysis.audioRate:
            capsList.append("rate=%d" % analysis.audioRate)
        if config.audioChannels:
            capsList.append("channels=%d" % config.audioChannels)
        elif analysis.audioChannels:
            capsList.append("channels=%d" % analysis.audioChannels)
        caps = ", ".join(capsList)
        if caps:
            fullcaps = ("audio/x-raw-int, %s;audio/x-raw-float, %s"
                        % (caps, caps))
            logger.debug("Audio capsfilter: '%s'", fullcaps)
            pipelineParts.append("'%s'" % fullcaps)
            capsfilter.props.caps = gst.caps_from_string(fullcaps)
    else:
        logger.debug("No audio capsfilter")
    # encoder elements
    encode = gstutils.parse_bin_from_description(config.audioEncoder, True)
    pipelineParts.extend(map(str.strip, config.audioEncoder.split("!")))
    # output queue element
    outqueue = gst.element_factory_make("queue", "audioutqueue-%s" % tag)
    outqueue.props.max_size_time = gst.SECOND * 20
    outqueue.props.max_size_buffers = 0
    pipelineParts.append("queue")
    if rate:
        bin.add(inqueue, rate, convert, resample, capsfilter, encode,
                outqueue)
        gst.element_link_many(inqueue, rate, convert, resample,
                              capsfilter, encode, outqueue)
    else:
        bin.add(inqueue, convert, resample, capsfilter, encode, outqueue)
        gst.element_link_many(inqueue, convert, resample, capsfilter,
                              encode, outqueue)
    bin.add_pad(gst.GhostPad("sink", inqueue.get_pad("sink")))
    bin.add_pad(gst.GhostPad("src", outqueue.get_pad("src")))
    pipelineDesc = " ! ".join(pipelineParts)
    logger.debug("Audio pipeline: %s", pipelineDesc)
    # FIX: identity comparison with None instead of '!= None'
    if pipelineInfo is not None:
        pipelineInfo["audio"] = pipelineDesc
    return bin
from twisted.python import failure from twisted.internet import defer, reactor, interfaces, gtk2reactor from twisted.web import client, error from flumotion.common import testsuite from flumotion.common import log, errors from flumotion.common.planet import moods from flumotion.component.converters.video import video from flumotion.common import gstreamer from flumotion.test import comptest attr = testsuite.attr if not gstreamer.element_factory_exists('deinterlace')\ or not gstreamer.element_factory_has_property('deinterlace', 'method'): skip = "GStreamer element 'deinterlace' is too old or doesn't exists" class TestVideoConverter(comptest.CompTestTestCase, log.Loggable): def setUp(self): self.tp = comptest.ComponentTestHelper() prod = ('videotestsrc is-live=true ! ' 'video/x-raw-rgb,framerate=(fraction)1/2,width=320,height=240,' 'pixel-aspect-ratio=1/2,interlaced=true') self.s = 'flumotion.component.converters.video.video.Converter' self.prod = comptest.pipeline_src(prod) def tearDown(self):