def _setTolerance(self, tolerance):
    self._tolerance = tolerance
    if gstreamer.element_has_property(self._audiorate, 'tolerance'):
        self._audiorate.set_property('tolerance', self._tolerance)
    else:
        self.warning("The 'tolerance' property could not be set in the "
                     "audiorate element.")

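# All of these snippets guard optional element properties with
# gstreamer.element_has_property(). A minimal sketch of such a helper,
# assuming pygst 0.10 style bindings; the real flumotion.common.gstreamer
# implementation may differ.
import gobject

def element_has_property(element, property_name):
    # Check the element's GObject param specs for a property with this name.
    return property_name in [pspec.name
                             for pspec in gobject.list_properties(element)]

# e.g. only set 'tolerance' when the installed audiorate element exposes it:
#     if element_has_property(audiorate, 'tolerance'):
#         audiorate.set_property('tolerance', tolerance)
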
def configure_pipeline(self, pipeline, properties):
    self.volume = pipeline.get_by_name("setvolume")
    from flumotion.component.effects.volume import volume
    comp_level = pipeline.get_by_name('volumelevel')
    vol = volume.Volume('inputVolume', comp_level, pipeline)
    # catch bus message for when camera disappears
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message::element', self._bus_message_received_cb)
    self.addEffect(vol)

    decoder = pipeline.get_by_name("decoder")
    if gstreamer.element_has_property(decoder, 'drop-factor'):
        if self.framerate:
            # use true division so e.g. 25/2 becomes 12.5, not 12
            framerate = self.framerate.num / float(self.framerate.denom)
            if 12.5 < framerate:
                drop_factor = 1
            elif 6.3 < framerate <= 12.5:
                drop_factor = 2
            elif 3.2 < framerate <= 6.3:
                drop_factor = 4
            elif framerate <= 3.2:
                drop_factor = 8
        else:
            drop_factor = 1
        decoder.set_property('drop-factor', drop_factor)

    vr = videorate.Videorate('videorate',
                             decoder.get_pad("src"), pipeline, self.framerate)
    self.addEffect(vr)
    vr.plug()

    deinterlacer = deinterlace.Deinterlace('deinterlace',
                                           vr.effectBin.get_pad("src"),
                                           pipeline,
                                           self.deintMode, self.deintMethod)
    self.addEffect(deinterlacer)
    deinterlacer.plug()

    videoscaler = videoscale.Videoscale(
        'videoscale', self, deinterlacer.effectBin.get_pad("src"), pipeline,
        self.width, self.height, self.is_square, self.add_borders)
    self.addEffect(videoscaler)
    videoscaler.plug()

    # Setting a tolerance of 20ms should be enough (1/2 frame), but
    # we set it to 40ms to be more conservative
    ar = audioconvert.Audioconvert('audioconvert', comp_level.get_pad("src"),
                                   pipeline, tolerance=40 * gst.MSECOND)
    self.addEffect(ar)
    ar.plug()

def __init__(self, mode, method):
    gst.Bin.__init__(self)
    self.keepFR = True
    self.deinterlacerName = PASSTHROUGH_DEINTERLACER
    self._interlaced = False

    # Create elements
    self._colorspace = gst.element_factory_make("ffmpegcolorspace")
    self._colorfilter = gst.element_factory_make("capsfilter")
    self._deinterlacer = gst.element_factory_make(PASSTHROUGH_DEINTERLACER)
    self._deinterlacer.set_property('silent', True)
    self._videorate = gst.element_factory_make("videorate")
    self._ratefilter = gst.element_factory_make("capsfilter")

    # Add elements to the bin
    self.add(self._colorspace, self._colorfilter, self._deinterlacer,
             self._videorate, self._ratefilter)

    # FIXME: I420 is the only format supported by the ffmpeg deinterlacer.
    # Forcing it simplifies renegotiation issues if the input colorspace
    # is different and the ffmpeg deinterlacer is added after the
    # negotiation happened in a different colorspace. This makes this
    # element non-passthrough.
    self._colorfilter.set_property('caps', gst.Caps(
        'video/x-raw-yuv, format=(fourcc)I420'))

    if gstreamer.element_has_property(self._videorate, 'skip-to-first'):
        self._videorate.set_property('skip-to-first', True)

    # Link elements
    self._colorspace.link(self._colorfilter)
    self._colorfilter.link(self._deinterlacer)
    self._deinterlacer.link(self._videorate)
    self._videorate.link(self._ratefilter)

    # Create source and sink pads
    self._sinkPad = gst.GhostPad('sink', self._colorspace.get_pad('sink'))
    self._srcPad = gst.GhostPad('src', self._ratefilter.get_pad('src'))
    self.add_pad(self._sinkPad)
    self.add_pad(self._srcPad)

    # Store the deinterlacer's sink and source peer pads
    self._sinkPeerPad = self._colorspace.get_pad('src')
    self._srcPeerPad = self._videorate.get_pad('sink')

    # Add setcaps callback in the sink pad
    self._sinkPad.set_setcaps_function(self._sinkSetCaps)
    self._sinkPad.set_event_function(self.eventfunc)

    # Set the mode and method in the deinterlacer
    self._setMethod(method)
    self._setMode(mode)

def configure_pipeline(self, pipeline, properties):
    self.volume = pipeline.get_by_name("setvolume")
    from flumotion.component.effects.volume import volume
    comp_level = pipeline.get_by_name("volumelevel")
    vol = volume.Volume("inputVolume", comp_level, pipeline)

    decoder = pipeline.get_by_name("decoder")
    if gstreamer.element_has_property(decoder, "drop-factor"):
        if self.framerate:
            # use true division so e.g. 25/2 becomes 12.5, not 12
            framerate = self.framerate.num / float(self.framerate.denom)
            if 12.5 < framerate:
                drop_factor = 1
            elif 6.3 < framerate <= 12.5:
                drop_factor = 2
            elif 3.2 < framerate <= 6.3:
                drop_factor = 4
            elif framerate <= 3.2:
                drop_factor = 8
        else:
            drop_factor = 1
        decoder.set_property("drop-factor", drop_factor)

    vr = videorate.Videorate("videorate",
                             decoder.get_pad("src"), pipeline, self.framerate)
    self.addEffect(vr)
    vr.plug()

    deinterlacer = deinterlace.Deinterlace(
        "deinterlace", vr.effectBin.get_pad("src"), pipeline,
        self.deintMode, self.deintMethod)
    self.addEffect(deinterlacer)
    deinterlacer.plug()

    videoscaler = videoscale.Videoscale(
        "videoscale", self, deinterlacer.effectBin.get_pad("src"), pipeline,
        self.width, self.height, self.is_square, self.add_borders)
    self.addEffect(videoscaler)
    videoscaler.plug()

    # Setting a tolerance of 20ms should be enough (1/2 frame), but
    # we set it to 40ms to be more conservative
    ar = audioconvert.Audioconvert("audioconvert", comp_level.get_pad("src"),
                                   pipeline, tolerance=40 * gst.MSECOND)
    self.addEffect(ar)
    ar.plug()

def _configureOutput(self):
    p = ""
    if self._is_square:
        p = ",pixel-aspect-ratio=(fraction)1/1"
    if self._width:
        p = "%s,width=(int)%d" % (p, self._width)
    if self._height:
        p = "%s,height=(int)%d" % (p, self._height)
    p = "video/x-raw%s;video/x-raw%s" % (p, p)
    caps = Gst.Caps(p)
    self._capsfilter.set_property("caps", caps)
    if gstreamer.element_has_property(self._videoscaler, 'add-borders'):
        self._videoscaler.set_property('add-borders', self._add_borders)

def do_set_property(self, property, value):
    if property.name == 'width':
        self._width = value
    elif property.name == 'height':
        self._height = value
    elif property.name == 'add-borders':
        if not gstreamer.element_has_property(self._videoscaler,
                                              'add-borders'):
            self.warning("Can't add black borders because the videoscale "
                         "element doesn't have the 'add-borders' property.")
        self._add_borders = value
    elif property.name == 'is-square':
        self._is_square = value
    else:
        raise AttributeError('unknown property %s' % property.name)

def _configureOutput(self):
    p = ""
    if self._is_square:
        p = ",pixel-aspect-ratio=(fraction)1/1"
    if self._width:
        p = "%s,width=(int)%d" % (p, self._width)
    if self._height:
        p = "%s,height=(int)%d" % (p, self._height)
    p = "video/x-raw-yuv%s;video/x-raw-rgb%s" % (p, p)
    self.info("out:%s" % p)
    caps = gst.Caps(p)
    self._capsfilter.set_property("caps", caps)
    if gstreamer.element_has_property(self._videoscaler, 'add-borders'):
        self._videoscaler.set_property('add-borders', self._add_borders)

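# As a small illustration (not part of the original module), the same
# caps-string construction as a standalone helper, with the result it would
# produce for an assumed 320x240 square-pixel configuration.
import gst

def build_output_caps(width=None, height=None, is_square=False):
    # Mirrors the 0.10 _configureOutput() string building shown above.
    p = ""
    if is_square:
        p = ",pixel-aspect-ratio=(fraction)1/1"
    if width:
        p = "%s,width=(int)%d" % (p, width)
    if height:
        p = "%s,height=(int)%d" % (p, height)
    return gst.Caps("video/x-raw-yuv%s;video/x-raw-rgb%s" % (p, p))

# build_output_caps(320, 240, True) is equivalent to:
#   video/x-raw-yuv, pixel-aspect-ratio=1/1, width=320, height=240;
#   video/x-raw-rgb, pixel-aspect-ratio=1/1, width=320, height=240
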
def configure_pipeline(self, pipeline, properties):
    enc = pipeline.get_by_name('enc')
    cf = pipeline.get_by_name('cf')
    ar = pipeline.get_by_name('ar')
    art = pipeline.get_by_name('art')
    assert enc and cf and ar and art

    if self.bitrate > -1:
        enc.set_property('bitrate', self.bitrate)
    else:
        enc.set_property('quality', self.quality)

    if gstreamer.element_has_property(art, 'tolerance'):
        art.set_property('tolerance', self.DEFAULT_TOLERANCE)

    pad = ar.get_pad('sink')
    handle = None

    def buffer_probe(pad, buffer):
        # this comes from another thread
        caps = buffer.get_caps()
        in_rate = caps[0]['rate']

        # now do necessary filtercaps
        self.rate = in_rate
        if self.bitrate > -1:
            maxsamplerate = get_max_sample_rate(self.bitrate, self.channels)
            if in_rate > maxsamplerate:
                self.rate = get_preferred_sample_rate(maxsamplerate)
                self.debug(
                    'rate %d > max rate %d (for %d kbit/sec), '
                    'selecting rate %d instead' % (
                        in_rate, maxsamplerate, self.bitrate, self.rate))

        caps_str = 'audio/x-raw-float, rate=%d, channels=%d' % (
            self.rate, self.channels)
        cf.set_property('caps', gst.caps_from_string(caps_str))
        pad.remove_buffer_probe(handle)
        return True

    handle = pad.add_buffer_probe(buffer_probe)

def configure_pipeline(self, pipeline, properties):
    element = pipeline.get_by_name('encoder')

    for p in ['sharpness', 'quick-compress', 'noise-sensitivity']:
        if properties.get(p, None) is not None:
            self.warnDeprecatedProperties([p])

    props = ('bitrate',
             'quality',
             ('speed', 'speed-level'),
             ('keyframe-mindistance', 'keyframe-freq'),
             ('keyframe-maxdistance', 'keyframe-force'))

    for p in props:
        if isinstance(p, tuple):
            pproperty, eproperty = p
        else:
            pproperty = eproperty = p

        if pproperty not in properties:
            continue

        value = properties[pproperty]
        self.debug('Setting GStreamer property %s to %r' % (
            eproperty, value))

        # FIXME: GStreamer 0.10 has bitrate in kbps, inconsistent
        # with all other elements, so fix it up
        if pproperty == 'bitrate':
            value = int(value / 1000)

        # Check for the speed-level property, introduced in
        # gst-plugins-base-0.10.24
        if eproperty == 'speed-level':
            if not gstreamer.element_has_property(element, 'speed-level'):
                self.warning("Trying to set the 'speed-level' property "
                             "on an old version of GStreamer's theora "
                             "encoder element")
                self.warning("We will fall back to the 'quick' property")
                eproperty = 'quick'
                value = value > 0  # map speed level to the boolean 'quick'

        element.set_property(eproperty, value)

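# For example, a hypothetical component configuration like the one below
# would be translated onto the encoder element as indicated in the comments
# (bitrate converted from bps to kbps, speed mapped to 'speed-level', or to
# 'quick' on old gst-plugins-base). All values here are made up.
properties = {
    'bitrate': 400000,           # -> encoder 'bitrate' = 400 (kbps)
    'speed': 2,                  # -> 'speed-level' = 2, or 'quick' = True
                                 #    when gst-plugins-base < 0.10.24
    'keyframe-maxdistance': 64,  # -> encoder 'keyframe-force' = 64
}
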
def __init__(self, framerate=Gst.Fraction(25, 1)):
    Gst.Bin.__init__(self)
    self._framerate = framerate

    self._videorate = Gst.ElementFactory.make("videorate")
    self._capsfilter = Gst.ElementFactory.make("capsfilter")
    self.add(self._videorate, self._capsfilter)
    self._videorate.link(self._capsfilter)

    # Set properties
    if gstreamer.element_has_property(self._videorate, 'skip-to-first'):
        self._videorate.set_property('skip-to-first', True)

    # Create source and sink pads
    self._sinkPad = Gst.GhostPad.new('sink',
                                     self._videorate.get_static_pad('sink'))
    self._srcPad = Gst.GhostPad.new('src',
                                    self._capsfilter.get_static_pad('src'))
    self.add_pad(self._sinkPad)
    self.add_pad(self._srcPad)

    self._setFramerate(framerate)

def __init__(self, framerate=gst.Fraction(25, 1)):
    gst.Bin.__init__(self)
    self._framerate = framerate

    self._videorate = gst.element_factory_make("videorate")
    self._capsfilter = gst.element_factory_make("capsfilter")
    self.add(self._videorate, self._capsfilter)
    self._videorate.link(self._capsfilter)

    # Set properties
    if gstreamer.element_has_property(self._videorate, 'skip-to-first'):
        self._videorate.set_property('skip-to-first', True)

    # Create source and sink pads
    self._sinkPad = gst.GhostPad('sink', self._videorate.get_pad('sink'))
    self._srcPad = gst.GhostPad('src', self._capsfilter.get_pad('src'))
    self.add_pad(self._sinkPad)
    self.add_pad(self._srcPad)

    self._sinkPad.set_event_function(self.eventfunc)

    self._setFramerate(framerate)

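# A hypothetical way to drop such a bin into a 0.10 pipeline. VideorateBin is
# a stand-in name for the gst.Bin subclass whose constructor is shown above;
# the real class name may differ, and the surrounding elements are only for
# illustration.
import gst

pipeline = gst.Pipeline('demo')
src = gst.element_factory_make('videotestsrc')
rate_bin = VideorateBin(gst.Fraction(15, 1))  # name assumed, see above
sink = gst.element_factory_make('fakesink')
pipeline.add(src, rate_bin, sink)
gst.element_link_many(src, rate_bin, sink)
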
def configure_pipeline(self, pipeline, properties):
    # catch bus message for when camera disappears
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message::element', self._bus_message_received_cb)

    self.decoder = pipeline.get_by_name("decoder")
    if gstreamer.element_has_property(self.decoder, 'drop-factor'):
        if self.framerate:
            # use true division so e.g. 25/2 becomes 12.5, not 12
            framerate = self.framerate.num / float(self.framerate.denom)
            if 12.5 < framerate:
                drop_factor = 1
            elif 6.3 < framerate <= 12.5:
                drop_factor = 2
            elif 3.2 < framerate <= 6.3:
                drop_factor = 4
            elif framerate <= 3.2:
                drop_factor = 8
        else:
            drop_factor = 1
        self.decoder.set_property('drop-factor', drop_factor)

    return avproducer.AVProducerBase.configure_pipeline(self, pipeline,
                                                        properties)

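# The drop-factor selection above also appears in the earlier
# configure_pipeline() snippets; a standalone, hypothetical helper capturing
# the same thresholds, with a quick sanity check.
def drop_factor_for_framerate(framerate):
    # Decode fewer frames the lower the requested output framerate is,
    # using the same thresholds as the snippets above.
    if framerate > 12.5:
        return 1
    elif framerate > 6.3:
        return 2
    elif framerate > 3.2:
        return 4
    return 8

# e.g. a 25/2 fps (12.5 fps) stream maps to a drop-factor of 2,
# while a full 25 fps stream keeps every frame (drop-factor 1).
assert drop_factor_for_framerate(25 / 2.0) == 2
assert drop_factor_for_framerate(25.0) == 1
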
def makeAudioEncodeBin(config, analysis, tag, withRateControl=True,
                       pipelineInfo=None, logger=None):
    logger = logger or log
    pipelineParts = list()
    bin = gst.Bin()

    # input queue element
    inqueue = gst.element_factory_make("queue", "audioinqueue-%s" % tag)
    # Cannot specify the max_size_time property because of some buggy
    # buffers with invalid time that make the queue lock
    inqueue.props.max_size_time = 0
    inqueue.props.max_size_buffers = 200
    pipelineParts.append("queue")

    # audiorate element
    if withRateControl:
        rate = gst.element_factory_make("audiorate", "audiorate-%s" % tag)
        # Add a tolerance of 20ms to audiorate to fix crackling audio
        if gstreamer.element_has_property(rate, "tolerance"):
            rate.set_property("tolerance", DEFAULT_TOLERANCE)
        pipelineParts.append("audiorate")
    else:
        rate = None

    # audioconvert element
    convert = gst.element_factory_make("audioconvert",
                                       "audioconvert-%s" % tag)
    pipelineParts.append("audioconvert")

    # audioresample element
    # Use legacyresample if available because after 0.10.22 the old
    # audioresample element got renamed to legacyresample and replaced
    # by speexresample, which causes audio/video synchronization issues.
    resamplerName = "audioresample"
    if gstreamer.element_factory_exists("legacyresample"):
        resamplerName = "legacyresample"
    if config.audioResampler:
        if gstreamer.element_factory_exists(config.audioResampler):
            resamplerName = config.audioResampler
        else:
            logger.warning("Audio resampler %s doesn't exist, "
                           "defaulting to %s",
                           config.audioResampler, resamplerName)
    resample = gst.element_factory_make(resamplerName,
                                        "%s-%s" % (resamplerName, tag))
    pipelineParts.append(resamplerName)

    # capsfilter element
    capsfilter = gst.element_factory_make("capsfilter",
                                          "audiocapsfilter-%s" % tag)
    # Because the analysis does not reliably give channel and rate info,
    # do not rely on it alone.
    if config.audioRate or config.audioChannels:
        capsList = []
        if config.audioRate:
            capsList.append("rate=%d" % config.audioRate)
        elif analysis.audioRate:
            capsList.append("rate=%d" % analysis.audioRate)
        if config.audioChannels:
            capsList.append("channels=%d" % config.audioChannels)
        elif analysis.audioChannels:
            capsList.append("channels=%d" % analysis.audioChannels)
        caps = ", ".join(capsList)
        if caps:
            fullcaps = ("audio/x-raw-int, %s;audio/x-raw-float, %s"
                        % (caps, caps))
            logger.debug("Audio capsfilter: '%s'", fullcaps)
            pipelineParts.append("'%s'" % fullcaps)
            capsfilter.props.caps = gst.caps_from_string(fullcaps)
    else:
        logger.debug("No audio capsfilter")

    # encoder elements
    encode = gstutils.parse_bin_from_description(config.audioEncoder, True)
    pipelineParts.extend(map(str.strip, config.audioEncoder.split("!")))

    # output queue element
    outqueue = gst.element_factory_make("queue", "audioutqueue-%s" % tag)
    outqueue.props.max_size_time = gst.SECOND * 20
    outqueue.props.max_size_buffers = 0
    pipelineParts.append("queue")

    if rate:
        bin.add(inqueue, rate, convert, resample, capsfilter, encode,
                outqueue)
        gst.element_link_many(inqueue, rate, convert, resample, capsfilter,
                              encode, outqueue)
    else:
        bin.add(inqueue, convert, resample, capsfilter, encode, outqueue)
        gst.element_link_many(inqueue, convert, resample, capsfilter,
                              encode, outqueue)

    bin.add_pad(gst.GhostPad("sink", inqueue.get_pad("sink")))
    bin.add_pad(gst.GhostPad("src", outqueue.get_pad("src")))

    pipelineDesc = " ! ".join(pipelineParts)
    logger.debug("Audio pipeline: %s", pipelineDesc)

    if pipelineInfo is not None:
        pipelineInfo["audio"] = pipelineDesc

    return bin

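# A hypothetical caller, only to show the shape of the arguments: the config
# and analysis objects need just the attributes the function reads
# (audioEncoder, audioRate, audioChannels and audioResampler on the config;
# audioRate and audioChannels on the analysis). Every concrete value below
# is made up.
class _Config(object):
    audioEncoder = 'vorbisenc quality=0.4'  # parsed with parse_bin_from_description
    audioRate = 44100
    audioChannels = 2
    audioResampler = None                   # fall back to (legacy)audioresample

class _Analysis(object):
    audioRate = None
    audioChannels = None

info = {}
audio_bin = makeAudioEncodeBin(_Config(), _Analysis(), 'stream0',
                               withRateControl=True, pipelineInfo=info)
# info['audio'] now holds a gst-launch style description of the bin, e.g.
# "queue ! audiorate ! audioconvert ! legacyresample ! '...' !
#  vorbisenc quality=0.4 ! queue"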