class NewsegmentEater(gst.BaseTransform):
    __gstdetails__ = (
        "Description",
        "Klass",
        "Description",
        "Author")

    sink_template = gst.PadTemplate("sink",
            gst.PAD_SINK, gst.PAD_ALWAYS,
            gst.Caps('ANY'))
    src_template = gst.PadTemplate("src",
            gst.PAD_SRC, gst.PAD_ALWAYS,
            gst.Caps('ANY'))

    __gsttemplates__ = (sink_template, src_template)

    def __init__(self):
        gst.BaseTransform.__init__(self)

    def do_event(self, event):
        res = gst.BaseTransform.do_event(self, event)
        if event.type == gst.EVENT_NEWSEGMENT:
            # don't forward the event downstream
            return False

        return res
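# A minimal usage sketch (not part of the original snippet): before a Python
# element like NewsegmentEater can be instantiated it has to be registered
# with GObject; it can then be dropped into a pipeline like any other
# element. The fakesrc/fakesink pipeline below is purely illustrative.
gobject.type_register(NewsegmentEater)

pipeline = gst.Pipeline()
src = gst.element_factory_make("fakesrc")
eater = NewsegmentEater()
sink = gst.element_factory_make("fakesink")
pipeline.add(src, eater, sink)
gst.element_link_many(src, eater, sink)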
def testTracks(self):
    timeline = Timeline()
    stream1 = VideoStream(gst.Caps('video/x-raw-rgb'), 'src0')
    stream2 = AudioStream(gst.Caps('audio/x-raw-int'), 'src1')
    track1 = Track(stream1)
    track2 = Track(stream2)

    # start with 2 tracks
    timeline.addTrack(track1)
    timeline.addTrack(track2)

    factory = TimelineSourceFactory(timeline)
    bin = factory.makeBin()
    self.failUnlessEqual(len(list(bin)), 2)
    self.failUnlessEqual(set(factory.getOutputStreams()),
            set([stream1, stream2]))

    # add a new track
    stream3 = AudioStream(gst.Caps('audio/x-raw-int'), 'src2')
    track3 = Track(stream3)
    timeline.addTrack(track3)
    self.failUnlessEqual(len(list(bin)), 3)
    self.failUnlessEqual(set(factory.getOutputStreams()),
            set([stream1, stream2, stream3]))

    # remove a track
    timeline.removeTrack(track3)
    self.failUnlessEqual(len(list(bin)), 2)
    self.failUnlessEqual(set(factory.getOutputStreams()),
            set([stream1, stream2]))

    factory.clean()
def testNewDecodedPadNotFixed(self):
    video_template = gst.PadTemplate('video_00',
            gst.PAD_SRC, gst.PAD_ALWAYS,
            gst.Caps('video/x-raw-rgb, '
                'framerate=[0/1, %d/1]' % ((2 ** 31) - 1)))
    audio_template = gst.PadTemplate('audio_00',
            gst.PAD_SRC, gst.PAD_ALWAYS,
            gst.Caps('audio/x-raw-int, '
                'rate=[1, %d]' % ((2 ** 31) - 1)))

    video = gst.Pad(video_template)
    audio = gst.Pad(audio_template)

    video_ghost = gst.GhostPad("video", video)
    audio_ghost = gst.GhostPad("audio", audio)

    self.failUnlessEqual(self.discoverer.current_streams, [])

    self.discoverer._newDecodedPadCb(None, video_ghost, False)
    self.failUnlessEqual(len(self.discoverer.current_streams), 0)
    self.failUnlessEqual(self.discoverer.new_video_pad_cb, 1)

    self.discoverer._newDecodedPadCb(None, audio_ghost, False)
    self.failUnlessEqual(len(self.discoverer.current_streams), 0)
    self.failUnlessEqual(self.discoverer.new_video_pad_cb, 1)

    # fix the caps
    video.set_caps(gst.Caps('video/x-raw-rgb, framerate=25/1'))
    self.failUnlessEqual(len(self.discoverer.current_streams), 1)
    self.failUnlessEqual(self.discoverer.new_video_pad_cb, 1)

    audio.set_caps(gst.Caps('audio/x-raw-int, rate=44100'))
    self.failUnlessEqual(len(self.discoverer.current_streams), 2)
    self.failUnlessEqual(self.discoverer.new_video_pad_cb, 1)
class EOSSir(gst.Element):
    __gstdetails__ = ("EOSSir", "Generic",
            "pushes EOS after the first buffer",
            "Alessandro Decina <*****@*****.**>")

    srctemplate = gst.PadTemplate("src", gst.PAD_SRC, gst.PAD_ALWAYS,
            gst.Caps("ANY"))
    sinktemplate = gst.PadTemplate("sink", gst.PAD_SINK, gst.PAD_ALWAYS,
            gst.Caps("ANY"))

    __gsttemplates__ = (srctemplate, sinktemplate)

    def __init__(self):
        gst.Element.__init__(self)

        self.sinkpad = gst.Pad(self.sinktemplate, "sink")
        self.sinkpad.set_chain_function(self.chain)
        self.add_pad(self.sinkpad)

        self.srcpad = gst.Pad(self.srctemplate, "src")
        self.add_pad(self.srcpad)

    def chain(self, pad, buf):
        ret = self.srcpad.push(buf)
        if ret == gst.FLOW_OK:
            self.info("pushed, doing EOS")
            self.srcpad.push_event(gst.event_new_eos())

        return ret
class FixSeekStart(gst.BaseTransform):
    __gstdetails__ = (
        "Description",
        "Klass",
        "Description",
        "Author")

    sink_template = gst.PadTemplate("sink",
            gst.PAD_SINK, gst.PAD_ALWAYS,
            gst.Caps('ANY'))
    src_template = gst.PadTemplate("src",
            gst.PAD_SRC, gst.PAD_ALWAYS,
            gst.Caps('ANY'))

    __gsttemplates__ = (sink_template, src_template)

    def __init__(self, track):
        gst.BaseTransform.__init__(self)
        self.track = track

    def do_src_event(self, event):
        if event.type == gst.EVENT_SEEK:
            rate, format, flags, cur_type, cur, stop_type, stop = \
                    event.parse_seek()
            if cur_type == gst.SEEK_TYPE_SET and cur >= self.track.duration:
                cur = self.track.duration - 1 * gst.NSECOND
                new_event = gst.event_new_seek(rate, format, flags,
                        cur_type, cur, stop_type, stop)
                event = new_event

        return gst.BaseTransform.do_src_event(self, event)
def testMatchSamePadName(self):
    s1 = AudioStream(gst.Caps("audio/x-vorbis"), pad_name="src0")
    s2 = AudioStream(gst.Caps("audio/x-speex"), pad_name="src0")

    stream, rank = match_stream(s1, [s2])
    self.failUnlessEqual(id(s2), id(stream))
    self.failUnlessEqual(rank,
            STREAM_MATCH_SAME_PAD_NAME + STREAM_MATCH_SAME_TYPE)
class TestAudioStream(TestMultimediaStream, TestCase):
    streamClass = AudioStream
    unfixed_caps = gst.Caps('audio/x-raw-float, rate=48000, channels=2,'
            'width=32, depth=32, endianness=4321; '
            'audio/x-raw-int, rate=44100, channels=2, width=32, depth=32, '
            'endianness=4321')
    fixed_caps = gst.Caps('audio/x-raw-int, rate=44100, channels=2, '
            'width=32, depth=32, endianness=4321')
    non_raw_caps = gst.Caps('audio/x-vorbis')

    def testAudioProperties(self):
        expected = {'audiotype': 'audio/x-raw-float', 'rate': 48000,
                'channels': 2, 'width': 32, 'depth': 32}
        stream = AudioStream(self.unfixed_caps)
        self.checkProperties(stream, expected)

        expected = {'audiotype': 'audio/x-raw-int', 'rate': 44100,
                'channels': 2, 'width': 32, 'depth': 32}
        stream = AudioStream(self.fixed_caps)
        self.checkProperties(stream, expected)

        # get None when trying to access these properties with non raw streams
        # NOTE: this is the current behaviour. Does it really make sense to try
        # to access these properties for non raw streams? Should we rather error
        # out?
        expected = dict((name, None) for name in expected.keys())
        expected['audiotype'] = 'audio/x-vorbis'
        stream = AudioStream(self.non_raw_caps)
        self.checkProperties(stream, expected)
def testMatchStreamSameNameAndCompatibleCaps(self):
    s1 = AudioStream(gst.Caps("audio/x-vorbis, a={1, 2}"), pad_name="src0")
    s2 = AudioStream(gst.Caps("audio/x-vorbis, a={2, 3}"), pad_name="src0")

    stream, rank = match_stream(s1, [s2])
    self.failUnlessEqual(id(s2), id(stream))
    self.failUnlessEqual(rank,
            STREAM_MATCH_SAME_PAD_NAME + STREAM_MATCH_COMPATIBLE_CAPS)
def testPendingLink(self):
    a = Action()
    p = Pipeline()
    src = common.FakeGnlFactory()
    src.addOutputStream(VideoStream(gst.Caps("video/x-raw-yuv"),
            pad_name="src"))
    sink = common.FakeSinkFactory()
    sink.addInputStream(MultimediaStream(gst.Caps("any"),
            pad_name="sink"))

    # set the link, it will be activated once the pad is added
    a.setLink(src, sink)
    # Let's see if the link is present
    self.assertEquals(a._links, [(src, sink, None, None)])

    p.setAction(a)

    gst.debug("about to activate action")
    a.activate()
    # only the producer and the consumer are created, the other elements are
    # created dynamically
    self.assertEquals(len(list(p._pipeline.elements())), 2)

    p.setState(STATE_PLAYING)
    time.sleep(1)

    # and make sure that all other elements were created (4)
    # FIXME if it's failing here, run the test a few times trying to raise
    # the time.sleep() above, it may just be racy...
    self.assertEquals(len(list(p._pipeline.elements())), 4)

    a.deactivate()
    p.setState(STATE_NULL)
    self.assertEquals(len(list(p._pipeline.elements())), 0)
    p.release()
def newBlankProject(self):
    """Start up a new blank project."""
    # if there's a running project we must close it
    if self.current is not None and not self.closeRunningProject():
        return False

    # we don't have a URI here, None means we're loading a new project
    self.emit("new-project-loading", None)
    project = Project(_("New Project"))
    self.emit("new-project-created", project)
    self.current = project

    # FIXME: this should not be hard-coded
    # add default tracks for a new project
    video = VideoStream(gst.Caps('video/x-raw-rgb; video/x-raw-yuv'))
    track = Track(video)
    project.timeline.addTrack(track)
    audio = AudioStream(gst.Caps('audio/x-raw-int; audio/x-raw-float'))
    track = Track(audio)
    project.timeline.addTrack(track)

    project.connect("project-changed", self._projectChangedCb)
    self.emit("new-project-loaded", self.current)

    return True
def testSaveTimeline(self):
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = VideoTestSourceFactory()
    source1.addOutputStream(video_stream)
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    self.formatter._saveTrackObject(track_object)

    timeline_object = TimelineObject(source1)
    timeline_object.addTrackObject(track_object)
    self.formatter._saveTimelineObject(timeline_object)

    timeline = Timeline()
    timeline.addTrack(track)

    element = self.formatter._saveTimeline(timeline)
    self.failUnlessEqual(element.tag, "timeline")

    tracks = element.find("tracks")
    self.failUnlessEqual(len(tracks), 1)
def testSavetimelineObjects(self):
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = FileSourceFactory("file1.ogg")
    source1.addOutputStream(video_stream)

    # these two calls are needed to populate the context for the -ref
    # elements
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    self.formatter._saveTrackObject(track_object)

    timeline_object = TimelineObject(source1)
    timeline_object.addTrackObject(track_object)

    element = self.formatter._saveTimelineObjects([timeline_object])
    self.failUnlessEqual(len(element), 1)
def testSaveTrack(self):
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = VideoTestSourceFactory()

    # these two calls are needed to populate the context for the -ref
    # elements
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)

    element = self.formatter._saveTrack(track)
    self.failUnlessEqual(element.tag, "track")

    track_objects_element = element.find("track-objects")
    self.failUnlessEqual(len(track_objects_element), 1)
def testPads(self):
    timeline = Timeline()
    stream1 = VideoStream(gst.Caps('video/x-raw-rgb'), 'src0')
    stream2 = AudioStream(gst.Caps('audio/x-raw-int'), 'src1')
    track1 = Track(stream1)
    track2 = Track(stream2)
    timeline.addTrack(track1)
    timeline.addTrack(track2)

    factory = TimelineSourceFactory(timeline)
    bin = factory.makeBin()
    self.failUnlessEqual(len(list(bin.src_pads())), 0)

    pad1 = gst.Pad('src0', gst.PAD_SRC)
    pad1.set_caps(gst.Caps('asd'))
    pad1.set_active(True)
    track1.composition.add_pad(pad1)

    pad2 = gst.Pad('src0', gst.PAD_SRC)
    pad2.set_caps(gst.Caps('asd'))
    pad2.set_active(True)
    track2.composition.add_pad(pad2)

    self.failUnlessEqual(len(list(bin.src_pads())), 2)

    track1.composition.remove_pad(pad1)
    self.failUnlessEqual(len(list(bin.src_pads())), 1)

    track2.composition.remove_pad(pad2)
    self.failUnlessEqual(len(list(bin.src_pads())), 0)

    factory.clean()
def do_set_property(self, prop, val):
    if prop.name == 'sr':
        self.sr = val
        self.excR_capsfilter.set_property("caps",
                gst.Caps("audio/x-raw-float, rate=%d" % val))
        self.excI_capsfilter.set_property("caps",
                gst.Caps("audio/x-raw-float, rate=%d" % val))
        self.dctrlR_capsfilter.set_property("caps",
                gst.Caps("audio/x-raw-float, rate=%d" % val))
        self.dctrlI_capsfilter.set_property("caps",
                gst.Caps("audio/x-raw-float, rate=%d" % val))
        self.excR_firbank.set_property("fir-matrix", [numpy.hanning(val + 1)])
        self.excI_firbank.set_property("fir-matrix", [numpy.hanning(val + 1)])
        self.dctrlR_firbank.set_property("fir-matrix", [numpy.hanning(val + 1)])
        self.dctrlI_firbank.set_property("fir-matrix", [numpy.hanning(val + 1)])
    elif prop.name == 'time-domain':
        self.time_domain = val
        self.excR_firbank.set_property("time-domain", val)
        self.excI_firbank.set_property("time-domain", val)
        self.dctrlR_firbank.set_property("time-domain", val)
        self.dctrlI_firbank.set_property("time-domain", val)
    elif prop.name == 'olgR':
        self.olgR = val
        self.dctrl_mod_w_mod_olgR.set_property("amplification", val)
        self.dctrlR_excI_olgR.set_property("amplification", val)
        self.dctrlI_excR_olgR.set_property("amplification", val)
        self.dctrlI_excI_olgR.set_property("amplification", val)
        self.dctrlR_excR_olgR.set_property("amplification", val)
    elif prop.name == 'olgI':
        self.olgI = val
        self.dctrl_mod_w_mod_olgI.set_property("amplification", val)
        self.dctrlR_excI_olgI.set_property("amplification", val)
        self.dctrlI_excR_olgI.set_property("amplification", val)
        self.dctrlI_excI_olgI.set_property("amplification", val)
        self.dctrlR_excR_olgI.set_property("amplification", val)
    elif prop.name == 'wR':
        self.wR = val
        self.dctrlR_excI_olgI_wR.set_property("amplification", val)
        self.dctrlR_excI_olgR_wR.set_property("amplification", val)
        self.dctrlI_excR_olgI_wR.set_property("amplification", val)
        self.dctrlI_excR_olgR_wR.set_property("amplification", val)
        self.dctrlI_excI_olgR_wR.set_property("amplification", val)
        self.dctrlI_excI_olgI_wR.set_property("amplification", val)
        self.dctrlR_excR_olgR_wR.set_property("amplification", val)
        self.dctrlR_excR_olgI_wR.set_property("amplification", val)
    elif prop.name == 'wI':
        self.wI = val
        self.dctrlR_excI_olgI_wI.set_property("amplification", val)
        self.dctrlR_excI_olgR_wI.set_property("amplification", val)
        self.dctrlI_excR_olgI_wI.set_property("amplification", val)
        self.dctrlI_excR_olgR_wI.set_property("amplification", val)
        self.dctrlI_excI_olgR_wI.set_property("amplification", val)
        self.dctrlI_excI_olgI_wI.set_property("amplification", val)
        self.dctrlR_excR_olgR_wI.set_property("amplification", val)
        self.dctrlR_excR_olgI_wI.set_property("amplification", val)
    elif prop.name == 'wmod':
        self.wmod = val
        self.dctrl_mod_w_mod.set_property("amplification", val)
    elif prop.name == 'olgmod':
        self.olgmod = val
        self.dctrl_mod_w_mod_olg_mod.set_property("amplification", val)
    else:
        raise AssertionError
def testMatchStreamSameNameAndSameCaps(self):
    s1 = AudioStream(gst.Caps("audio/x-vorbis"), pad_name="src0")
    s2 = AudioStream(gst.Caps("audio/x-vorbis"), pad_name="src0")

    stream, rank = match_stream(s1, [s2])
    self.failUnlessEqual(id(s2), id(stream))
    self.failUnlessEqual(rank,
            STREAM_MATCH_SAME_PAD_NAME + STREAM_MATCH_SAME_CAPS)
    self.failUnlessEqual(rank, STREAM_MATCH_MAXIMUM)
class Equalizer(gst.Element):
    _sinkpadtemplate = gst.PadTemplate("sink",
            gst.PAD_SINK, gst.PAD_ALWAYS,
            gst.Caps("audio/x-raw-spectrum"))
    _srcpadtemplate = gst.PadTemplate("src",
            gst.PAD_SRC, gst.PAD_ALWAYS,
            gst.Caps("audio/x-raw-spectrum"))

    __gstdetails__ = ("spectrum_equalizer", "Audio/Filter",
            "equalizer that deals with spectrum data", "Leberwurscht")

    __gproperties__ = {
        "transmission": (gobject.TYPE_PYOBJECT,
                "transmission",
                "transmission for each spectral band",
                gobject.PARAM_READWRITE)
    }

    def __init__(self, *args, **kwargs):
        self.frequencies = Math.get_frq(2049, 44100)
        self.transmission = None

        gst.Element.__init__(self, *args, **kwargs)

        if not self.transmission:
            self.transmission = numpy.ones(2049)

        self.sinkpad = gst.Pad(self._sinkpadtemplate, "sink")
        self.sinkpad.use_fixed_caps()

        self.srcpad = gst.Pad(self._srcpadtemplate, "src")
        self.srcpad.use_fixed_caps()

        self.sinkpad.set_chain_function(self.chainfunc)
        self.sinkpad.set_event_function(self.eventfunc)
        self.add_pad(self.sinkpad)

        self.srcpad.set_event_function(self.srceventfunc)
        self.add_pad(self.srcpad)

    # custom property
    def do_get_property(self, pspec):
        return getattr(self, pspec.name)

    def do_set_property(self, pspec, value):
        setattr(self, pspec.name, value)

    def chainfunc(self, pad, buffer):
        gst.log("Passing buffer with ts %d" % (buffer.timestamp))

        fft = numpy.frombuffer(buffer, numpy.complex128)
        fft = fft * self.transmission

        b = gst.Buffer(fft)
        b.set_caps(self.srcpad.get_caps())
        b.timestamp = buffer.timestamp

        return self.srcpad.push(b)

    def eventfunc(self, pad, event):
        return self.srcpad.push_event(event)

    def srceventfunc(self, pad, event):
        return self.sinkpad.push_event(event)
def testGetReleaseQueueForFactoryStream(self):
    factory = FakeSinkFactory()
    stream = VideoStream(gst.Caps('any'), 'sink')
    factory.addInputStream(stream)

    self.failUnlessRaises(PipelineError,
            self.pipeline.getQueueForFactoryStream, factory, stream, True)

    # getBinForFactoryStream(factory, stream) must be called before
    self.failUnlessRaises(PipelineError,
            self.pipeline.getQueueForFactoryStream, factory, stream, True)

    # create the bin
    bin1 = self.pipeline.getBinForFactoryStream(factory, stream, True)

    # try to get a cached queue
    self.failUnlessRaises(PipelineError,
            self.pipeline.getQueueForFactoryStream, factory, stream, False)

    # create queue
    queue1 = self.pipeline.getQueueForFactoryStream(factory, stream, True)
    self.failUnless(isinstance(queue1, gst.Element))

    gst.debug("pouet")
    # get the cached instance
    queue2 = self.pipeline.getQueueForFactoryStream(factory, stream, True)
    self.failUnlessEqual(id(queue1), id(queue2))

    # release
    self.pipeline.releaseQueueForFactoryStream(factory, stream)
    gst.debug("pouet")

    # there's still a queue alive, so we can't release the bin
    self.failUnlessRaises(PipelineError,
            self.pipeline.releaseBinForFactoryStream, factory, stream)

    self.pipeline.releaseQueueForFactoryStream(factory, stream)
    gst.debug("pouet2")
    self.failUnlessRaises(PipelineError,
            self.pipeline.releaseQueueForFactoryStream, factory, stream)

    # should always fail with a src bin
    factory2 = FakeSourceFactory()
    stream2 = VideoStream(gst.Caps('any'), 'src')
    factory2.addOutputStream(stream2)

    bin1 = self.pipeline.getBinForFactoryStream(factory, stream, True)
    self.failUnlessRaises(PipelineError,
            self.pipeline.getQueueForFactoryStream, factory2, stream2, True)

    self.pipeline.releaseBinForFactoryStream(factory, stream)
    self.pipeline.releaseBinForFactoryStream(factory, stream)
    self.assertEquals(factory.current_bins, 0)
def setUp(self):
    common.TestCase.setUp(self)
    self.vsrc = common.FakeSourceFactory("videotestsrc")
    self.vsrc.addOutputStream(
            VideoStream(gst.Caps("video/x-raw-yuv"), pad_name="src"))
    self.asrc = common.FakeSourceFactory("audiotestsrc")
    self.asrc.addOutputStream(
            AudioStream(gst.Caps("audio/x-raw-float"), pad_name="src"))
    self.vsettings = StreamEncodeSettings(encoder="theoraenc")
    self.asettings = StreamEncodeSettings(encoder="vorbisenc")
def testLoadTimeline(self):
    # we need a project for this to work
    self.formatter.project = Project()

    # create fake document tree
    timeline_element = Element("timeline")

    tracks_element = SubElement(timeline_element, "tracks")
    track_element = SubElement(tracks_element, "track")
    stream_element = SubElement(track_element, "stream", id="1",
            type="pitivi.stream.VideoStream", caps="video/x-raw-rgb")
    track_objects_element = SubElement(track_element, "track-objects")
    track_object = SubElement(track_objects_element, "track-object",
            type="pitivi.timeline.track.SourceTrackObject",
            start=ts(1 * gst.SECOND), duration=ts(10 * gst.SECOND),
            in_point=ts(5 * gst.SECOND), media_duration=ts(15 * gst.SECOND),
            priority=ts(5), id="1")
    factory_ref = SubElement(track_object, "factory-ref", id="1")
    stream_ref = SubElement(track_object, "stream-ref", id="1")

    timeline_objects_element = SubElement(timeline_element,
            "timeline-objects")
    timeline_object_element = \
            SubElement(timeline_objects_element, "timeline-object")
    factory_ref = SubElement(timeline_object_element, "factory-ref", id="1")
    stream_ref = SubElement(timeline_object_element, "stream-ref", id="1")
    track_object_refs = SubElement(timeline_object_element,
            "track-object-refs")
    track_object_ref = SubElement(track_object_refs,
            "track-object-ref", id="1")

    # insert fake streams and factories into context
    factory = VideoTestSourceFactory()
    self.formatter._context.factories["1"] = factory
    stream = VideoStream(gst.Caps("video/x-raw-rgb"))
    self.formatter._context.streams["1"] = stream

    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    source1 = VideoTestSourceFactory()
    self.formatter._context.factories["2"] = source1
    self.formatter._context.track_objects["1"] = SourceTrackObject(source1,
            video_stream)

    # point gun at foot; pull trigger
    self.formatter._loadTimeline(timeline_element)
    self.failUnlessEqual(len(self.formatter.project.timeline.tracks), 1)
class TestVideoStream(TestMultimediaStream, TestCase):
    streamClass = VideoStream
    unfixed_caps = gst.Caps('video/x-raw-rgb, width=320, height=240, '
            'framerate=30/1; '
            'video/x-raw-yuv, width=320, height=240, framerate=30/1, '
            'format=(fourcc)I420')
    fixed_caps = gst.Caps('video/x-raw-yuv, width=320, height=240, '
            'framerate=30/1, format=(fourcc)I420')
    non_raw_caps = gst.Caps('video/x-theora')

    def testVideoProperties(self):
        expected = {'videotype': 'video/x-raw-rgb', 'width': 320,
                'height': 240, 'framerate': gst.Fraction(30, 1),
                'format': None, 'par': gst.Fraction(1, 1),
                'dar': gst.Fraction(4, 3)}
        stream = VideoStream(self.unfixed_caps)
        self.checkProperties(stream, expected)

        expected['videotype'] = 'video/x-raw-yuv'
        expected['format'] = gst.Fourcc('I420')
        stream = VideoStream(self.fixed_caps)
        self.checkProperties(stream, expected)

        expected['videotype'] = 'video/x-theora'
        expected['width'] = None
        expected['height'] = None
        expected['format'] = None
        expected['framerate'] = gst.Fraction(1, 1)
        stream = VideoStream(self.non_raw_caps)
        self.checkProperties(stream, expected)

    def testParAndDar(self):
        caps = gst.Caps('video/x-raw-int, width=320, height=240, '
                'pixel-aspect-ratio=2/1')
        stream = VideoStream(caps)
        self.failUnlessEqual(stream.par, gst.Fraction(2, 1))
        self.failUnlessEqual(stream.dar, gst.Fraction(640, 240))

        caps = gst.Caps('video/x-raw-int, width=320, height=240')
        stream = VideoStream(caps)
        self.failUnlessEqual(stream.par, gst.Fraction(1, 1))
        self.failUnlessEqual(stream.dar, gst.Fraction(320, 240))

        # no width and height, default to 4/3
        caps = gst.Caps('video/x-raw-int')
        stream = VideoStream(caps)
        self.failUnlessEqual(stream.par, gst.Fraction(1, 1))
        self.failUnlessEqual(stream.dar, gst.Fraction(4, 3))
def testSaveFactoryRef(self):
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = FileSourceFactory("file1.ogg")
    source1.addOutputStream(video_stream)
    source1.addOutputStream(audio_stream)

    element = self.formatter._saveSource(source1)
    element_ref = self.formatter._saveFactoryRef(source1)
    self.failUnlessEqual(element_ref.tag, "factory-ref")
    self.failUnlessEqual(element_ref.attrib["id"], element.attrib["id"])
def _fillTimeline(self):
    # audio and video track
    video = VideoStream(gst.Caps('video/x-raw-rgb; video/x-raw-yuv'))
    track = Track(video)
    self.project.timeline.addTrack(track)
    audio = AudioStream(gst.Caps('audio/x-raw-int; audio/x-raw-float'))
    track = Track(audio)
    self.project.timeline.addTrack(track)

    for uri in self._uris:
        factory = self.project.sources.getUri(uri)
        self.project.timeline.addSourceFactory(factory)
def testMatchStreamGroupsOrder(self):
    stream1 = AudioStream(gst.Caps("audio/x-vorbis"))
    stream2 = AudioStream(gst.Caps("audio/x-vorbis"))
    stream3 = AudioStream(gst.Caps("audio/x-vorbis"))

    known_best_map = {(stream1, stream2): STREAM_MATCH_SAME_CAPS}

    group_a = [stream1]
    group_b = [stream2, stream3]
    best_map = match_stream_groups(group_a, group_b)
    self.failUnlessEqual(known_best_map, best_map)
def testMakeStreamBin(self):
    # streams are usually populated by the discoverer so here we have to do
    # that ourselves
    video1 = VideoStream(gst.Caps('video/x-raw-rgb, width=2048'),
            pad_name='src0')
    video2 = VideoStream(gst.Caps('video/x-raw-rgb, width=320'),
            pad_name='src1')
    audio = AudioStream(gst.Caps('audio/x-raw-int'), pad_name='src2')
    self.factory.addOutputStream(video1)
    self.factory.addOutputStream(video2)
    self.factory.addOutputStream(audio)
def testMatchStreamNoMatch(self):
    s1 = AudioStream(gst.Caps("audio/x-vorbis"))
    s2 = VideoStream(gst.Caps("video/x-theora"))

    stream, rank = match_stream(s1, [])
    self.failUnlessEqual(stream, None)
    self.failUnlessEqual(rank, STREAM_MATCH_NONE)

    stream, rank = match_stream(s1, [s2])
    self.failUnlessEqual(stream, None)
    self.failUnlessEqual(rank, STREAM_MATCH_NONE)
class FFT(gst.Element):
    _sinkpadtemplate = gst.PadTemplate("sink",
            gst.PAD_SINK, gst.PAD_ALWAYS,
            gst.Caps("audio/x-raw-float, rate=44100, channels=1, width=64, endianness=1234"))
    _srcpadtemplate = gst.PadTemplate("src",
            gst.PAD_SRC, gst.PAD_ALWAYS,
            gst.Caps("audio/x-raw-spectrum"))

    __gstdetails__ = ("fft", "Audio/Filter", "fft element", "Leberwurscht")

    # __gproperties__ = {"": (gobject.TYPE_INT, "mode", "editing mode",
    #         0, MODES_NUM - 1, MODE_DEFAULT, gobject.PARAM_READWRITE)}

    def __init__(self):
        gst.Element.__init__(self)

        self.sinkpad = gst.Pad(self._sinkpadtemplate, "sink")
        self.sinkpad.use_fixed_caps()

        self.srcpad = gst.Pad(self._srcpadtemplate, "src")
        self.srcpad.use_fixed_caps()

        self.sinkpad.set_chain_function(self.chainfunc)
        self.sinkpad.set_event_function(self.eventfunc)
        self.add_pad(self.sinkpad)

        self.srcpad.set_event_function(self.srceventfunc)
        self.add_pad(self.srcpad)

        self.adapter = gst.Adapter()

    def chainfunc(self, pad, buffer):
        # fixme: need to reset adapter when starting - see gstspectrum.c
        self.adapter.push(buffer)
        end_time = buffer.timestamp + buffer.duration

        gst.log("Got buffer with ts %d and length %d"
                % (buffer.timestamp, len(buffer)))

        l = 4096
        bytes_num = l * 8

        while self.adapter.available() >= bytes_num:
            time_till_end = int(
                    self.adapter.available() / 8. / 44100 * gst.SECOND)

            data = numpy.frombuffer(self.adapter.peek(bytes_num))
            fft = numpy.fft.rfft(data)  # length of this array is l/2 + 1

            b = gst.Buffer(fft)
            b.timestamp = end_time - time_till_end
            b.set_caps(self.srcpad.get_caps())
            self.srcpad.push(b)

            self.adapter.flush(bytes_num)

        return gst.FLOW_OK

    def eventfunc(self, pad, event):
        return self.srcpad.push_event(event)

    def srceventfunc(self, pad, event):
        return self.sinkpad.push_event(event)
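# A minimal usage sketch (not from the original source; element choices and
# caps are illustrative assumptions): register the FFT and Equalizer classes
# defined above and chain them after an audiotestsrc. A real pipeline would
# convert the spectrum back to audio instead of ending in a fakesink.
gobject.type_register(FFT)
gobject.type_register(Equalizer)

pipeline = gst.Pipeline()
src = gst.element_factory_make("audiotestsrc")
convert = gst.element_factory_make("audioconvert")
capsfilter = gst.element_factory_make("capsfilter")
capsfilter.set_property("caps", gst.Caps(
        "audio/x-raw-float, rate=44100, channels=1, width=64"))
fft = FFT()
equalizer = Equalizer()
# flat transmission curve: leave every spectral band untouched
equalizer.set_property("transmission", numpy.ones(2049))
sink = gst.element_factory_make("fakesink")

pipeline.add(src, convert, capsfilter, fft, equalizer, sink)
gst.element_link_many(src, convert, capsfilter, fft, equalizer, sink)
pipeline.set_state(gst.STATE_PLAYING)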
def testGetReleaseTeeForFactoryStream(self):
    factory = VideoTestSourceFactory()
    stream = VideoStream(gst.Caps('video/x-raw-rgb; video/x-raw-yuv'), 'src')
    factory.addOutputStream(stream)

    self.failUnlessRaises(PipelineError,
            self.pipeline.getTeeForFactoryStream, factory, stream, True)

    # getBinForFactoryStream(factory, stream) must be called before
    self.failUnlessRaises(PipelineError,
            self.pipeline.getTeeForFactoryStream, factory, stream, True)

    # create the bin
    bin1 = self.pipeline.getBinForFactoryStream(factory, stream, True)

    # try to get a cached tee
    self.failUnlessRaises(PipelineError,
            self.pipeline.getTeeForFactoryStream, factory, stream, False)

    # create tee
    tee1 = self.pipeline.getTeeForFactoryStream(factory, stream, True)
    self.failUnless(isinstance(tee1, gst.Element))

    # get the cached instance
    tee2 = self.pipeline.getTeeForFactoryStream(factory, stream, True)
    self.failUnlessEqual(id(tee1), id(tee2))

    # release
    self.pipeline.releaseTeeForFactoryStream(factory, stream)

    # there's still a tee alive, so we can't release the bin
    #self.failUnlessRaises(PipelineError,
    #        self.pipeline.releaseBinForFactoryStream, factory, stream)

    self.pipeline.releaseTeeForFactoryStream(factory, stream)
    self.failUnlessRaises(PipelineError,
            self.pipeline.releaseTeeForFactoryStream, factory, stream)

    # should always fail with a sink bin
    factory2 = FakeSinkFactory()
    stream2 = VideoStream(gst.Caps('video/x-raw-rgb; video/x-raw-yuv'),
            'src')
    factory2.addInputStream(stream2)

    self.failUnlessRaises(PipelineError,
            self.pipeline.getTeeForFactoryStream, factory2, stream2, True)

    self.pipeline.releaseBinForFactoryStream(factory, stream)
def testSaveSource(self):
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = FileSourceFactory("file1.ogg")
    source1.addOutputStream(video_stream)
    source1.addOutputStream(audio_stream)

    element = self.formatter._saveSource(source1)
    self.failUnlessEqual(element.tag, "source")
    self.failUnlessEqual(element.attrib["type"], qual(source1.__class__))
    self.failUnlessEqual(element.attrib["filename"], "file1.ogg")

    streams = element.find("output-streams")
    self.failUnlessEqual(len(streams), 2)
def testNewDecodedPadFixed(self):
    video = gst.Pad('video_00', gst.PAD_SRC)
    video.set_caps(gst.Caps('video/x-raw-rgb'))
    audio = gst.Pad('audio_00', gst.PAD_SRC)
    audio.set_caps(gst.Caps('audio/x-raw-int'))

    self.failUnlessEqual(self.discoverer.current_streams, [])

    self.discoverer._newDecodedPadCb(None, video, False)
    self.failUnlessEqual(len(self.discoverer.current_streams), 1)
    self.failUnlessEqual(self.discoverer.new_video_pad_cb, 1)

    self.discoverer._newDecodedPadCb(None, audio, False)
    self.failUnlessEqual(len(self.discoverer.current_streams), 2)
    self.failUnlessEqual(self.discoverer.new_video_pad_cb, 1)