def testSaveTimelineObject(self):
    """Serializing a timeline object must produce a <timeline-object>
    element carrying a factory reference and one track-object reference."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = FileSourceFactory("file1.ogg")
    source1.addOutputStream(video_stream)

    # Saving the source and the stream first populates the formatter
    # context, which the -ref elements resolve against.
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    self.formatter._saveTrackObject(track_object)

    timeline_object = TimelineObject(source1)
    timeline_object.addTrackObject(track_object)

    element = self.formatter._saveTimelineObject(timeline_object)
    self.failUnlessEqual(element.tag, "timeline-object")
    self.failIfEqual(element.find("factory-ref"), None)
    refs = element.find("track-object-refs")
    self.failUnlessEqual(len(refs), 1)
def newBlankProject(self):
    """Close any running project and start a new, empty one.

    Returns True on success, False when the currently loaded project
    could not be closed.
    """
    # A running project must be shut down before starting another.
    if self.current is not None and not self.closeRunningProject():
        return False

    # No URI here: None tells listeners a brand-new project is loading.
    self.emit("new-project-loading", None)
    project = Project(_("New Project"))
    self.emit("new-project-created", project)
    self.current = project

    # FIXME: this should not be hard-coded
    # Seed the new project with one default video and one audio track.
    video_stream = VideoStream(gst.Caps('video/x-raw-rgb; video/x-raw-yuv'))
    project.timeline.addTrack(Track(video_stream))
    audio_stream = AudioStream(gst.Caps('audio/x-raw-int; audio/x-raw-float'))
    project.timeline.addTrack(Track(audio_stream))

    project.connect("project-changed", self._projectChangedCb)
    self.emit("new-project-loaded", self.current)
    return True
def testSaveTimeline(self):
    """A serialized timeline is a <timeline> element with one <tracks>
    child entry per track."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = VideoTestSourceFactory()
    source1.addOutputStream(video_stream)

    # Prime the formatter context for reference resolution.
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    self.formatter._saveTrackObject(track_object)

    timeline_object = TimelineObject(source1)
    timeline_object.addTrackObject(track_object)
    self.formatter._saveTimelineObject(timeline_object)

    timeline = Timeline()
    timeline.addTrack(track)

    element = self.formatter._saveTimeline(timeline)
    self.failUnlessEqual(element.tag, "timeline")
    tracks = element.find("tracks")
    self.failUnlessEqual(len(tracks), 1)
def testSaveTrackEffect(self):
    """Serializing a TrackEffect must emit a <track-object> element with
    the timing attributes plus an <effect> child describing the factory
    and its gst element properties."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    effect1 = EffectFactory('identity', 'identity')
    effect1.addOutputStream(video_stream)
    effect1.addInputStream(video_stream)

    # The identity factory has to be registered in the formatter's
    # effect factories dictionary before saving.
    self.formatter.avalaible_effects._effect_factories_dict['identity'] =\
        effect1

    track_effect = TrackEffect(effect1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_effect)

    element = self.formatter._saveTrackObject(track_effect)

    # Attributes reflect the constructor arguments.
    self.failUnlessEqual(element.tag, "track-object")
    self.failUnlessEqual(element.attrib["type"], qual(track_effect.__class__))
    self.failUnlessEqual(element.attrib["start"], ts(10 * gst.SECOND))
    self.failUnlessEqual(element.attrib["duration"], ts(20 * gst.SECOND))
    self.failUnlessEqual(element.attrib["in_point"], ts(5 * gst.SECOND))
    self.failUnlessEqual(element.attrib["media_duration"],
            ts(15 * gst.SECOND))
    self.failUnlessEqual(element.attrib["priority"], "(int)10")

    # The effect subtree must be present and complete.
    effect_element = element.find('effect')
    self.failIfEqual(effect_element, None)
    self.failIfEqual(effect_element.find("factory"), None)
    self.failIfEqual(effect_element.find("gst-element-properties"), None)
def testConnectionAndDisconnection(self):
    """The observer must connect/disconnect signal handlers as timeline
    objects are added, removed and observed.

    Bug fix: the assertion after re-adding timeline_object1 previously
    checked the object's truthiness (``failUnless(timeline_object1)``),
    which is always true; it now checks ``.connected`` as intended.
    """
    timeline = Timeline()
    stream = new_stream()
    factory = new_source_factory()

    track = Track(stream)
    track_object1 = SourceTrackObject(factory, stream)
    track.addTrackObject(track_object1)
    timeline.addTrack(track)

    timeline_object1 = TimelineObject(factory)
    timeline_object1.addTrackObject(track_object1)
    timeline.addTimelineObject(timeline_object1)

    # Observing connects the timeline and its existing objects.
    self.observer.startObserving(timeline)
    self.failUnless(timeline.connected)
    self.failUnless(timeline_object1.connected)

    # Removal disconnects, re-adding reconnects.
    timeline.removeTimelineObject(timeline_object1)
    self.failIf(timeline_object1.connected)
    timeline.addTimelineObject(timeline_object1)
    self.failUnless(timeline_object1.connected)

    # Stopping observation disconnects everything.
    self.observer.stopObserving(timeline)
    self.failIf(timeline.connected)
    self.failIf(timeline_object1.connected)
def testPads(self):
    """The factory bin must mirror ghost pads for every pad the track
    compositions expose, adding and removing them dynamically."""
    timeline = Timeline()
    stream1 = VideoStream(gst.Caps('video/x-raw-rgb'), 'src0')
    stream2 = AudioStream(gst.Caps('audio/x-raw-int'), 'src1')
    track1 = Track(stream1)
    track2 = Track(stream2)
    timeline.addTrack(track1)
    timeline.addTrack(track2)

    factory = TimelineSourceFactory(timeline)
    bin = factory.makeBin()

    # No composition pads yet -> no bin source pads.
    self.failUnlessEqual(len(list(bin.src_pads())), 0)

    pad1 = gst.Pad('src0', gst.PAD_SRC)
    pad1.set_caps(gst.Caps('asd'))
    pad1.set_active(True)
    track1.composition.add_pad(pad1)

    pad2 = gst.Pad('src0', gst.PAD_SRC)
    pad2.set_caps(gst.Caps('asd'))
    pad2.set_active(True)
    track2.composition.add_pad(pad2)

    # One ghost pad per composition pad.
    self.failUnlessEqual(len(list(bin.src_pads())), 2)

    # Removing composition pads removes the matching ghost pads.
    track1.composition.remove_pad(pad1)
    self.failUnlessEqual(len(list(bin.src_pads())), 1)
    track2.composition.remove_pad(pad2)
    self.failUnlessEqual(len(list(bin.src_pads())), 0)

    factory.clean()
def testTracks(self):
    """Adding/removing timeline tracks must keep the factory bin's
    children and its output streams in sync."""
    timeline = Timeline()
    stream1 = VideoStream(gst.Caps('video/x-raw-rgb'), 'src0')
    stream2 = AudioStream(gst.Caps('audio/x-raw-int'), 'src1')
    track1 = Track(stream1)
    track2 = Track(stream2)

    # start with 2 tracks
    timeline.addTrack(track1)
    timeline.addTrack(track2)
    factory = TimelineSourceFactory(timeline)
    bin = factory.makeBin()
    self.failUnlessEqual(len(list(bin)), 2)
    self.failUnlessEqual(set(factory.getOutputStreams()),
            set([stream1, stream2]))

    # add a new track
    stream3 = AudioStream(gst.Caps('audio/x-raw-int'), 'src2')
    track3 = Track(stream3)
    timeline.addTrack(track3)
    self.failUnlessEqual(len(list(bin)), 3)
    self.failUnlessEqual(set(factory.getOutputStreams()),
            set([stream1, stream2, stream3]))

    # remove a track
    timeline.removeTrack(track3)
    self.failUnlessEqual(len(list(bin)), 2)
    self.failUnlessEqual(set(factory.getOutputStreams()),
            set([stream1, stream2]))

    factory.clean()
def testSavetimelineObjects(self):
    """_saveTimelineObjects must produce one child element per timeline
    object passed in."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = FileSourceFactory("file1.ogg")
    source1.addOutputStream(video_stream)

    # Prime the formatter context so the -ref elements can resolve.
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    self.formatter._saveTrackObject(track_object)

    timeline_object = TimelineObject(source1)
    timeline_object.addTrackObject(track_object)

    element = self.formatter._saveTimelineObjects([timeline_object])
    self.failUnlessEqual(len(element), 1)
def testSaveTrack(self):
    """A serialized track is a <track> element whose <track-objects>
    child lists every track object."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = VideoTestSourceFactory()

    # Prime the formatter context so the -ref elements can resolve.
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)

    element = self.formatter._saveTrack(track)
    self.failUnlessEqual(element.tag, "track")
    track_objects_element = element.find("track-objects")
    self.failUnlessEqual(len(track_objects_element), 1)
def setUp(self):
    """Build a stub video factory and two tracks sharing one stream."""
    TestCase.setUp(self)
    self.factory = StubFactory()
    self.stream = VideoStream(gst.Caps('video/x-raw-rgb'))
    self.factory.addOutputStream(self.stream)
    # Two tracks over the same stream, for cross-track tests.
    self.track1 = Track(self.stream)
    self.track2 = Track(self.stream)
def _fillTimeline(self):
    """Add default audio/video tracks, then append a source factory to
    the timeline for every known URI."""
    # audio and video track
    video_stream = VideoStream(gst.Caps('video/x-raw-rgb; video/x-raw-yuv'))
    self.project.timeline.addTrack(Track(video_stream))
    audio_stream = AudioStream(gst.Caps('audio/x-raw-int; audio/x-raw-float'))
    self.project.timeline.addTrack(Track(audio_stream))

    # One timeline source per discovered URI.
    for uri in self._uris:
        factory = self.project.sources.getUri(uri)
        self.project.timeline.addSourceFactory(factory)
def testLoadTrackEffect(self):
    """Loading a <track-object> of type TrackEffect must restore the
    factory, stream and all timing properties."""
    # Build a fake document tree by hand.
    element = Element("track-object",
            type="pitivi.timeline.track.TrackEffect",
            start=ts(1 * gst.SECOND),
            duration=ts(10 * gst.SECOND),
            in_point=ts(5 * gst.SECOND),
            media_duration=ts(15 * gst.SECOND),
            priority=ts(5),
            id="1")
    effect_elem = SubElement(element, "effect")
    factory_elem = SubElement(effect_elem, "factory", name="identity")
    properties_elem = SubElement(effect_elem, "gst-element-properties",
            sync="(bool)True")

    # Register a fake identity factory so the loader can look it up.
    stream = AudioStream(gst.Caps("audio/x-raw-int"))
    factory = EffectFactory('identity')
    factory.addInputStream(stream)
    factory.addOutputStream(stream)
    self.formatter.avalaible_effects._effect_factories_dict[
        'identity'] = factory

    track = Track(stream)
    track_object = self.formatter._loadTrackObject(track, element)

    self.failUnless(isinstance(track_object, TrackEffect))
    self.failUnlessEqual(track_object.factory, factory)
    self.failUnlessEqual(track_object.stream, stream)
    self.failUnlessEqual(track_object.start, 1 * gst.SECOND)
    self.failUnlessEqual(track_object.duration, 10 * gst.SECOND)
    self.failUnlessEqual(track_object.in_point, 5 * gst.SECOND)
    self.failUnlessEqual(track_object.media_duration, 15 * gst.SECOND)
    self.failUnlessEqual(track_object.priority, 5)
def testAudioOnly(self):
    """An audio-only timeline must play to EOS and report a duration of
    start offset + clip duration."""
    audio_factory1 = AudioTestSourceFactory(3)
    audio_factory1.duration = 10 * gst.SECOND
    stream = AudioStream(gst.Caps('audio/x-raw-int'), 'src0')
    audio_factory1.addOutputStream(stream)

    timeline = Timeline()
    track = Track(stream)
    track_object1 = SourceTrackObject(audio_factory1, stream)
    track_object1.start = 2 * gst.SECOND
    track.addTrackObject(track_object1)
    timeline.addTrack(track)

    factory = TimelineSourceFactory(timeline)
    bin = factory.makeBin()
    self.failUnlessEqual(len(list(bin)), 1)
    # 2s offset + 10s clip duration.
    self.failUnlessEqual(factory.duration, 12 * gst.SECOND)

    fakesink = gst.element_factory_make('fakesink')

    def bin_pad_added_cb(bin, pad):
        # Route whatever the bin produces into the fakesink.
        pad.link(fakesink.get_pad('sink'))

    bin.connect('pad-added', bin_pad_added_cb)

    def error_cb(bus, message):
        gerror, debug = message.parse_error()
        self.fail('%s: %s' % (gerror.message, debug))

    def eos_cb(bus, message):
        self.loop.quit()

    pipeline = gst.Pipeline()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message::error', error_cb)
    bus.connect('message::eos', eos_cb)

    pipeline.add(bin)
    pipeline.add(fakesink)

    # Play until EOS (or fail on the first error message).
    pipeline.set_state(gst.STATE_PLAYING)
    self.loop.run()
    pipeline.set_state(gst.STATE_NULL)

    factory.clean()
def setUp(self): # create a pipeline self.pipeline = gst.Pipeline() self.track1 = Track(yuv("I420")) track_object1 = make_track_object(yuv("I420")) track_object2 = make_track_object(yuv("Y42B")) track_object3 = make_track_object(yuv("Y444")) track_object4 = make_track_object(rgb()) track_object5 = make_track_object(yuv("AYUV")) for i, track_object in enumerate( (track_object1, track_object2, track_object3, track_object4, track_object5)): self.track1.addTrackObject(track_object) # set priorities from 1 to 5 track_object.priority = i + 1 # track_object5 falls outside (0s, 15s) so it isn't linked to videomixer track_object5.start = 15 * gst.SECOND # make a fakesink for the pipeline and connect it as necessary with a callback composition = self.track1.composition fakesink = gst.element_factory_make('fakesink') def bin_pad_added_cb(composition, pad): pad.link(fakesink.get_pad('sink')) composition.connect("pad-added", bin_pad_added_cb) # add the composition and fakesink to the pipeline and set state to paused to preroll self.pipeline.add(composition) self.pipeline.add(fakesink) self.pipeline.set_state(gst.STATE_PAUSED) # wait for preroll to complete bus = self.pipeline.get_bus() msg = bus.timed_pop_filtered( gst.CLOCK_TIME_NONE, gst.MESSAGE_ASYNC_DONE | gst.MESSAGE_ERROR) if msg.type == gst.MESSAGE_ERROR: gerror, debug = msg.parse_error() print "\nError message: %s\nDebug info: %s" % (gerror, debug) self.failUnlessEqual(msg.type, gst.MESSAGE_ASYNC_DONE) self.svmbin = list(self.track1.mixer.elements())[0]
def testLoadProject(self):
    """Serializing a whole project yields a <pitivi> root with both a
    <factories> and a <timeline> subtree.

    Fix: the serialized document used to be written to a hard-coded
    "/tmp/untitled.pptv" that was never cleaned up (and leaked the file
    handle if tostring() raised). It now round-trips through a temporary
    file that is always removed.
    """
    import os
    import tempfile

    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = VideoTestSourceFactory()

    # Prime the formatter context so references resolve.
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    self.formatter._saveTrackObject(track_object)

    timeline_object = TimelineObject(source1)
    timeline_object.addTrackObject(track_object)
    self.formatter._saveTimelineObject(timeline_object)

    timeline = Timeline()
    timeline.addTrack(track)
    self.formatter._saveTimeline(timeline)

    project = Project()
    project.timeline = timeline
    project.sources.addFactory(source1)

    element = self.formatter._serializeProject(project)
    self.failUnlessEqual(element.tag, "pitivi")
    self.failIfEqual(element.find("factories"), None)
    self.failIfEqual(element.find("timeline"), None)

    # Check that the serialized tree can actually be written out.
    indent(element)
    fd, path = tempfile.mkstemp(suffix=".pptv")
    try:
        os.write(fd, tostring(element))
    finally:
        os.close(fd)
        os.unlink(path)
def setUp(self):
    """Two tracks on one timeline, each holding a track object that both
    belong to a single timeline object; log observation enabled."""
    self.stream = new_stream()
    self.factory = new_factory()
    self.track1 = Track(self.stream)
    self.track2 = Track(self.stream)
    self.timeline = Timeline()
    self.timeline.addTrack(self.track1)
    self.timeline.addTrack(self.track2)

    self.track_object1 = SourceTrackObject(self.factory, self.stream)
    self.track_object2 = SourceTrackObject(self.factory, self.stream)
    self.track1.addTrackObject(self.track_object1)
    self.track2.addTrackObject(self.track_object2)

    self.timeline_object1 = TimelineObject(self.factory)
    self.timeline_object1.addTrackObject(self.track_object1)
    self.timeline_object1.addTrackObject(self.track_object2)

    # Record timeline changes through the undoable action log.
    self.action_log = UndoableActionLog()
    self.observer = TestTimelineLogObserver(self.action_log)
    self.observer.startObserving(self.timeline)
def _loadTrack(self, element):
    """Deserialize a <track> element into a Track, loading each child of
    its <track-objects> subtree into the new track."""
    self.debug("%r", element)
    stream = self._loadStream(element.find("stream"))
    track = Track(stream)
    for child in element.find("track-objects"):
        self._loadTrackObject(track, child)
    return track
def setUp(self):
    """Same two-track fixture as the base undo tests, plus a pair of
    track effects built from a test effect factory."""
    self.stream = new_stream()
    self.factory = new_source_factory()
    self.effect_factory = TestEffectFactory(self.stream)
    self.track1 = Track(self.stream)
    self.track2 = Track(self.stream)
    self.timeline = Timeline()
    self.timeline.addTrack(self.track1)
    self.timeline.addTrack(self.track2)

    self.track_object1 = SourceTrackObject(self.factory, self.stream)
    self.track_object2 = SourceTrackObject(self.factory, self.stream)
    self.track_effect1 = TrackEffect(self.effect_factory, self.stream)
    self.track_effect2 = TrackEffect(self.effect_factory, self.stream)
    self.track1.addTrackObject(self.track_object1)
    self.track2.addTrackObject(self.track_object2)

    self.timeline_object1 = TimelineObject(self.factory)
    self.timeline_object1.addTrackObject(self.track_object1)
    self.timeline_object1.addTrackObject(self.track_object2)

    # Record timeline changes through the undoable action log.
    self.action_log = UndoableActionLog()
    self.observer = TestTimelineLogObserver(self.action_log)
    self.observer.startObserving(self.timeline)
def testSplitObjectKeyframes(self):
    """Splitting an object must partition its volume keyframes: those
    before the split stay on the original, the rest move to the copy."""
    DURATION = 10 * gst.SECOND
    factory = AudioTestSourceFactory()
    factory.duration = DURATION
    stream_ = AudioStream(gst.Caps("audio/x-raw-int"))
    obj = SourceTrackObject(factory, stream_)
    track = Track(stream_)
    track.addTrackObject(obj)
    obj.start = 3 * gst.SECOND
    obj.duration = DURATION

    # create a three keyframes at: 3, 6 and 9 seconds
    interpolator = obj.getInterpolator("volume")
    keyframes = dict(((t * gst.SECOND, (t % 2, gst.INTERPOLATE_LINEAR))
            for t in xrange(3, 10, 3)))

    # Keyframes before 5s should stay with obj, the rest go to the copy.
    expected = []
    expected2 = []
    for time, (value, mode) in keyframes.iteritems():
        kf = interpolator.newKeyframe(time, value, mode)
        if time < (5 * gst.SECOND):
            expected.append(kf)
        else:
            expected2.append(kf)

    def getKeyframes(obj):
        return list(obj.getInterpolator("volume").getInteriorKeyframes())

    obj2 = obj.splitObject(8 * gst.SECOND)

    self.failUnlessEqual(getKeyframes(obj), expected)
    self.failUnlessEqual(getKeyframes(obj2), expected2)
def setUp(self): # create a pipeline self.pipeline = gst.Pipeline() self.track1 = Track(yuv("I420")) track_object1 = make_track_object(yuv("I420")) track_object2 = make_track_object(yuv("Y42B")) track_object3 = make_track_object(yuv("Y444")) track_object4 = make_track_object(rgb()) track_object5 = make_track_object(yuv("AYUV")) for i, track_object in enumerate((track_object1, track_object2, track_object3, track_object4, track_object5)): self.track1.addTrackObject(track_object) # set priorities from 1 to 5 track_object.priority = i + 1 # track_object5 falls outside (0s, 15s) so it isn't linked to videomixer track_object5.start = 15 * gst.SECOND # make a fakesink for the pipeline and connect it as necessary with a callback composition = self.track1.composition fakesink = gst.element_factory_make('fakesink') def bin_pad_added_cb(composition, pad): pad.link(fakesink.get_pad('sink')) composition.connect("pad-added", bin_pad_added_cb) # add the composition and fakesink to the pipeline and set state to paused to preroll self.pipeline.add(composition) self.pipeline.add(fakesink) self.pipeline.set_state(gst.STATE_PAUSED) # wait for preroll to complete bus = self.pipeline.get_bus() msg = bus.timed_pop_filtered(gst.CLOCK_TIME_NONE, gst.MESSAGE_ASYNC_DONE | gst.MESSAGE_ERROR) if msg.type == gst.MESSAGE_ERROR: gerror, debug = msg.parse_error() print "\nError message: %s\nDebug info: %s" % (gerror, debug) self.failUnlessEqual(msg.type, gst.MESSAGE_ASYNC_DONE) self.svmbin = list(self.track1.mixer.elements())[0]
def setUp(self):
    """Build a render pipeline over a timeline fed by three still
    pictures and one test video source."""
    self.mainloop = gobject.MainLoop()

    samples = os.path.join(os.path.dirname(__file__), "samples")
    # All PNG sources decode to the same 24-bit RGB format.
    png_caps = "video/x-raw-rgb,bpp=(int)24,depth=(int)24,endianness=(int)4321,red_mask=(int)16711680,green_mask=(int)65280,blue_mask=(int)255"
    self.facs = []
    self.facs.append([PictureFileSourceFactory('file://' + os.path.join(samples, "flat_colour1_640x480.png")),
            VideoStream(gst.Caps(png_caps))])
    self.facs.append([PictureFileSourceFactory('file://' + os.path.join(samples, "flat_colour2_640x480.png")),
            VideoStream(gst.Caps(png_caps))])
    self.facs.append([PictureFileSourceFactory('file://' + os.path.join(samples, "flat_colour3_320x180.png")),
            VideoStream(gst.Caps(png_caps))])
    # one video with a different resolution
    self.facs.append([VideoTestSourceFactory(),
            VideoStream(gst.Caps('video/x-raw-yuv,width=(int)640,height=(int)480,format=(fourcc)I420'))])

    # configure durations and add output streams to factories
    for factory, stream in self.facs:
        factory.duration = self.clip_duration
        factory.addOutputStream(stream)

    self.track_objects = []
    self.track = Track(self.facs[0][1])
    self.timeline = Timeline()
    self.timeline.addTrack(self.track)

    # Theora-in-Ogg render settings feeding a fake sink.
    vsettings = StreamEncodeSettings(encoder="theoraenc")
    rsettings = RenderSettings(settings=[vsettings], muxer="oggmux")
    self.fakesink = common.FakeSinkFactory()
    rendersink = RenderSinkFactory(RenderFactory(settings=rsettings),
            self.fakesink)
    self.render = RenderAction()
    self.pipeline = Pipeline()
    self.pipeline.connect("eos", self._renderEOSCb)
    self.pipeline.connect("error", self._renderErrorCb)
    self.pipeline.addAction(self.render)
    self.render.addConsumers(rendersink)
    timeline_factory = TimelineSourceFactory(self.timeline)
    self.render.addProducers(timeline_factory)
def testLoadTrackSource(self):
    """Loading a serialized SourceTrackObject must restore its timing
    properties and rebuild its volume interpolator, including start/end
    keyframes clamped to the trimmed region."""
    # Build a fake document tree by hand.
    element = Element("track-object",
            type="pitivi.timeline.track.SourceTrackObject",
            start=ts(1 * gst.SECOND),
            duration=ts(10 * gst.SECOND),
            in_point=ts(5 * gst.SECOND),
            media_duration=ts(15 * gst.SECOND),
            priority=ts(5),
            id="1")
    factory_ref = SubElement(element, "factory-ref", id="1")
    stream_ref = SubElement(element, "stream-ref", id="1")

    # Register a fake factory in the formatter context.
    factory = AudioTestSourceFactory()
    factory.duration = 10 * gst.SECOND
    self.formatter._context.factories["1"] = factory

    # Register a fake stream in the formatter context.
    stream = AudioStream(gst.Caps("audio/x-raw-int"))
    self.formatter._context.streams["1"] = stream

    # Serialize a volume curve with keyframes at 1..9 seconds.
    curves = SubElement(element, "curves")
    curve = SubElement(curves, "curve", property="volume", version="1")
    expected = dict(
        (long(t * gst.SECOND), (float(t % 2), gst.INTERPOLATE_LINEAR))
        for t in xrange(1, 10))
    start = SubElement(curve, "start", value="0.0", mode="2")
    for time, (value, mode) in expected.iteritems():
        SubElement(curve, "keyframe", time=str(time),
                value=str(value), mode=str(mode))
    end = SubElement(curve, "end", value=str(10 % 2), mode="2")

    track = Track(stream)
    # point gun at foot; pull trigger
    track_object = self.formatter._loadTrackObject(track, element)

    self.failUnless(isinstance(track_object, SourceTrackObject))
    self.failUnlessEqual(track_object.factory, factory)
    self.failUnlessEqual(track_object.stream, stream)
    self.failUnlessEqual(track_object.start, 1 * gst.SECOND)
    self.failUnlessEqual(track_object.duration, 10 * gst.SECOND)
    self.failUnlessEqual(track_object.in_point, 5 * gst.SECOND)
    self.failUnlessEqual(track_object.media_duration, 15 * gst.SECOND)
    self.failUnlessEqual(track_object.priority, 5)

    # The interior keyframes must match what was serialized.
    self.failIfEqual(track_object.interpolators, None)
    interpolator = track_object.getInterpolator("volume")
    self.failIfEqual(interpolator, None)
    curve = dict(((kf.time, (kf.value, kf.mode))
            for kf in interpolator.getInteriorKeyframes()))
    self.failUnlessEqual(curve, expected)
    # Start/end keyframes sit at in_point and in_point + duration.
    self.failUnlessEqual(interpolator.start.value, 0.0)
    self.failUnlessEqual(interpolator.start.time, 5 * gst.SECOND)
    self.failUnlessEqual(interpolator.end.value, 0.0)
    self.failUnlessEqual(interpolator.end.time, 15 * gst.SECOND)
def testLoadInterpolatorV0(self):
    """Loading a version-0 curve (no "version" attribute) must migrate
    the start/end keyframes without changing the curve's shape."""
    # Build a fake document tree by hand.
    element = Element("track-object",
            type="pitivi.timeline.track.SourceTrackObject",
            start=ts(1 * gst.SECOND),
            duration=ts(15 * gst.SECOND),
            in_point=ts(5 * gst.SECOND),
            media_duration=ts(15 * gst.SECOND),
            priority=ts(5),
            id="1")
    factory_ref = SubElement(element, "factory-ref", id="1")
    stream_ref = SubElement(element, "stream-ref", id="1")

    # Register a fake factory in the formatter context.
    factory = AudioTestSourceFactory()
    factory.duration = 20 * gst.SECOND
    self.formatter._context.factories["1"] = factory

    # Register a fake stream in the formatter context.
    stream = AudioStream(gst.Caps("audio/x-raw-int"))
    self.formatter._context.streams["1"] = stream

    # Serialize a v0 volume curve (note: no version attribute).
    curves = SubElement(element, "curves")
    curve = SubElement(curves, "curve", property="volume")
    expected = dict(
        (long(t * gst.SECOND), (float(t % 2), gst.INTERPOLATE_LINEAR))
        for t in xrange(6, 15))
    start = SubElement(curve, "start", value="1.0", mode="2")
    for time, (value, mode) in expected.iteritems():
        SubElement(curve, "keyframe", time=str(time),
                value=str(value), mode=str(mode))
    end = SubElement(curve, "end", value="1.0", mode="2")

    track = Track(stream)
    # point gun at foot; pull trigger
    track_object = self.formatter._loadTrackObject(track, element)

    self.failIfEqual(track_object.interpolators, None)
    interpolator = track_object.getInterpolator("volume")
    self.failIfEqual(interpolator, None)
    curve = dict(((kf.time, (kf.value, kf.mode))
            for kf in interpolator.getInteriorKeyframes()))
    self.failUnlessEqual(curve, expected)

    # Check that the start keyframe value has been properly adjusted so
    # as not to change the shape of the curve. Rounding is applied here
    # because the controller seems to round to a different precision
    # than python; we just want to check that the value is "in the
    # ballpark", indicating the keyframes have been updated. If you do
    # the math you'll see that both start and end keyframe values should
    # be about 1/6.
    self.failUnlessEqual(round(interpolator.start.value, 6),
            round((-5.0 / 6) + 1, 6))
    self.failUnlessEqual(interpolator.start.time, 5 * gst.SECOND)

    # Check that the end keyframe value has been properly adjusted so as
    # not to change the shape of the curve.
    self.failUnlessEqual(interpolator.end.time, 15 * gst.SECOND)
    self.failUnlessEqual(round(interpolator.end.value, 6),
            round((15.0 / 6) - (7.0 / 3), 6))
class TestTimelineUndo(TestCase):
    """Undo/redo coverage for timeline edits: adding/removing clips and
    effects, property changes, and ungrouping."""

    def setUp(self):
        """Two tracks on one timeline; one timeline object spanning both
        tracks; effects available; action logging active."""
        self.stream = new_stream()
        self.factory = new_source_factory()
        self.effect_factory = TestEffectFactory(self.stream)
        self.track1 = Track(self.stream)
        self.track2 = Track(self.stream)
        self.timeline = Timeline()
        self.timeline.addTrack(self.track1)
        self.timeline.addTrack(self.track2)

        self.track_object1 = SourceTrackObject(self.factory, self.stream)
        self.track_object2 = SourceTrackObject(self.factory, self.stream)
        self.track_effect1 = TrackEffect(self.effect_factory, self.stream)
        self.track_effect2 = TrackEffect(self.effect_factory, self.stream)
        self.track1.addTrackObject(self.track_object1)
        self.track2.addTrackObject(self.track_object2)

        self.timeline_object1 = TimelineObject(self.factory)
        self.timeline_object1.addTrackObject(self.track_object1)
        self.timeline_object1.addTrackObject(self.track_object2)

        # Record timeline changes through the undoable action log.
        self.action_log = UndoableActionLog()
        self.observer = TestTimelineLogObserver(self.action_log)
        self.observer.startObserving(self.timeline)

    def testAddTimelineObject(self):
        """Adding a clip is undoable and redoable."""
        stacks = []

        def commitCb(action_log, stack, nested):
            stacks.append(stack)

        self.action_log.connect("commit", commitCb)

        self.action_log.begin("add clip")
        self.timeline.addTimelineObject(self.timeline_object1)
        self.action_log.commit()

        # One committed stack holding a single TimelineObjectAdded action.
        self.failUnlessEqual(len(stacks), 1)
        stack = stacks[0]
        self.failUnlessEqual(len(stack.done_actions), 1)
        action = stack.done_actions[0]
        self.failUnless(isinstance(action, TimelineObjectAdded))
        self.failUnless(self.timeline_object1 \
                in self.timeline.timeline_objects)

        self.action_log.undo()
        self.failIf(self.timeline_object1 \
                in self.timeline.timeline_objects)

        self.action_log.redo()
        self.failUnless(self.timeline_object1 \
                in self.timeline.timeline_objects)

    def testRemoveTimelineObject(self):
        """Removing a clip is undoable and redoable."""
        stacks = []

        def commitCb(action_log, stack, nested):
            stacks.append(stack)

        self.action_log.connect("commit", commitCb)

        self.timeline.addTimelineObject(self.timeline_object1)
        self.action_log.begin("remove clip")
        self.timeline.removeTimelineObject(self.timeline_object1, deep=True)
        self.action_log.commit()

        # One committed stack holding a single TimelineObjectRemoved action.
        self.failUnlessEqual(len(stacks), 1)
        stack = stacks[0]
        self.failUnlessEqual(len(stack.done_actions), 1)
        action = stack.done_actions[0]
        self.failUnless(isinstance(action, TimelineObjectRemoved))
        self.failIf(self.timeline_object1 \
                in self.timeline.timeline_objects)

        self.action_log.undo()
        self.failUnless(self.timeline_object1 \
                in self.timeline.timeline_objects)

        self.action_log.redo()
        self.failIf(self.timeline_object1 \
                in self.timeline.timeline_objects)

    def testAddEffectToTimelineObject(self):
        """Attaching an effect to a clip is undoable and redoable."""
        stacks = []
        pipeline = Pipeline()

        def commitCb(action_log, stack, nested):
            stacks.append(stack)

        self.action_log.connect("commit", commitCb)
        self.observer.pipeline = pipeline

        #FIXME Should I commit it and check there are 2 elements
        #in the stacks
        self.timeline.addTimelineObject(self.timeline_object1)
        self.track1.addTrackObject(self.track_effect1)

        self.action_log.begin("add effect")
        self.timeline_object1.addTrackObject(self.track_effect1)
        self.action_log.commit()

        # One committed stack holding a single TrackEffectAdded action.
        self.failUnlessEqual(len(stacks), 1)
        stack = stacks[0]
        self.failUnlessEqual(len(stack.done_actions), 1)
        action = stack.done_actions[0]
        self.failUnless(isinstance(action, TrackEffectAdded))

        # The effect is present exactly once on both the clip and the track.
        self.failUnless(self.track_effect1 \
                in self.timeline_object1.track_objects)
        self.failUnless(self.track_effect1 \
                in self.track1.track_objects)
        self.failUnless(len([effect for effect in \
                self.timeline_object1.track_objects
                if isinstance(effect, TrackEffect)]) == 1)
        self.failUnless(len([effect for effect in
                self.track1.track_objects
                if isinstance(effect, TrackEffect)]) == 1)

        self.action_log.undo()
        self.failIf(self.track_effect1 \
                in self.timeline_object1.track_objects)
        self.failIf(self.track_effect1 \
                in self.track1.track_objects)

        self.action_log.redo()
        self.failUnless(len([effect for effect in
                self.timeline_object1.track_objects
                if isinstance(effect, TrackEffect)]) == 1)
        self.failUnless(len([effect for effect in
                self.track1.track_objects
                if isinstance(effect, TrackEffect)]) == 1)

        self.timeline.removeTimelineObject(self.timeline_object1, deep=True)

    def testTimelineObjectPropertyChange(self):
        """Start and priority changes are undoable and redoable."""
        stacks = []

        def commitCb(action_log, stack, nested):
            stacks.append(stack)

        self.action_log.connect("commit", commitCb)

        self.timeline_object1.start = 5 * gst.SECOND
        self.timeline_object1.duration = 20 * gst.SECOND
        self.timeline.addTimelineObject(self.timeline_object1)

        self.action_log.begin("modify clip")
        self.timeline_object1.start = 10 * gst.SECOND
        self.action_log.commit()

        self.failUnlessEqual(len(stacks), 1)
        stack = stacks[0]
        self.failUnlessEqual(len(stack.done_actions), 1)
        action = stack.done_actions[0]
        self.failUnless(isinstance(action, TimelineObjectPropertyChanged))
        self.failUnlessEqual(self.timeline_object1.start, 10 * gst.SECOND)

        self.action_log.undo()
        self.failUnlessEqual(self.timeline_object1.start, 5 * gst.SECOND)
        self.action_log.redo()
        self.failUnlessEqual(self.timeline_object1.start, 10 * gst.SECOND)

        # Priority changes get the same treatment.
        self.timeline_object1.priority = 10
        self.action_log.begin("priority change")
        self.timeline_object1.priority = 20
        self.action_log.commit()

        self.failUnlessEqual(self.timeline_object1.priority, 20)
        self.action_log.undo()
        self.failUnlessEqual(self.timeline_object1.priority, 10)
        self.action_log.redo()
        self.failUnlessEqual(self.timeline_object1.priority, 20)

    def testUngroup(self):
        """Ungrouping a two-track clip yields two clips with the same
        timing; undo restores the single clip."""
        self.timeline_object1.start = 5 * gst.SECOND
        self.timeline_object1.duration = 20 * gst.SECOND
        self.timeline.addTimelineObject(self.timeline_object1)

        self.timeline.setSelectionToObj(self.track_object1, SELECT_ADD)

        self.failUnlessEqual(len(self.timeline.timeline_objects), 1)
        self.failUnlessEqual(self.timeline.timeline_objects[0].start,
                5 * gst.SECOND)
        self.failUnlessEqual(self.timeline.timeline_objects[0].duration,
                20 * gst.SECOND)

        self.action_log.begin("ungroup")
        self.timeline.ungroupSelection()
        self.action_log.commit()

        # Both resulting clips keep the original start/duration.
        self.failUnlessEqual(len(self.timeline.timeline_objects), 2)
        self.failUnlessEqual(self.timeline.timeline_objects[0].start,
                5 * gst.SECOND)
        self.failUnlessEqual(self.timeline.timeline_objects[0].duration,
                20 * gst.SECOND)
        self.failUnlessEqual(self.timeline.timeline_objects[1].start,
                5 * gst.SECOND)
        self.failUnlessEqual(self.timeline.timeline_objects[1].duration,
                20 * gst.SECOND)

        self.action_log.undo()
        self.failUnlessEqual(len(self.timeline.timeline_objects), 1)
        self.failUnlessEqual(self.timeline.timeline_objects[0].start,
                5 * gst.SECOND)
        self.failUnlessEqual(self.timeline.timeline_objects[0].duration,
                20 * gst.SECOND)
def setUp(self):
    """Stub audio factory, one track and an empty timeline."""
    self.factory = StubFactory()
    self.stream = AudioStream(gst.Caps('audio/x-raw-int'))
    self.factory.addOutputStream(self.stream)
    self.track1 = Track(self.stream)
    self.timeline = Timeline()
class TestAlpha(TestCase):
    """Alpha (opacity) handling in SmartVideomixerBin.

    Each mixer input should carry a forced "format" field on its
    capsfilter exactly while at least one alpha keyframe is below 1.0.
    """
    # NOTE(review): an identical TestAlpha class appears twice in this
    # file; only the later definition survives at import time.

    def setUp(self):
        # create a pipeline
        self.pipeline = gst.Pipeline()
        self.track1 = Track(yuv("I420"))
        # five track objects with assorted video formats
        track_object1 = make_track_object(yuv("I420"))
        track_object2 = make_track_object(yuv("Y42B"))
        track_object3 = make_track_object(yuv("Y444"))
        track_object4 = make_track_object(rgb())
        track_object5 = make_track_object(yuv("AYUV"))
        for i, track_object in enumerate((track_object1, track_object2,
                track_object3, track_object4, track_object5)):
            self.track1.addTrackObject(track_object)
            # set priorities from 1 to 5
            track_object.priority = i + 1
        # track_object5 falls outside (0s, 15s) so it isn't linked to
        # videomixer
        track_object5.start = 15 * gst.SECOND
        # make a fakesink for the pipeline and connect it as necessary
        # with a callback
        composition = self.track1.composition
        fakesink = gst.element_factory_make('fakesink')

        def bin_pad_added_cb(composition, pad):
            pad.link(fakesink.get_pad('sink'))
        composition.connect("pad-added", bin_pad_added_cb)
        # add the composition and fakesink to the pipeline and set state
        # to paused to preroll
        self.pipeline.add(composition)
        self.pipeline.add(fakesink)
        self.pipeline.set_state(gst.STATE_PAUSED)
        # wait for preroll to complete
        bus = self.pipeline.get_bus()
        msg = bus.timed_pop_filtered(gst.CLOCK_TIME_NONE,
                gst.MESSAGE_ASYNC_DONE | gst.MESSAGE_ERROR)
        if msg.type == gst.MESSAGE_ERROR:
            gerror, debug = msg.parse_error()
            print "\nError message: %s\nDebug info: %s" % (gerror, debug)
        self.failUnlessEqual(msg.type, gst.MESSAGE_ASYNC_DONE)
        # the SmartVideomixerBin instance under test
        self.svmbin = list(self.track1.mixer.elements())[0]

    def tearDown(self):
        # tear the pipeline down before the base class cleans up
        self.pipeline.set_state(gst.STATE_NULL)
        TestCase.tearDown(self)

    def failUnlessAlphaIsSet(self):
        # check that each SmartVideomixerBin input has alpha set on its
        # capsfilter
        for input in self.svmbin.inputs.values():
            capsfilter = input[2]
            self.failUnless(capsfilter.props.caps[0].has_key("format"))

    def failUnlessAlphaIsNotSet(self):
        # check that each SmartVideomixerBin input has alpha _not_ set on
        # its capsfilter
        for input in self.svmbin.inputs.values():
            capsfilter = input[2]
            self.failIf(capsfilter.props.caps[0].has_key("format"))

    def testKeyframesOnDifferentObjects(self):
        # no alpha < 1.0 keyframes
        for track_obj in self.track1.track_objects:
            set_all_keyframes(track_obj, 1.0)
        self.failUnlessAlphaIsNotSet()
        track_object1 = self.track1.track_objects[0]
        track_object2 = self.track1.track_objects[1]
        # one alpha < 1.0 keyframe
        set_one_keyframe(track_object1, 0.8)
        self.failUnlessAlphaIsSet()
        # two alpha < 1.0 keyframes
        set_one_keyframe(track_object2, 0.5)
        self.failUnlessAlphaIsSet()
        # one alpha < 1.0 keyframe
        set_one_keyframe(track_object1, 1.0)
        self.failUnlessAlphaIsSet()
        # no alpha < 1.0 keyframes
        set_one_keyframe(track_object2, 1.0)
        self.failUnlessAlphaIsNotSet()

    def testKeyframesOnSameObject(self):
        # alpha tracking with multiple keyframes on a single object
        for track_obj in self.track1.track_objects:
            set_all_keyframes(track_obj, 1.0)
        self.failUnlessAlphaIsNotSet()
        track_object1 = self.track1.track_objects[0]
        interpolator1 = track_object1.getInterpolator("alpha")
        keyframe1 = interpolator1.newKeyframe(1 * gst.SECOND, 0.8)
        self.failUnlessAlphaIsSet()
        keyframe2 = interpolator1.newKeyframe(2 * gst.SECOND, 0.5)
        self.failUnlessAlphaIsSet()
        # keyframe2 is still below 1.0, so alpha must stay set
        interpolator1.setKeyframeValue(keyframe1, 1.0)
        self.failUnlessAlphaIsSet()
        interpolator1.removeKeyframe(keyframe2)
        self.failUnlessAlphaIsNotSet()

    def testRemoveTrackObjects(self):
        for track_obj in self.track1.track_objects:
            set_all_keyframes(track_obj, 1.0)
        self.failUnlessAlphaIsNotSet()
        track_object1 = self.track1.track_objects[0]
        track_object2 = self.track1.track_objects[1]
        # set one keyframe below 1.0
        set_one_keyframe(track_object1, 0.8)
        self.failUnlessAlphaIsSet()
        # track_object2 has no alpha < 1.0 keyframes, removing it
        # shouldn't trigger an alpha change
        self.track1.removeTrackObject(track_object2)
        self.failUnlessAlphaIsSet()
        # track_object1 does have an alpha < 1.0 keyframe, removing it
        # should trigger an alpha change
        self.track1.removeTrackObject(track_object1)
        self.failUnlessAlphaIsNotSet()

    def testRequestPads(self):
        # requesting a new pad should never trigger an alpha change
        template = gst.PadTemplate("sink_%u", gst.PAD_SINK,
                gst.PAD_REQUEST,
                gst.Caps("video/x-raw-yuv;video/x-raw-rgb"))
        for track_obj in self.track1.track_objects:
            set_all_keyframes(track_obj, 1.0)
        # when unset, should remain unset
        self.failUnlessAlphaIsNotSet()
        test_pad1 = self.svmbin.do_request_new_pad(template)
        self.failUnlessAlphaIsNotSet()
        obj = self.track1.track_objects[0]
        set_one_keyframe(obj, 0.8)
        # when set, should remain set
        self.failUnlessAlphaIsSet()
        test_pad2 = self.svmbin.do_request_new_pad(template)
        self.failUnlessAlphaIsSet()

    def testTransitions(self):
        for track_obj in self.track1.track_objects:
            set_all_keyframes(track_obj, 1.0)
        self.failUnlessAlphaIsNotSet()
        track_object1 = self.track1.track_objects[0]
        track_object2 = self.track1.track_objects[1]
        track_object1.start = 0
        track_object2.start = 10 * gst.SECOND
        old_priority = track_object2.priority
        # overlapping objects at the same priority should cause alpha to
        # be set once transitions are updated
        track_object2.priority = track_object1.priority
        self.track1.updateTransitions()
        self.failUnlessAlphaIsSet()
        # restoring the priority removes the transition and the alpha
        track_object2.priority = old_priority
        self.track1.updateTransitions()
        self.failUnlessAlphaIsNotSet()
class TestStillImage(TestCase):
    """Renders timelines that mix still-image sources (PNG files) and a
    test video source, at equal and differing resolutions, asserting
    that every render produces at least some data."""

    # length given to every clip placed on the timeline
    clip_duration = 3 * gst.SECOND

    def setUp(self):
        self.mainloop = gobject.MainLoop()
        samples = os.path.join(os.path.dirname(__file__), "samples")
        # (factory, stream) pairs: three stills (two 640x480, one
        # 320x180) plus one generated video source
        self.facs = []
        self.facs.append([PictureFileSourceFactory('file://' +
                os.path.join(samples, "flat_colour1_640x480.png")),
                VideoStream(gst.Caps("video/x-raw-rgb,bpp=(int)24,depth=(int)24,endianness=(int)4321,red_mask=(int)16711680,green_mask=(int)65280,blue_mask=(int)255"))])
        self.facs.append([PictureFileSourceFactory('file://' +
                os.path.join(samples, "flat_colour2_640x480.png")),
                VideoStream(gst.Caps("video/x-raw-rgb,bpp=(int)24,depth=(int)24,endianness=(int)4321,red_mask=(int)16711680,green_mask=(int)65280,blue_mask=(int)255"))])
        self.facs.append([PictureFileSourceFactory('file://' +
                os.path.join(samples, "flat_colour3_320x180.png")),
                VideoStream(gst.Caps("video/x-raw-rgb,bpp=(int)24,depth=(int)24,endianness=(int)4321,red_mask=(int)16711680,green_mask=(int)65280,blue_mask=(int)255"))])
        # one video with a different resolution
        self.facs.append([VideoTestSourceFactory(),
                VideoStream(gst.Caps('video/x-raw-yuv,width=(int)640,height=(int)480,format=(fourcc)I420'))])
        # configure durations and add output streams to factories
        for fac in self.facs:
            factory = fac[0]
            stream = fac[1]
            factory.duration = self.clip_duration
            factory.addOutputStream(stream)
        self.track_objects = []
        self.track = Track(self.facs[0][1])
        self.timeline = Timeline()
        self.timeline.addTrack(self.track)
        # render through a theora/ogg encoding chain into a fakesink
        vsettings = StreamEncodeSettings(encoder="theoraenc")
        rsettings = RenderSettings(settings=[vsettings], muxer="oggmux")
        self.fakesink = common.FakeSinkFactory()
        rendersink = RenderSinkFactory(RenderFactory(settings=rsettings),
                self.fakesink)
        self.render = RenderAction()
        self.pipeline = Pipeline()
        self.pipeline.connect("eos", self._renderEOSCb)
        self.pipeline.connect("error", self._renderErrorCb)
        self.pipeline.addAction(self.render)
        self.render.addConsumers(rendersink)
        timeline_factory = TimelineSourceFactory(self.timeline)
        self.render.addProducers(timeline_factory)

    def tearDown(self):
        self.mainloop.quit()

    def configureStreams(self, inputs, offsets):
        # Place one track object per requested factory index (into
        # self.facs) at the matching offset on the track.
        count = 0
        for i in inputs:
            factory = self.facs[i][0]
            stream = self.facs[i][1]
            track_object = SourceTrackObject(factory, stream)
            self.track_objects.append(track_object)
            track_object.start = offsets[count]
            self.track.addTrackObject(track_object)
            count += 1

    def startRender(self):
        # run the pipeline; the main loop is stopped by the EOS or error
        # callback
        self.render.activate()
        self.data_written = 0
        self.fakesink.bins[0].props.signal_handoffs = True
        self.fakesink.bins[0].connect("handoff", self._fakesinkHandoffCb)
        self.pipeline.play()
        self.mainloop.run()

    def _fakesinkHandoffCb(self, fakesink, buf, pad):
        # count rendered bytes so _renderEOSCb can assert progress
        self.data_written += buf.size

    def _renderEOSCb(self, obj):
        self.mainloop.quit()
        # check the render was successful
        self.assertTrue(self.data_written > 0)

    def _renderErrorCb(self, obj, error, details):
        print "Error: %s\nDetails: %s" % (str(error), str(details))
        self.fail("Pipeline rendering error")

    def cleanUp(self):
        # reset the track between successive renders in the same test
        self.render.deactivate()
        self.track.removeAllTrackObjects()
        self.track_objects = []

    def testRendering(self):
        # use one of the still image streams
        self.configureStreams(range(1), [0])
        self.startRender()
        self.cleanUp()
        # use two images with the same resolution and concatenate them
        self.configureStreams(range(2), [0, self.clip_duration])
        self.startRender()
        self.cleanUp()
        # concatenate images with different resolutions
        self.configureStreams(range(3),
                [0, self.clip_duration, 2 * self.clip_duration])
        self.startRender()
        self.cleanUp()
        # mix images with different resolutions by overlapping
        self.configureStreams(range(3),
                [0, self.clip_duration // 2, self.clip_duration])
        self.startRender()
        self.cleanUp()
        # mix images and videos with the same resolution
        self.configureStreams([0, 1, 3],
                [0, self.clip_duration, 2 * self.clip_duration])
        self.startRender()
        self.cleanUp()
        # mix images and videos with different resolutions
        self.configureStreams(range(4),
                [0, self.clip_duration, 2 * self.clip_duration,
                    3 * self.clip_duration])
        self.startRender()
        self.cleanUp()
        # mix images and videos with different resolutions by overlapping
        self.configureStreams(range(4),
                [0, self.clip_duration // 2, self.clip_duration,
                    (3 * self.clip_duration) // 2])
        self.startRender()
        self.cleanUp()
class TestAlpha(TestCase):
    """Alpha (opacity) handling in SmartVideomixerBin.

    Each mixer input should carry a forced "format" field on its
    capsfilter exactly while at least one alpha keyframe is below 1.0.
    """
    # NOTE(review): an identical TestAlpha class appears twice in this
    # file; only the later definition survives at import time.

    def setUp(self):
        # create a pipeline
        self.pipeline = gst.Pipeline()
        self.track1 = Track(yuv("I420"))
        # five track objects with assorted video formats
        track_object1 = make_track_object(yuv("I420"))
        track_object2 = make_track_object(yuv("Y42B"))
        track_object3 = make_track_object(yuv("Y444"))
        track_object4 = make_track_object(rgb())
        track_object5 = make_track_object(yuv("AYUV"))
        for i, track_object in enumerate((track_object1, track_object2,
                track_object3, track_object4, track_object5)):
            self.track1.addTrackObject(track_object)
            # set priorities from 1 to 5
            track_object.priority = i + 1
        # track_object5 falls outside (0s, 15s) so it isn't linked to
        # videomixer
        track_object5.start = 15 * gst.SECOND
        # make a fakesink for the pipeline and connect it as necessary
        # with a callback
        composition = self.track1.composition
        fakesink = gst.element_factory_make('fakesink')

        def bin_pad_added_cb(composition, pad):
            pad.link(fakesink.get_pad('sink'))
        composition.connect("pad-added", bin_pad_added_cb)
        # add the composition and fakesink to the pipeline and set state
        # to paused to preroll
        self.pipeline.add(composition)
        self.pipeline.add(fakesink)
        self.pipeline.set_state(gst.STATE_PAUSED)
        # wait for preroll to complete
        bus = self.pipeline.get_bus()
        msg = bus.timed_pop_filtered(gst.CLOCK_TIME_NONE,
                gst.MESSAGE_ASYNC_DONE | gst.MESSAGE_ERROR)
        if msg.type == gst.MESSAGE_ERROR:
            gerror, debug = msg.parse_error()
            print "\nError message: %s\nDebug info: %s" % (gerror, debug)
        self.failUnlessEqual(msg.type, gst.MESSAGE_ASYNC_DONE)
        # the SmartVideomixerBin instance under test
        self.svmbin = list(self.track1.mixer.elements())[0]

    def tearDown(self):
        # tear the pipeline down before the base class cleans up
        self.pipeline.set_state(gst.STATE_NULL)
        TestCase.tearDown(self)

    def failUnlessAlphaIsSet(self):
        # check that each SmartVideomixerBin input has alpha set on its
        # capsfilter
        for input in self.svmbin.inputs.values():
            capsfilter = input[2]
            self.failUnless(capsfilter.props.caps[0].has_key("format"))

    def failUnlessAlphaIsNotSet(self):
        # check that each SmartVideomixerBin input has alpha _not_ set on
        # its capsfilter
        for input in self.svmbin.inputs.values():
            capsfilter = input[2]
            self.failIf(capsfilter.props.caps[0].has_key("format"))

    def testKeyframesOnDifferentObjects(self):
        # no alpha < 1.0 keyframes
        for track_obj in self.track1.track_objects:
            set_all_keyframes(track_obj, 1.0)
        self.failUnlessAlphaIsNotSet()
        track_object1 = self.track1.track_objects[0]
        track_object2 = self.track1.track_objects[1]
        # one alpha < 1.0 keyframe
        set_one_keyframe(track_object1, 0.8)
        self.failUnlessAlphaIsSet()
        # two alpha < 1.0 keyframes
        set_one_keyframe(track_object2, 0.5)
        self.failUnlessAlphaIsSet()
        # one alpha < 1.0 keyframe
        set_one_keyframe(track_object1, 1.0)
        self.failUnlessAlphaIsSet()
        # no alpha < 1.0 keyframes
        set_one_keyframe(track_object2, 1.0)
        self.failUnlessAlphaIsNotSet()

    def testKeyframesOnSameObject(self):
        # alpha tracking with multiple keyframes on a single object
        for track_obj in self.track1.track_objects:
            set_all_keyframes(track_obj, 1.0)
        self.failUnlessAlphaIsNotSet()
        track_object1 = self.track1.track_objects[0]
        interpolator1 = track_object1.getInterpolator("alpha")
        keyframe1 = interpolator1.newKeyframe(1 * gst.SECOND, 0.8)
        self.failUnlessAlphaIsSet()
        keyframe2 = interpolator1.newKeyframe(2 * gst.SECOND, 0.5)
        self.failUnlessAlphaIsSet()
        # keyframe2 is still below 1.0, so alpha must stay set
        interpolator1.setKeyframeValue(keyframe1, 1.0)
        self.failUnlessAlphaIsSet()
        interpolator1.removeKeyframe(keyframe2)
        self.failUnlessAlphaIsNotSet()

    def testRemoveTrackObjects(self):
        for track_obj in self.track1.track_objects:
            set_all_keyframes(track_obj, 1.0)
        self.failUnlessAlphaIsNotSet()
        track_object1 = self.track1.track_objects[0]
        track_object2 = self.track1.track_objects[1]
        # set one keyframe below 1.0
        set_one_keyframe(track_object1, 0.8)
        self.failUnlessAlphaIsSet()
        # track_object2 has no alpha < 1.0 keyframes, removing it
        # shouldn't trigger an alpha change
        self.track1.removeTrackObject(track_object2)
        self.failUnlessAlphaIsSet()
        # track_object1 does have an alpha < 1.0 keyframe, removing it
        # should trigger an alpha change
        self.track1.removeTrackObject(track_object1)
        self.failUnlessAlphaIsNotSet()

    def testRequestPads(self):
        # requesting a new pad should never trigger an alpha change
        template = gst.PadTemplate("sink_%u", gst.PAD_SINK,
                gst.PAD_REQUEST,
                gst.Caps("video/x-raw-yuv;video/x-raw-rgb"))
        for track_obj in self.track1.track_objects:
            set_all_keyframes(track_obj, 1.0)
        # when unset, should remain unset
        self.failUnlessAlphaIsNotSet()
        test_pad1 = self.svmbin.do_request_new_pad(template)
        self.failUnlessAlphaIsNotSet()
        obj = self.track1.track_objects[0]
        set_one_keyframe(obj, 0.8)
        # when set, should remain set
        self.failUnlessAlphaIsSet()
        test_pad2 = self.svmbin.do_request_new_pad(template)
        self.failUnlessAlphaIsSet()

    def testTransitions(self):
        for track_obj in self.track1.track_objects:
            set_all_keyframes(track_obj, 1.0)
        self.failUnlessAlphaIsNotSet()
        track_object1 = self.track1.track_objects[0]
        track_object2 = self.track1.track_objects[1]
        track_object1.start = 0
        track_object2.start = 10 * gst.SECOND
        old_priority = track_object2.priority
        # overlapping objects at the same priority should cause alpha to
        # be set once transitions are updated
        track_object2.priority = track_object1.priority
        self.track1.updateTransitions()
        self.failUnlessAlphaIsSet()
        # restoring the priority removes the transition and the alpha
        track_object2.priority = old_priority
        self.track1.updateTransitions()
        self.failUnlessAlphaIsNotSet()
def testSaveTrackSource(self):
    """Serialize a SourceTrackObject that carries a volume interpolation
    curve and verify the resulting <track-object> element: its timing
    attributes, the factory/stream references, and the saved keyframes.
    """
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = FileSourceFactory("file1.ogg")
    source1.addOutputStream(video_stream)
    # these two calls are needed to populate the context for the -ref
    # elements
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)
    track_source = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_source)
    # create an interpolator and insert it into the track object
    fakevol = gst.element_factory_make("volume")
    prop = get_controllable_properties(fakevol)[1][1]
    volcurve = Interpolator(track_source, fakevol, prop)
    track_source.interpolators[prop.name] = (prop, volcurve)
    # add some points to the interpolator: endpoints pinned at 0 and
    # 15 % 2, interior keyframes alternating 1/0 every 3 seconds
    # (the dead `value = float(0)` initializer of the original code
    # was removed; the loop value is now computed inline)
    volcurve.start.setObjectTime(0)
    volcurve.start.value = 0
    for t in xrange(3, 15, 3):
        volcurve.newKeyframe(t * gst.SECOND, int(t % 2))
    volcurve.end.setObjectTime(15 * gst.SECOND)
    volcurve.end.value = 15 % 2
    element = self.formatter._saveTrackObject(track_source)
    # element name and serialized timing/priority attributes
    self.failUnlessEqual(element.tag, "track-object")
    self.failUnlessEqual(element.attrib["type"],
            qual(track_source.__class__))
    self.failUnlessEqual(element.attrib["start"], ts(10 * gst.SECOND))
    self.failUnlessEqual(element.attrib["duration"], ts(20 * gst.SECOND))
    self.failUnlessEqual(element.attrib["in_point"], ts(5 * gst.SECOND))
    self.failUnlessEqual(element.attrib["media_duration"],
            ts(15 * gst.SECOND))
    self.failUnlessEqual(element.attrib["priority"], "(int)10")
    # references back to the factory and stream saved above
    self.failIfEqual(element.find("factory-ref"), None)
    self.failIfEqual(element.find("stream-ref"), None)
    # find the interpolation keyframes
    curves = element.find("curves")
    self.failIfEqual(curves, None)
    curve = curves.find("curve")
    self.failIfEqual(curve, None)
    self.failUnlessEqual(curve.attrib["property"], "volume")
    # compute a dictionary of the saved keyframes
    saved_points = dict(
            ((obj.attrib["time"], (obj.attrib["value"], obj.attrib["mode"]))
                for obj in curve.getiterator("keyframe")))
    # compare this with the expected values
    expected = dict(((str(t * gst.SECOND), ("(gdouble)%s" % (t % 2), "2"))
            for t in xrange(3, 15, 3)))
    self.failUnlessEqual(expected, saved_points)
class TestGap(TestCase):
    """Gap discovery between timeline objects: Gap comparison,
    Gap.findAroundObject, SmallestGapsFinder and Gap.findAllGaps."""

    def setUp(self):
        # one stub audio factory/stream, a track, and an empty timeline
        self.factory = StubFactory()
        self.stream = AudioStream(gst.Caps('audio/x-raw-int'))
        self.factory.addOutputStream(self.stream)
        self.track1 = Track(self.stream)
        self.timeline = Timeline()

    def makeTimelineObject(self):
        # Create a timeline object backed by one track object and add
        # both to the fixture track/timeline.
        track_object = SourceTrackObject(self.factory, self.stream)
        self.track1.addTrackObject(track_object)
        timeline_object = TimelineObject(self.factory)
        timeline_object.addTrackObject(track_object)
        self.timeline.addTimelineObject(timeline_object)
        return timeline_object

    def testGapCmp(self):
        # gaps with identical start/duration compare equal
        gap1 = Gap(None, None, start=10, duration=5)
        gap2 = Gap(None, None, start=10, duration=5)
        self.failUnlessEqual(gap1, gap2)
        # a gap with a smaller duration orders below a larger one
        gap2 = Gap(None, None, start=15, duration=4)
        self.failUnless(gap1 > gap2)
        self.failUnless(gap2 < gap1)

    def testFindAroundObject(self):
        timeline_object1 = self.makeTimelineObject()
        timeline_object2 = self.makeTimelineObject()
        # object1 occupies [5s, 15s), object2 occupies [20s, 30s)
        timeline_object1.start = 5 * gst.SECOND
        timeline_object1.duration = 10 * gst.SECOND
        timeline_object2.start = 20 * gst.SECOND
        timeline_object2.duration = 10 * gst.SECOND
        # around object1: [0s, 5s) on the left, [15s, 20s) on the right
        left_gap, right_gap = Gap.findAroundObject(timeline_object1)
        self.failUnlessEqual(left_gap.left_object, None)
        self.failUnlessEqual(left_gap.right_object, timeline_object1)
        self.failUnlessEqual(left_gap.start, 0 * gst.SECOND)
        self.failUnlessEqual(left_gap.duration, 5 * gst.SECOND)
        self.failUnlessEqual(right_gap.left_object, timeline_object1)
        self.failUnlessEqual(right_gap.right_object, timeline_object2)
        self.failUnlessEqual(right_gap.start, 15 * gst.SECOND)
        self.failUnlessEqual(right_gap.duration, 5 * gst.SECOND)
        # around object2: the right-hand gap is unbounded (infinity)
        left_gap, right_gap = Gap.findAroundObject(timeline_object2)
        self.failUnlessEqual(left_gap.left_object, timeline_object1)
        self.failUnlessEqual(left_gap.right_object, timeline_object2)
        self.failUnlessEqual(left_gap.start, 15 * gst.SECOND)
        self.failUnlessEqual(left_gap.duration, 5 * gst.SECOND)
        self.failUnlessEqual(right_gap.left_object, timeline_object2)
        self.failUnlessEqual(right_gap.right_object, None)
        self.failUnlessEqual(right_gap.start, 30 * gst.SECOND)
        self.failUnlessEqual(right_gap.duration, infinity)
        # make the objects overlap: the gap duration goes negative
        timeline_object2.start = 10 * gst.SECOND
        left_gap, right_gap = Gap.findAroundObject(timeline_object1)
        self.failUnlessEqual(right_gap.left_object, timeline_object1)
        self.failUnlessEqual(right_gap.right_object, timeline_object2)
        self.failUnlessEqual(right_gap.start, 15 * gst.SECOND)
        self.failUnlessEqual(right_gap.duration, -5 * gst.SECOND)

    def testGapFinder(self):
        timeline_object1 = self.makeTimelineObject()
        timeline_object2 = self.makeTimelineObject()
        timeline_object3 = self.makeTimelineObject()
        timeline_object4 = self.makeTimelineObject()
        # priority 1: object1 [5s, 15s), object2 [20s, 30s)
        timeline_object1.start = 5 * gst.SECOND
        timeline_object1.duration = 10 * gst.SECOND
        timeline_object1.priority = 1
        timeline_object2.start = 20 * gst.SECOND
        timeline_object2.duration = 10 * gst.SECOND
        timeline_object2.priority = 1
        # priority 2: object3 [31s, 41s), object4 [50s, 60s)
        timeline_object3.start = 31 * gst.SECOND
        timeline_object3.duration = 10 * gst.SECOND
        timeline_object3.priority = 2
        timeline_object4.start = 50 * gst.SECOND
        timeline_object4.duration = 10 * gst.SECOND
        timeline_object4.priority = 2
        gap_finder = SmallestGapsFinder(
                set([timeline_object2, timeline_object3]))
        gap_finder.update(*Gap.findAroundObject(timeline_object2))
        gap_finder.update(*Gap.findAroundObject(timeline_object3))
        left_gap = gap_finder.left_gap
        right_gap = gap_finder.right_gap
        # smallest left gap: between object1 and object2 (5s)
        self.failUnlessEqual(left_gap.left_object, timeline_object1)
        self.failUnlessEqual(left_gap.right_object, timeline_object2)
        self.failUnlessEqual(left_gap.start, 15 * gst.SECOND)
        self.failUnlessEqual(left_gap.duration, 5 * gst.SECOND)
        # smallest right gap: between object3 and object4 (9s)
        self.failUnlessEqual(right_gap.left_object, timeline_object3)
        self.failUnlessEqual(right_gap.right_object, timeline_object4)
        self.failUnlessEqual(right_gap.start, 41 * gst.SECOND)
        self.failUnlessEqual(right_gap.duration, 9 * gst.SECOND)
        # make timeline_object3 and timeline_object4 overlap
        timeline_object3.duration = 20 * gst.SECOND
        # the overlap on object4's left side yields an invalid left gap
        gap_finder = SmallestGapsFinder(set([timeline_object4]))
        gap_finder.update(*Gap.findAroundObject(timeline_object4))
        left_gap = gap_finder.left_gap
        right_gap = gap_finder.right_gap
        self.failUnlessEqual(left_gap, invalid_gap)
        self.failUnlessEqual(right_gap.left_object, timeline_object4)
        self.failUnlessEqual(right_gap.right_object, None)
        self.failUnlessEqual(right_gap.start, 60 * gst.SECOND)
        self.failUnlessEqual(right_gap.duration, infinity)
        # and the overlap on object3's right side yields an invalid
        # right gap
        gap_finder = SmallestGapsFinder(set([timeline_object3]))
        gap_finder.update(*Gap.findAroundObject(timeline_object3))
        left_gap = gap_finder.left_gap
        right_gap = gap_finder.right_gap
        self.failUnlessEqual(left_gap.left_object, None)
        self.failUnlessEqual(left_gap.right_object, timeline_object3)
        self.failUnlessEqual(left_gap.start, 0 * gst.SECOND)
        self.failUnlessEqual(left_gap.duration, 31 * gst.SECOND)
        self.failUnlessEqual(right_gap, invalid_gap)

    def testFindAllGaps(self):
        # NOTE(review): `complex` below shadows the builtin of the same
        # name; harmless here but worth renaming.
        simple = ((3 * gst.SECOND, 1 * gst.SECOND),
                (1 * gst.SECOND, 1 * gst.SECOND))
        objs = []
        for start, duration in simple:
            obj = self.makeTimelineObject()
            obj.start = start
            obj.duration = duration
            objs.append(obj)
        # two 1s clips at 1s and 3s leave 1s gaps at 0s and 2s
        result = [(g.start, g.initial_duration)
                for g in Gap.findAllGaps(objs)]
        self.assertEquals(result, [
            (0 * gst.SECOND, 1 * gst.SECOND),
            (2 * gst.SECOND, 1 * gst.SECOND),
        ])
        # unordered, partially adjacent clips
        complex = [
            (1 * gst.SECOND, 2 * gst.SECOND),
            (6 * gst.SECOND, 2 * gst.SECOND),
            (10 * gst.SECOND, 2 * gst.SECOND),
            (8 * gst.SECOND, 2 * gst.SECOND),
            (14 * gst.SECOND, 1 * gst.SECOND),
            (4 * gst.SECOND, 1 * gst.SECOND),
        ]
        objs = []
        for start, duration in complex:
            obj = self.makeTimelineObject()
            obj.start = start
            obj.duration = duration
            objs.append(obj)
        result = [(g.start, g.initial_duration)
                for g in Gap.findAllGaps(objs)]
        self.assertEquals(result, [
            (0 * gst.SECOND, 1 * gst.SECOND),
            (3 * gst.SECOND, 1 * gst.SECOND),
            (5 * gst.SECOND, 1 * gst.SECOND),
            (12 * gst.SECOND, 2 * gst.SECOND),
        ])
        # an extra clip covering [2s, 7s) swallows the interior gaps
        complex.append((2 * gst.SECOND, 5 * gst.SECOND))
        objs = []
        for start, duration in complex:
            obj = self.makeTimelineObject()
            obj.start = start
            obj.duration = duration
            objs.append(obj)
        result = [(g.start, g.initial_duration)
                for g in Gap.findAllGaps(objs)]
        self.assertEquals(result, [
            (0 * gst.SECOND, 1 * gst.SECOND),
            (12 * gst.SECOND, 2 * gst.SECOND),
        ])
def testSplitObject(self):
    """Splitting a track object: the split point must lie strictly
    inside the object, start/in_point/duration are redistributed around
    the split point, and volume keyframes travel with the new object."""
    DURATION = 10 * gst.SECOND
    factory = AudioTestSourceFactory()
    factory.duration = DURATION
    stream_ = AudioStream(gst.Caps("audio/x-raw-int"))
    obj = SourceTrackObject(factory, stream_)
    track = Track(stream_)
    track.addTrackObject(obj)
    obj.start = 3 * gst.SECOND
    obj.duration = DURATION
    # create a zig-zag volume curve
    interpolator = obj.getInterpolator("volume")
    expected = dict(((t * gst.SECOND, (t % 2, gst.INTERPOLATE_LINEAR))
            for t in xrange(3, 10, 3)))
    for time, (value, mode) in expected.iteritems():
        interpolator.newKeyframe(time, value, mode)

    def getKeyframes(obj):
        # interior keyframes of the volume curve as {time: (value, mode)}
        keyframes = obj.getInterpolator("volume").getInteriorKeyframes()
        return dict(((kf.time, (kf.value, kf.mode)) for kf in keyframes))
    monitor = TrackSignalMonitor(obj)
    # split points outside [3s, 13s) must raise TrackError
    self.failUnlessRaises(TrackError, obj.splitObject, 2 * gst.SECOND)
    self.failUnlessRaises(TrackError, obj.splitObject, 14 * gst.SECOND)
    # should these be possible (ie create empty objects) ?
    self.failUnlessRaises(TrackError, obj.splitObject, 3 * gst.SECOND)
    self.failUnlessRaises(TrackError, obj.splitObject, 13 * gst.SECOND)
    # splitObject at 4s should result in:
    # obj (start 3, end 4) other1 (start 4, end 13)
    other1 = obj.splitObject(4 * gst.SECOND)
    # the whole zig-zag keyframe set ends up on the new object
    self.failUnlessEqual(expected, getKeyframes(other1))
    self.failUnlessEqual(obj.start, 3 * gst.SECOND)
    self.failUnlessEqual(obj.in_point, 0 * gst.SECOND)
    self.failUnlessEqual(obj.duration, 1 * gst.SECOND)
    self.failUnlessEqual(obj.rate, 1)
    self.failUnlessEqual(other1.start, 4 * gst.SECOND)
    self.failUnlessEqual(other1.in_point, 1 * gst.SECOND)
    self.failUnlessEqual(other1.duration, 9 * gst.SECOND)
    self.failUnlessEqual(other1.rate, 1)
    # only the duration of the original object changed; start is intact
    self.failUnlessEqual(monitor.start_changed_count, 0)
    self.failUnlessEqual(monitor.duration_changed_count, 1)
    # move other1 back to start = 1
    other1.start = 1 * gst.SECOND
    # splitObject again other1
    monitor = TrackSignalMonitor(other1)
    other2 = other1.splitObject(6 * gst.SECOND)
    self.failUnlessEqual(other1.start, 1 * gst.SECOND)
    self.failUnlessEqual(other1.in_point, 1 * gst.SECOND)
    self.failUnlessEqual(other1.duration, 5 * gst.SECOND)
    self.failUnlessEqual(other1.rate, 1)
    self.failUnlessEqual(other2.start, 6 * gst.SECOND)
    self.failUnlessEqual(other2.in_point, 6 * gst.SECOND)
    self.failUnlessEqual(other2.duration, 4 * gst.SECOND)
    self.failUnlessEqual(other2.rate, 1)
    self.failUnlessEqual(monitor.start_changed_count, 0)
    self.failUnlessEqual(monitor.duration_changed_count, 1)
class TestGap(TestCase):
    """Gap discovery between timeline objects: Gap comparison,
    Gap.findAroundObject, SmallestGapsFinder and Gap.findAllGaps."""
    # NOTE(review): an identical TestGap class appears twice in this
    # file; only the later definition survives at import time.

    def setUp(self):
        # one stub audio factory/stream, a track, and an empty timeline
        self.factory = StubFactory()
        self.stream = AudioStream(gst.Caps('audio/x-raw-int'))
        self.factory.addOutputStream(self.stream)
        self.track1 = Track(self.stream)
        self.timeline = Timeline()

    def makeTimelineObject(self):
        # Create a timeline object backed by one track object and add
        # both to the fixture track/timeline.
        track_object = SourceTrackObject(self.factory, self.stream)
        self.track1.addTrackObject(track_object)
        timeline_object = TimelineObject(self.factory)
        timeline_object.addTrackObject(track_object)
        self.timeline.addTimelineObject(timeline_object)
        return timeline_object

    def testGapCmp(self):
        # gaps with identical start/duration compare equal
        gap1 = Gap(None, None, start=10, duration=5)
        gap2 = Gap(None, None, start=10, duration=5)
        self.failUnlessEqual(gap1, gap2)
        # a gap with a smaller duration orders below a larger one
        gap2 = Gap(None, None, start=15, duration=4)
        self.failUnless(gap1 > gap2)
        self.failUnless(gap2 < gap1)

    def testFindAroundObject(self):
        timeline_object1 = self.makeTimelineObject()
        timeline_object2 = self.makeTimelineObject()
        # object1 occupies [5s, 15s), object2 occupies [20s, 30s)
        timeline_object1.start = 5 * gst.SECOND
        timeline_object1.duration = 10 * gst.SECOND
        timeline_object2.start = 20 * gst.SECOND
        timeline_object2.duration = 10 * gst.SECOND
        # around object1: [0s, 5s) on the left, [15s, 20s) on the right
        left_gap, right_gap = Gap.findAroundObject(timeline_object1)
        self.failUnlessEqual(left_gap.left_object, None)
        self.failUnlessEqual(left_gap.right_object, timeline_object1)
        self.failUnlessEqual(left_gap.start, 0 * gst.SECOND)
        self.failUnlessEqual(left_gap.duration, 5 * gst.SECOND)
        self.failUnlessEqual(right_gap.left_object, timeline_object1)
        self.failUnlessEqual(right_gap.right_object, timeline_object2)
        self.failUnlessEqual(right_gap.start, 15 * gst.SECOND)
        self.failUnlessEqual(right_gap.duration, 5 * gst.SECOND)
        # around object2: the right-hand gap is unbounded (infinity)
        left_gap, right_gap = Gap.findAroundObject(timeline_object2)
        self.failUnlessEqual(left_gap.left_object, timeline_object1)
        self.failUnlessEqual(left_gap.right_object, timeline_object2)
        self.failUnlessEqual(left_gap.start, 15 * gst.SECOND)
        self.failUnlessEqual(left_gap.duration, 5 * gst.SECOND)
        self.failUnlessEqual(right_gap.left_object, timeline_object2)
        self.failUnlessEqual(right_gap.right_object, None)
        self.failUnlessEqual(right_gap.start, 30 * gst.SECOND)
        self.failUnlessEqual(right_gap.duration, infinity)
        # make the objects overlap: the gap duration goes negative
        timeline_object2.start = 10 * gst.SECOND
        left_gap, right_gap = Gap.findAroundObject(timeline_object1)
        self.failUnlessEqual(right_gap.left_object, timeline_object1)
        self.failUnlessEqual(right_gap.right_object, timeline_object2)
        self.failUnlessEqual(right_gap.start, 15 * gst.SECOND)
        self.failUnlessEqual(right_gap.duration, -5 * gst.SECOND)

    def testGapFinder(self):
        timeline_object1 = self.makeTimelineObject()
        timeline_object2 = self.makeTimelineObject()
        timeline_object3 = self.makeTimelineObject()
        timeline_object4 = self.makeTimelineObject()
        # priority 1: object1 [5s, 15s), object2 [20s, 30s)
        timeline_object1.start = 5 * gst.SECOND
        timeline_object1.duration = 10 * gst.SECOND
        timeline_object1.priority = 1
        timeline_object2.start = 20 * gst.SECOND
        timeline_object2.duration = 10 * gst.SECOND
        timeline_object2.priority = 1
        # priority 2: object3 [31s, 41s), object4 [50s, 60s)
        timeline_object3.start = 31 * gst.SECOND
        timeline_object3.duration = 10 * gst.SECOND
        timeline_object3.priority = 2
        timeline_object4.start = 50 * gst.SECOND
        timeline_object4.duration = 10 * gst.SECOND
        timeline_object4.priority = 2
        gap_finder = SmallestGapsFinder(set([timeline_object2,
                timeline_object3]))
        gap_finder.update(*Gap.findAroundObject(timeline_object2))
        gap_finder.update(*Gap.findAroundObject(timeline_object3))
        left_gap = gap_finder.left_gap
        right_gap = gap_finder.right_gap
        # smallest left gap: between object1 and object2 (5s)
        self.failUnlessEqual(left_gap.left_object, timeline_object1)
        self.failUnlessEqual(left_gap.right_object, timeline_object2)
        self.failUnlessEqual(left_gap.start, 15 * gst.SECOND)
        self.failUnlessEqual(left_gap.duration, 5 * gst.SECOND)
        # smallest right gap: between object3 and object4 (9s)
        self.failUnlessEqual(right_gap.left_object, timeline_object3)
        self.failUnlessEqual(right_gap.right_object, timeline_object4)
        self.failUnlessEqual(right_gap.start, 41 * gst.SECOND)
        self.failUnlessEqual(right_gap.duration, 9 * gst.SECOND)
        # make timeline_object3 and timeline_object4 overlap
        timeline_object3.duration = 20 * gst.SECOND
        # the overlap on object4's left side yields an invalid left gap
        gap_finder = SmallestGapsFinder(set([timeline_object4]))
        gap_finder.update(*Gap.findAroundObject(timeline_object4))
        left_gap = gap_finder.left_gap
        right_gap = gap_finder.right_gap
        self.failUnlessEqual(left_gap, invalid_gap)
        self.failUnlessEqual(right_gap.left_object, timeline_object4)
        self.failUnlessEqual(right_gap.right_object, None)
        self.failUnlessEqual(right_gap.start, 60 * gst.SECOND)
        self.failUnlessEqual(right_gap.duration, infinity)
        # and the overlap on object3's right side yields an invalid
        # right gap
        gap_finder = SmallestGapsFinder(set([timeline_object3]))
        gap_finder.update(*Gap.findAroundObject(timeline_object3))
        left_gap = gap_finder.left_gap
        right_gap = gap_finder.right_gap
        self.failUnlessEqual(left_gap.left_object, None)
        self.failUnlessEqual(left_gap.right_object, timeline_object3)
        self.failUnlessEqual(left_gap.start, 0 * gst.SECOND)
        self.failUnlessEqual(left_gap.duration, 31 * gst.SECOND)
        self.failUnlessEqual(right_gap, invalid_gap)

    def testFindAllGaps(self):
        # NOTE(review): `complex` below shadows the builtin of the same
        # name; harmless here but worth renaming.
        simple = (
            (3 * gst.SECOND, 1 * gst.SECOND),
            (1 * gst.SECOND, 1 * gst.SECOND)
        )
        objs = []
        for start, duration in simple:
            obj = self.makeTimelineObject()
            obj.start = start
            obj.duration = duration
            objs.append(obj)
        # two 1s clips at 1s and 3s leave 1s gaps at 0s and 2s
        result = [(g.start, g.initial_duration)
                for g in Gap.findAllGaps(objs)]
        self.assertEquals(result, [
            (0 * gst.SECOND, 1 * gst.SECOND),
            (2 * gst.SECOND, 1 * gst.SECOND),
        ])
        # unordered, partially adjacent clips
        complex = [
            (1 * gst.SECOND, 2 * gst.SECOND),
            (6 * gst.SECOND, 2 * gst.SECOND),
            (10 * gst.SECOND, 2 * gst.SECOND),
            (8 * gst.SECOND, 2 * gst.SECOND),
            (14 * gst.SECOND, 1 * gst.SECOND),
            (4 * gst.SECOND, 1 * gst.SECOND),
        ]
        objs = []
        for start, duration in complex:
            obj = self.makeTimelineObject()
            obj.start = start
            obj.duration = duration
            objs.append(obj)
        result = [(g.start, g.initial_duration)
                for g in Gap.findAllGaps(objs)]
        self.assertEquals(result, [
            (0 * gst.SECOND, 1 * gst.SECOND),
            (3 * gst.SECOND, 1 * gst.SECOND),
            (5 * gst.SECOND, 1 * gst.SECOND),
            (12 * gst.SECOND, 2 * gst.SECOND),
        ])
        # an extra clip covering [2s, 7s) swallows the interior gaps
        complex.append((2 * gst.SECOND, 5 * gst.SECOND))
        objs = []
        for start, duration in complex:
            obj = self.makeTimelineObject()
            obj.start = start
            obj.duration = duration
            objs.append(obj)
        result = [(g.start, g.initial_duration)
                for g in Gap.findAllGaps(objs)]
        self.assertEquals(result, [
            (0 * gst.SECOND, 1 * gst.SECOND),
            (12 * gst.SECOND, 2 * gst.SECOND),
        ])
def setUp(self):
    # Build the render fixture: image and video source factories, a
    # timeline with one video track, and a render pipeline ending in a
    # fakesink.
    self.mainloop = gobject.MainLoop()
    samples = os.path.join(os.path.dirname(__file__), "samples")
    # (factory, stream) pairs: three stills (two 640x480, one 320x180)
    # plus one generated video source
    self.facs = []
    self.facs.append([
        PictureFileSourceFactory(
            'file://' + os.path.join(samples, "flat_colour1_640x480.png")),
        VideoStream(
            gst.Caps(
                "video/x-raw-rgb,bpp=(int)24,depth=(int)24,endianness=(int)4321,red_mask=(int)16711680,green_mask=(int)65280,blue_mask=(int)255"
            ))
    ])
    self.facs.append([
        PictureFileSourceFactory(
            'file://' + os.path.join(samples, "flat_colour2_640x480.png")),
        VideoStream(
            gst.Caps(
                "video/x-raw-rgb,bpp=(int)24,depth=(int)24,endianness=(int)4321,red_mask=(int)16711680,green_mask=(int)65280,blue_mask=(int)255"
            ))
    ])
    self.facs.append([
        PictureFileSourceFactory(
            'file://' + os.path.join(samples, "flat_colour3_320x180.png")),
        VideoStream(
            gst.Caps(
                "video/x-raw-rgb,bpp=(int)24,depth=(int)24,endianness=(int)4321,red_mask=(int)16711680,green_mask=(int)65280,blue_mask=(int)255"
            ))
    ])
    # one video with a different resolution
    self.facs.append([
        VideoTestSourceFactory(),
        VideoStream(
            gst.Caps(
                'video/x-raw-yuv,width=(int)640,height=(int)480,format=(fourcc)I420'
            ))
    ])
    # configure durations and add output streams to factories
    for fac in self.facs:
        factory = fac[0]
        stream = fac[1]
        factory.duration = self.clip_duration
        factory.addOutputStream(stream)
    self.track_objects = []
    self.track = Track(self.facs[0][1])
    self.timeline = Timeline()
    self.timeline.addTrack(self.track)
    # render through a theora/ogg encoding chain into a fakesink
    vsettings = StreamEncodeSettings(encoder="theoraenc")
    rsettings = RenderSettings(settings=[vsettings], muxer="oggmux")
    self.fakesink = common.FakeSinkFactory()
    rendersink = RenderSinkFactory(RenderFactory(settings=rsettings),
            self.fakesink)
    self.render = RenderAction()
    self.pipeline = Pipeline()
    self.pipeline.connect("eos", self._renderEOSCb)
    self.pipeline.connect("error", self._renderErrorCb)
    self.pipeline.addAction(self.render)
    self.render.addConsumers(rendersink)
    timeline_factory = TimelineSourceFactory(self.timeline)
    self.render.addProducers(timeline_factory)
class TestStillImage(TestCase): clip_duration = 3 * gst.SECOND def setUp(self): self.mainloop = gobject.MainLoop() samples = os.path.join(os.path.dirname(__file__), "samples") self.facs = [] self.facs.append([ PictureFileSourceFactory( 'file://' + os.path.join(samples, "flat_colour1_640x480.png")), VideoStream( gst.Caps( "video/x-raw-rgb,bpp=(int)24,depth=(int)24,endianness=(int)4321,red_mask=(int)16711680,green_mask=(int)65280,blue_mask=(int)255" )) ]) self.facs.append([ PictureFileSourceFactory( 'file://' + os.path.join(samples, "flat_colour2_640x480.png")), VideoStream( gst.Caps( "video/x-raw-rgb,bpp=(int)24,depth=(int)24,endianness=(int)4321,red_mask=(int)16711680,green_mask=(int)65280,blue_mask=(int)255" )) ]) self.facs.append([ PictureFileSourceFactory( 'file://' + os.path.join(samples, "flat_colour3_320x180.png")), VideoStream( gst.Caps( "video/x-raw-rgb,bpp=(int)24,depth=(int)24,endianness=(int)4321,red_mask=(int)16711680,green_mask=(int)65280,blue_mask=(int)255" )) ]) # one video with a different resolution self.facs.append([ VideoTestSourceFactory(), VideoStream( gst.Caps( 'video/x-raw-yuv,width=(int)640,height=(int)480,format=(fourcc)I420' )) ]) # configure durations and add output streams to factories for fac in self.facs: factory = fac[0] stream = fac[1] factory.duration = self.clip_duration factory.addOutputStream(stream) self.track_objects = [] self.track = Track(self.facs[0][1]) self.timeline = Timeline() self.timeline.addTrack(self.track) vsettings = StreamEncodeSettings(encoder="theoraenc") rsettings = RenderSettings(settings=[vsettings], muxer="oggmux") self.fakesink = common.FakeSinkFactory() rendersink = RenderSinkFactory(RenderFactory(settings=rsettings), self.fakesink) self.render = RenderAction() self.pipeline = Pipeline() self.pipeline.connect("eos", self._renderEOSCb) self.pipeline.connect("error", self._renderErrorCb) self.pipeline.addAction(self.render) self.render.addConsumers(rendersink) timeline_factory = 
TimelineSourceFactory(self.timeline) self.render.addProducers(timeline_factory) def tearDown(self): self.mainloop.quit() def configureStreams(self, inputs, offsets): count = 0 for i in inputs: factory = self.facs[i][0] stream = self.facs[i][1] track_object = SourceTrackObject(factory, stream) self.track_objects.append(track_object) track_object.start = offsets[count] self.track.addTrackObject(track_object) count += 1 def startRender(self): self.render.activate() self.data_written = 0 self.fakesink.bins[0].props.signal_handoffs = True self.fakesink.bins[0].connect("handoff", self._fakesinkHandoffCb) self.pipeline.play() self.mainloop.run() def _fakesinkHandoffCb(self, fakesink, buf, pad): self.data_written += buf.size def _renderEOSCb(self, obj): self.mainloop.quit() # check the render was successful self.assertTrue(self.data_written > 0) def _renderErrorCb(self, obj, error, details): print "Error: %s\nDetails: %s" % (str(error), str(details)) self.fail("Pipeline rendering error") def cleanUp(self): self.render.deactivate() self.track.removeAllTrackObjects() self.track_objects = [] def testRendering(self): # use one of the still image streams self.configureStreams(range(1), [0]) self.startRender() self.cleanUp() # use two images with the same resolution and concatenate them self.configureStreams(range(2), [0, self.clip_duration]) self.startRender() self.cleanUp() # concatenate images with different resolutions self.configureStreams(range(3), [0, self.clip_duration, 2 * self.clip_duration]) self.startRender() self.cleanUp() # mix images with different resolutions by overlapping self.configureStreams(range(3), [0, self.clip_duration // 2, self.clip_duration]) self.startRender() self.cleanUp() # mix images and videos with the same resolution self.configureStreams([0, 1, 3], [0, self.clip_duration, 2 * self.clip_duration]) self.startRender() self.cleanUp() # mix images and videos with different resolutions self.configureStreams(range(4), [ 0, self.clip_duration, 2 * 
self.clip_duration, 3 * self.clip_duration ]) self.startRender() self.cleanUp() # mix images and videos with different resolutions by overlapping self.configureStreams(range(4), [ 0, self.clip_duration // 2, self.clip_duration, (3 * self.clip_duration) // 2 ]) self.startRender() self.cleanUp()
def testSaveTrackObject(self):
    """Serializing a SourceTrackObject must produce a <track-object>
    element carrying the timing attributes, factory/stream references,
    and the volume interpolation curve's keyframes."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    # NOTE(review): audio_stream is unused here; kept for parity with the
    # sibling serialization tests in this file.
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = FileSourceFactory("file1.ogg")
    source1.addOutputStream(video_stream)
    # these two calls are needed to populate the context for the -ref
    # elements
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)
    track_object = SourceTrackObject(source1, video_stream,
                                     start=10 * gst.SECOND,
                                     duration=20 * gst.SECOND,
                                     in_point=5 * gst.SECOND,
                                     media_duration=15 * gst.SECOND,
                                     priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)

    # create an interpolator and insert it into the track object
    fakevol = gst.element_factory_make("volume")
    prop = get_controllable_properties(fakevol)[1][1]
    volcurve = Interpolator(track_object, fakevol, prop)
    track_object.interpolators[prop.name] = (prop, volcurve)

    # add keyframes every 3 seconds with alternating 1/0 values; the
    # start/end points are configured separately from the keyframes
    volcurve.start.setObjectTime(0)
    volcurve.start.value = 0
    for t in xrange(3, 15, 3):
        volcurve.newKeyframe(t * gst.SECOND, int(t % 2))
    volcurve.end.setObjectTime(15 * gst.SECOND)
    volcurve.end.value = 15 % 2

    element = self.formatter._saveTrackObject(track_object)
    self.failUnlessEqual(element.tag, "track-object")
    self.failUnlessEqual(element.attrib["type"],
                         qual(track_object.__class__))
    self.failUnlessEqual(element.attrib["start"], ts(10 * gst.SECOND))
    self.failUnlessEqual(element.attrib["duration"], ts(20 * gst.SECOND))
    self.failUnlessEqual(element.attrib["in_point"], ts(5 * gst.SECOND))
    self.failUnlessEqual(element.attrib["media_duration"],
                         ts(15 * gst.SECOND))
    self.failUnlessEqual(element.attrib["priority"], "(int)10")
    self.failIfEqual(element.find("factory-ref"), None)
    self.failIfEqual(element.find("stream-ref"), None)

    # find the interpolation keyframes
    curves = element.find("curves")
    self.failIfEqual(curves, None)
    curve = curves.find("curve")
    self.failIfEqual(curve, None)
    self.failUnlessEqual(curve.attrib["property"], "volume")

    # compute a dictionary of the saved keyframes
    saved_points = dict(((obj.attrib["time"],
                          (obj.attrib["value"], obj.attrib["mode"]))
                         for obj in curve.getiterator("keyframe")))
    # compare this with the expected values
    expected = dict(((str(t * gst.SECOND),
                      ("(gdouble)%s" % (t % 2), "2"))
                     for t in xrange(3, 15, 3)))
    self.failUnlessEqual(expected, saved_points)