def newBlankProject(self): """ start up a new blank project """ # if there's a running project we must close it if self.current is not None and not self.closeRunningProject(): return False # we don't have an URI here, None means we're loading a new project self.emit("new-project-loading", None) project = Project(_("New Project")) self.emit("new-project-created", project) self.current = project # FIXME: this should not be hard-coded # add default tracks for a new project video = VideoStream(gst.Caps('video/x-raw-rgb; video/x-raw-yuv')) track = Track(video) project.timeline.addTrack(track) audio = AudioStream(gst.Caps('audio/x-raw-int; audio/x-raw-float')) track = Track(audio) project.timeline.addTrack(track) project.connect("project-changed", self._projectChangedCb) self.emit("new-project-loaded", self.current) return True
def testPads(self):
    """The factory's bin must mirror pads added/removed on track compositions."""
    timeline = Timeline()
    stream1 = VideoStream(gst.Caps('video/x-raw-rgb'), 'src0')
    stream2 = AudioStream(gst.Caps('audio/x-raw-int'), 'src1')
    track1 = Track(stream1)
    track2 = Track(stream2)
    timeline.addTrack(track1)
    timeline.addTrack(track2)
    factory = TimelineSourceFactory(timeline)
    bin = factory.makeBin()
    # no composition pads exist yet, so the bin exposes no source pads
    self.failUnlessEqual(len(list(bin.src_pads())), 0)
    pad1 = gst.Pad('src0', gst.PAD_SRC)
    pad1.set_caps(gst.Caps('asd'))
    pad1.set_active(True)
    track1.composition.add_pad(pad1)
    # NOTE(review): this pad is also named 'src0'; it lives on a different
    # element so there is no clash, but 'src1' may have been intended
    pad2 = gst.Pad('src0', gst.PAD_SRC)
    pad2.set_caps(gst.Caps('asd'))
    pad2.set_active(True)
    track2.composition.add_pad(pad2)
    # each composition pad should now be reflected on the bin
    self.failUnlessEqual(len(list(bin.src_pads())), 2)
    track1.composition.remove_pad(pad1)
    self.failUnlessEqual(len(list(bin.src_pads())), 1)
    track2.composition.remove_pad(pad2)
    self.failUnlessEqual(len(list(bin.src_pads())), 0)
    factory.clean()
def testTracks(self):
    """Adding/removing timeline tracks must update the bin and output streams."""
    timeline = Timeline()
    stream1 = VideoStream(gst.Caps('video/x-raw-rgb'), 'src0')
    stream2 = AudioStream(gst.Caps('audio/x-raw-int'), 'src1')
    track1 = Track(stream1)
    track2 = Track(stream2)
    # start with 2 tracks
    timeline.addTrack(track1)
    timeline.addTrack(track2)
    factory = TimelineSourceFactory(timeline)
    bin = factory.makeBin()
    # one child element per track, one output stream per track stream
    self.failUnlessEqual(len(list(bin)), 2)
    self.failUnlessEqual(set(factory.getOutputStreams()),
            set([stream1, stream2]))
    # add a new track
    stream3 = AudioStream(gst.Caps('audio/x-raw-int'), 'src2')
    track3 = Track(stream3)
    timeline.addTrack(track3)
    self.failUnlessEqual(len(list(bin)), 3)
    self.failUnlessEqual(set(factory.getOutputStreams()),
            set([stream1, stream2, stream3]))
    # remove a track
    timeline.removeTrack(track3)
    self.failUnlessEqual(len(list(bin)), 2)
    self.failUnlessEqual(set(factory.getOutputStreams()),
            set([stream1, stream2]))
    factory.clean()
def setUp(self):
    """Build a stub factory serving one RGB video stream and two tracks."""
    TestCase.setUp(self)
    # a single raw-RGB video stream, registered on a stub factory
    self.stream = VideoStream(gst.Caps('video/x-raw-rgb'))
    self.factory = StubFactory()
    self.factory.addOutputStream(self.stream)
    # two independent tracks built around the same stream definition
    self.track1 = Track(self.stream)
    self.track2 = Track(self.stream)
def _fillTimeline(self):
    """Add the default audio/video tracks, then every known source."""
    timeline = self.project.timeline
    # audio and video track
    for default_stream in (
            VideoStream(gst.Caps('video/x-raw-rgb; video/x-raw-yuv')),
            AudioStream(gst.Caps('audio/x-raw-int; audio/x-raw-float'))):
        timeline.addTrack(Track(default_stream))
    # drop every discovered source onto the timeline
    for uri in self._uris:
        timeline.addSourceFactory(self.project.sources.getUri(uri))
def testConnectionAndDisconnection(self):
    """Observer must (dis)connect as timeline objects are removed/re-added.

    BUG FIX: the re-add check previously asserted
    ``self.failUnless(timeline_object1)`` — the object itself, which is
    always truthy — instead of its ``connected`` flag. It now checks
    ``timeline_object1.connected``, matching the surrounding assertions.
    """
    timeline = Timeline()
    stream = new_stream()
    factory = new_source_factory()
    track = Track(stream)
    track_object1 = SourceTrackObject(factory, stream)
    track.addTrackObject(track_object1)
    timeline.addTrack(track)
    timeline_object1 = TimelineObject(factory)
    timeline_object1.addTrackObject(track_object1)
    timeline.addTimelineObject(timeline_object1)
    # observing connects both the timeline and its objects
    self.observer.startObserving(timeline)
    self.failUnless(timeline.connected)
    self.failUnless(timeline_object1.connected)
    # removing an object disconnects it
    timeline.removeTimelineObject(timeline_object1)
    self.failIf(timeline_object1.connected)
    # re-adding it while observing reconnects it
    timeline.addTimelineObject(timeline_object1)
    self.failUnless(timeline_object1.connected)
    # stopping the observer disconnects everything
    self.observer.stopObserving(timeline)
    self.failIf(timeline.connected)
    self.failIf(timeline_object1.connected)
def testLoadTrackEffect(self):
    """_loadTrackObject must rebuild a TrackEffect from its XML element."""
    # create fake document tree
    element = Element("track-object",
            type="pitivi.timeline.track.TrackEffect",
            start=ts(1 * gst.SECOND),
            duration=ts(10 * gst.SECOND),
            in_point=ts(5 * gst.SECOND),
            media_duration=ts(15 * gst.SECOND),
            priority=ts(5),
            id="1")
    effect_elem = SubElement(element, "effect")
    factory_elem = SubElement(effect_elem, "factory", name="identity")
    properties_elem = SubElement(effect_elem, "gst-element-properties",
            sync="(bool)True")
    # insert our fake factory into the context
    stream = AudioStream(gst.Caps("audio/x-raw-int"))
    factory = EffectFactory('identity')
    factory.addInputStream(stream)
    factory.addOutputStream(stream)
    self.formatter.avalaible_effects._effect_factories_dict[
        'identity'] = factory
    track = Track(stream)
    track_object = self.formatter._loadTrackObject(track, element)
    self.failUnless(isinstance(track_object, TrackEffect))
    self.failUnlessEqual(track_object.factory, factory)
    self.failUnlessEqual(track_object.stream, stream)
    # every serialized time/priority attribute must round-trip
    self.failUnlessEqual(track_object.start, 1 * gst.SECOND)
    self.failUnlessEqual(track_object.duration, 10 * gst.SECOND)
    self.failUnlessEqual(track_object.in_point, 5 * gst.SECOND)
    self.failUnlessEqual(track_object.media_duration, 15 * gst.SECOND)
    self.failUnlessEqual(track_object.priority, 5)
def testSaveTimeline(self):
    """_saveTimeline must emit a <timeline> element with one <tracks> child per track."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    # NOTE(review): audio_stream is unused in this test
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = VideoTestSourceFactory()
    source1.addOutputStream(video_stream)
    # populate the formatter context so -ref elements resolve
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)
    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND,
            duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND,
            media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    self.formatter._saveTrackObject(track_object)
    timeline_object = TimelineObject(source1)
    timeline_object.addTrackObject(track_object)
    self.formatter._saveTimelineObject(timeline_object)
    timeline = Timeline()
    timeline.addTrack(track)
    element = self.formatter._saveTimeline(timeline)
    self.failUnlessEqual(element.tag, "timeline")
    tracks = element.find("tracks")
    # one serialized track expected
    self.failUnlessEqual(len(tracks), 1)
def testSavetimelineObjects(self):
    """_saveTimelineObjects must serialize each timeline object it is given."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    # NOTE(review): audio_stream is unused in this test
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = FileSourceFactory("file1.ogg")
    source1.addOutputStream(video_stream)
    # these two calls are needed to populate the context for the -ref
    # elements
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)
    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND,
            duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND,
            media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    self.formatter._saveTrackObject(track_object)
    timeline_object = TimelineObject(source1)
    timeline_object.addTrackObject(track_object)
    element = self.formatter._saveTimelineObjects([timeline_object])
    # one child element per timeline object passed in
    self.failUnlessEqual(len(element), 1)
def testSaveTrack(self):
    """_saveTrack must emit a <track> element listing its track objects."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    # NOTE(review): audio_stream is unused in this test
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = VideoTestSourceFactory()
    # these two calls are needed to populate the context for the -ref
    # elements
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)
    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND,
            duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND,
            media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    element = self.formatter._saveTrack(track)
    self.failUnlessEqual(element.tag, "track")
    track_objects_element = element.find("track-objects")
    # the single track object must be serialized under <track-objects>
    self.failUnlessEqual(len(track_objects_element), 1)
def setUp(self):
    """Build a two-track timeline with sources and effects under undo observation."""
    self.stream = new_stream()
    self.factory = new_source_factory()
    self.effect_factory = TestEffectFactory(self.stream)
    # two tracks sharing the same stream definition
    self.track1 = Track(self.stream)
    self.track2 = Track(self.stream)
    self.timeline = Timeline()
    self.timeline.addTrack(self.track1)
    self.timeline.addTrack(self.track2)
    self.track_object1 = SourceTrackObject(self.factory, self.stream)
    self.track_object2 = SourceTrackObject(self.factory, self.stream)
    # two effects are created but (deliberately) not added to any track here
    self.track_effect1 = TrackEffect(self.effect_factory, self.stream)
    self.track_effect2 = TrackEffect(self.effect_factory, self.stream)
    self.track1.addTrackObject(self.track_object1)
    self.track2.addTrackObject(self.track_object2)
    # one timeline object grouping both track objects
    self.timeline_object1 = TimelineObject(self.factory)
    self.timeline_object1.addTrackObject(self.track_object1)
    self.timeline_object1.addTrackObject(self.track_object2)
    # undo log + observer wired to the timeline under test
    self.action_log = UndoableActionLog()
    self.observer = TestTimelineLogObserver(self.action_log)
    self.observer.startObserving(self.timeline)
def _loadTrack(self, element):
    """Deserialize a <track> element into a Track with its track objects."""
    self.debug("%r", element)
    # the track is built from its serialized stream definition first
    track = Track(self._loadStream(element.find("stream")))
    # then each child of <track-objects> is rebuilt onto the track
    for child in element.find("track-objects"):
        self._loadTrackObject(track, child)
    return track
def setUp(self):
    """Preroll a pipeline with five prioritized track objects feeding a mixer."""
    # create a pipeline
    self.pipeline = gst.Pipeline()
    self.track1 = Track(yuv("I420"))
    track_object1 = make_track_object(yuv("I420"))
    track_object2 = make_track_object(yuv("Y42B"))
    track_object3 = make_track_object(yuv("Y444"))
    track_object4 = make_track_object(rgb())
    track_object5 = make_track_object(yuv("AYUV"))
    for i, track_object in enumerate(
            (track_object1, track_object2, track_object3,
             track_object4, track_object5)):
        self.track1.addTrackObject(track_object)
        # set priorities from 1 to 5
        track_object.priority = i + 1
    # track_object5 falls outside (0s, 15s) so it isn't linked to videomixer
    track_object5.start = 15 * gst.SECOND
    # make a fakesink for the pipeline and connect it as necessary with a callback
    composition = self.track1.composition
    fakesink = gst.element_factory_make('fakesink')

    def bin_pad_added_cb(composition, pad):
        pad.link(fakesink.get_pad('sink'))

    composition.connect("pad-added", bin_pad_added_cb)
    # add the composition and fakesink to the pipeline and set state to paused to preroll
    self.pipeline.add(composition)
    self.pipeline.add(fakesink)
    self.pipeline.set_state(gst.STATE_PAUSED)
    # wait for preroll to complete
    bus = self.pipeline.get_bus()
    msg = bus.timed_pop_filtered(
        gst.CLOCK_TIME_NONE,
        gst.MESSAGE_ASYNC_DONE | gst.MESSAGE_ERROR)
    if msg.type == gst.MESSAGE_ERROR:
        gerror, debug = msg.parse_error()
        print "\nError message: %s\nDebug info: %s" % (gerror, debug)
    self.failUnlessEqual(msg.type, gst.MESSAGE_ASYNC_DONE)
    # grab the smart video mixer bin created by the track's mixer
    self.svmbin = list(self.track1.mixer.elements())[0]
def testAudioOnly(self):
    """An audio-only timeline must produce a playable single-element bin."""
    audio_factory1 = AudioTestSourceFactory(3)
    audio_factory1.duration = 10 * gst.SECOND
    stream = AudioStream(gst.Caps('audio/x-raw-int'), 'src0')
    audio_factory1.addOutputStream(stream)
    timeline = Timeline()
    track = Track(stream)
    track_object1 = SourceTrackObject(audio_factory1, stream)
    track_object1.start = 2 * gst.SECOND
    track.addTrackObject(track_object1)
    timeline.addTrack(track)
    factory = TimelineSourceFactory(timeline)
    bin = factory.makeBin()
    self.failUnlessEqual(len(list(bin)), 1)
    # duration = object start (2s) + factory duration (10s)
    self.failUnlessEqual(factory.duration, 12 * gst.SECOND)
    fakesink = gst.element_factory_make('fakesink')

    def bin_pad_added_cb(bin, pad):
        pad.link(fakesink.get_pad('sink'))

    bin.connect('pad-added', bin_pad_added_cb)

    def error_cb(bus, message):
        gerror, debug = message.parse_error()
        self.fail('%s: %s' % (gerror.message, debug))

    def eos_cb(bus, message):
        self.loop.quit()

    pipeline = gst.Pipeline()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message::error', error_cb)
    bus.connect('message::eos', eos_cb)
    pipeline.add(bin)
    pipeline.add(fakesink)
    # play the whole bin to EOS; any bus error fails the test
    pipeline.set_state(gst.STATE_PLAYING)
    self.loop.run()
    pipeline.set_state(gst.STATE_NULL)
    factory.clean()
def testLoadProject(self):
    """Serialize a full project and sanity-check the resulting tree.

    NOTE(review): despite the name, this exercises _serializeProject (the
    save path) and writes the result to /tmp/untitled.pptv as a side effect.
    """
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    # NOTE(review): audio_stream is unused in this test
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = VideoTestSourceFactory()
    # populate the formatter context so -ref elements resolve
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)
    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND,
            duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND,
            media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    self.formatter._saveTrackObject(track_object)
    timeline_object = TimelineObject(source1)
    timeline_object.addTrackObject(track_object)
    self.formatter._saveTimelineObject(timeline_object)
    timeline = Timeline()
    timeline.addTrack(track)
    self.formatter._saveTimeline(timeline)
    project = Project()
    project.timeline = timeline
    project.sources.addFactory(source1)
    element = self.formatter._serializeProject(project)
    self.failUnlessEqual(element.tag, "pitivi")
    self.failIfEqual(element.find("factories"), None)
    self.failIfEqual(element.find("timeline"), None)
    # pretty-print and dump for manual inspection
    indent(element)
    f = file("/tmp/untitled.pptv", "w")
    f.write(tostring(element))
    f.close()
def testSaveTrackEffect(self):
    """_saveTrackObject must serialize a TrackEffect with its <effect> subtree."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    # NOTE(review): audio_stream is unused in this test
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    effect1 = EffectFactory('identity', 'identity')
    effect1.addOutputStream(video_stream)
    effect1.addInputStream(video_stream)
    # the identity factory must be registered in the formatter's effect
    # factories dictionary so the serialized reference can be resolved
    self.formatter.avalaible_effects._effect_factories_dict['identity'] =\
        effect1
    track_effect = TrackEffect(effect1, video_stream,
            start=10 * gst.SECOND,
            duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND,
            media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_effect)
    element = self.formatter._saveTrackObject(track_effect)
    self.failUnlessEqual(element.tag, "track-object")
    self.failUnlessEqual(element.attrib["type"],
            qual(track_effect.__class__))
    # every time/priority attribute is stored in the typed-string format
    self.failUnlessEqual(element.attrib["start"], ts(10 * gst.SECOND))
    self.failUnlessEqual(element.attrib["duration"], ts(20 * gst.SECOND))
    self.failUnlessEqual(element.attrib["in_point"], ts(5 * gst.SECOND))
    self.failUnlessEqual(element.attrib["media_duration"],
            ts(15 * gst.SECOND))
    self.failUnlessEqual(element.attrib["priority"], "(int)10")
    effect_element = element.find('effect')
    self.failIfEqual(effect_element, None)
    self.failIfEqual(effect_element.find("factory"), None)
    self.failIfEqual(effect_element.find("gst-element-properties"), None)
def testSplitObjectKeyframes(self):
    """splitObject must partition keyframes between the two halves."""
    DURATION = 10 * gst.SECOND
    factory = AudioTestSourceFactory()
    factory.duration = DURATION
    stream_ = AudioStream(gst.Caps("audio/x-raw-int"))
    obj = SourceTrackObject(factory, stream_)
    track = Track(stream_)
    track.addTrackObject(obj)
    obj.start = 3 * gst.SECOND
    obj.duration = DURATION
    # create a three keyframes at: 3, 6 and 9 seconds
    interpolator = obj.getInterpolator("volume")
    keyframes = dict(((t * gst.SECOND, (t % 2, gst.INTERPOLATE_LINEAR))
            for t in xrange(3, 10, 3)))
    # partition expectations around the 5s boundary
    # NOTE(review): the split is at 8s but keyframes are compared against
    # a 5s threshold — presumably keyframe times are in source (in_point)
    # coordinates; confirm against Interpolator semantics
    expected = []
    expected2 = []
    for time, (value, mode) in keyframes.iteritems():
        kf = interpolator.newKeyframe(time, value, mode)
        if time < (5 * gst.SECOND):
            expected.append(kf)
        else:
            expected2.append(kf)

    def getKeyframes(obj):
        keyframes = obj.getInterpolator("volume").getInteriorKeyframes()
        return list(keyframes)

    obj2 = obj.splitObject(8 * gst.SECOND)
    keyframes = getKeyframes(obj)
    keyframes2 = getKeyframes(obj2)
    self.failUnlessEqual(keyframes, expected)
    self.failUnlessEqual(keyframes2, expected2)
def setUp(self):
    """Prepare a stub audio factory, one track and an empty timeline."""
    # a single raw-int audio stream served by a stub factory
    self.stream = AudioStream(gst.Caps('audio/x-raw-int'))
    self.factory = StubFactory()
    self.factory.addOutputStream(self.stream)
    # one track for the stream, plus a fresh timeline to populate in tests
    self.track1 = Track(self.stream)
    self.timeline = Timeline()
def testSplitObject(self):
    """splitObject must adjust start/in_point/duration and reject bad times."""
    DURATION = 10 * gst.SECOND
    factory = AudioTestSourceFactory()
    factory.duration = DURATION
    stream_ = AudioStream(gst.Caps("audio/x-raw-int"))
    obj = SourceTrackObject(factory, stream_)
    track = Track(stream_)
    track.addTrackObject(obj)
    obj.start = 3 * gst.SECOND
    obj.duration = DURATION
    # create a zig-zag volume curve
    interpolator = obj.getInterpolator("volume")
    expected = dict(((t * gst.SECOND, (t % 2, gst.INTERPOLATE_LINEAR))
            for t in xrange(3, 10, 3)))
    for time, (value, mode) in expected.iteritems():
        interpolator.newKeyframe(time, value, mode)

    def getKeyframes(obj):
        keyframes = obj.getInterpolator("volume").getInteriorKeyframes()
        return dict(((kf.time, (kf.value, kf.mode)) for kf in keyframes))

    monitor = TrackSignalMonitor(obj)
    # splitting outside the object's extent must fail
    self.failUnlessRaises(TrackError, obj.splitObject, 2 * gst.SECOND)
    self.failUnlessRaises(TrackError, obj.splitObject, 14 * gst.SECOND)
    # should these be possible (ie create empty objects) ?
    self.failUnlessRaises(TrackError, obj.splitObject, 3 * gst.SECOND)
    self.failUnlessRaises(TrackError, obj.splitObject, 13 * gst.SECOND)
    # splitObject at 4s should result in:
    # obj (start 3, end 4) other1 (start 4, end 13)
    other1 = obj.splitObject(4 * gst.SECOND)
    # all interior keyframes (3,6,9s) end up on the second half
    self.failUnlessEqual(expected, getKeyframes(other1))
    self.failUnlessEqual(obj.start, 3 * gst.SECOND)
    self.failUnlessEqual(obj.in_point, 0 * gst.SECOND)
    self.failUnlessEqual(obj.duration, 1 * gst.SECOND)
    self.failUnlessEqual(obj.rate, 1)
    self.failUnlessEqual(other1.start, 4 * gst.SECOND)
    self.failUnlessEqual(other1.in_point, 1 * gst.SECOND)
    self.failUnlessEqual(other1.duration, 9 * gst.SECOND)
    self.failUnlessEqual(other1.rate, 1)
    # splitting only changes duration on the original object, not start
    self.failUnlessEqual(monitor.start_changed_count, 0)
    self.failUnlessEqual(monitor.duration_changed_count, 1)
    # move other1 back to start = 1
    other1.start = 1 * gst.SECOND
    # splitObject again other1
    monitor = TrackSignalMonitor(other1)
    other2 = other1.splitObject(6 * gst.SECOND)
    self.failUnlessEqual(other1.start, 1 * gst.SECOND)
    self.failUnlessEqual(other1.in_point, 1 * gst.SECOND)
    self.failUnlessEqual(other1.duration, 5 * gst.SECOND)
    self.failUnlessEqual(other1.rate, 1)
    self.failUnlessEqual(other2.start, 6 * gst.SECOND)
    self.failUnlessEqual(other2.in_point, 6 * gst.SECOND)
    self.failUnlessEqual(other2.duration, 4 * gst.SECOND)
    self.failUnlessEqual(other2.rate, 1)
    self.failUnlessEqual(monitor.start_changed_count, 0)
    self.failUnlessEqual(monitor.duration_changed_count, 1)
def testLoadTrackSource(self):
    """_loadTrackObject must rebuild a SourceTrackObject and its volume curve."""
    # create fake document tree
    element = Element("track-object",
            type="pitivi.timeline.track.SourceTrackObject",
            start=ts(1 * gst.SECOND),
            duration=ts(10 * gst.SECOND),
            in_point=ts(5 * gst.SECOND),
            media_duration=ts(15 * gst.SECOND),
            priority=ts(5),
            id="1")
    factory_ref = SubElement(element, "factory-ref", id="1")
    stream_ref = SubElement(element, "stream-ref", id="1")
    # insert our fake factory into the context
    factory = AudioTestSourceFactory()
    factory.duration = 10 * gst.SECOND
    self.formatter._context.factories["1"] = factory
    # insert fake stream into the context
    stream = AudioStream(gst.Caps("audio/x-raw-int"))
    self.formatter._context.streams["1"] = stream
    # add a volume curve
    curves = SubElement(element, "curves")
    curve = SubElement(curves, "curve", property="volume", version="1")
    expected = dict(
        (long(t * gst.SECOND), (float(t % 2), gst.INTERPOLATE_LINEAR))
        for t in xrange(1, 10))
    start = SubElement(curve, "start", value="0.0", mode="2")
    for time, (value, mode) in expected.iteritems():
        SubElement(curve, "keyframe", time=str(time),
            value=str(value), mode=str(mode))
    end = SubElement(curve, "end", value=str(10 % 2), mode="2")
    track = Track(stream)
    # point gun at foot; pull trigger
    track_object = self.formatter._loadTrackObject(track, element)
    self.failUnless(isinstance(track_object, SourceTrackObject))
    self.failUnlessEqual(track_object.factory, factory)
    self.failUnlessEqual(track_object.stream, stream)
    # all serialized attributes must round-trip
    self.failUnlessEqual(track_object.start, 1 * gst.SECOND)
    self.failUnlessEqual(track_object.duration, 10 * gst.SECOND)
    self.failUnlessEqual(track_object.in_point, 5 * gst.SECOND)
    self.failUnlessEqual(track_object.media_duration, 15 * gst.SECOND)
    self.failUnlessEqual(track_object.priority, 5)
    # the volume interpolator and its keyframes must be restored too
    self.failIfEqual(track_object.interpolators, None)
    interpolator = track_object.getInterpolator("volume")
    self.failIfEqual(interpolator, None)
    curve = dict(((kf.time, (kf.value, kf.mode))
        for kf in interpolator.getInteriorKeyframes()))
    self.failUnlessEqual(curve, expected)
    # start/end anchors sit at in_point and media stop respectively
    self.failUnlessEqual(interpolator.start.value, 0.0)
    self.failUnlessEqual(interpolator.start.time, 5 * gst.SECOND)
    self.failUnlessEqual(interpolator.end.value, 0.0)
    self.failUnlessEqual(interpolator.end.time, 15 * gst.SECOND)
def testLoadInterpolatorV0(self):
    """Loading a version-0 curve must remap start/end keyframe values."""
    # create fake document tree
    element = Element("track-object",
            type="pitivi.timeline.track.SourceTrackObject",
            start=ts(1 * gst.SECOND),
            duration=ts(15 * gst.SECOND),
            in_point=ts(5 * gst.SECOND),
            media_duration=ts(15 * gst.SECOND),
            priority=ts(5),
            id="1")
    factory_ref = SubElement(element, "factory-ref", id="1")
    stream_ref = SubElement(element, "stream-ref", id="1")
    # insert our fake factory into the context
    factory = AudioTestSourceFactory()
    factory.duration = 20 * gst.SECOND
    self.formatter._context.factories["1"] = factory
    # insert fake stream into the context
    stream = AudioStream(gst.Caps("audio/x-raw-int"))
    self.formatter._context.streams["1"] = stream
    # add a volume curve (no version attribute -> legacy v0 format)
    curves = SubElement(element, "curves")
    curve = SubElement(curves, "curve", property="volume")
    expected = dict(
        (long(t * gst.SECOND), (float(t % 2), gst.INTERPOLATE_LINEAR))
        for t in xrange(6, 15))
    start = SubElement(curve, "start", value="1.0", mode="2")
    for time, (value, mode) in expected.iteritems():
        SubElement(curve, "keyframe", time=str(time),
            value=str(value), mode=str(mode))
    end = SubElement(curve, "end", value="1.0", mode="2")
    track = Track(stream)
    # point gun at foot; pull trigger
    track_object = self.formatter._loadTrackObject(track, element)
    self.failIfEqual(track_object.interpolators, None)
    interpolator = track_object.getInterpolator("volume")
    self.failIfEqual(interpolator, None)
    # interior keyframes must survive the format upgrade untouched
    curve = dict(((kf.time, (kf.value, kf.mode))
        for kf in interpolator.getInteriorKeyframes()))
    self.failUnlessEqual(curve, expected)
    # check that start keyframe value has been properly adjusted so as not
    # to change the shape of the curve. rounding is applied here because
    # the controller seems to round to a different precision
    # than python. we just want to check that the value is "in the
    # ballpark", indicating the keyframes have been updated. If you do the
    # math you'll see that both start and end keyframe values should be
    # about 1/6
    self.failUnlessEqual(round(interpolator.start.value, 6),
            round((-5.0 / 6) + 1, 6))
    self.failUnlessEqual(interpolator.start.time, 5 * gst.SECOND)
    # check that end keyrame value has been properly adjusted so as not to
    # change the shape of the curve
    self.failUnlessEqual(interpolator.end.time, 15 * gst.SECOND)
    self.failUnlessEqual(round(interpolator.end.value, 6),
            round((15.0 / 6) - (7.0 / 3), 6))
def testSaveTrackSource(self):
    """_saveTrackObject must serialize a SourceTrackObject and its volume curve."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    # NOTE(review): audio_stream is unused in this test
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = FileSourceFactory("file1.ogg")
    source1.addOutputStream(video_stream)
    # these two calls are needed to populate the context for the -ref
    # elements
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)
    track_source = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND,
            duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND,
            media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_source)
    # create an interpolator and insert it into the track object
    fakevol = gst.element_factory_make("volume")
    prop = get_controllable_properties(fakevol)[1][1]
    volcurve = Interpolator(track_source, fakevol, prop)
    track_source.interpolators[prop.name] = (prop, volcurve)
    # add some points to the interpolator
    value = float(0)
    volcurve.start.setObjectTime(0)
    volcurve.start.value = 0
    for t in xrange(3, 15, 3):
        value = int(t % 2)
        volcurve.newKeyframe(t * gst.SECOND, value)
    volcurve.end.setObjectTime(15 * gst.SECOND)
    volcurve.end.value = 15 % 2
    element = self.formatter._saveTrackObject(track_source)
    self.failUnlessEqual(element.tag, "track-object")
    self.failUnlessEqual(element.attrib["type"],
            qual(track_source.__class__))
    self.failUnlessEqual(element.attrib["start"], ts(10 * gst.SECOND))
    self.failUnlessEqual(element.attrib["duration"], ts(20 * gst.SECOND))
    self.failUnlessEqual(element.attrib["in_point"], ts(5 * gst.SECOND))
    self.failUnlessEqual(element.attrib["media_duration"],
            ts(15 * gst.SECOND))
    self.failUnlessEqual(element.attrib["priority"], "(int)10")
    self.failIfEqual(element.find("factory-ref"), None)
    self.failIfEqual(element.find("stream-ref"), None)
    # find the interpolation keyframes
    curves = element.find("curves")
    self.failIfEqual(curves, None)
    curve = curves.find("curve")
    self.failIfEqual(curve, None)
    self.failUnlessEqual(curve.attrib["property"], "volume")
    # compute a dictionary of keyframes
    saved_points = dict(
        ((obj.attrib["time"], (obj.attrib["value"], obj.attrib["mode"]))
        for obj in curve.getiterator("keyframe")))
    # compare this with the expected values
    expected = dict(((str(t * gst.SECOND), ("(gdouble)%s" % (t % 2), "2"))
        for t in xrange(3, 15, 3)))
    self.failUnlessEqual(expected, saved_points)
def setUp(self):
    """Assemble picture/test sources, a timeline and a theora render pipeline."""
    self.mainloop = gobject.MainLoop()
    samples = os.path.join(os.path.dirname(__file__), "samples")
    # (factory, stream) pairs: three PNG stills plus one test source
    self.facs = []
    self.facs.append([
        PictureFileSourceFactory(
            'file://' + os.path.join(samples, "flat_colour1_640x480.png")),
        VideoStream(
            gst.Caps(
                "video/x-raw-rgb,bpp=(int)24,depth=(int)24,endianness=(int)4321,red_mask=(int)16711680,green_mask=(int)65280,blue_mask=(int)255"
            ))
    ])
    self.facs.append([
        PictureFileSourceFactory(
            'file://' + os.path.join(samples, "flat_colour2_640x480.png")),
        VideoStream(
            gst.Caps(
                "video/x-raw-rgb,bpp=(int)24,depth=(int)24,endianness=(int)4321,red_mask=(int)16711680,green_mask=(int)65280,blue_mask=(int)255"
            ))
    ])
    self.facs.append([
        PictureFileSourceFactory(
            'file://' + os.path.join(samples, "flat_colour3_320x180.png")),
        VideoStream(
            gst.Caps(
                "video/x-raw-rgb,bpp=(int)24,depth=(int)24,endianness=(int)4321,red_mask=(int)16711680,green_mask=(int)65280,blue_mask=(int)255"
            ))
    ])
    # one video with a different resolution
    self.facs.append([
        VideoTestSourceFactory(),
        VideoStream(
            gst.Caps(
                'video/x-raw-yuv,width=(int)640,height=(int)480,format=(fourcc)I420'
            ))
    ])
    # configure durations and add output streams to factories
    # NOTE(review): self.clip_duration is defined outside this method —
    # presumably a class attribute; confirm on the test class
    for fac in self.facs:
        factory = fac[0]
        stream = fac[1]
        factory.duration = self.clip_duration
        factory.addOutputStream(stream)
    self.track_objects = []
    self.track = Track(self.facs[0][1])
    self.timeline = Timeline()
    self.timeline.addTrack(self.track)
    # render settings: theora video muxed into ogg, sunk into a fake sink
    vsettings = StreamEncodeSettings(encoder="theoraenc")
    rsettings = RenderSettings(settings=[vsettings], muxer="oggmux")
    self.fakesink = common.FakeSinkFactory()
    rendersink = RenderSinkFactory(RenderFactory(settings=rsettings),
            self.fakesink)
    self.render = RenderAction()
    self.pipeline = Pipeline()
    self.pipeline.connect("eos", self._renderEOSCb)
    self.pipeline.connect("error", self._renderErrorCb)
    self.pipeline.addAction(self.render)
    self.render.addConsumers(rendersink)
    # the timeline itself is the producer feeding the render action
    timeline_factory = TimelineSourceFactory(self.timeline)
    self.render.addProducers(timeline_factory)