def testAddRemoveObjects(self):
    """An object belongs to at most one track, and only while added."""
    factory = self.factory
    stream = self.stream
    first_track = self.track1
    second_track = self.track2

    # A fresh object has no track until added.
    first = SourceTrackObject(factory, stream)
    self.failUnlessEqual(first.track, None)
    first_track.addTrackObject(first)
    self.failIfEqual(first.track, None)

    # Adding the same object twice, or to a second track, must raise.
    self.failUnlessRaises(TrackError, first_track.addTrackObject, first)
    self.failUnlessRaises(TrackError, second_track.addTrackObject, first)

    # A second object can live on the same track.
    second = SourceTrackObject(factory, stream)
    self.failUnlessEqual(second.track, None)
    first_track.addTrackObject(second)
    self.failIfEqual(second.track, None)

    # Removing detaches the object; removing twice must raise.
    first_track.removeTrackObject(first)
    self.failUnlessEqual(first.track, None)
    self.failUnlessRaises(TrackError, first_track.removeTrackObject, first)
    first_track.removeTrackObject(second)
    self.failUnlessEqual(second.track, None)
def testCopyMakeBinNotCalled(self):
    """Regression test: copying a track object must not need a bin."""
    original = SourceTrackObject(self.factory, self.stream)
    # this used to raise an exception
    duplicate = original.copy()
    self.failUnlessEqual(original.start, duplicate.start)
def setUp(self):
    """Build a stub factory with one audio stream and a monitored object."""
    TestCase.setUp(self)
    audio = AudioStream(gst.Caps("audio/x-raw-int"))
    self.factory = StubFactory()
    gst.debug("%r" % self.factory.duration)
    self.factory.addOutputStream(audio)
    self.track_object = SourceTrackObject(self.factory, audio)
    self.monitor = TrackSignalMonitor(self.track_object)
def configureStreams(self, inputs, offsets):
    """Create one track object per input and place it on the track.

    inputs -- indices into self.facs (each entry is (factory, stream))
    offsets -- start time for each input, in the same order
    """
    # zip pairs each input with its offset, replacing the manual counter.
    for index, offset in zip(inputs, offsets):
        factory = self.facs[index][0]
        stream = self.facs[index][1]
        track_object = SourceTrackObject(factory, stream)
        self.track_objects.append(track_object)
        track_object.start = offset
        self.track.addTrackObject(track_object)
def testConnectionAndDisconnection(self):
    """The observer tracks connection state of the timeline's objects."""
    timeline = Timeline()
    stream = new_stream()
    factory = new_source_factory()

    track = Track(stream)
    track_object1 = SourceTrackObject(factory, stream)
    track.addTrackObject(track_object1)
    timeline.addTrack(track)

    timeline_object1 = TimelineObject(factory)
    timeline_object1.addTrackObject(track_object1)
    timeline.addTimelineObject(timeline_object1)

    self.observer.startObserving(timeline)
    self.failUnless(timeline.connected)
    self.failUnless(timeline_object1.connected)

    timeline.removeTimelineObject(timeline_object1)
    self.failIf(timeline_object1.connected)

    # Re-adding the object must reconnect it.
    timeline.addTimelineObject(timeline_object1)
    # BUGFIX: the original asserted on the object itself
    # (failUnless(timeline_object1)), which is always true; the intent
    # here is clearly to verify the reconnected state.
    self.failUnless(timeline_object1.connected)

    self.observer.stopObserving(timeline)
    self.failIf(timeline.connected)
    self.failIf(timeline_object1.connected)
def testSaveTimeline(self):
    """_saveTimeline produces a <timeline> element with one track."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))  # kept for parity; unused below

    source1 = VideoTestSourceFactory()
    source1.addOutputStream(video_stream)
    # populate the formatter context for the -ref elements
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    self.formatter._saveTrackObject(track_object)

    timeline_object = TimelineObject(source1)
    timeline_object.addTrackObject(track_object)
    self.formatter._saveTimelineObject(timeline_object)

    timeline = Timeline()
    timeline.addTrack(track)

    element = self.formatter._saveTimeline(timeline)
    self.failUnlessEqual(element.tag, "timeline")
    tracks = element.find("tracks")
    self.failUnlessEqual(len(tracks), 1)
def testSavetimelineObjects(self):
    """_saveTimelineObjects serializes a single timeline object."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))  # kept for parity; unused below

    source1 = FileSourceFactory("file1.ogg")
    source1.addOutputStream(video_stream)
    # these two calls are needed to populate the context for the -ref
    # elements
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    self.formatter._saveTrackObject(track_object)

    timeline_object = TimelineObject(source1)
    timeline_object.addTrackObject(track_object)

    element = self.formatter._saveTimelineObjects([timeline_object])
    self.failUnlessEqual(len(element), 1)
def testSaveTrack(self):
    """_saveTrack produces a <track> element listing its track objects."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))  # kept for parity; unused below

    source1 = VideoTestSourceFactory()
    # these two calls are needed to populate the context for the -ref
    # elements
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)

    element = self.formatter._saveTrack(track)
    self.failUnlessEqual(element.tag, "track")
    track_objects_element = element.find("track-objects")
    self.failUnlessEqual(len(track_objects_element), 1)
def testAudioOnly(self):
    """An audio-only timeline plays to EOS through a fakesink."""
    audio_factory1 = AudioTestSourceFactory(3)
    audio_factory1.duration = 10 * gst.SECOND
    stream = AudioStream(gst.Caps('audio/x-raw-int'), 'src0')
    audio_factory1.addOutputStream(stream)

    timeline = Timeline()
    track = Track(stream)
    track_object1 = SourceTrackObject(audio_factory1, stream)
    track_object1.start = 2 * gst.SECOND
    track.addTrackObject(track_object1)
    timeline.addTrack(track)

    factory = TimelineSourceFactory(timeline)
    # renamed from "bin": avoid shadowing the builtin
    source_bin = factory.makeBin()
    self.failUnlessEqual(len(list(source_bin)), 1)
    # 2s offset + 10s duration
    self.failUnlessEqual(factory.duration, 12 * gst.SECOND)

    fakesink = gst.element_factory_make('fakesink')

    def bin_pad_added_cb(element, pad):
        pad.link(fakesink.get_pad('sink'))

    source_bin.connect('pad-added', bin_pad_added_cb)

    def error_cb(bus, message):
        gerror, debug = message.parse_error()
        self.fail('%s: %s' % (gerror.message, debug))

    def eos_cb(bus, message):
        self.loop.quit()

    pipeline = gst.Pipeline()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message::error', error_cb)
    bus.connect('message::eos', eos_cb)
    pipeline.add(source_bin)
    pipeline.add(fakesink)

    pipeline.set_state(gst.STATE_PLAYING)
    self.loop.run()
    pipeline.set_state(gst.STATE_NULL)
    factory.clean()
def makeTimelineObject(self):
    """Create a track object on track1, wrap and add it to the timeline."""
    source = SourceTrackObject(self.factory, self.stream)
    self.track1.addTrackObject(source)
    wrapper = TimelineObject(self.factory)
    wrapper.addTrackObject(source)
    self.timeline.addTimelineObject(wrapper)
    return wrapper
def testLoadTimeline(self):
    """_loadTimeline rebuilds a timeline from a hand-made element tree."""
    # we need a project for this to work
    self.formatter.project = Project()

    # build a fake document tree: one track with one track object...
    timeline_element = Element("timeline")
    tracks_element = SubElement(timeline_element, "tracks")
    track_element = SubElement(tracks_element, "track")
    stream_element = SubElement(track_element, "stream", id="1",
            type="pitivi.stream.VideoStream", caps="video/x-raw-rgb")
    track_objects_element = SubElement(track_element, "track-objects")
    track_object = SubElement(track_objects_element, "track-object",
            type="pitivi.timeline.track.SourceTrackObject",
            start=ts(1 * gst.SECOND), duration=ts(10 * gst.SECOND),
            in_point=ts(5 * gst.SECOND),
            media_duration=ts(15 * gst.SECOND), priority=ts(5), id="1")
    factory_ref = SubElement(track_object, "factory-ref", id="1")
    stream_ref = SubElement(track_object, "stream-ref", id="1")

    # ...and one timeline object referencing that track object.
    timeline_objects_element = SubElement(timeline_element,
            "timeline-objects")
    timeline_object_element = SubElement(timeline_objects_element,
            "timeline-object")
    factory_ref = SubElement(timeline_object_element, "factory-ref",
            id="1")
    stream_ref = SubElement(timeline_object_element, "stream-ref", id="1")
    track_object_refs = SubElement(timeline_object_element,
            "track-object-refs")
    track_object_ref = SubElement(track_object_refs, "track-object-ref",
            id="1")

    # insert fake streams and factories into context
    factory = VideoTestSourceFactory()
    self.formatter._context.factories["1"] = factory
    stream = VideoStream(gst.Caps("video/x-raw-rgb"))
    self.formatter._context.streams["1"] = stream
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    source1 = VideoTestSourceFactory()
    self.formatter._context.factories["2"] = source1
    self.formatter._context.track_objects["1"] = SourceTrackObject(
        source1, video_stream)

    # point gun at foot; pull trigger
    self.formatter._loadTimeline(timeline_element)
    self.failUnlessEqual(len(self.formatter.project.timeline.tracks), 1)
def testGetTrackObjectsGroupedByLayer(self):
    """Objects are grouped into layers by priority and overlap."""
    factory = self.factory
    stream = self.stream
    track1 = self.track1

    # (name, start sec, end sec, priority)
    test_data = [
        ("a", 0, 10, 0),
        ("b", 5, 15, 0),
        ("c", 20, 25, 0),
        ("d", 30, 35, 0),
        ("e", 30, 35, 2),
        ("f", 35, 45, 0),
        ("g", 40, 50, 0),
        ("h", 50, 60, 0),
        ("i", 55, 65, 1),
        ("j", 57, 60, 2),
        ("k", 62, 70, 3),
        ("l", 63, 67, 0),
    ]
    expected = [
        ["a", "b", "c", "d", "f", "g", "h", "l"],
        ["i"],
        ["e", "j"],
        ["k"],
    ]

    name_of = {}
    for name, start, end, priority in test_data:
        clip = SourceTrackObject(factory, stream)
        clip.start = start * gst.SECOND
        clip.duration = end * gst.SECOND - clip.start
        clip.priority = priority
        track1.addTrackObject(clip)
        name_of[clip] = name

    result = [[name_of[clip] for clip in layer]
              for layer in track1.getTrackObjectsGroupedByLayer()]
    self.failUnlessEqual(result, expected)
def addClip(name, start, end):
    """Add a clip covering [start, end) seconds to track1 (closure helper).

    Registers the clip in the enclosing names/objs maps under *name*.
    """
    clip = SourceTrackObject(factory, stream)
    clip.start = start * gst.SECOND
    clip.in_point = 0
    clip.duration = end * gst.SECOND - clip.start
    clip.media_duration = clip.duration
    clip.name = name
    names[clip] = name
    objs[name] = clip
    track1.addTrackObject(clip)
def setUp(self):
    """Build two tracks with one object each, an effect pair, and a log."""
    self.stream = new_stream()
    self.factory = new_source_factory()
    self.effect_factory = TestEffectFactory(self.stream)

    self.track1 = Track(self.stream)
    self.track2 = Track(self.stream)
    self.timeline = Timeline()
    self.timeline.addTrack(self.track1)
    self.timeline.addTrack(self.track2)

    self.track_object1 = SourceTrackObject(self.factory, self.stream)
    self.track_object2 = SourceTrackObject(self.factory, self.stream)
    self.track_effect1 = TrackEffect(self.effect_factory, self.stream)
    self.track_effect2 = TrackEffect(self.effect_factory, self.stream)
    self.track1.addTrackObject(self.track_object1)
    self.track2.addTrackObject(self.track_object2)

    # one timeline object owning both track objects
    self.timeline_object1 = TimelineObject(self.factory)
    self.timeline_object1.addTrackObject(self.track_object1)
    self.timeline_object1.addTrackObject(self.track_object2)

    self.action_log = UndoableActionLog()
    self.observer = TestTimelineLogObserver(self.action_log)
    self.observer.startObserving(self.timeline)
def testGetTrackObjectsGroupedByLayer(self):
    """Objects are grouped into layers by priority and overlap."""
    factory = self.factory
    stream = self.stream
    track1 = self.track1

    # (name, start sec, end sec, priority)
    test_data = [
        ("a", 0, 10, 0),
        ("b", 5, 15, 0),
        ("c", 20, 25, 0),
        ("d", 30, 35, 0),
        ("e", 30, 35, 2),
        ("f", 35, 45, 0),
        ("g", 40, 50, 0),
        ("h", 50, 60, 0),
        ("i", 55, 65, 1),
        ("j", 57, 60, 2),
        ("k", 62, 70, 3),
        ("l", 63, 67, 0),
    ]
    expected = [["a", "b", "c", "d", "f", "g", "h", "l"],
                ["i"],
                ["e", "j"],
                ["k"]]

    labels = {}
    for name, start, end, priority in test_data:
        clip = SourceTrackObject(factory, stream)
        clip.start = start * gst.SECOND
        clip.duration = end * gst.SECOND - clip.start
        clip.priority = priority
        track1.addTrackObject(clip)
        labels[clip] = name

    layers = track1.getTrackObjectsGroupedByLayer()
    result = [[labels[clip] for clip in layer] for layer in layers]
    self.failUnlessEqual(result, expected)
def testLoadProject(self):
    """_serializeProject emits factories and timeline; output round-trips
    to disk."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))  # kept for parity; unused below

    source1 = VideoTestSourceFactory()
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    self.formatter._saveTrackObject(track_object)

    timeline_object = TimelineObject(source1)
    timeline_object.addTrackObject(track_object)
    self.formatter._saveTimelineObject(timeline_object)

    timeline = Timeline()
    timeline.addTrack(track)
    self.formatter._saveTimeline(timeline)

    project = Project()
    project.timeline = timeline
    project.sources.addFactory(source1)

    element = self.formatter._serializeProject(project)
    self.failUnlessEqual(element.tag, "pitivi")
    self.failIfEqual(element.find("factories"), None)
    self.failIfEqual(element.find("timeline"), None)

    indent(element)
    # BUGFIX: the original wrote to a fixed, predictable path
    # ("/tmp/untitled.pptv") via the deprecated file() builtin with no
    # cleanup. Use a unique temporary file, close deterministically and
    # remove it so the test leaves no residue.
    import os
    import tempfile
    fd, path = tempfile.mkstemp(suffix=".pptv")
    out = os.fdopen(fd, "w")
    try:
        out.write(tostring(element))
    finally:
        out.close()
        os.remove(path)
def testRemoveAllTrackObjects(self):
    """removeAllTrackObjects detaches every object; empty track is a no-op."""
    track = self.track1
    factory = self.factory

    # check that it can be called on an empty track
    track.removeAllTrackObjects()

    clips = []
    for _ in xrange(10):
        clip = SourceTrackObject(factory, self.stream)
        clips.append(clip)
        track.addTrackObject(clip)

    for clip in clips:
        self.failIfEqual(clip.track, None)

    track.removeAllTrackObjects()

    for clip in clips:
        self.failUnlessEqual(clip.track, None)
def testLoadTimelineObject(self):
    """_loadTimelineObject resolves factory and track-object references."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    source1 = VideoTestSourceFactory()
    # pre-seed the context so the -ref elements resolve
    self.formatter._context.factories["1"] = source1
    self.formatter._context.track_objects["1"] = SourceTrackObject(
        source1, video_stream)

    element = Element("timeline-object")
    factory_ref = SubElement(element, "factory-ref", id="1")
    stream_ref = SubElement(element, "stream-ref", id="1")
    track_object_refs = SubElement(element, "track-object-refs")
    track_object_ref = SubElement(track_object_refs, "track-object-ref",
            id="1")

    timeline_object = self.formatter._loadTimelineObject(element)

    self.failUnlessEqual(timeline_object.factory, source1)
    self.failUnlessEqual(len(timeline_object.track_objects), 1)
def testSplitObjectKeyframes(self):
    """Splitting distributes interior keyframes between the two halves."""
    DURATION = 10 * gst.SECOND
    factory = AudioTestSourceFactory()
    factory.duration = DURATION
    stream_ = AudioStream(gst.Caps("audio/x-raw-int"))
    obj = SourceTrackObject(factory, stream_)
    track = Track(stream_)
    track.addTrackObject(obj)

    obj.start = 3 * gst.SECOND
    obj.duration = DURATION

    # create three keyframes at: 3, 6 and 9 seconds
    interpolator = obj.getInterpolator("volume")
    keyframe_specs = dict(((t * gst.SECOND, (t % 2, gst.INTERPOLATE_LINEAR))
            for t in xrange(3, 10, 3)))
    expected_first = []
    expected_second = []
    for time, (value, mode) in keyframe_specs.iteritems():
        keyframe = interpolator.newKeyframe(time, value, mode)
        if time < (5 * gst.SECOND):
            expected_first.append(keyframe)
        else:
            expected_second.append(keyframe)

    def interiorKeyframes(track_object):
        volume = track_object.getInterpolator("volume")
        return list(volume.getInteriorKeyframes())

    second_half = obj.splitObject(8 * gst.SECOND)
    self.failUnlessEqual(interiorKeyframes(obj), expected_first)
    self.failUnlessEqual(interiorKeyframes(second_half), expected_second)
class TestTrackObject(TestCase):
    """Tests for SourceTrackObject property handling, trimming and
    splitting."""

    def setUp(self):
        """Create a stub audio factory and a monitored track object."""
        TestCase.setUp(self)
        stream = AudioStream(gst.Caps("audio/x-raw-int"))
        self.factory = StubFactory()
        gst.debug("%r" % self.factory.duration)
        self.factory.addOutputStream(stream)
        self.track_object = SourceTrackObject(self.factory, stream)
        self.monitor = TrackSignalMonitor(self.track_object)

    def tearDown(self):
        """Release the track object and drop all fixture references."""
        self.monitor = None
        self.track_object.release()
        # BUGFIX: was "self.track_oject = None" (typo), which created a
        # stray attribute and left self.track_object alive across tests.
        self.track_object = None
        self.factory = None
        TestCase.tearDown(self)

    def testDefaultProperties(self):
        """A fresh object mirrors the factory duration everywhere."""
        obj = self.track_object
        self.failUnlessEqual(obj.start, 0)
        self.failUnlessEqual(obj.duration, self.factory.duration)
        self.failUnlessEqual(obj.in_point, 0)
        self.failUnlessEqual(obj.out_point, self.factory.duration)
        self.failUnlessEqual(obj.media_duration, self.factory.duration)
        self.failUnlessEqual(obj.rate, 1)
        self.failUnlessEqual(obj.priority, 0)

        gnl_object = obj.gnl_object
        self.failUnlessEqual(gnl_object.props.start, 0)
        self.failUnlessEqual(gnl_object.props.duration,
                self.factory.duration)
        self.failUnlessEqual(gnl_object.props.media_start, 0)
        self.failUnlessEqual(gnl_object.props.media_stop,
                self.factory.duration)
        self.failUnlessEqual(gnl_object.props.media_duration,
                self.factory.duration)
        self.failUnlessEqual(gnl_object.props.rate, 1)
        # NOTE(review): duplicates the obj.priority check above — possibly
        # meant to assert gnl_object.props.priority; kept as-is.
        self.failUnlessEqual(obj.priority, 0)

    def testChangePropertiesFromTrackObject(self):
        """Setting properties propagates to the gnl object and signals."""
        obj = self.track_object
        gnl_object = obj.gnl_object

        start = 1 * gst.SECOND
        obj.start = start
        self.failUnlessEqual(obj.start, start)
        self.failUnlessEqual(gnl_object.props.start, start)
        self.failUnlessEqual(self.monitor.start_changed_count, 1)

        duration = 10 * gst.SECOND
        obj.duration = duration
        self.failUnlessEqual(obj.duration, duration)
        self.failUnlessEqual(gnl_object.props.duration, duration)
        self.failUnlessEqual(self.monitor.duration_changed_count, 1)

        in_point = 5 * gst.SECOND
        obj.in_point = in_point
        self.failUnlessEqual(obj.in_point, in_point)
        self.failUnlessEqual(gnl_object.props.media_start, in_point)
        self.failUnlessEqual(self.monitor.in_point_changed_count, 1)

        media_duration = 5 * gst.SECOND
        obj.media_duration = media_duration
        self.failUnlessEqual(obj.media_duration, media_duration)
        self.failUnlessEqual(gnl_object.props.media_duration,
                media_duration)
        # out_point is derived: in_point + media_duration
        self.failUnlessEqual(obj.out_point, in_point + media_duration)
        self.failUnlessEqual(gnl_object.props.media_stop,
                in_point + media_duration)
        self.failUnlessEqual(self.monitor.media_duration_changed_count, 1)
        self.failUnlessEqual(self.monitor.out_point_changed_count, 1)

        # test video stream
        obj.stream_type = VideoStream
        priority = 100
        gnl_priority = 3 * 100 + 3 + obj._stagger
        obj.priority = priority
        self.failUnlessEqual(obj.priority, priority)
        self.failUnlessEqual(gnl_object.props.priority, gnl_priority)
        self.failUnlessEqual(self.monitor.priority_changed_count, 1)

        # test audio stream
        obj.stream_type = AudioStream
        priority = 55
        gnl_priority = 4 * 55 + 3 + 2 * obj._stagger
        obj.priority = priority
        self.failUnlessEqual(obj.priority, priority)
        self.failUnlessEqual(gnl_object.props.priority, gnl_priority)
        self.failUnlessEqual(self.monitor.priority_changed_count, 2)

    def testChangePropertiesFromGnlObject(self):
        """Setting gnl properties is reflected back on the track object."""
        obj = self.track_object
        gnl_object = obj.gnl_object

        start = 1 * gst.SECOND
        gnl_object.props.start = start
        self.failUnlessEqual(obj.start, start)
        self.failUnlessEqual(self.monitor.start_changed_count, 1)

        duration = 10 * gst.SECOND
        gnl_object.props.duration = duration
        self.failUnlessEqual(obj.duration, duration)
        self.failUnlessEqual(self.monitor.duration_changed_count, 1)

        in_point = 5 * gst.SECOND
        gnl_object.props.media_start = in_point
        self.failUnlessEqual(obj.in_point, in_point)
        self.failUnlessEqual(self.monitor.in_point_changed_count, 1)

        media_duration = 5 * gst.SECOND
        gnl_object.props.media_duration = media_duration
        self.failUnlessEqual(obj.media_duration, media_duration)
        self.failUnlessEqual(self.monitor.media_duration_changed_count, 1)
        self.failUnlessEqual(obj.out_point, in_point + media_duration)
        self.failUnlessEqual(self.monitor.media_duration_changed_count, 1)
        self.failUnlessEqual(self.monitor.out_point_changed_count, 1)

        # video stream
        obj.stream_type = VideoStream
        gnl_priority = 100
        priority = (100 - 2 - obj._stagger) // 3
        gnl_object.props.priority = gnl_priority
        self.failUnlessEqual(obj.priority, priority)
        self.failUnlessEqual(gnl_object.props.priority, gnl_priority)
        self.failUnlessEqual(self.monitor.priority_changed_count, 1)

        # audio stream
        obj.stream_type = AudioStream
        gnl_priority = 55
        priority = (55 - 2 - obj._stagger) // 4
        gnl_object.props.priority = gnl_priority
        self.failUnlessEqual(obj.priority, priority)
        self.failUnlessEqual(gnl_object.props.priority, gnl_priority)
        self.failUnlessEqual(self.monitor.priority_changed_count, 2)

    def testTrimStart(self):
        """trimStart moves the in edge, clamping at both ends."""
        obj = self.track_object
        # start at 2 seconds with length 10 seconds
        obj.start = 2 * gst.SECOND
        obj.in_point = 1 * gst.SECOND
        obj.duration = 10 * gst.SECOND
        self.failUnlessEqual(self.monitor.duration_changed_count, 1)

        # trim at lower edge
        monitor = TrackSignalMonitor(obj)
        time = 2 * gst.SECOND
        obj.trimStart(time)
        self.failUnlessEqual(obj.start, time)
        self.failUnlessEqual(obj.in_point, 1 * gst.SECOND)
        self.failUnlessEqual(obj.duration, 10 * gst.SECOND)
        self.failUnlessEqual(obj.rate, 1)
        # we didn't change the start/in-point/duration (it was the same
        # as before)
        self.failUnlessEqual(monitor.start_changed_count, 0)
        self.failUnlessEqual(monitor.in_point_changed_count, 0)
        self.failUnlessEqual(monitor.duration_changed_count, 0)

        # trim at upper edge
        monitor = TrackSignalMonitor(obj)
        time = 12 * gst.SECOND
        obj.trimStart(time)
        self.failUnlessEqual(obj.start, time)
        self.failUnlessEqual(obj.in_point, 11 * gst.SECOND)
        self.failUnlessEqual(obj.duration, 0)
        self.failUnlessEqual(obj.rate, 1)
        self.failUnlessEqual(monitor.start_changed_count, 1)
        self.failUnlessEqual(monitor.in_point_changed_count, 1)
        self.failUnlessEqual(monitor.duration_changed_count, 1)

        # trim before lower edge, should clamp
        monitor = TrackSignalMonitor(obj)
        time = 0 * gst.SECOND
        obj.trimStart(time)
        self.failUnlessEqual(obj.start, 1 * gst.SECOND)
        self.failUnlessEqual(obj.in_point, 0)
        self.failUnlessEqual(obj.duration, 11 * gst.SECOND)
        self.failUnlessEqual(obj.rate, 1)
        self.failUnlessEqual(monitor.start_changed_count, 1)
        self.failUnlessEqual(monitor.in_point_changed_count, 1)
        self.failUnlessEqual(monitor.duration_changed_count, 1)

        # trim past upper edge, should clamp
        monitor = TrackSignalMonitor(obj)
        time = 13 * gst.SECOND
        obj.trimStart(time)
        self.failUnlessEqual(obj.start, 12 * gst.SECOND)
        self.failUnlessEqual(obj.in_point, 11 * gst.SECOND)
        self.failUnlessEqual(obj.duration, 0)
        self.failUnlessEqual(obj.rate, 1)
        self.failUnlessEqual(monitor.start_changed_count, 1)
        self.failUnlessEqual(monitor.in_point_changed_count, 1)
        self.failUnlessEqual(monitor.duration_changed_count, 1)

        # trim somewhere in the middle
        monitor = TrackSignalMonitor(obj)
        time = 7 * gst.SECOND
        obj.trimStart(time)
        self.failUnlessEqual(obj.start, time)
        self.failUnlessEqual(obj.in_point, 6 * gst.SECOND)
        self.failUnlessEqual(obj.duration, 5 * gst.SECOND)
        self.failUnlessEqual(obj.rate, 1)
        self.failUnlessEqual(monitor.start_changed_count, 1)
        self.failUnlessEqual(monitor.in_point_changed_count, 1)
        self.failUnlessEqual(monitor.duration_changed_count, 1)

        obj.start = 10 * gst.SECOND
        obj.in_point = 11 * gst.SECOND
        obj.duration = 15 * gst.SECOND
        # this should be possible
        monitor = TrackSignalMonitor(obj)
        time = 0 * gst.SECOND
        obj.trimStart(time)
        self.failUnlessEqual(obj.start, 0 * gst.SECOND)
        self.failUnlessEqual(obj.in_point, 1 * gst.SECOND)
        self.failUnlessEqual(obj.duration, 25 * gst.SECOND)
        self.failUnlessEqual(obj.rate, 1)
        self.failUnlessEqual(monitor.start_changed_count, 1)
        self.failUnlessEqual(monitor.in_point_changed_count, 1)
        self.failUnlessEqual(monitor.duration_changed_count, 1)

    def testSplitObject(self):
        """splitObject splits times, durations and keyframes correctly."""
        DURATION = 10 * gst.SECOND
        factory = AudioTestSourceFactory()
        factory.duration = DURATION
        stream_ = AudioStream(gst.Caps("audio/x-raw-int"))
        obj = SourceTrackObject(factory, stream_)
        track = Track(stream_)
        track.addTrackObject(obj)

        obj.start = 3 * gst.SECOND
        obj.duration = DURATION

        # create a zig-zag volume curve
        interpolator = obj.getInterpolator("volume")
        expected = dict(((t * gst.SECOND,
                (t % 2, gst.INTERPOLATE_LINEAR))
                for t in xrange(3, 10, 3)))
        for time, (value, mode) in expected.iteritems():
            interpolator.newKeyframe(time, value, mode)

        def getKeyframes(obj):
            keyframes = obj.getInterpolator("volume").getInteriorKeyframes()
            return dict(((kf.time, (kf.value, kf.mode))
                    for kf in keyframes))

        monitor = TrackSignalMonitor(obj)

        # splitting outside [start, start + duration] must raise
        self.failUnlessRaises(TrackError, obj.splitObject, 2 * gst.SECOND)
        self.failUnlessRaises(TrackError, obj.splitObject,
                14 * gst.SECOND)
        # should these be possible (ie create empty objects) ?
        self.failUnlessRaises(TrackError, obj.splitObject, 3 * gst.SECOND)
        self.failUnlessRaises(TrackError, obj.splitObject,
                13 * gst.SECOND)

        # splitObject at 4s should result in:
        # obj (start 3, end 4) other1 (start 4, end 13)
        other1 = obj.splitObject(4 * gst.SECOND)
        self.failUnlessEqual(expected, getKeyframes(other1))

        self.failUnlessEqual(obj.start, 3 * gst.SECOND)
        self.failUnlessEqual(obj.in_point, 0 * gst.SECOND)
        self.failUnlessEqual(obj.duration, 1 * gst.SECOND)
        self.failUnlessEqual(obj.rate, 1)

        self.failUnlessEqual(other1.start, 4 * gst.SECOND)
        self.failUnlessEqual(other1.in_point, 1 * gst.SECOND)
        self.failUnlessEqual(other1.duration, 9 * gst.SECOND)
        self.failUnlessEqual(other1.rate, 1)

        self.failUnlessEqual(monitor.start_changed_count, 0)
        self.failUnlessEqual(monitor.duration_changed_count, 1)

        # move other1 back to start = 1
        other1.start = 1 * gst.SECOND

        # splitObject again other1
        monitor = TrackSignalMonitor(other1)
        other2 = other1.splitObject(6 * gst.SECOND)

        self.failUnlessEqual(other1.start, 1 * gst.SECOND)
        self.failUnlessEqual(other1.in_point, 1 * gst.SECOND)
        self.failUnlessEqual(other1.duration, 5 * gst.SECOND)
        self.failUnlessEqual(other1.rate, 1)

        self.failUnlessEqual(other2.start, 6 * gst.SECOND)
        self.failUnlessEqual(other2.in_point, 6 * gst.SECOND)
        self.failUnlessEqual(other2.duration, 4 * gst.SECOND)
        self.failUnlessEqual(other2.rate, 1)

        self.failUnlessEqual(monitor.start_changed_count, 0)
        self.failUnlessEqual(monitor.duration_changed_count, 1)

    def testSplitObjectKeyframes(self):
        """Splitting distributes interior keyframes between the halves."""
        DURATION = 10 * gst.SECOND
        factory = AudioTestSourceFactory()
        factory.duration = DURATION
        stream_ = AudioStream(gst.Caps("audio/x-raw-int"))
        obj = SourceTrackObject(factory, stream_)
        track = Track(stream_)
        track.addTrackObject(obj)

        obj.start = 3 * gst.SECOND
        obj.duration = DURATION

        # create three keyframes at: 3, 6 and 9 seconds
        interpolator = obj.getInterpolator("volume")
        keyframes = dict(((t * gst.SECOND,
                (t % 2, gst.INTERPOLATE_LINEAR))
                for t in xrange(3, 10, 3)))
        expected = []
        expected2 = []
        for time, (value, mode) in keyframes.iteritems():
            kf = interpolator.newKeyframe(time, value, mode)
            if time < (5 * gst.SECOND):
                expected.append(kf)
            else:
                expected2.append(kf)

        def getKeyframes(obj):
            keyframes = obj.getInterpolator("volume").getInteriorKeyframes()
            return list(keyframes)

        obj2 = obj.splitObject(8 * gst.SECOND)
        keyframes = getKeyframes(obj)
        keyframes2 = getKeyframes(obj2)
        self.failUnlessEqual(keyframes, expected)
        self.failUnlessEqual(keyframes2, expected2)
def testSplitObject(self):
    """splitObject splits times/durations, moves keyframes, and raises for
    split points outside the object (or exactly on its edges)."""
    DURATION = 10 * gst.SECOND
    factory = AudioTestSourceFactory()
    factory.duration = DURATION
    stream_ = AudioStream(gst.Caps("audio/x-raw-int"))
    obj = SourceTrackObject(factory, stream_)
    track = Track(stream_)
    track.addTrackObject(obj)
    # object occupies [3s, 13s] on the timeline
    obj.start = 3 * gst.SECOND
    obj.duration = DURATION
    # create a zig-zag volume curve
    interpolator = obj.getInterpolator("volume")
    expected = dict(((t * gst.SECOND, (t % 2, gst.INTERPOLATE_LINEAR))
            for t in xrange(3, 10, 3)))
    for time, (value, mode) in expected.iteritems():
        interpolator.newKeyframe(time, value, mode)

    def getKeyframes(obj):
        # map time -> (value, mode) for comparison against `expected`
        keyframes = obj.getInterpolator("volume").getInteriorKeyframes()
        return dict(((kf.time, (kf.value, kf.mode)) for kf in keyframes))

    monitor = TrackSignalMonitor(obj)
    # split points before start / after end must raise
    self.failUnlessRaises(TrackError, obj.splitObject, 2 * gst.SECOND)
    self.failUnlessRaises(TrackError, obj.splitObject, 14 * gst.SECOND)
    # should these be possible (ie create empty objects) ?
    self.failUnlessRaises(TrackError, obj.splitObject, 3 * gst.SECOND)
    self.failUnlessRaises(TrackError, obj.splitObject, 13 * gst.SECOND)
    # splitObject at 4s should result in:
    # obj (start 3, end 4) other1 (start 4, end 13)
    other1 = obj.splitObject(4 * gst.SECOND)
    # all keyframes (at 3, 6, 9s) land in the second half
    self.failUnlessEqual(expected, getKeyframes(other1))
    self.failUnlessEqual(obj.start, 3 * gst.SECOND)
    self.failUnlessEqual(obj.in_point, 0 * gst.SECOND)
    self.failUnlessEqual(obj.duration, 1 * gst.SECOND)
    self.failUnlessEqual(obj.rate, 1)
    self.failUnlessEqual(other1.start, 4 * gst.SECOND)
    self.failUnlessEqual(other1.in_point, 1 * gst.SECOND)
    self.failUnlessEqual(other1.duration, 9 * gst.SECOND)
    self.failUnlessEqual(other1.rate, 1)
    # only obj's duration changed; its start stayed put
    self.failUnlessEqual(monitor.start_changed_count, 0)
    self.failUnlessEqual(monitor.duration_changed_count, 1)
    # move other1 back to start = 1
    other1.start = 1 * gst.SECOND
    # splitObject again other1
    monitor = TrackSignalMonitor(other1)
    other2 = other1.splitObject(6 * gst.SECOND)
    self.failUnlessEqual(other1.start, 1 * gst.SECOND)
    self.failUnlessEqual(other1.in_point, 1 * gst.SECOND)
    self.failUnlessEqual(other1.duration, 5 * gst.SECOND)
    self.failUnlessEqual(other1.rate, 1)
    self.failUnlessEqual(other2.start, 6 * gst.SECOND)
    self.failUnlessEqual(other2.in_point, 6 * gst.SECOND)
    self.failUnlessEqual(other2.duration, 4 * gst.SECOND)
    self.failUnlessEqual(other2.rate, 1)
    self.failUnlessEqual(monitor.start_changed_count, 0)
    self.failUnlessEqual(monitor.duration_changed_count, 1)
def make_track_object(stream):
    """Return a 15-second video test-source track object for *stream*."""
    source = VideoTestSourceFactory()
    source.duration = 15 * gst.SECOND
    return SourceTrackObject(source, stream)
def testTransitionProperties(self):
    """Audio/video transitions track the overlap of their two objects:
    start/duration follow the objects' edges, priority follows the
    objects' priorities, and the fade controllers flip direction with the
    objects' even/odd stagger positions."""
    factory = self.factory
    track1 = self.track1
    # disable automatic transition management; transitions are created
    # explicitly below
    track1._update_transitions = False
    stream = self.stream
    # two clips overlapping in [5s, 10s)
    test_data = [
        ("a", 0, 10),
        ("b", 5, 15),
    ]
    objs = {}
    names = {}
    for name, start, end in test_data:
        obj = SourceTrackObject(factory, stream)
        obj.start = start * gst.SECOND
        obj.in_point = 0
        obj.duration = end * gst.SECOND - obj.start
        obj.media_duration = obj.duration
        track1.addTrackObject(obj)
        names[obj] = name
        objs[name] = obj
    # add transitions and check that initial properties are properly
    # evaluated
    at = AudioTransition(objs["a"], objs["b"])
    vt = VideoTransition(objs["a"], objs["b"])
    # move a and b together,
    # check that transition start, duration are updated
    objs["a"].start = 5 * gst.SECOND
    objs["b"].start = 10 * gst.SECOND
    self.failUnlessEqual(vt.start, 10 * gst.SECOND)
    self.failUnlessEqual(vt.duration, 5 * gst.SECOND)
    self.failUnlessEqual(vt.operation.props.start, 10 * gst.SECOND)
    self.failUnlessEqual(vt.operation.props.duration, 5 * gst.SECOND)
    self.failUnlessEqual(at.start, 10 * gst.SECOND)
    self.failUnlessEqual(at.duration, 5 * gst.SECOND)
    self.failUnlessEqual(at.a_operation.props.start, 10 * gst.SECOND)
    self.failUnlessEqual(at.a_operation.props.duration, 5 * gst.SECOND)
    self.failUnlessEqual(at.b_operation.props.start, 10 * gst.SECOND)
    self.failUnlessEqual(at.b_operation.props.duration, 5 * gst.SECOND)
    # make A longer: overlap grows to 6s
    objs["a"].duration = 11 * gst.SECOND
    self.failUnlessEqual(vt.start, 10 * gst.SECOND)
    self.failUnlessEqual(vt.duration, 6 * gst.SECOND)
    self.failUnlessEqual(vt.operation.props.start, 10 * gst.SECOND)
    self.failUnlessEqual(vt.operation.props.duration, 6 * gst.SECOND)
    self.failUnlessEqual(at.start, 10 * gst.SECOND)
    self.failUnlessEqual(at.duration, 6 * gst.SECOND)
    self.failUnlessEqual(at.a_operation.props.start, 10 * gst.SECOND)
    self.failUnlessEqual(at.a_operation.props.duration, 6 * gst.SECOND)
    self.failUnlessEqual(at.b_operation.props.start, 10 * gst.SECOND)
    self.failUnlessEqual(at.b_operation.props.duration, 6 * gst.SECOND)
    # move B earlier: overlap grows to 7s starting at 9s
    objs["b"].start = 9 * gst.SECOND
    self.failUnlessEqual(vt.start, 9 * gst.SECOND)
    self.failUnlessEqual(vt.duration, 7 * gst.SECOND)
    self.failUnlessEqual(vt.operation.props.start, 9 * gst.SECOND)
    self.failUnlessEqual(vt.operation.props.duration, 7 * gst.SECOND)
    self.failUnlessEqual(vt.operation.props.media_duration,
        7 * gst.SECOND)
    self.failUnlessEqual(at.start, 9 * gst.SECOND)
    self.failUnlessEqual(at.duration, 7 * gst.SECOND)
    self.failUnlessEqual(at.a_operation.props.start, 9 * gst.SECOND)
    self.failUnlessEqual(at.a_operation.props.duration, 7 * gst.SECOND)
    self.failUnlessEqual(at.a_operation.props.media_duration,
        7 * gst.SECOND)
    self.failUnlessEqual(at.b_operation.props.start, 9 * gst.SECOND)
    self.failUnlessEqual(at.b_operation.props.duration, 7 * gst.SECOND)
    self.failUnlessEqual(at.b_operation.props.media_duration,
        7 * gst.SECOND)
    # check priority is currently zero
    self.failUnlessEqual(vt.priority, 0)
    # check video transition priority basic properties
    self.failUnlessEqual(vt.operation.props.priority, 1)
    vt.a.priority = 2
    vt.b.priority = 2
    self.failUnlessEqual(vt.priority, 2)
    self.failUnlessEqual(vt.operation.props.priority, 7)
    self.failUnlessEqual(at.priority, 2)
    # check controller for even - odd stagger
    vt.a.updatePosition(0)
    vt.b.updatePosition(1)
    self.failUnlessEqual(vt.a.gnl_object.props.priority, 9)
    self.failUnlessEqual(at.a._stagger, 0)
    self.failUnlessEqual(at.b._stagger, 1)
    self.failUnlessEqual(vt.b.gnl_object.props.priority, 10)
    # alpha fades a out while b's volume fades in
    self.failUnlessEqual(vt.controller.get("alpha", 0), 1.0)
    self.failUnlessEqual(vt.controller.get("alpha", vt.duration), 0.0)
    self.failUnlessEqual(at.a_controller.get("volume", 0), 1.0)
    self.failUnlessEqual(at.a_controller.get("volume", vt.duration), 0.0)
    self.failUnlessEqual(at.b_controller.get("volume", 0), 0.0)
    self.failUnlessEqual(at.b_controller.get("volume", vt.duration), 1.0)
    self.failUnlessEqual(at.a_operation.props.priority, 9)
    self.failUnlessEqual(at.b_operation.props.priority, 11)
    # check controller for odd - even stagger: fade direction and
    # operation priorities swap
    vt.a.updatePosition(1)
    vt.b.updatePosition(2)
    self.failUnlessEqual(vt.controller.get("alpha", 0), 0.0)
    self.failUnlessEqual(vt.controller.get("alpha", vt.duration), 1.0)
    self.failUnlessEqual(at.a_controller.get("volume", 0), 1.0)
    self.failUnlessEqual(at.a_controller.get("volume", vt.duration), 0.0)
    self.failUnlessEqual(at.b_controller.get("volume", 0), 0.0)
    self.failUnlessEqual(at.b_controller.get("volume", vt.duration), 1.0)
    self.failUnlessEqual(at.a_operation.props.priority, 11)
    self.failUnlessEqual(at.b_operation.props.priority, 9)
def testGetNextTrackObject(self):
    """Check getNextTrackObject() with default, explicit and wildcard priority."""
    track = self.track1
    # create four sources and add them to the track before configuring them
    sources = []
    for _ in range(4):
        source = SourceTrackObject(self.factory, self.stream)
        track.addTrackObject(source)
        sources.append(source)
    obj1, obj2, obj3, obj4 = sources
    # layout: obj1 and obj2 on layer 1, obj3 on layer 2, obj4 on layer 3
    layout = [(1, 1), (8, 1), (6, 2), (7, 3)]
    for source, (start, priority) in zip(sources, layout):
        source.start = start * gst.SECOND
        source.duration = 5 * gst.SECOND
        source.priority = priority
    # obj2 is the last object on its layer: no next object
    self.failUnlessRaises(TrackError, track.getNextTrackObject, obj2)
    # default: same priority as the given object
    self.failUnlessEqual(track.getNextTrackObject(obj1), obj2)
    # explicitly given priority
    self.failUnlessEqual(track.getNextTrackObject(obj1, priority=2), obj3)
    # priority=None matches any layer
    self.failUnlessEqual(track.getNextTrackObject(obj3, priority=None), obj4)
def testUpdateAfterAddingAndRemovingTrackObjects(self):
    """Transitions appear/disappear as overlapping objects are added/removed."""
    track1 = self.track1
    test_data = [
        ("a", 0, 10),
        ("b", 5, 15),
        ("c", 20, 25),
        ("d", 30, 35),
        ("f", 35, 45),
        ("g", 40, 50),
        ("e", 30, 35),
        ("h", 50, 60),
        ("i", 55, 65),
        ("j", 57, 60),
        ("k", 62, 70),
        ("l", 63, 67),
    ]
    # adding object "j" makes the track arrangement invalid, and it stays
    # invalid for the rest of the additions
    valid_in_order = [True] * 9 + [False] * 3
    added_in_order = [("a", "b"), ("f", "g"), ("d", "e"), ("h", "i"),
        ("i", "k"), ("h", "i")]
    removed_in_order = [("h", "i"), ("i", "k")]
    objs = {}
    names = {}
    added = []
    removed = []

    def transitionAddedCb(track, transition):
        added.append((names[transition.a], names[transition.b]))

    def transitionRemovedCb(track, transition):
        removed.append((names[transition.a], names[transition.b]))

    track1.connect("transition-added", transitionAddedCb)
    track1.connect("transition-removed", transitionRemovedCb)
    valid = []
    for name, start, end in test_data:
        source = SourceTrackObject(self.factory, self.stream)
        source.start = start * gst.SECOND
        source.in_point = 0
        source.duration = end * gst.SECOND - source.start
        source.media_duration = source.duration
        names[source] = name
        objs[name] = source
        track1.addTrackObject(source)
        # record validity after every addition
        valid.append(track1.valid_arrangement)
    # removing this object brings (h, i) back
    track1.removeTrackObject(objs["j"])
    self.failUnlessEqual(added, added_in_order)
    self.failUnlessEqual(removed, removed_in_order)
    self.failUnlessEqual(valid, valid_in_order)
    # removing this should make the track valid again
    track1.removeTrackObject(objs["l"])
    self.failUnlessEqual(track1.valid_arrangement, True)
def testUpdatesAfterEnablingUpdates(self):
    """Transition updates are deferred while updates are disabled and applied
    in one batch when enableUpdates() is called."""
    track1 = self.track1
    initial_layout = [
        ("a", 0, 10), ("b", 5, 15), ("c", 20, 25), ("d", 30, 35),
        ("e", 30, 35), ("f", 35, 45), ("g", 40, 50), ("h", 50, 60),
        ("i", 55, 65), ("j", 57, 60), ("k", 62, 70), ("l", 63, 67),
    ]
    expected = [("a", "b"), ("d", "e"), ("f", "g")]
    result = []
    added = set()
    removed = set()

    def transitionAddedCb(track, transition):
        pair = (names[transition.a], names[transition.b])
        result.append(pair)
        added.add(pair)

    def transitionRemovedCb(track, transition):
        pair = (names[transition.a], names[transition.b])
        result.remove(pair)
        removed.add(pair)

    track1.connect("transition-added", transitionAddedCb)
    track1.connect("transition-removed", transitionRemovedCb)
    objs = {}
    names = {}
    for name, start, end in initial_layout:
        source = SourceTrackObject(self.factory, self.stream)
        source.start = start * gst.SECOND
        source.in_point = 0
        source.duration = end * gst.SECOND - source.start
        source.media_duration = source.duration
        names[source] = name
        objs[name] = source
        track1.addTrackObject(source)
    self.failUnlessEqual(result, expected)

    track1.disableUpdates()
    # move c so that it overlaps with b
    # move g so that it overlaps d, e, f
    moves = [("c", 12, 20), ("g", 30, 46)]
    expected = [("a", "b"), ("b", "c")]
    added = set()
    removed = set()
    for name, start, end in moves:
        objs[name].start = start * gst.SECOND
        objs[name].duration = (end - start) * gst.SECOND
    # applying the updates should leave the expected configuration
    track1.enableUpdates()
    self.failUnlessEqual(result, expected)
    # check that *only* (b, c) was added in the update
    self.failUnlessEqual(added, set([("b", "c")]))
    # check that *only* (d, e) and (f, g) were removed in the update
    self.failUnlessEqual(removed, set([("d", "e"), ("f", "g")]))

    # move c to a different layer: the (b, c) transition goes away
    track1.disableUpdates()
    added = set()
    removed = set()
    objs["c"].priority = 1
    expected = [("a", "b")]
    track1.enableUpdates()
    self.failUnlessEqual(result, expected)
    self.failUnlessEqual(added, set())
    self.failUnlessEqual(removed, set([("b", "c")]))
def testUpdatesAfterEnablingUpdates(self):
    """Deferred transition updates are batched by disable/enableUpdates().

    NOTE(review): this is an exact duplicate of an earlier definition of the
    same method in this file; only the last definition is collected by the
    test loader, so the earlier copy never runs. Consider removing one.
    """
    track1 = self.track1
    factory = self.factory
    stream = self.stream

    def make_source(start, end):
        # build a fully-configured source clip spanning [start, end) seconds
        source = SourceTrackObject(factory, stream)
        source.start = start * gst.SECOND
        source.in_point = 0
        source.duration = end * gst.SECOND - source.start
        source.media_duration = source.duration
        return source

    test_data = [
        ("a", 0, 10), ("b", 5, 15), ("c", 20, 25), ("d", 30, 35),
        ("e", 30, 35), ("f", 35, 45), ("g", 40, 50), ("h", 50, 60),
        ("i", 55, 65), ("j", 57, 60), ("k", 62, 70), ("l", 63, 67),
    ]
    expected = [("a", "b"), ("d", "e"), ("f", "g")]
    result = []
    added = set()
    removed = set()

    def transitionAddedCb(track, transition):
        pair = (names[transition.a], names[transition.b])
        result.append(pair)
        added.add(pair)

    def transitionRemovedCb(track, transition):
        pair = (names[transition.a], names[transition.b])
        result.remove(pair)
        removed.add(pair)

    track1.connect("transition-added", transitionAddedCb)
    track1.connect("transition-removed", transitionRemovedCb)
    objs = {}
    names = {}
    for name, start, end in test_data:
        obj = make_source(start, end)
        names[obj] = name
        objs[name] = obj
        track1.addTrackObject(obj)
    self.failUnlessEqual(result, expected)

    track1.disableUpdates()
    # move c so that it overlaps with b
    # move g so that it overlaps d, e, f
    test_data = [("c", 12, 20), ("g", 30, 46)]
    expected = [("a", "b"), ("b", "c")]
    added = set()
    removed = set()
    for name, start, end in test_data:
        objs[name].start = start * gst.SECOND
        objs[name].duration = (end - start) * gst.SECOND
    track1.enableUpdates()
    self.failUnlessEqual(result, expected)
    # check that *only* (b, c) was added in the update
    self.failUnlessEqual(added, set([("b", "c")]))
    # check that *only* (d, e) and (f, g) were removed in the update
    self.failUnlessEqual(removed, set([("d", "e"), ("f", "g")]))

    # move c to a different layer; the (b, c) transition is removed
    track1.disableUpdates()
    added = set()
    removed = set()
    objs["c"].priority = 1
    expected = [("a", "b")]
    track1.enableUpdates()
    self.failUnlessEqual(result, expected)
    self.failUnlessEqual(added, set())
    self.failUnlessEqual(removed, set([("b", "c")]))
def testAddRemoveTransitions(self):
    """Manually add/remove transitions with automatic updates switched off."""
    track1 = self.track1
    track1._update_transitions = False
    test_data = [
        ("a", 0, 10),
        ("b", 5, 15),
        ("c", 15, 20),
        ("d", 30, 35),
        ("e", 30, 35),
    ]
    transitions = [("a", "b"), ("d", "e")]
    objs = {}
    names = {}
    for name, start, end in test_data:
        source = SourceTrackObject(self.factory, self.stream)
        source.start = start * gst.SECOND
        source.in_point = 0
        source.duration = end * gst.SECOND - source.start
        source.media_duration = source.duration
        track1.addTrackObject(source)
        names[source] = name
        objs[name] = source
    result = []
    transition_objects = {}

    def addTransition(name_a, name_b):
        track1.addTransition(Transition(objs[name_a], objs[name_b]))

    def transitionAddedCb(track, transition):
        pair = (names[transition.a], names[transition.b])
        result.append(pair)
        transition_objects[pair] = transition

    def transitionRemovedCb(track, transition):
        result.remove((names[transition.a], names[transition.b]))

    track1.connect("transition-added", transitionAddedCb)
    track1.connect("transition-removed", transitionRemovedCb)
    # add transitions and check that initial properties are properly
    # evaluated
    for a, b in transitions:
        addTransition(a, b)
    self.failUnlessEqual(result, transitions)
    # adding a transition that refers to an object no longer in the track
    # raises an error
    track1.removeTrackObject(objs["c"])
    self.failUnlessRaises(TrackError, addTransition, "b", "c")
    # adding a transition that already exists raises an error
    self.failUnlessRaises(TrackError, addTransition, "d", "e")
    # removing a transition directly works
    track1.removeTransition(transition_objects["d", "e"])
    self.failUnlessEqual(result, [("a", "b")])
    # check that we can restore a transition after deleting it
    addTransition("d", "e")
    self.failUnlessEqual(result, [("a", "b"), ("d", "e")])
def testAddRemoveTransitions(self):
    """Exercise explicit addTransition()/removeTransition() on a track.

    NOTE(review): this is an exact duplicate of an earlier definition of the
    same method in this file; only the last definition is collected by the
    test loader. Consider removing one copy.
    """
    track1 = self.track1
    # automatic transition management is turned off for this test
    track1._update_transitions = False
    factory = self.factory
    stream = self.stream
    clips = [
        ("a", 0, 10),
        ("b", 5, 15),
        ("c", 15, 20),
        ("d", 30, 35),
        ("e", 30, 35),
    ]
    transitions = [("a", "b"), ("d", "e")]
    objs = {}
    names = {}
    for name, start, end in clips:
        clip = SourceTrackObject(factory, stream)
        clip.start = start * gst.SECOND
        clip.in_point = 0
        clip.duration = end * gst.SECOND - clip.start
        clip.media_duration = clip.duration
        track1.addTrackObject(clip)
        names[clip] = name
        objs[name] = clip
    result = []
    transition_objects = {}

    def addTransition(first, second):
        track1.addTransition(Transition(objs[first], objs[second]))

    def transitionAddedCb(track, transition):
        key = (names[transition.a], names[transition.b])
        result.append(key)
        transition_objects[key] = transition

    def transitionRemovedCb(track, transition):
        result.remove((names[transition.a], names[transition.b]))

    track1.connect("transition-added", transitionAddedCb)
    track1.connect("transition-removed", transitionRemovedCb)
    # add transitions and check that initial properties are properly
    # evaluated
    for a, b in transitions:
        addTransition(a, b)
    self.failUnlessEqual(result, transitions)
    # a transition involving an object that was removed from the track
    # raises an error
    track1.removeTrackObject(objs["c"])
    self.failUnlessRaises(TrackError, addTransition, "b", "c")
    # a transition that already exists raises an error
    self.failUnlessRaises(TrackError, addTransition, "d", "e")
    # removing a transition directly works
    track1.removeTransition(transition_objects["d", "e"])
    self.failUnlessEqual(result, [("a", "b")])
    # check that we can restore a transition after deleting it
    addTransition("d", "e")
    self.failUnlessEqual(result, [("a", "b"), ("d", "e")])
def testGetValidTransitionSlots(self):
    """getValidTransitionSlots() groups overlapping pairs and reports validity."""
    track1 = self.track1
    layout = [
        ("a", 0, 10), ("b", 5, 15), ("c", 20, 25), ("d", 30, 35),
        ("e", 30, 35), ("f", 35, 45), ("g", 40, 50), ("h", 50, 60),
        ("i", 55, 65), ("j", 57, 60), ("k", 62, 70), ("l", 63, 67),
    ]
    expected = [["a", "b"], ["d", "e"], ["f", "g"]]
    objs = {}
    names = {}
    ordered = []
    for name, start, end in layout:
        source = SourceTrackObject(self.factory, self.stream)
        source.start = start * gst.SECOND
        source.duration = end * gst.SECOND - source.start
        track1.addTrackObject(source)
        objs[name] = source
        names[source] = name
        ordered.append(source)
    slots, valid = track1.getValidTransitionSlots(ordered)
    result = [[names[source] for source in slot] for slot in slots]
    self.failUnlessEqual(result, expected)
    self.failUnlessEqual(track1.valid_arrangement, False)

    def rearrange(layout):
        # reposition existing objects and return them in the given order
        sequence = []
        for name, start, end in layout:
            sequence.append(objs[name])
            objs[name].start = gst.SECOND * start
            objs[name].duration = gst.SECOND * (end - start)
        return sequence

    # an arrangement with an invalid overlap is reported as such
    ordered = rearrange([("a", 0, 5), ("b", 9, 12), ("c", 8, 13)])
    slots, valid = track1.getValidTransitionSlots(ordered)
    self.failUnlessEqual(valid, False)
    # another invalid arrangement (b fully inside a)
    ordered = rearrange([("a", 0, 5), ("b", 1, 4), ("c", 8, 13)])
    slots, valid = track1.getValidTransitionSlots(ordered)
    self.failUnlessEqual(valid, False)
def testMaxPriority(self):
    """max_priority follows the highest priority among the track's objects."""
    track = self.track1

    def make_source(priority):
        # a fresh source pre-configured with the given priority
        source = SourceTrackObject(self.factory, self.stream)
        source.priority = priority
        return source

    obj1 = make_source(10)
    # empty track: max_priority is 0
    self.failUnlessEqual(track.max_priority, 0)
    track.addTrackObject(obj1)
    self.failUnlessEqual(track.max_priority, 10)
    # a lower-priority object does not change the maximum
    obj2 = make_source(5)
    track.addTrackObject(obj2)
    self.failUnlessEqual(track.max_priority, 10)
    # a higher-priority object raises it
    obj3 = make_source(14)
    track.addTrackObject(obj3)
    self.failUnlessEqual(track.max_priority, 14)
    # changing priorities of objects already in the track is tracked too
    obj3.priority = 9
    self.failUnlessEqual(track.max_priority, 10)
    obj2.priority = 11
    self.failUnlessEqual(track.max_priority, 11)
    # removals recompute the maximum
    track.removeTrackObject(obj1)
    self.failUnlessEqual(track.max_priority, 11)
    track.removeTrackObject(obj2)
    self.failUnlessEqual(track.max_priority, 9)
    track.removeTrackObject(obj3)
    self.failUnlessEqual(track.max_priority, 0)
def testGetValidTransitionSlots(self):
    """Check slot computation and the validity flag of getValidTransitionSlots().

    NOTE(review): this is an exact duplicate of an earlier definition of the
    same method in this file; only the last definition is collected by the
    test loader. Consider removing one copy.
    """
    factory = self.factory
    stream = self.stream
    track1 = self.track1
    test_data = [
        ("a", 0, 10), ("b", 5, 15), ("c", 20, 25), ("d", 30, 35),
        ("e", 30, 35), ("f", 35, 45), ("g", 40, 50), ("h", 50, 60),
        ("i", 55, 65), ("j", 57, 60), ("k", 62, 70), ("l", 63, 67),
    ]
    expected = [["a", "b"], ["d", "e"], ["f", "g"]]
    objs = {}
    names = {}
    ordered = []
    for name, start, end in test_data:
        obj = SourceTrackObject(factory, stream)
        obj.start = start * gst.SECOND
        obj.duration = end * gst.SECOND - obj.start
        track1.addTrackObject(obj)
        objs[name] = obj
        names[obj] = name
        ordered.append(obj)
    slots, valid = track1.getValidTransitionSlots(ordered)
    self.failUnlessEqual(
        [[names[obj] for obj in layer] for layer in slots], expected)
    self.failUnlessEqual(track1.valid_arrangement, False)
    # reposition a, b, c into an arrangement with an invalid overlap
    test_data = [("a", 0, 5), ("b", 9, 12), ("c", 8, 13)]
    ordered = []
    for name, start, end in test_data:
        ordered.append(objs[name])
        objs[name].start = gst.SECOND * start
        objs[name].duration = gst.SECOND * (end - start)
    slots, valid = track1.getValidTransitionSlots(ordered)
    self.failUnlessEqual(valid, False)
    # another invalid arrangement (b fully contained in a)
    test_data = [("a", 0, 5), ("b", 1, 4), ("c", 8, 13)]
    ordered = []
    for name, start, end in test_data:
        ordered.append(objs[name])
        objs[name].start = gst.SECOND * start
        objs[name].duration = gst.SECOND * (end - start)
    slots, valid = track1.getValidTransitionSlots(ordered)
    self.failUnlessEqual(valid, False)
def testSaveTrackSource(self):
    """A serialized track object carries its timing attributes, refs and curve."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = FileSourceFactory("file1.ogg")
    source1.addOutputStream(video_stream)
    # these two calls are needed to populate the context for the -ref
    # elements
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)
    track_source = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND,
            duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND,
            media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_source)
    # create an interpolator and insert it into the track object
    fakevol = gst.element_factory_make("volume")
    prop = get_controllable_properties(fakevol)[1][1]
    volcurve = Interpolator(track_source, fakevol, prop)
    track_source.interpolators[prop.name] = (prop, volcurve)
    # add some keyframes to the interpolator: value alternates with t % 2
    volcurve.start.setObjectTime(0)
    volcurve.start.value = 0
    for t in xrange(3, 15, 3):
        volcurve.newKeyframe(t * gst.SECOND, int(t % 2))
    volcurve.end.setObjectTime(15 * gst.SECOND)
    volcurve.end.value = 15 % 2

    element = self.formatter._saveTrackObject(track_source)
    self.failUnlessEqual(element.tag, "track-object")
    self.failUnlessEqual(element.attrib["type"],
            qual(track_source.__class__))
    # all four timestamps round-trip through the serializer
    self.failUnlessEqual(element.attrib["start"], ts(10 * gst.SECOND))
    self.failUnlessEqual(element.attrib["duration"], ts(20 * gst.SECOND))
    self.failUnlessEqual(element.attrib["in_point"], ts(5 * gst.SECOND))
    self.failUnlessEqual(element.attrib["media_duration"],
            ts(15 * gst.SECOND))
    self.failUnlessEqual(element.attrib["priority"], "(int)10")
    self.failIfEqual(element.find("factory-ref"), None)
    self.failIfEqual(element.find("stream-ref"), None)
    # find the interpolation keyframes
    curves = element.find("curves")
    self.failIfEqual(curves, None)
    curve = curves.find("curve")
    self.failIfEqual(curve, None)
    self.failUnlessEqual(curve.attrib["property"], "volume")
    # compute a dictionary of keyframes
    saved_points = dict(
        (kf.attrib["time"], (kf.attrib["value"], kf.attrib["mode"]))
        for kf in curve.getiterator("keyframe"))
    # compare this with the expected values
    expected = dict(
        (str(t * gst.SECOND), ("(gdouble)%s" % (t % 2), "2"))
        for t in xrange(3, 15, 3))
    self.failUnlessEqual(expected, saved_points)