def testMatchStreamSameNameAndCompatibleCaps(self):
    """Matching pad name plus intersecting (but not equal) caps ranks as
    SAME_PAD_NAME + COMPATIBLE_CAPS."""
    candidate = AudioStream(gst.Caps("audio/x-vorbis, a={1, 2}"),
                            pad_name="src0")
    other = AudioStream(gst.Caps("audio/x-vorbis, a={2, 3}"),
                        pad_name="src0")
    matched, rank = match_stream(candidate, [other])
    self.failUnlessEqual(id(other), id(matched))
    self.failUnlessEqual(rank,
            STREAM_MATCH_SAME_PAD_NAME + STREAM_MATCH_COMPATIBLE_CAPS)
def testAudioProperties(self):
    """AudioStream exposes rate/channels/width/depth for raw caps and
    None for each of them on non-raw (encoded) caps."""
    expected = {'audiotype': 'audio/x-raw-float', 'rate': 48000,
                'channels': 2, 'width': 32, 'depth': 32}
    stream = AudioStream(self.unfixed_caps)
    self.checkProperties(stream, expected)

    expected = {'audiotype': 'audio/x-raw-int', 'rate': 44100,
                'channels': 2, 'width': 32, 'depth': 32}
    stream = AudioStream(self.fixed_caps)
    self.checkProperties(stream, expected)

    # get None when trying to access these properties with non raw streams
    # NOTE: this is the current behaviour. Does it really make sense to try
    # to access these properties for non raw streams? Should we rather error
    # out?
    expected = dict((name, None) for name in expected.keys())
    expected['audiotype'] = 'audio/x-vorbis'
    stream = AudioStream(self.non_raw_caps)
    self.checkProperties(stream, expected)
def testTracks(self):
    """Adding/removing timeline tracks keeps the factory bin and the
    factory's output streams in sync."""
    timeline = Timeline()
    stream1 = VideoStream(gst.Caps('video/x-raw-rgb'), 'src0')
    stream2 = AudioStream(gst.Caps('audio/x-raw-int'), 'src1')
    track1 = Track(stream1)
    track2 = Track(stream2)

    # start with 2 tracks
    timeline.addTrack(track1)
    timeline.addTrack(track2)
    factory = TimelineSourceFactory(timeline)
    bin = factory.makeBin()
    self.failUnlessEqual(len(list(bin)), 2)
    self.failUnlessEqual(set(factory.getOutputStreams()),
            set([stream1, stream2]))

    # add a new track
    stream3 = AudioStream(gst.Caps('audio/x-raw-int'), 'src2')
    track3 = Track(stream3)
    timeline.addTrack(track3)
    self.failUnlessEqual(len(list(bin)), 3)
    self.failUnlessEqual(set(factory.getOutputStreams()),
            set([stream1, stream2, stream3]))

    # remove a track
    timeline.removeTrack(track3)
    self.failUnlessEqual(len(list(bin)), 2)
    self.failUnlessEqual(set(factory.getOutputStreams()),
            set([stream1, stream2]))
    factory.clean()
def testMatchSamePadName(self):
    """Same pad name with merely same-type (non-intersecting) caps ranks
    as SAME_PAD_NAME + SAME_TYPE."""
    candidate = AudioStream(gst.Caps("audio/x-vorbis"), pad_name="src0")
    other = AudioStream(gst.Caps("audio/x-speex"), pad_name="src0")
    matched, rank = match_stream(candidate, [other])
    self.failUnlessEqual(id(other), id(matched))
    self.failUnlessEqual(rank,
            STREAM_MATCH_SAME_PAD_NAME + STREAM_MATCH_SAME_TYPE)
def testMatchStreamSameNameAndSameCaps(self):
    """Identical pad name and identical caps yield the maximum rank."""
    candidate = AudioStream(gst.Caps("audio/x-vorbis"), pad_name="src0")
    other = AudioStream(gst.Caps("audio/x-vorbis"), pad_name="src0")
    matched, rank = match_stream(candidate, [other])
    self.failUnlessEqual(id(other), id(matched))
    self.failUnlessEqual(
        rank, STREAM_MATCH_SAME_PAD_NAME + STREAM_MATCH_SAME_CAPS)
    # sanity check: this combination is defined to be the maximum
    self.failUnlessEqual(rank, STREAM_MATCH_MAXIMUM)
def testMatchStreamGroupsOrder(self):
    """With several equally good candidates, the first one in group_b wins."""
    stream1 = AudioStream(gst.Caps("audio/x-vorbis"))
    stream2 = AudioStream(gst.Caps("audio/x-vorbis"))
    stream3 = AudioStream(gst.Caps("audio/x-vorbis"))
    known_best_map = {(stream1, stream2): STREAM_MATCH_SAME_CAPS}
    best_map = match_stream_groups([stream1], [stream2, stream3])
    self.failUnlessEqual(known_best_map, best_map)
def testLoadTrackEffect(self):
    """_loadTrackObject rebuilds a TrackEffect (factory, stream and all
    timing attributes) from its serialized element tree."""
    # create fake document tree
    element = Element("track-object",
        type="pitivi.timeline.track.TrackEffect",
        start=ts(1 * gst.SECOND),
        duration=ts(10 * gst.SECOND),
        in_point=ts(5 * gst.SECOND),
        media_duration=ts(15 * gst.SECOND),
        priority=ts(5),
        id="1")
    effect_elem = SubElement(element, "effect")
    factory_elem = SubElement(effect_elem, "factory", name="identity")
    properties_elem = SubElement(effect_elem, "gst-element-properties",
        sync="(bool)True")

    # insert our fake factory into the context
    stream = AudioStream(gst.Caps("audio/x-raw-int"))
    factory = EffectFactory('identity')
    factory.addInputStream(stream)
    factory.addOutputStream(stream)
    self.formatter.avalaible_effects._effect_factories_dict[
        'identity'] = factory

    track = Track(stream)
    track_object = self.formatter._loadTrackObject(track, element)
    self.failUnless(isinstance(track_object, TrackEffect))
    self.failUnlessEqual(track_object.factory, factory)
    self.failUnlessEqual(track_object.stream, stream)
    self.failUnlessEqual(track_object.start, 1 * gst.SECOND)
    self.failUnlessEqual(track_object.duration, 10 * gst.SECOND)
    self.failUnlessEqual(track_object.in_point, 5 * gst.SECOND)
    self.failUnlessEqual(track_object.media_duration, 15 * gst.SECOND)
    self.failUnlessEqual(track_object.priority, 5)
def testSaveTrack(self):
    """_saveTrack serializes a track with one object into a <track>
    element holding a single <track-objects> child."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = VideoTestSourceFactory()
    # these two calls are needed to populate the context for the -ref
    # elements
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)

    element = self.formatter._saveTrack(track)
    self.failUnlessEqual(element.tag, "track")
    track_objects_element = element.find("track-objects")
    self.failUnlessEqual(len(track_objects_element), 1)
def testSavetimelineObjects(self):
    """_saveTimelineObjects wraps each timeline object in one child of the
    returned element."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = FileSourceFactory("file1.ogg")
    source1.addOutputStream(video_stream)
    # these two calls are needed to populate the context for the -ref
    # elements
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    self.formatter._saveTrackObject(track_object)

    timeline_object = TimelineObject(source1)
    timeline_object.addTrackObject(track_object)
    element = self.formatter._saveTimelineObjects([timeline_object])
    self.failUnlessEqual(len(element), 1)
def testPads(self):
    """The factory bin mirrors pads added to / removed from the track
    compositions as its own source pads."""
    timeline = Timeline()
    stream1 = VideoStream(gst.Caps('video/x-raw-rgb'), 'src0')
    stream2 = AudioStream(gst.Caps('audio/x-raw-int'), 'src1')
    track1 = Track(stream1)
    track2 = Track(stream2)
    timeline.addTrack(track1)
    timeline.addTrack(track2)
    factory = TimelineSourceFactory(timeline)
    bin = factory.makeBin()
    self.failUnlessEqual(len(list(bin.src_pads())), 0)

    pad1 = gst.Pad('src0', gst.PAD_SRC)
    pad1.set_caps(gst.Caps('asd'))
    pad1.set_active(True)
    track1.composition.add_pad(pad1)

    pad2 = gst.Pad('src0', gst.PAD_SRC)
    pad2.set_caps(gst.Caps('asd'))
    pad2.set_active(True)
    track2.composition.add_pad(pad2)

    self.failUnlessEqual(len(list(bin.src_pads())), 2)
    track1.composition.remove_pad(pad1)
    self.failUnlessEqual(len(list(bin.src_pads())), 1)
    track2.composition.remove_pad(pad2)
    self.failUnlessEqual(len(list(bin.src_pads())), 0)
    factory.clean()
def newBlankProject(self):
    """
    start up a new blank project

    Closes any running project first; returns False if that close is
    refused, True once the new project is loaded.
    """
    # if there's a running project we must close it
    if self.current is not None and not self.closeRunningProject():
        return False

    # we don't have an URI here, None means we're loading a new project
    self.emit("new-project-loading", None)
    project = Project(_("New Project"))
    self.emit("new-project-created", project)
    self.current = project

    # FIXME: this should not be hard-coded
    # add default tracks for a new project
    video = VideoStream(gst.Caps('video/x-raw-rgb; video/x-raw-yuv'))
    track = Track(video)
    project.timeline.addTrack(track)
    audio = AudioStream(gst.Caps('audio/x-raw-int; audio/x-raw-float'))
    track = Track(audio)
    project.timeline.addTrack(track)

    project.connect("project-changed", self._projectChangedCb)
    self.emit("new-project-loaded", self.current)
    return True
def testSaveTimeline(self):
    """_saveTimeline produces a <timeline> element containing one <tracks>
    child per track."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = VideoTestSourceFactory()
    source1.addOutputStream(video_stream)
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    self.formatter._saveTrackObject(track_object)

    timeline_object = TimelineObject(source1)
    timeline_object.addTrackObject(track_object)
    self.formatter._saveTimelineObject(timeline_object)

    timeline = Timeline()
    timeline.addTrack(track)
    element = self.formatter._saveTimeline(timeline)
    self.failUnlessEqual(element.tag, "timeline")
    tracks = element.find("tracks")
    self.failUnlessEqual(len(tracks), 1)
def setUp(self):
    """Build a stub factory with one audio stream plus the track object and
    signal monitor used by the tests."""
    TestCase.setUp(self)
    stream = AudioStream(gst.Caps("audio/x-raw-int"))
    self.factory = StubFactory()
    gst.debug("%r" % self.factory.duration)
    self.factory.addOutputStream(stream)
    self.track_object = SourceTrackObject(self.factory, stream)
    self.monitor = TrackSignalMonitor(self.track_object)
def setUp(self):
    """Prepare fake video/audio source factories and encoder settings."""
    common.TestCase.setUp(self)
    self.vsrc = common.FakeSourceFactory("videotestsrc")
    self.vsrc.addOutputStream(
        VideoStream(gst.Caps("video/x-raw-yuv"), pad_name="src"))
    self.asrc = common.FakeSourceFactory("audiotestsrc")
    self.asrc.addOutputStream(
        AudioStream(gst.Caps("audio/x-raw-float"), pad_name="src"))
    self.vsettings = StreamEncodeSettings(encoder="theoraenc")
    self.asettings = StreamEncodeSettings(encoder="vorbisenc")
def testSaveFactoryRef(self):
    """A factory-ref element carries the same id as the serialized source
    it points to."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = FileSourceFactory("file1.ogg")
    source1.addOutputStream(video_stream)
    source1.addOutputStream(audio_stream)

    element = self.formatter._saveSource(source1)
    element_ref = self.formatter._saveFactoryRef(source1)
    self.failUnlessEqual(element_ref.tag, "factory-ref")
    self.failUnlessEqual(element_ref.attrib["id"], element.attrib["id"])
def _fillTimeline(self):
    """Add the default audio/video tracks, then drop every discovered
    source factory into the project timeline."""
    # audio and video track
    video = VideoStream(gst.Caps('video/x-raw-rgb; video/x-raw-yuv'))
    track = Track(video)
    self.project.timeline.addTrack(track)
    audio = AudioStream(gst.Caps('audio/x-raw-int; audio/x-raw-float'))
    track = Track(audio)
    self.project.timeline.addTrack(track)

    for uri in self._uris:
        factory = self.project.sources.getUri(uri)
        self.project.timeline.addSourceFactory(factory)
def testMatchStreamNoMatch(self):
    """match_stream returns (None, STREAM_MATCH_NONE) for an empty
    candidate list or when only incompatible stream types exist."""
    audio = AudioStream(gst.Caps("audio/x-vorbis"))
    video = VideoStream(gst.Caps("video/x-theora"))

    matched, rank = match_stream(audio, [])
    self.failUnlessEqual(matched, None)
    self.failUnlessEqual(rank, STREAM_MATCH_NONE)

    matched, rank = match_stream(audio, [video])
    self.failUnlessEqual(matched, None)
    self.failUnlessEqual(rank, STREAM_MATCH_NONE)
def testMakeStreamBin(self):
    """Register two video streams and one audio stream on the factory."""
    # streams are usually populated by the discoverer so here we have to do
    # that ourselves
    video1 = VideoStream(gst.Caps('video/x-raw-rgb, width=2048'),
            pad_name='src0')
    video2 = VideoStream(gst.Caps('video/x-raw-rgb, width=320'),
            pad_name='src1')
    audio = AudioStream(gst.Caps('audio/x-raw-int'), pad_name='src2')
    self.factory.addOutputStream(video1)
    self.factory.addOutputStream(video2)
    self.factory.addOutputStream(audio)
def testSimpleMatch(self):
    """Advancing a StreamGroupWalker over a 2x1 group pairing yields one
    walker per pairing, each with its expected match rank."""
    stream1 = AudioStream(gst.Caps("audio/x-vorbis"))
    stream2 = AudioStream(gst.Caps("audio/x-raw-int"))
    stream3 = AudioStream(gst.Caps("audio/x-vorbis, meh=asd"))
    walker = StreamGroupWalker([stream1, stream2], [stream3])
    walkers = walker.advance()
    self.failUnlessEqual(len(walkers), 2)

    # first pairing: vorbis vs constrained vorbis -> compatible caps
    walker = walkers[0]
    self.failUnlessEqual(walker.advance(), [])
    self.failUnlessEqual(walker.getMatches(),
            {(stream1, stream3): STREAM_MATCH_COMPATIBLE_CAPS})

    # second pairing: raw-int vs vorbis -> only the media type matches
    walker = walkers[1]
    self.failUnlessEqual(walker.advance(), [])
    self.failUnlessEqual(walker.getMatches(),
            {(stream2, stream3): STREAM_MATCH_SAME_TYPE})
def testMatchStreamGroupsBestMatch(self):
    """match_stream_groups finds the globally best assignment regardless of
    the order of the second group."""
    stream1 = AudioStream(gst.Caps("video/x-theora"))
    stream2 = AudioStream(gst.Caps("audio/x-vorbis, meh={FAIL, WIN}"))
    stream3 = AudioStream(gst.Caps("audio/x-vorbis"))
    stream4 = AudioStream(gst.Caps("video/x-theora"))
    stream5 = AudioStream(gst.Caps("audio/x-vorbis, meh=WIN"))
    stream6 = AudioStream(gst.Caps("audio/x-vorbis"))
    known_best_map = {(stream1, stream4): STREAM_MATCH_SAME_CAPS,
            (stream2, stream5): STREAM_MATCH_COMPATIBLE_CAPS,
            (stream3, stream6): STREAM_MATCH_SAME_CAPS}
    group_a = [stream1, stream2, stream3]

    # the same best mapping must come out for any permutation of group_b
    for group_b in ([stream4, stream5, stream6],
                    [stream6, stream5, stream4],
                    [stream5, stream6, stream4]):
        best_map = match_stream_groups(group_a, group_b)
        self.failUnlessEqual(known_best_map, best_map)
def testSaveSource(self):
    """_saveSource records the factory class, filename and every output
    stream of a file source."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = FileSourceFactory("file1.ogg")
    source1.addOutputStream(video_stream)
    source1.addOutputStream(audio_stream)

    element = self.formatter._saveSource(source1)
    self.failUnlessEqual(element.tag, "source")
    self.failUnlessEqual(element.attrib["type"], qual(source1.__class__))
    self.failUnlessEqual(element.attrib["filename"], "file1.ogg")
    streams = element.find("output-streams")
    self.failUnlessEqual(len(streams), 2)
def testMakeStreamBin(self):
    """makeBin(stream) builds a bin whose single-decodebin is configured
    with the factory URI and the requested stream's caps."""
    # streams are usually populated by the discoverer so here we have to do
    # that ourselves
    video = VideoStream(gst.Caps('video/x-raw-rgb'), pad_name='src0')
    audio = AudioStream(gst.Caps('audio/x-raw-int'), pad_name='src1')
    self.factory.addOutputStream(video)
    self.factory.addOutputStream(audio)

    bin = self.factory.makeBin(video)
    self.failUnless(hasattr(bin, "decodebin"))
    self.failUnless(isinstance(bin.decodebin, StubSingleDecodeBin))
    self.failUnlessEqual(bin.decodebin.uri, 'file:///path/to/file')
    self.failUnlessEqual(video.caps, bin.decodebin.caps)
    self.failUnlessEqual(video, bin.decodebin.stream)
    self.factory.releaseBin(bin)
def testAudioOnly(self):
    """Play an audio-only timeline through a fakesink pipeline until EOS;
    the factory duration must cover start offset + source duration."""
    audio_factory1 = AudioTestSourceFactory(3)
    audio_factory1.duration = 10 * gst.SECOND
    stream = AudioStream(gst.Caps('audio/x-raw-int'), 'src0')
    audio_factory1.addOutputStream(stream)

    timeline = Timeline()
    track = Track(stream)
    track_object1 = SourceTrackObject(audio_factory1, stream)
    track_object1.start = 2 * gst.SECOND
    track.addTrackObject(track_object1)
    timeline.addTrack(track)

    factory = TimelineSourceFactory(timeline)
    bin = factory.makeBin()
    self.failUnlessEqual(len(list(bin)), 1)
    # 2s offset + 10s of audio
    self.failUnlessEqual(factory.duration, 12 * gst.SECOND)

    fakesink = gst.element_factory_make('fakesink')

    def bin_pad_added_cb(bin, pad):
        pad.link(fakesink.get_pad('sink'))

    bin.connect('pad-added', bin_pad_added_cb)

    def error_cb(bus, message):
        gerror, debug = message.parse_error()
        self.fail('%s: %s' % (gerror.message, debug))

    def eos_cb(bus, message):
        self.loop.quit()

    pipeline = gst.Pipeline()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message::error', error_cb)
    bus.connect('message::eos', eos_cb)
    pipeline.add(bin)
    pipeline.add(fakesink)
    pipeline.set_state(gst.STATE_PLAYING)
    self.loop.run()
    pipeline.set_state(gst.STATE_NULL)
    factory.clean()
def testLoadProject(self):
    """Serialize a full project (factories + timeline) and dump the XML.

    Builds a minimal project with one video source on one track, runs it
    through _serializeProject and checks the top-level structure, then
    writes the pretty-printed result to /tmp for manual inspection.
    """
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = VideoTestSourceFactory()
    self.formatter._saveSource(source1)
    self.formatter._saveStream(video_stream)

    track_object = SourceTrackObject(source1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_object)
    self.formatter._saveTrackObject(track_object)

    timeline_object = TimelineObject(source1)
    timeline_object.addTrackObject(track_object)
    self.formatter._saveTimelineObject(timeline_object)

    timeline = Timeline()
    timeline.addTrack(track)
    self.formatter._saveTimeline(timeline)

    project = Project()
    project.timeline = timeline
    project.sources.addFactory(source1)

    element = self.formatter._serializeProject(project)
    self.failUnlessEqual(element.tag, "pitivi")
    self.failIfEqual(element.find("factories"), None)
    self.failIfEqual(element.find("timeline"), None)

    indent(element)
    # was: f = file(...); f.write(...); f.close() -- the file() builtin is
    # gone in Python 3 and the handle leaked on a write error; use a
    # context-managed open() instead.
    with open("/tmp/untitled.pptv", "w") as f:
        f.write(tostring(element))
def testSaveFactories(self):
    """_saveFactories collects every source factory under a single
    <factories>/<sources> subtree."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    source1 = FileSourceFactory("file1.ogg")
    source1.addOutputStream(video_stream)
    source1.addOutputStream(audio_stream)
    source2 = FileSourceFactory("file2.ogg")
    source2.addOutputStream(video_stream)
    source2.addOutputStream(audio_stream)

    element = self.formatter._saveFactories([source1, source2])
    self.failUnlessEqual(element.tag, "factories")
    sources = element.find("sources")
    self.failUnlessEqual(len(sources), 2)
def testEmptyGroups(self):
    """A walker over any combination involving an empty group cannot
    advance and produces no matches."""
    walker = StreamGroupWalker([], [])
    self.failUnlessEqual(walker.advance(), [])
    self.failUnlessEqual(walker.getMatches(), {})

    stream = AudioStream(gst.Caps("audio/x-vorbis"))

    walker = StreamGroupWalker([stream], [])
    self.failUnlessEqual(walker.advance(), [])
    self.failUnlessEqual(walker.getMatches(), {})

    walker = StreamGroupWalker([], [stream])
    self.failUnlessEqual(walker.advance(), [])
    self.failUnlessEqual(walker.getMatches(), {})
def testSaveTrackEffect(self):
    """_saveTrackObject on a TrackEffect emits a <track-object> element
    with timing attributes plus an <effect> subtree."""
    video_stream = VideoStream(gst.Caps("video/x-raw-yuv"))
    audio_stream = AudioStream(gst.Caps("audio/x-raw-int"))
    effect1 = EffectFactory('identity', 'identity')
    effect1.addOutputStream(video_stream)
    effect1.addInputStream(video_stream)
    #It is necessary to had the identity factory to the
    #effect_factories_dictionnary
    self.formatter.avalaible_effects._effect_factories_dict['identity'] =\
        effect1

    track_effect = TrackEffect(effect1, video_stream,
            start=10 * gst.SECOND, duration=20 * gst.SECOND,
            in_point=5 * gst.SECOND, media_duration=15 * gst.SECOND,
            priority=10)
    track = Track(video_stream)
    track.addTrackObject(track_effect)

    element = self.formatter._saveTrackObject(track_effect)
    self.failUnlessEqual(element.tag, "track-object")
    self.failUnlessEqual(element.attrib["type"],
            qual(track_effect.__class__))
    self.failUnlessEqual(element.attrib["start"], ts(10 * gst.SECOND))
    self.failUnlessEqual(element.attrib["duration"], ts(20 * gst.SECOND))
    self.failUnlessEqual(element.attrib["in_point"], ts(5 * gst.SECOND))
    self.failUnlessEqual(element.attrib["media_duration"],
            ts(15 * gst.SECOND))
    self.failUnlessEqual(element.attrib["priority"], "(int)10")

    effect_element = element.find('effect')
    self.failIfEqual(effect_element, None)
    self.failIfEqual(effect_element.find("factory"), None)
    self.failIfEqual(effect_element.find("gst-element-properties"), None)
def testSplitObjectKeyframes(self):
    """Splitting a track object partitions its interior volume keyframes
    between the two halves."""
    DURATION = 10 * gst.SECOND
    factory = AudioTestSourceFactory()
    factory.duration = DURATION
    stream_ = AudioStream(gst.Caps("audio/x-raw-int"))
    obj = SourceTrackObject(factory, stream_)
    track = Track(stream_)
    track.addTrackObject(obj)
    obj.start = 3 * gst.SECOND
    obj.duration = DURATION

    # create a three keyframes at: 3, 6 and 9 seconds
    interpolator = obj.getInterpolator("volume")
    keyframes = dict(((t * gst.SECOND, (t % 2, gst.INTERPOLATE_LINEAR))
            for t in xrange(3, 10, 3)))
    expected = []
    expected2 = []
    for time, (value, mode) in keyframes.iteritems():
        kf = interpolator.newKeyframe(time, value, mode)
        if time < (5 * gst.SECOND):
            expected.append(kf)
        else:
            expected2.append(kf)

    def getKeyframes(obj):
        keyframes = obj.getInterpolator("volume").getInteriorKeyframes()
        return list(keyframes)

    obj2 = obj.splitObject(8 * gst.SECOND)
    self.failUnlessEqual(getKeyframes(obj), expected)
    self.failUnlessEqual(getKeyframes(obj2), expected2)
def __init__(self, wave=0):
    """Create an audiotestsrc-backed factory exposing one raw audio
    stream; `wave` selects the audiotestsrc waveform."""
    SourceFactory.__init__(self, "audiotestsrc://")
    self.wave = wave
    caps = gst.Caps('audio/x-raw-int; audio/x-raw-float')
    self.addOutputStream(AudioStream(caps))
def testSplitObject(self):
    """splitObject rejects out-of-range/degenerate split times, splits
    start/in_point/duration correctly, and keeps keyframes intact."""
    DURATION = 10 * gst.SECOND
    factory = AudioTestSourceFactory()
    factory.duration = DURATION
    stream_ = AudioStream(gst.Caps("audio/x-raw-int"))
    obj = SourceTrackObject(factory, stream_)
    track = Track(stream_)
    track.addTrackObject(obj)
    obj.start = 3 * gst.SECOND
    obj.duration = DURATION

    # create a zig-zag volume curve
    interpolator = obj.getInterpolator("volume")
    expected = dict(((t * gst.SECOND, (t % 2, gst.INTERPOLATE_LINEAR))
            for t in xrange(3, 10, 3)))
    for time, (value, mode) in expected.iteritems():
        interpolator.newKeyframe(time, value, mode)

    def getKeyframes(obj):
        keyframes = obj.getInterpolator("volume").getInteriorKeyframes()
        return dict(((kf.time, (kf.value, kf.mode)) for kf in keyframes))

    monitor = TrackSignalMonitor(obj)

    # splits outside [start, start + duration] must fail
    self.failUnlessRaises(TrackError, obj.splitObject, 2 * gst.SECOND)
    self.failUnlessRaises(TrackError, obj.splitObject, 14 * gst.SECOND)
    # should these be possible (ie create empty objects) ?
    self.failUnlessRaises(TrackError, obj.splitObject, 3 * gst.SECOND)
    self.failUnlessRaises(TrackError, obj.splitObject, 13 * gst.SECOND)

    # splitObject at 4s should result in:
    # obj (start 3, end 4) other1 (start 4, end 13)
    other1 = obj.splitObject(4 * gst.SECOND)
    self.failUnlessEqual(expected, getKeyframes(other1))

    self.failUnlessEqual(obj.start, 3 * gst.SECOND)
    self.failUnlessEqual(obj.in_point, 0 * gst.SECOND)
    self.failUnlessEqual(obj.duration, 1 * gst.SECOND)
    self.failUnlessEqual(obj.rate, 1)

    self.failUnlessEqual(other1.start, 4 * gst.SECOND)
    self.failUnlessEqual(other1.in_point, 1 * gst.SECOND)
    self.failUnlessEqual(other1.duration, 9 * gst.SECOND)
    self.failUnlessEqual(other1.rate, 1)

    self.failUnlessEqual(monitor.start_changed_count, 0)
    self.failUnlessEqual(monitor.duration_changed_count, 1)

    # move other1 back to start = 1
    other1.start = 1 * gst.SECOND
    # splitObject again other1
    monitor = TrackSignalMonitor(other1)
    other2 = other1.splitObject(6 * gst.SECOND)

    self.failUnlessEqual(other1.start, 1 * gst.SECOND)
    self.failUnlessEqual(other1.in_point, 1 * gst.SECOND)
    self.failUnlessEqual(other1.duration, 5 * gst.SECOND)
    self.failUnlessEqual(other1.rate, 1)

    self.failUnlessEqual(other2.start, 6 * gst.SECOND)
    self.failUnlessEqual(other2.in_point, 6 * gst.SECOND)
    self.failUnlessEqual(other2.duration, 4 * gst.SECOND)
    self.failUnlessEqual(other2.rate, 1)

    self.failUnlessEqual(monitor.start_changed_count, 0)
    self.failUnlessEqual(monitor.duration_changed_count, 1)