def test_clip_for_element(self):
    tree = cElementTree.parse(FCP7_XML_EXAMPLE_PATH)
    # Use clipitem-3 because it's self-contained and doesn't reference
    # other elements
    sequence_elem = tree.find(".//clipitem[@id='clipitem-3']/../../../..")
    clip_elem = tree.find(".//clipitem[@id='clipitem-3']")
    context = self.adapter._Context(sequence_elem)

    # Make a parser
    parser = self.adapter.FCP7XMLParser(tree)

    clip, time_range = parser.item_and_timing_for_element(
        clip_elem,
        head_transition=None,
        tail_transition=None,
        context=context,
    )
    self.assertEqual(clip.name, "sc01_sh020_anim.mov")

    expected_range = opentime.TimeRange(
        start_time=opentime.RationalTime(165, 30),
        duration=opentime.RationalTime(157, 30),
    )
    self.assertEqual(time_range, expected_range)

    expected_range = opentime.TimeRange(
        start_time=opentime.RationalTime(0, 30),
        duration=opentime.RationalTime(157, 30),
    )
    self.assertEqual(clip.source_range, expected_range)

def test_marker_for_element(self):
    marker_element = cElementTree.fromstring(
        """
        <marker>
            <comment>so, this happened</comment>
            <name>My MArker 1</name>
            <in>113</in>
            <out>-1</out>
        </marker>
        """
    )

    marker = self.adapter.marker_for_element(marker_element, 30)

    self.assertEqual(marker.name, "My MArker 1")
    self.assertEqual(
        marker.marked_range,
        opentime.TimeRange(
            start_time=opentime.RationalTime(113, 30),
            duration=opentime.RationalTime(0, 30),
        )
    )
    self.assertEqual(
        marker.metadata["fcp_xml"]["comment"], "so, this happened"
    )
    with self.assertRaises(KeyError):
        marker.metadata["fcp_xml"]["name"]

def test_transition_cut_point(self):
    transition_element = cElementTree.fromstring("""
        <transitionitem>
            <start>538</start>
            <end>557</end>
            <alignment>end-black</alignment>
            <cutPointTicks>160876800000</cutPointTicks>
            <rate>
                <timebase>30</timebase>
                <ntsc>FALSE</ntsc>
            </rate>
            <effect>
                <name>Cross Dissolve</name>
                <effectid>Cross Dissolve</effectid>
                <effectcategory>Dissolve</effectcategory>
                <effecttype>transition</effecttype>
                <mediatype>video</mediatype>
                <wipecode>0</wipecode>
                <wipeaccuracy>100</wipeaccuracy>
                <startratio>0</startratio>
                <endratio>1</endratio>
                <reverse>FALSE</reverse>
            </effect>
        </transitionitem>
    """)
    alignment_element = transition_element.find("./alignment")

    track_element = cElementTree.fromstring("""
        <track>
            <rate>
                <timebase>30</timebase>
                <ntsc>FALSE</ntsc>
            </rate>
        </track>
    """)
    context = self.adapter._Context(track_element)

    cut_point = self.adapter._transition_cut_point(
        transition_element, context
    )
    self.assertEqual(cut_point, opentime.RationalTime(557, 30))

    alignment_element.text = "end-black"
    cut_point = self.adapter._transition_cut_point(
        transition_element, context
    )
    self.assertEqual(cut_point, opentime.RationalTime(557, 30))

    for alignment in ("start", "start-black"):
        alignment_element.text = alignment
        cut_point = self.adapter._transition_cut_point(
            transition_element, context
        )
        self.assertEqual(cut_point, opentime.RationalTime(538, 30))

    # TODO: Mathematically, this cut point falls at 547.5, is the rounding
    # down behavior "correct"?
    alignment_element.text = "center"
    cut_point = self.adapter._transition_cut_point(
        transition_element, context
    )
    self.assertEqual(cut_point, opentime.RationalTime(547, 30))

def otio_range_with_handles(otio_range, instance):
    """Return `otio_range` expanded by the instance's head and tail handles."""
    handle_start = instance.data["handleStart"]
    handle_end = instance.data["handleEnd"]
    handles_duration = handle_start + handle_end
    fps = float(otio_range.start_time.rate)
    start = _ot.to_frames(otio_range.start_time, fps)
    duration = _ot.to_frames(otio_range.duration, fps)

    return _ot.TimeRange(
        start_time=_ot.RationalTime((start - handle_start), fps),
        duration=_ot.RationalTime((duration + handles_duration), fps)
    )

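def _example_otio_range_with_handles():
    # Illustrative sketch only (not part of the original module). It assumes
    # `_ot` is opentimelineio.opentime, as the helper above implies, and
    # fakes the pyblish instance with a SimpleNamespace that carries just the
    # `data` dict the helper reads.
    from types import SimpleNamespace

    clip_range = _ot.TimeRange(
        start_time=_ot.RationalTime(100, 25.0),
        duration=_ot.RationalTime(50, 25.0),
    )
    fake_instance = SimpleNamespace(data={"handleStart": 10, "handleEnd": 10})

    # The range grows by the handles on both sides:
    # start 90, duration 70, both at 25 fps.
    return otio_range_with_handles(clip_range, fake_instance)
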
def range_from_frames(start, duration, fps):
    """
    Returns otio time range.

    Args:
        start (int): frame start
        duration (int): frame duration
        fps (float): frame rate

    Returns:
        otio._ot._ot.TimeRange: created range

    """
    return _ot.TimeRange(
        _ot.RationalTime(start, fps),
        _ot.RationalTime(duration, fps)
    )

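def _example_range_from_frames():
    # Illustrative sketch only (not part of the original module), assuming
    # `_ot` is opentimelineio.opentime as above: a 24 fps range that starts
    # at frame 1001 and lasts 48 frames.
    r = range_from_frames(1001, 48, 24.0)
    assert r.start_time.value == 1001
    assert r.duration.value == 48
    return r
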
def test_time_from_timecode_element_implicit_ntsc(self):
    clipitem_element = cElementTree.fromstring("""
        <clipitem>
            <duration>767</duration>
            <rate>
                <ntsc>TRUE</ntsc>
                <timebase>24</timebase>
            </rate>
            <in>447</in>
            <out>477</out>
            <start>264</start>
            <end>294</end>
            <file>
                <rate>
                    <timebase>24</timebase>
                    <ntsc>TRUE</ntsc>
                </rate>
                <duration>767</duration>
                <timecode>
                    <rate>
                        <timebase>24</timebase>
                    </rate>
                    <string>14:11:44:09</string>
                    <frame>1226505</frame>
                    <displayformat>NDF</displayformat>
                    <source>source</source>
                </timecode>
            </file>
        </clipitem>
    """)
    context = self.adapter._Context(clipitem_element)
    timecode_element = clipitem_element.find("./file/timecode")

    time = self.adapter._time_from_timecode_element(
        timecode_element, context
    )

    self.assertEqual(time, opentime.RationalTime(1226505, 24000.0 / 1001))

def test_timeline_for_sequence(self):
    tree = cElementTree.parse(FCP7_XML_EXAMPLE_PATH)

    # Get the test sequence and pare out the track definitions to keep this
    # test simple.
    seq_elem = tree.find("sequence")
    seq_elem.find("./media").clear()
    seq_elem.find("./timecode/string").text = "01:00:00:00"
    seq_elem.find("./timecode/frame").text = "108000"

    parser = self.adapter.FCP7XMLParser(tree)
    context = self.adapter._Context()
    timeline = parser.timeline_for_sequence(seq_elem, context)

    # Spot-check the sequence
    self.assertEqual(timeline.name, "sc01_sh010_layerA")
    self.assertEqual(
        timeline.global_start_time, opentime.RationalTime(108000, 30)
    )

    # Spot check that metadata translated with a tag and a property
    adapter_metadata = timeline.metadata["fcp_xml"]
    self.assertEqual(adapter_metadata["labels"]["label2"], "Forest")
    self.assertEqual(
        adapter_metadata["@MZ.Sequence.VideoTimeDisplayFormat"], "104"
    )

    # make sure the media and name tags were not included in the metadata
    for k in {"name", "media"}:
        with self.assertRaises(KeyError):
            adapter_metadata[k]

def test_missing_media_reference_from_element(self):
    file_element = cElementTree.fromstring(
        """
        <file id="101_021_0030_FG01">
            <name>101_021_0030_FG01</name>
            <duration>155</duration>
            <rate>
                <ntsc>FALSE</ntsc>
                <timebase>24</timebase>
            </rate>
            <timecode>
                <rate>
                    <ntsc>FALSE</ntsc>
                    <timebase>24</timebase>
                </rate>
                <frame>1308828</frame>
                <displayformat>NDF</displayformat>
                <string>15:08:54:12</string>
                <reel>
                    <name>A173C021_181204_R207</name>
                </reel>
            </timecode>
        </file>
        """
    )
    parser = self.adapter.FCP7XMLParser(file_element)
    context = self.adapter._Context()

    ref = parser.media_reference_for_file_element(
        file_element, context=context,
    )

    self.assertTrue(isinstance(ref, schema.MissingReference))
    self.assertEqual(ref.name, "101_021_0030_FG01")
    self.assertEqual(
        ref.available_range,
        opentime.TimeRange(
            start_time=opentime.RationalTime(1308828, 24),
            duration=opentime.RationalTime(155, 24),
        )
    )

    # Spot-check a metadata field
    reelname = ref.metadata["fcp_xml"]["timecode"]["reel"]["name"]
    self.assertEqual(reelname, "A173C021_181204_R207")

def trim_media_range(media_range, source_range):
    """
    Trim input media range with clip source range.

    Args:
        media_range (otio._ot._ot.TimeRange): available range of media
        source_range (otio._ot._ot.TimeRange): clip required range

    Returns:
        otio._ot._ot.TimeRange: trimmed media range

    """
    rw_media_start = _ot.RationalTime(
        media_range.start_time.value + source_range.start_time.value,
        media_range.start_time.rate
    )
    rw_media_duration = _ot.RationalTime(
        source_range.duration.value,
        media_range.duration.rate
    )
    return _ot.TimeRange(rw_media_start, rw_media_duration)

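def _example_trim_media_range():
    # Illustrative sketch only (not part of the original module), assuming
    # `_ot` is opentimelineio.opentime: media available from frame 0 for
    # 100 frames, and a clip that needs 20 frames starting 10 frames in.
    media_range = _ot.TimeRange(
        _ot.RationalTime(0, 25.0), _ot.RationalTime(100, 25.0))
    source_range = _ot.TimeRange(
        _ot.RationalTime(10, 25.0), _ot.RationalTime(20, 25.0))

    # The trimmed range starts at frame 10 and keeps the 20 frame duration.
    return trim_media_range(media_range, source_range)
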
def test_build_empty_file(self):
    media_ref = schema.MissingReference(
        name="test_clip_name",
        available_range=opentime.TimeRange(
            opentime.RationalTime(820489, 24),
            opentime.RationalTime(2087, 24),
        ),
        metadata={
            "fcp_xml": {
                "timecode": {
                    "rate": {
                        "ntsc": "FALSE",
                        "timebase": "24"
                    },
                    "displayformat": "NDF",
                    "reel": {
                        "name": "test_reel_name",
                    },
                }
            }
        },
    )
    file_element = self.adapter._build_empty_file(
        media_ref,
        media_ref.available_range.start_time,
        br_map={},
    )

    self.assertEqual(file_element.find("./name").text, "test_clip_name")
    self.assertEqual(file_element.find("./duration").text, "2087")

    rate_element = file_element.find("./rate")
    self.assertEqual(rate_element.find("./ntsc").text, "FALSE")
    self.assertEqual(rate_element.find("./timebase").text, "24")

    tc_element = file_element.find("./timecode")
    self.assertEqual(tc_element.find("./rate/ntsc").text, "FALSE")
    self.assertEqual(tc_element.find("./rate/timebase").text, "24")
    self.assertEqual(tc_element.find("./string").text, "09:29:47:01")
    self.assertEqual(tc_element.find("./reel/name").text, "test_reel_name")

def test_time_from_timecode_element_ntsc_non_drop_frame(self):
    tc_element = cElementTree.fromstring("""
        <timecode>
            <rate>
                <timebase>30</timebase>
                <ntsc>TRUE</ntsc>
            </rate>
            <string>00:59:56:12</string>
            <displayformat>NDF</displayformat>
        </timecode>
    """)

    time = self.adapter._time_from_timecode_element(tc_element)

    self.assertEqual(time, opentime.RationalTime(107892, (30000 / 1001.0)))

def test_time_from_timecode_element(self):
    tc_element = cElementTree.fromstring("""
        <timecode>
            <rate>
                <timebase>30</timebase>
                <ntsc>FALSE</ntsc>
            </rate>
            <string>01:00:00:00</string>
            <frame>108000</frame>
            <displayformat>NDF</displayformat>
        </timecode>
    """)

    time = self.adapter._time_from_timecode_element(tc_element)

    self.assertEqual(time, opentime.RationalTime(108000, 30))

def test_hiero_flavored_xml(self):
    timeline = adapters.read_from_file(HIERO_XML_PATH)
    self.assertEqual(len(timeline.tracks), 1)
    self.assertEqual(timeline.tracks[0].name, 'Video 1')

    clips = [c for c in timeline.tracks[0].each_clip()]
    self.assertEqual(len(clips), 2)
    self.assertEqual(clips[0].name, 'A160C005_171213_R0MN')
    self.assertEqual(clips[1].name, '/')

    self.assertTrue(
        isinstance(
            clips[0].media_reference,
            schema.ExternalReference
        )
    )
    self.assertTrue(
        isinstance(
            clips[1].media_reference,
            schema.MissingReference
        )
    )

    source_range = opentime.TimeRange(
        start_time=opentime.RationalTime(1101071, 24),
        duration=opentime.RationalTime(1055, 24)
    )
    self.assertEqual(clips[0].source_range, source_range)

    available_range = opentime.TimeRange(
        start_time=opentime.RationalTime(1101071, 24),
        duration=opentime.RationalTime(1055, 24)
    )
    self.assertEqual(clips[0].available_range(), available_range)

    clip_1_range = clips[1].available_range()
    self.assertEqual(
        clip_1_range,
        opentime.TimeRange(
            opentime.RationalTime(),
            opentime.RationalTime(1, 24),
        )
    )

    # Test serialization
    tmp_path = tempfile.mkstemp(suffix=".xml", text=True)[1]
    adapters.write_to_file(timeline, tmp_path)

    # Similar to the test_roundtrip_disk2mem2disk above
    # the track name element among others will not be present in a new xml.
    with open(HIERO_XML_PATH, "r") as original_file:
        with open(tmp_path, "r") as output_file:
            self.assertNotEqual(original_file.read(), output_file.read())

def test_time_from_timecode_element_drop_frame(self):
    tc_element = cElementTree.fromstring("""
        <timecode>
            <rate>
                <timebase>30</timebase>
                <ntsc>TRUE</ntsc>
            </rate>
            <string>10:03:00;05</string>
            <frame>1084319</frame>
            <displayformat>DF</displayformat>
        </timecode>
    """)

    time = self.adapter._time_from_timecode_element(tc_element)

    self.assertEqual(time, opentime.RationalTime(1084319, (30000 / 1001.0)))

def test_read(self):
    timeline = adapters.read_from_file(FCP7_XML_EXAMPLE_PATH)

    self.assertTrue(timeline is not None)
    self.assertEqual(len(timeline.tracks), 8)

    video_tracks = [
        t for t in timeline.tracks
        if t.kind == schema.TrackKind.Video
    ]
    audio_tracks = [
        t for t in timeline.tracks
        if t.kind == schema.TrackKind.Audio
    ]

    self.assertEqual(len(video_tracks), 4)
    self.assertEqual(len(audio_tracks), 4)

    video_clip_names = (
        ("", 'sc01_sh010_anim.mov'),
        (
            "",
            'sc01_sh010_anim.mov',
            "",
            'sc01_sh020_anim.mov',
            'sc01_sh030_anim.mov',
            'Cross Dissolve',
            "",
            'sc01_sh010_anim'
        ),
        ("", 'test_title'),
        (
            "",
            'sc01_master_layerA_sh030_temp.mov',
            'Cross Dissolve',
            'sc01_sh010_anim.mov'
        ),
    )

    for n, track in enumerate(video_tracks):
        self.assertTupleEqual(
            tuple(c.name for c in track), video_clip_names[n]
        )

    audio_clip_names = (
        ("", 'sc01_sh010_anim.mov', "", 'sc01_sh010_anim.mov'),
        ("", 'sc01_placeholder.wav', "", 'sc01_sh010_anim'),
        ("", 'track_08.wav'),
        ("", 'sc01_master_layerA_sh030_temp.mov', 'sc01_sh010_anim.mov'),
    )

    for n, track in enumerate(audio_tracks):
        self.assertTupleEqual(
            tuple(c.name for c in track), audio_clip_names[n]
        )

    video_clip_durations = (
        ((536, 30.0), (100, 30.0)),
        (
            (13, 30.0),
            (100, 30.0),
            (52, 30.0),
            (157, 30.0),
            (235, 30.0),
            ((19, 30.0), (0, 30.0)),
            (79, 30.0),
            (320, 30.0)
        ),
        ((15, 30.0), (941, 30.0)),
        (
            (956, 30.0),
            (208, 30.0),
            ((12, 30.0), (13, 30.0)),
            (82, 30.0)
        ),
    )

    for t, track in enumerate(video_tracks):
        for c, clip in enumerate(track):
            if isinstance(clip, schema.Transition):
                self.assertEqual(
                    clip.in_offset,
                    opentime.RationalTime(*video_clip_durations[t][c][0])
                )
                self.assertEqual(
                    clip.out_offset,
                    opentime.RationalTime(*video_clip_durations[t][c][1])
                )
            else:
                self.assertEqual(
                    clip.source_range.duration,
                    opentime.RationalTime(*video_clip_durations[t][c])
                )

    audio_clip_durations = (
        ((13, 30.0), (100, 30.0), (423, 30.0), (100, 30.0), (423, 30.0)),
        (
            (335, 30.0),
            (170, 30.0),
            (131, 30.0),
            (294, 30.0),
            (34, 30.0),
            (124, 30.0)
        ),
        ((153, 30.0), (198, 30.0)),
        ((956, 30.0), (221, 30.0), (94, 30.0)),
    )

    for t, track in enumerate(audio_tracks):
        for c, clip in enumerate(track):
            self.assertEqual(
                clip.source_range.duration,
                opentime.RationalTime(*audio_clip_durations[t][c])
            )

    timeline_marker_names = ('My MArker 1', 'dsf', "")

    for n, marker in enumerate(timeline.tracks.markers):
        self.assertEqual(marker.name, timeline_marker_names[n])

    timeline_marker_start_times = ((113, 30.0), (492, 30.0), (298, 30.0))

    for n, marker in enumerate(timeline.tracks.markers):
        self.assertEqual(
            marker.marked_range.start_time,
            opentime.RationalTime(*timeline_marker_start_times[n])
        )

    timeline_marker_comments = ('so, this happened', 'fsfsfs', None)

    for n, marker in enumerate(timeline.tracks.markers):
        self.assertEqual(
            marker.metadata.get('fcp_xml', {}).get('comment'),
            timeline_marker_comments[n]
        )

    clip_with_marker = video_tracks[1][4]
    clip_marker = clip_with_marker.markers[0]
    self.assertEqual(clip_marker.name, "")
    self.assertEqual(
        clip_marker.marked_range.start_time,
        opentime.RationalTime(73, 30.0)
    )
    self.assertEqual(
        clip_marker.metadata.get('fcp_xml', {}).get('comment'), None
    )

def create_compound_clip(clip_data, name, folder):
    """
    Convert timeline object into nested timeline object

    Args:
        clip_data (dict): timeline item object packed into dict
                          with project, timeline (sequence)
        folder (resolve.MediaPool.Folder): media pool folder object,
        name (str): name for compound clip

    Returns:
        resolve.MediaPoolItem: media pool item with compound clip timeline (cct)
    """
    # get basic objects from data
    project = clip_data["project"]
    timeline = clip_data["timeline"]
    clip = clip_data["clip"]

    # get details of objects
    clip_item = clip["item"]

    mp = project.GetMediaPool()

    # get clip attributes
    clip_attributes = get_clip_attributes(clip_item)

    mp_item = clip_item.GetMediaPoolItem()
    mp_props = mp_item.GetClipProperty()

    mp_first_frame = int(mp_props["Start"])
    mp_last_frame = int(mp_props["End"])

    # initialize basic source timing for otio
    ci_l_offset = clip_item.GetLeftOffset()
    ci_duration = clip_item.GetDuration()
    rate = float(mp_props["FPS"])

    # source rational times
    mp_in_rc = opentime.RationalTime((ci_l_offset), rate)
    mp_out_rc = opentime.RationalTime((ci_l_offset + ci_duration - 1), rate)

    # get frame in and out for clip swapping
    in_frame = opentime.to_frames(mp_in_rc)
    out_frame = opentime.to_frames(mp_out_rc)

    # keep original sequence
    tl_origin = timeline

    # Set current folder to input media_pool_folder:
    mp.SetCurrentFolder(folder)

    # check if clip doesn't exist already:
    clips = folder.GetClipList()
    cct = next((c for c in clips if c.GetName() in name), None)

    if cct:
        print(f"_ cct exists: {cct}")
    else:
        # Create empty timeline in current folder and give name:
        cct = mp.CreateEmptyTimeline(name)

        # check if clip doesn't exist already:
        clips = folder.GetClipList()
        cct = next((c for c in clips if c.GetName() in name), None)
        print(f"_ cct created: {cct}")

        with maintain_current_timeline(cct, tl_origin):
            # Add input clip to the current timeline:
            mp.AppendToTimeline([{
                "mediaPoolItem": mp_item,
                "startFrame": mp_first_frame,
                "endFrame": mp_last_frame
            }])

    # Add collected metadata and attributes to the compound clip:
    if mp_item.GetMetadata(self.pype_tag_name):
        clip_attributes[self.pype_tag_name] = mp_item.GetMetadata(
            self.pype_tag_name)[self.pype_tag_name]

    # stringify
    clip_attributes = json.dumps(clip_attributes)

    # add attributes to metadata
    for k, v in mp_item.GetMetadata().items():
        cct.SetMetadata(k, v)

    # add metadata to cct
    cct.SetMetadata(self.pype_tag_name, clip_attributes)

    # reset start timecode of the compound clip
    cct.SetClipProperty("Start TC", mp_props["Start TC"])

    # swap clips on timeline
    swap_clips(clip_item, cct, in_frame, out_frame)

    cct.SetClipColor("Pink")
    return cct

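# Hypothetical call sketch for create_compound_clip (illustrative only, since
# it needs a live DaVinci Resolve scripting session; `project`, `timeline` and
# `timeline_item` below stand in for real Resolve API objects):
#
#     clip_data = {
#         "project": project,               # resolve Project
#         "timeline": timeline,             # resolve Timeline (sequence)
#         "clip": {"item": timeline_item},  # resolve TimelineItem
#     }
#     folder = project.GetMediaPool().GetRootFolder()
#     compound = create_compound_clip(clip_data, "sh010_compound", folder)
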
def test_roundtrip_mem2disk2mem(self):
    timeline = schema.Timeline('test_timeline')

    RATE = 48.0

    video_reference = schema.ExternalReference(
        target_url="/var/tmp/test1.mov",
        available_range=opentime.TimeRange(
            opentime.RationalTime(value=100, rate=RATE),
            opentime.RationalTime(value=1000, rate=RATE)
        )
    )
    video_reference.name = "test_vid_one"
    audio_reference = schema.ExternalReference(
        target_url="/var/tmp/test1.wav",
        available_range=opentime.TimeRange(
            opentime.RationalTime(value=0, rate=RATE),
            opentime.RationalTime(value=1000, rate=RATE)
        ),
    )
    audio_reference.name = "test_wav_one"
    generator_reference = schema.GeneratorReference(
        name="Color",
        metadata={
            "fcp_xml": {
                "effectid": "Color",
                "effectcategory": "Matte",
                "effecttype": "generator",
                "mediatype": "video",
                "parameter": {
                    "@authoringApp": "PremierePro",
                    "parameterid": "fillcolor",
                    "name": "Color",
                    "value": {
                        "alpha": "0",
                        "red": "255",
                        "green": "255",
                        "blue": "255",
                    },
                },
            },
        },
    )

    v0 = schema.Track(kind=schema.track.TrackKind.Video)
    v1 = schema.Track(kind=schema.track.TrackKind.Video)

    timeline.tracks.extend([v0, v1])

    a0 = schema.Track(kind=schema.track.TrackKind.Audio)
    timeline.tracks.append(a0)

    v0.extend([
        schema.Clip(
            name='test_clip1',
            media_reference=video_reference,
            source_range=opentime.TimeRange(
                opentime.RationalTime(value=112, rate=RATE),
                opentime.RationalTime(value=40, rate=RATE)
            )
        ),
        schema.Gap(
            source_range=opentime.TimeRange(
                duration=opentime.RationalTime(value=60, rate=RATE)
            )
        ),
        schema.Clip(
            name='test_clip2',
            media_reference=video_reference,
            source_range=opentime.TimeRange(
                opentime.RationalTime(value=123, rate=RATE),
                opentime.RationalTime(value=260, rate=RATE)
            )
        ),
        schema.Clip(
            name='test_generator_clip',
            media_reference=generator_reference,
            source_range=opentime.TimeRange(
                opentime.RationalTime(value=292, rate=24.0),
                opentime.RationalTime(value=183, rate=24.0)
            )
        ),
    ])

    v1.extend([
        schema.Gap(
            source_range=opentime.TimeRange(
                duration=opentime.RationalTime(value=500, rate=RATE)
            )
        ),
        schema.Clip(
            name='test_clip3',
            media_reference=video_reference,
            source_range=opentime.TimeRange(
                opentime.RationalTime(value=112, rate=RATE),
                opentime.RationalTime(value=55, rate=RATE)
            )
        )
    ])

    a0.extend([
        schema.Gap(
            source_range=opentime.TimeRange(
                duration=opentime.RationalTime(value=10, rate=RATE)
            )
        ),
        schema.Clip(
            name='test_clip4',
            media_reference=audio_reference,
            source_range=opentime.TimeRange(
                opentime.RationalTime(value=152, rate=RATE),
                opentime.RationalTime(value=248, rate=RATE)
            ),
        )
    ])

    timeline.tracks.markers.append(
        schema.Marker(
            name='test_timeline_marker',
            marked_range=opentime.TimeRange(
                opentime.RationalTime(123, RATE)
            ),
            metadata={'fcp_xml': {'comment': 'my_comment'}}
        )
    )

    v1[1].markers.append(
        schema.Marker(
            name='test_clip_marker',
            marked_range=opentime.TimeRange(
                opentime.RationalTime(125, RATE)
            ),
            metadata={'fcp_xml': {'comment': 'my_comment'}}
        )
    )

    # make sure that global_start_time.rate survives the round trip
    timeline.global_start_time = opentime.RationalTime(100, RATE)

    result = adapters.write_to_string(timeline, adapter_name='fcp_xml')
    new_timeline = adapters.read_from_string(result, adapter_name='fcp_xml')

    # Since FCP XML's "sequence" is a marriage of the timeline and the
    # main tracks stack, the tracks stack loses its name
    new_timeline.tracks.name = timeline.tracks.name

    self.assertEqual(new_timeline.name, 'test_timeline')

    # Before comparing, scrub ignorable metadata introduced in
    # serialization (things like unique ids minted by the adapter)
    # Since we seeded metadata for the generator, keep that metadata
    del new_timeline.metadata["fcp_xml"]
    for child in new_timeline.tracks.each_child():
        try:
            del child.metadata["fcp_xml"]
        except KeyError:
            pass

        try:
            is_generator = isinstance(
                child.media_reference, schema.GeneratorReference
            )
            if not is_generator:
                del child.media_reference.metadata["fcp_xml"]
        except (AttributeError, KeyError):
            pass

    self.assertJsonEqual(new_timeline, timeline)

def test_generator_for_element(self):
    generator_element = cElementTree.fromstring("""
        <generatoritem id="clipitem-29">
            <name>White</name>
            <enabled>TRUE</enabled>
            <duration>1035764</duration>
            <start>383</start>
            <end>432</end>
            <in>86313</in>
            <out>86362</out>
            <rate>
                <timebase>24</timebase>
                <ntsc>TRUE</ntsc>
            </rate>
            <effect>
                <name>Color</name>
                <effectid>Color</effectid>
                <effectcategory>Matte</effectcategory>
                <effecttype>generator</effecttype>
                <mediatype>video</mediatype>
                <parameter authoringApp="PremierePro">
                    <parameterid>fillcolor</parameterid>
                    <name>Color</name>
                    <value>
                        <alpha>0</alpha>
                        <red>255</red>
                        <green>255</green>
                        <blue>255</blue>
                    </value>
                </parameter>
            </effect>
        </generatoritem>
    """)
    parent_context_element = cElementTree.fromstring("""
        <track>
            <rate>
                <timebase>24</timebase>
                <ntsc>TRUE</ntsc>
            </rate>
        </track>
    """)
    context = self.adapter._Context(parent_context_element)

    # Make a parser
    parser = self.adapter.FCP7XMLParser(generator_element)

    clip, time_range = parser.item_and_timing_for_element(
        generator_element,
        head_transition=None,
        tail_transition=None,
        context=context,
    )
    self.assertEqual(clip.name, "White")

    expected_range = opentime.TimeRange(
        start_time=opentime.RationalTime(383, (24000 / 1001.0)),
        duration=opentime.RationalTime(49, (24000 / 1001.0)),
    )
    self.assertEqual(time_range, expected_range)

    expected_source_range = opentime.TimeRange(
        start_time=opentime.RationalTime(86313, (24000 / 1001.0)),
        duration=opentime.RationalTime(49, (24000 / 1001.0)),
    )
    self.assertEqual(clip.source_range, expected_source_range)

    ref = clip.media_reference
    self.assertTrue(isinstance(ref, schema.GeneratorReference))
    self.assertEqual(ref.name, "Color")
    self.assertEqual(
        ref.metadata["fcp_xml"]["parameter"]["value"]["red"], "255"
    )

def test_media_reference_from_element(self):
    file_element = cElementTree.fromstring("""
        <file id="file-3">
            <name>sc01_sh030_anim.mov</name>
            <pathurl>file:///Scratch/media/sc01_sh030_anim.2.mov</pathurl>
            <rate>
                <timebase>30</timebase>
                <ntsc>FALSE</ntsc>
            </rate>
            <duration>400</duration>
            <timecode>
                <rate>
                    <timebase>30</timebase>
                    <ntsc>FALSE</ntsc>
                </rate>
                <string>01:00:00:00</string>
                <frame>108000</frame>
                <displayformat>NDF</displayformat>
                <reel>
                    <name/>
                </reel>
            </timecode>
            <media>
                <video>
                    <samplecharacteristics>
                        <rate>
                            <timebase>30</timebase>
                            <ntsc>FALSE</ntsc>
                        </rate>
                        <width>1280</width>
                        <height>720</height>
                        <anamorphic>FALSE</anamorphic>
                        <pixelaspectratio>square</pixelaspectratio>
                        <fielddominance>none</fielddominance>
                    </samplecharacteristics>
                </video>
                <audio>
                    <samplecharacteristics>
                        <depth>16</depth>
                        <samplerate>48000</samplerate>
                    </samplecharacteristics>
                    <channelcount>2</channelcount>
                </audio>
            </media>
        </file>
    """)
    parser = self.adapter.FCP7XMLParser(file_element)
    context = self.adapter._Context()

    ref = parser.media_reference_for_file_element(
        file_element, context=context,
    )

    self.assertEqual(
        ref.target_url, "file:///Scratch/media/sc01_sh030_anim.2.mov"
    )
    self.assertEqual(ref.name, "sc01_sh030_anim.mov")
    self.assertEqual(
        ref.available_range,
        opentime.TimeRange(
            start_time=opentime.RationalTime(108000, 30),
            duration=opentime.RationalTime(400, 30),
        )
    )

    # Spot-check a metadata field
    video_metadata = ref.metadata["fcp_xml"]["media"]["video"]
    self.assertEqual(
        video_metadata["samplecharacteristics"]["height"], "720"
    )

def process(self, instance):
    data = dict()
    self.log.debug("__ instance.data: {}".format(instance.data))

    # Timeline data.
    handle_start = instance.data["handleStart"]
    handle_end = instance.data["handleEnd"]

    source_in_h = instance.data.get(
        "sourceInH", instance.data.get("sourceIn") - handle_start)
    source_out_h = instance.data.get(
        "sourceOutH", instance.data.get("sourceOut") + handle_end)

    timeline_in = instance.data["clipIn"]
    timeline_out = instance.data["clipOut"]

    # set frame start with tag or take it from timeline
    frame_start = instance.data.get("startingFrame")

    if not frame_start:
        frame_start = timeline_in

    source = instance.data.get("source")

    otio_data = dict()
    self.log.debug("__ source: `{}`".format(source))
    rate_fps = instance.context.data["fps"]

    otio_in_h_ratio = otio_ot.RationalTime(
        value=(
            source.timecodeStart()
            + (source_in_h + (source_out_h - source_in_h))
        ),
        rate=rate_fps)
    otio_out_h_ratio = otio_ot.RationalTime(
        value=(source.timecodeStart() + source_in_h),
        rate=rate_fps)

    otio_timeline_in_ratio = otio_ot.RationalTime(
        value=int(
            instance.data.get("timelineTimecodeStart", 0)) + timeline_in,
        rate=rate_fps)
    otio_timeline_out_ratio = otio_ot.RationalTime(
        value=int(
            instance.data.get("timelineTimecodeStart", 0)) + timeline_out,
        rate=rate_fps)

    otio_data.update({
        "otioClipInHTimecode": otio_ot.to_timecode(otio_in_h_ratio),
        "otioClipOutHTimecode": otio_ot.to_timecode(otio_out_h_ratio),
        "otioClipInHSec": otio_ot.to_seconds(otio_in_h_ratio),
        "otioClipOutHSec": otio_ot.to_seconds(otio_out_h_ratio),
        "otioTimelineInTimecode": otio_ot.to_timecode(
            otio_timeline_in_ratio),
        "otioTimelineOutTimecode": otio_ot.to_timecode(
            otio_timeline_out_ratio),
        "otioTimelineInSec": otio_ot.to_seconds(otio_timeline_in_ratio),
        "otioTimelineOutSec": otio_ot.to_seconds(otio_timeline_out_ratio)
    })

    data.update({
        "otioData": otio_data,
        "sourceTimecodeIn": otio_ot.to_timecode(otio_in_h_ratio),
        "sourceTimecodeOut": otio_ot.to_timecode(otio_out_h_ratio)
    })
    instance.data.update(data)
    self.log.debug("data: {}".format(instance.data))

def main(srcpath, outfile):
    ''' Find all the files in a path and extract the julian date
    of any pictures found
    '''
    srcpath = '/'.join(srcpath.split('\\'))

    candidate_files = []
    print "Scanning directory: ", srcpath
    for (dirpath, dirnames, filenames) in walk(srcpath):
        for file in filenames:
            fullpath = os.path.join(dirpath, file)
            candidate_files.append('/'.join(fullpath.split('\\')))
        break  # only ingest the first directory found. TODO: make a track per directory

    refs = []
    print "Reading EXIF data"
    file_count = 0
    max_files = 1000
    for path in candidate_files:
        fh = open(path, "rb")
        tags = exifread.process_file(fh, details=False)
        fh.close()
        if 'EXIF DateTimeOriginal' in tags:
            rpath = os.path.relpath(path, srcpath)
            ref = ImageRef(path.split('/')[-1], rpath)
            datestr = tags['EXIF DateTimeOriginal'].values
            datestrs = datestr.split(' ')
            cal = datestrs[0].split(':')
            hr = datestrs[1].split(':')
            dto = datetime.datetime(
                int(cal[0]), int(cal[1]), int(cal[2]),
                int(hr[0]), int(hr[1]), int(hr[2]))
            julian = jdutil.datetime_to_jd(dto)
            ref.time_stamp = julian
            if 'EXIF ExposureTime' in tags:
                et = tags['EXIF ExposureTime'].values[0]
                ref.exposure_time = float(et.num) / float(et.den)
            else:
                ref.exposure_time = 1.0 / 100.0  # arbitrary
            refs.append(ref)

        file_count += 1
        if file_count > max_files:
            break

    refs.sort()
    epoch = refs[0].time_stamp

    for ref in refs:
        print (ref.time_stamp - epoch) * 24 * 3600, ref.path

    timeline = otio.schema.Timeline()
    timeline.name = "Photos"  # TODO pass the time line name
    track = otio.schema.Sequence()
    track.name = "Photo track"  # TODO make a track per day
    track.metadata = {"epoch": epoch}
    timeline.tracks.append(track)

    for i, ref in enumerate(refs):
        next_i = min(i + 1, len(refs) - 1)
        ts = (ref.time_stamp - epoch) * 24.0 * 3600.0  # seconds
        ts_next = (refs[next_i].time_stamp - epoch) * 24.0 * 3600.0
        duration = ts_next - ts

        # exposure time is already in seconds
        image_time = opentime.TimeRange(
            opentime.RationalTime(ts, 1),
            opentime.RationalTime(ref.exposure_time, 1.0))

        media_reference = otio.media_reference.External(
            target_url="file://" + ref.path,
            available_range=image_time)
        media_reference.name = ref.name

        clip = otio.schema.Clip(name=ref.name)
        clip.media_reference = media_reference
        clip.source_range = opentime.TimeRange(
            opentime.RationalTime(0, 1.0),
            opentime.RationalTime(duration, 1.0))

        track.append(clip)

    otio.adapters.write_to_file(timeline, outfile)

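# Hypothetical invocation sketch (the paths below are illustrative only):
# point the script at a folder of EXIF-tagged photos and it writes an OTIO
# timeline with one clip per photo, spaced by capture time.
#
#     main("photo_folder", "photos.otio")
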