    def test_clip_for_element(self):
        tree = cElementTree.parse(FCP7_XML_EXAMPLE_PATH)

        # Use clipitem-3 because it's self-contained and doesn't reference
        # other elements
        sequence_elem = tree.find(".//clipitem[@id='clipitem-3']/../../../..")
        clip_elem = tree.find(".//clipitem[@id='clipitem-3']")
        context = self.adapter._Context(sequence_elem)

        # Make a parser
        parser = self.adapter.FCP7XMLParser(tree)

        clip, time_range = parser.item_and_timing_for_element(
            clip_elem,
            head_transition=None,
            tail_transition=None,
            context=context,
        )

        self.assertEqual(clip.name, "sc01_sh020_anim.mov")

        expected_range = opentime.TimeRange(
            start_time=opentime.RationalTime(165, 30),
            duration=opentime.RationalTime(157, 30),
        )
        self.assertEqual(time_range, expected_range)

        expected_range = opentime.TimeRange(
            start_time=opentime.RationalTime(0, 30),
            duration=opentime.RationalTime(157, 30),
        )
        self.assertEqual(clip.source_range, expected_range)

    def test_hiero_flavored_xml(self):
        timeline = adapters.read_from_file(HIERO_XML_PATH)
        self.assertEqual(len(timeline.tracks), 1)
        self.assertTrue(timeline.tracks[0].name == 'Video 1')

        clips = [c for c in timeline.tracks[0].each_clip()]
        self.assertEqual(len(clips), 2)

        self.assertTrue(clips[0].name == 'A160C005_171213_R0MN')
        self.assertTrue(clips[1].name == '/')

        self.assertTrue(
            isinstance(
                clips[0].media_reference,
                schema.ExternalReference
            )
        )

        self.assertTrue(
            isinstance(
                clips[1].media_reference,
                schema.MissingReference
            )
        )

        source_range = opentime.TimeRange(
            start_time=opentime.RationalTime(1101071, 24),
            duration=opentime.RationalTime(1055, 24)
        )
        self.assertTrue(clips[0].source_range == source_range)

        available_range = opentime.TimeRange(
            start_time=opentime.RationalTime(1101071, 24),
            duration=opentime.RationalTime(1055, 24)
        )
        self.assertTrue(clips[0].available_range() == available_range)

        clip_1_range = clips[1].available_range()
        self.assertEqual(
            clip_1_range,
            opentime.TimeRange(
                opentime.RationalTime(),
                opentime.RationalTime(1, 24),
            )
        )

        # Test serialization
        tmp_path = tempfile.mkstemp(suffix=".xml", text=True)[1]
        adapters.write_to_file(timeline, tmp_path)

        # Similar to test_roundtrip_disk2mem2disk above, the track name
        # element (among others) will not be present in a newly written xml.
        with open(HIERO_XML_PATH, "r") as original_file:
            with open(tmp_path, "r") as output_file:
                self.assertNotEqual(original_file.read(), output_file.read())

    def test_marker_for_element(self):
        marker_element = cElementTree.fromstring(
            """
            <marker>
              <comment>so, this happened</comment>
              <name>My MArker 1</name>
              <in>113</in>
              <out>-1</out>
            </marker>
            """
        )

        marker = self.adapter.marker_for_element(marker_element, 30)

        self.assertEqual(marker.name, "My MArker 1")
        self.assertEqual(
            marker.marked_range,
            opentime.TimeRange(
                start_time=opentime.RationalTime(113, 30),
                duration=opentime.RationalTime(0, 30),
            )
        )
        self.assertEqual(
            marker.metadata["fcp_xml"]["comment"], "so, this happened"
        )
        with self.assertRaises(KeyError):
            marker.metadata["fcp_xml"]["name"]

# NOTE: ``_ot`` is assumed to be OpenTimelineIO's opentime module, imported
# elsewhere in the original source, e.g.
# ``from opentimelineio import opentime as _ot``.
def otio_range_with_handles(otio_range, instance):
    """Expand an otio TimeRange by the handle frames stored on ``instance``.

    ``instance.data`` is expected to provide integer "handleStart" and
    "handleEnd" frame counts.
    """
    handle_start = instance.data["handleStart"]
    handle_end = instance.data["handleEnd"]
    handles_duration = handle_start + handle_end
    fps = float(otio_range.start_time.rate)
    start = _ot.to_frames(otio_range.start_time, fps)
    duration = _ot.to_frames(otio_range.duration, fps)

    return _ot.TimeRange(start_time=_ot.RationalTime((start - handle_start),
                                                     fps),
                         duration=_ot.RationalTime(
                             (duration + handles_duration), fps))
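

# A minimal usage sketch of otio_range_with_handles; "_FakeInstance" and the
# frame values below are illustrative only (not part of the original code),
# and the module-level ``_ot`` alias for opentimelineio.opentime is assumed.
def _example_otio_range_with_handles():
    from opentimelineio import opentime

    class _FakeInstance(object):
        # Stand-in for whatever object the caller passes as ``instance``;
        # it only needs a ``data`` mapping with handle frame counts.
        data = {"handleStart": 10, "handleEnd": 10}

    source = opentime.TimeRange(
        start_time=opentime.RationalTime(100, 24.0),
        duration=opentime.RationalTime(48, 24.0),
    )
    padded = otio_range_with_handles(source, _FakeInstance())
    # The head handle shifts the start back (100 -> 90) and both handles
    # extend the duration (48 -> 68), all at 24 fps.
    assert padded.start_time.value == 90
    assert padded.duration.value == 68
    return padded
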
def range_from_frames(start, duration, fps):
    """
    Returns otio time range.

    Args:
        start (int): frame start
        duration (int): frame duration
        fps (float): frame rate

    Returns:
        otio._ot._ot.TimeRange: created range

    """
    return _ot.TimeRange(_ot.RationalTime(start, fps),
                         _ot.RationalTime(duration, fps))
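

# A hedged usage sketch for range_from_frames (values are illustrative only):
# a 48-frame range starting at frame 1001 at 24 fps.
def _example_range_from_frames():
    clip_range = range_from_frames(1001, 48, 24.0)
    assert clip_range.start_time.value == 1001
    assert clip_range.duration.value == 48
    assert clip_range.start_time.rate == 24.0
    return clip_range
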
    def test_missing_media_reference_from_element(self):
        file_element = cElementTree.fromstring(
            """
            <file id="101_021_0030_FG01">
              <name>101_021_0030_FG01</name>
              <duration>155</duration>
              <rate>
                <ntsc>FALSE</ntsc>
                <timebase>24</timebase>
              </rate>
              <timecode>
                <rate>
                  <ntsc>FALSE</ntsc>
                  <timebase>24</timebase>
                </rate>
                <frame>1308828</frame>
                <displayformat>NDF</displayformat>
                <string>15:08:54:12</string>
                <reel>
                  <name>A173C021_181204_R207</name>
                </reel>
              </timecode>
            </file>
            """
        )

        parser = self.adapter.FCP7XMLParser(file_element)
        context = self.adapter._Context()
        ref = parser.media_reference_for_file_element(
            file_element,
            context=context,
        )

        self.assertTrue(isinstance(ref, schema.MissingReference))
        self.assertEqual(ref.name, "101_021_0030_FG01")
        self.assertEqual(
            ref.available_range,
            opentime.TimeRange(
                start_time=opentime.RationalTime(1308828, 24),
                duration=opentime.RationalTime(155, 24),
            )
        )

        # Spot-check a metadata field
        reelname = ref.metadata["fcp_xml"]["timecode"]["reel"]["name"]
        self.assertEqual(reelname, "A173C021_181204_R207")


def trim_media_range(media_range, source_range):
    """
    Trim input media range with clip source range.

    Args:
        media_range (otio._ot._ot.TimeRange): available range of media
        source_range (otio._ot._ot.TimeRange): clip required range

    Returns:
        otio._ot._ot.TimeRange: trimmed media range

    """
    rw_media_start = _ot.RationalTime(
        media_range.start_time.value + source_range.start_time.value,
        media_range.start_time.rate)
    rw_media_duration = _ot.RationalTime(source_range.duration.value,
                                         media_range.duration.rate)
    return _ot.TimeRange(rw_media_start, rw_media_duration)
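

# A hedged usage sketch for trim_media_range (values are illustrative only):
# a clip that wants 24 frames starting 10 frames into media whose available
# range begins at frame 100.  The module-level ``_ot`` alias for
# opentimelineio.opentime used by the helper above is assumed.
def _example_trim_media_range():
    from opentimelineio import opentime

    media_range = opentime.TimeRange(opentime.RationalTime(100, 24.0),
                                     opentime.RationalTime(200, 24.0))
    source_range = opentime.TimeRange(opentime.RationalTime(10, 24.0),
                                      opentime.RationalTime(24, 24.0))
    trimmed = trim_media_range(media_range, source_range)
    # Start is offset into the media (100 + 10) while the duration comes from
    # the clip's requested length.
    assert trimmed.start_time.value == 110
    assert trimmed.duration.value == 24
    return trimmed
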
    def test_build_empty_file(self):
        media_ref = schema.MissingReference(
            name="test_clip_name",
            available_range=opentime.TimeRange(
                opentime.RationalTime(820489, 24),
                opentime.RationalTime(2087, 24),
            ),
            metadata={
                "fcp_xml": {
                    "timecode": {
                        "rate": {
                            "ntsc": "FALSE",
                            "timebase": "24"
                        },
                        "displayformat": "NDF",
                        "reel": {
                            "name": "test_reel_name",
                        },
                    }
                }
            },
        )

        file_element = self.adapter._build_empty_file(
            media_ref,
            media_ref.available_range.start_time,
            br_map={},
        )

        self.assertEqual(file_element.find("./name").text, "test_clip_name")
        self.assertEqual(file_element.find("./duration").text, "2087")

        rate_element = file_element.find("./rate")
        self.assertEqual(rate_element.find("./ntsc").text, "FALSE")
        self.assertEqual(rate_element.find("./timebase").text, "24")

        tc_element = file_element.find("./timecode")
        self.assertEqual(tc_element.find("./rate/ntsc").text, "FALSE")
        self.assertEqual(tc_element.find("./rate/timebase").text, "24")
        self.assertEqual(tc_element.find("./string").text, "09:29:47:01")
        self.assertEqual(tc_element.find("./reel/name").text, "test_reel_name")

    def test_roundtrip_mem2disk2mem(self):
        timeline = schema.Timeline('test_timeline')

        RATE = 48.0

        video_reference = schema.ExternalReference(
            target_url="/var/tmp/test1.mov",
            available_range=opentime.TimeRange(
                opentime.RationalTime(value=100, rate=RATE),
                opentime.RationalTime(value=1000, rate=RATE)))
        video_reference.name = "test_vid_one"
        audio_reference = schema.ExternalReference(
            target_url="/var/tmp/test1.wav",
            available_range=opentime.TimeRange(
                opentime.RationalTime(value=0, rate=RATE),
                opentime.RationalTime(value=1000, rate=RATE)),
        )
        audio_reference.name = "test_wav_one"
        generator_reference = schema.GeneratorReference(
            name="Color",
            metadata={
                "fcp_xml": {
                    "effectid": "Color",
                    "effectcategory": "Matte",
                    "effecttype": "generator",
                    "mediatype": "video",
                    "parameter": {
                        "@authoringApp": "PremierePro",
                        "parameterid": "fillcolor",
                        "name": "Color",
                        "value": {
                            "alpha": "0",
                            "red": "255",
                            "green": "255",
                            "blue": "255",
                        },
                    },
                },
            },
        )

        v0 = schema.Track(kind=schema.track.TrackKind.Video)
        v1 = schema.Track(kind=schema.track.TrackKind.Video)

        timeline.tracks.extend([v0, v1])

        a0 = schema.Track(kind=schema.track.TrackKind.Audio)

        timeline.tracks.append(a0)

        v0.extend([
            schema.Clip(name='test_clip1',
                        media_reference=video_reference,
                        source_range=opentime.TimeRange(
                            opentime.RationalTime(value=112, rate=RATE),
                            opentime.RationalTime(value=40, rate=RATE))),
            schema.Gap(source_range=opentime.TimeRange(
                duration=opentime.RationalTime(value=60, rate=RATE))),
            schema.Clip(name='test_clip2',
                        media_reference=video_reference,
                        source_range=opentime.TimeRange(
                            opentime.RationalTime(value=123, rate=RATE),
                            opentime.RationalTime(value=260, rate=RATE))),
            schema.Clip(name='test_generator_clip',
                        media_reference=generator_reference,
                        source_range=opentime.TimeRange(
                            opentime.RationalTime(value=292, rate=24.0),
                            opentime.RationalTime(value=183, rate=24.0))),
        ])

        v1.extend([
            schema.Gap(source_range=opentime.TimeRange(
                duration=opentime.RationalTime(value=500, rate=RATE))),
            schema.Clip(name='test_clip3',
                        media_reference=video_reference,
                        source_range=opentime.TimeRange(
                            opentime.RationalTime(value=112, rate=RATE),
                            opentime.RationalTime(value=55, rate=RATE)))
        ])

        a0.extend([
            schema.Gap(source_range=opentime.TimeRange(
                duration=opentime.RationalTime(value=10, rate=RATE))),
            schema.Clip(
                name='test_clip4',
                media_reference=audio_reference,
                source_range=opentime.TimeRange(
                    opentime.RationalTime(value=152, rate=RATE),
                    opentime.RationalTime(value=248, rate=RATE)),
            )
        ])

        timeline.tracks.markers.append(
            schema.Marker(name='test_timeline_marker',
                          marked_range=opentime.TimeRange(
                              opentime.RationalTime(123, RATE)),
                          metadata={'fcp_xml': {
                              'comment': 'my_comment'
                          }}))

        v1[1].markers.append(
            schema.Marker(name='test_clip_marker',
                          marked_range=opentime.TimeRange(
                              opentime.RationalTime(125, RATE)),
                          metadata={'fcp_xml': {
                              'comment': 'my_comment'
                          }}))

        # make sure that global_start_time.rate survives the round trip
        timeline.global_start_time = opentime.RationalTime(100, RATE)

        result = adapters.write_to_string(timeline, adapter_name='fcp_xml')
        new_timeline = adapters.read_from_string(result,
                                                 adapter_name='fcp_xml')

        # Since FCP XML's "sequence" is a marriage of the timeline and the
        # main tracks stack, the tracks stack loses its name
        new_timeline.tracks.name = timeline.tracks.name

        self.assertEqual(new_timeline.name, 'test_timeline')

        # Before comparing, scrub ignorable metadata introduced in
        # serialization (things like unique ids minted by the adapter)
        # Since we seeded metadata for the generator, keep that metadata
        del (new_timeline.metadata["fcp_xml"])
        for child in new_timeline.tracks.each_child():
            try:
                del (child.metadata["fcp_xml"])
            except KeyError:
                pass

            try:
                is_generator = isinstance(child.media_reference,
                                          schema.GeneratorReference)
                if not is_generator:
                    del (child.media_reference.metadata["fcp_xml"])
            except (AttributeError, KeyError):
                pass

        self.assertJsonEqual(new_timeline, timeline)

    def test_generator_for_element(self):
        generator_element = cElementTree.fromstring("""
            <generatoritem id="clipitem-29">
              <name>White</name>
              <enabled>TRUE</enabled>
              <duration>1035764</duration>
              <start>383</start>
              <end>432</end>
              <in>86313</in>
              <out>86362</out>
              <rate>
                <timebase>24</timebase>
                <ntsc>TRUE</ntsc>
              </rate>
              <effect>
                <name>Color</name>
                <effectid>Color</effectid>
                <effectcategory>Matte</effectcategory>
                <effecttype>generator</effecttype>
                <mediatype>video</mediatype>
                <parameter authoringApp="PremierePro">
                  <parameterid>fillcolor</parameterid>
                  <name>Color</name>
                  <value>
                    <alpha>0</alpha>
                    <red>255</red>
                    <green>255</green>
                    <blue>255</blue>
                  </value>
                </parameter>
              </effect>
            </generatoritem>
            """)
        parent_context_element = cElementTree.fromstring("""
            <track>
              <rate>
                <timebase>24</timebase>
                <ntsc>TRUE</ntsc>
              </rate>
            </track>
            """)

        context = self.adapter._Context(parent_context_element)

        # Make a parser
        parser = self.adapter.FCP7XMLParser(generator_element)

        clip, time_range = parser.item_and_timing_for_element(
            generator_element,
            head_transition=None,
            tail_transition=None,
            context=context,
        )

        self.assertEqual(clip.name, "White")

        expected_range = opentime.TimeRange(
            start_time=opentime.RationalTime(383, (24000 / 1001.0)),
            duration=opentime.RationalTime(49, (24000 / 1001.0)),
        )
        self.assertEqual(time_range, expected_range)

        expected_source_range = opentime.TimeRange(
            start_time=opentime.RationalTime(86313, (24000 / 1001.0)),
            duration=opentime.RationalTime(49, (24000 / 1001.0)),
        )
        self.assertEqual(clip.source_range, expected_source_range)

        ref = clip.media_reference
        self.assertTrue(isinstance(ref, schema.GeneratorReference))
        self.assertEqual(ref.name, "Color")
        self.assertEqual(ref.metadata["fcp_xml"]["parameter"]["value"]["red"],
                         "255")

    def test_media_reference_from_element(self):
        file_element = cElementTree.fromstring("""
            <file id="file-3">
              <name>sc01_sh030_anim.mov</name>
              <pathurl>file:///Scratch/media/sc01_sh030_anim.2.mov</pathurl>
              <rate>
                <timebase>30</timebase>
                <ntsc>FALSE</ntsc>
              </rate>
              <duration>400</duration>
              <timecode>
                <rate>
                  <timebase>30</timebase>
                  <ntsc>FALSE</ntsc>
                </rate>
                <string>01:00:00:00</string>
                <frame>108000</frame>
                <displayformat>NDF</displayformat>
                <reel>
                  <name/>
                </reel>
              </timecode>
              <media>
                <video>
                  <samplecharacteristics>
                    <rate>
                      <timebase>30</timebase>
                      <ntsc>FALSE</ntsc>
                    </rate>
                    <width>1280</width>
                    <height>720</height>
                    <anamorphic>FALSE</anamorphic>
                    <pixelaspectratio>square</pixelaspectratio>
                    <fielddominance>none</fielddominance>
                  </samplecharacteristics>
                </video>
                <audio>
                  <samplecharacteristics>
                    <depth>16</depth>
                    <samplerate>48000</samplerate>
                  </samplecharacteristics>
                  <channelcount>2</channelcount>
                </audio>
              </media>
            </file>
            """)

        parser = self.adapter.FCP7XMLParser(file_element)
        context = self.adapter._Context()
        ref = parser.media_reference_for_file_element(
            file_element,
            context=context,
        )

        self.assertEqual(ref.target_url,
                         "file:///Scratch/media/sc01_sh030_anim.2.mov")
        self.assertEqual(ref.name, "sc01_sh030_anim.mov")
        self.assertEqual(
            ref.available_range,
            opentime.TimeRange(
                start_time=opentime.RationalTime(108000, 30),
                duration=opentime.RationalTime(400, 30),
            ))

        # Spot-check a metadata field
        video_metadata = ref.metadata["fcp_xml"]["media"]["video"]
        self.assertEqual(video_metadata["samplecharacteristics"]["height"],
                         "720")


def main(srcpath, outfile):
    """
    Find all the files in a path and extract the Julian date of any pictures
    found.
    """
    srcpath = '/'.join(srcpath.split('\\'))
    candidate_files = []

    print "Scanning directory: ", srcpath

    for (dirpath, dirnames, filenames) in walk(srcpath):
        for file in filenames:
            fullpath = os.path.join(dirpath, file)
            candidate_files.append('/'.join(fullpath.split('\\')))
        break  # only ingest the first directory found. TODO: make a track per directory

    refs = []

    print "Reading EXIF data"

    file_count = 0
    max_files = 1000

    for path in candidate_files:
        fh = open(path, "rb")
        tags = exifread.process_file(fh, details=False)
        fh.close()

        if 'EXIF DateTimeOriginal' in tags:
            rpath = os.path.relpath(path, srcpath)
            ref = ImageRef(path.split('/')[-1], rpath)

            datestr = tags['EXIF DateTimeOriginal'].values
            datestrs = datestr.split(' ')
            cal = datestrs[0].split(':')
            hr = datestrs[1].split(':')
            dto = datetime.datetime(int(cal[0]), int(cal[1]), int(cal[2]),
                                    int(hr[0]), int(hr[1]), int(hr[2]))
            julian = jdutil.datetime_to_jd(dto)

            ref.time_stamp = julian

            if 'EXIF ExposureTime' in tags:
                et = tags['EXIF ExposureTime'].values[0]
                ref.exposure_time = float(et.num) / float(et.den)
            else:
                ref.exposure_time = 1.0 / 100.0  # arbitrary

            refs.append(ref)

        file_count += 1
        if file_count > max_files:
            break

    refs.sort()

    epoch = refs[0].time_stamp

    for ref in refs:
        print((ref.time_stamp - epoch) * 24 * 3600, ref.path)

    timeline = otio.schema.Timeline()
    timeline.name = "Photos"  # TODO pass the time line name
    track = otio.schema.Track()
    track.name = "Photo track"  # TODO make a track per day
    track.metadata["epoch"] = epoch
    timeline.tracks.append(track)

    for i, ref in enumerate(refs):
        next_i = min(i + 1, len(refs) - 1)
        ts = (ref.time_stamp - epoch) * 24.0 * 3600.0  # seconds
        ts_next = (refs[next_i].time_stamp - epoch) * 24.0 * 3600.0
        duration = ts_next - ts

        # exposure time is already in seconds
        image_time = opentime.TimeRange(
            opentime.RationalTime(ts, 1),
            opentime.RationalTime(ref.exposure_time, 1.0))

        media_reference = otio.schema.ExternalReference(
            target_url="file://" + ref.path, available_range=image_time)
        media_reference.name = ref.name

        clip = otio.schema.Clip(name=ref.name)
        clip.media_reference = media_reference
        clip.source_range = opentime.TimeRange(
            opentime.RationalTime(0, 1.0),
            opentime.RationalTime(duration, 1.0))
        track.append(clip)

    otio.adapters.write_to_file(timeline, outfile)
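

# A hedged invocation sketch for main(); the paths are placeholders and the
# original script's real entry point / argument handling is not shown in this
# excerpt.  It assumes exifread, jdutil, OpenTimelineIO, and the ImageRef
# helper defined elsewhere in the script are importable.
if __name__ == "__main__":
    main("/path/to/photos", "photos.otio")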