Example #1
0
def _set_text(parent, tag, value, index=0):
    """Set the text content of the index-th <tag> element under parent."""
    parent.getElementsByTagName(tag)[index].childNodes[0].nodeValue = value


def export_xml():
    """Export the current project as a Final Cut Pro XML file.

    Prompts the user for a destination path, then fills in the bundled
    project/track/clip XML templates from the current project's layers and
    clips, and writes the document to disk.  Clip times are converted from
    seconds to frames using the project frame rate.  I/O errors are logged
    rather than raised; canceling the dialog returns silently.
    """
    app = get_app()
    _ = app._tr

    # Project frame rate (used to convert seconds -> frames)
    fps_num = app.project.get("fps").get("num", 24)
    fps_den = app.project.get("fps").get("den", 1)
    fps_float = fps_num / fps_den

    # Ticks per second (fixed Premiere / Final Cut Pro tick rate)
    ticks = 254016000000

    # Suggest a path based on the current project file (HOME for unsaved)
    recommended_path = app.project.current_filepath or ""
    if not recommended_path:
        recommended_path = os.path.join(info.HOME_PATH,
                                        "%s.xml" % _("Untitled Project"))
    else:
        recommended_path = recommended_path.replace(".osp", ".xml")
    file_path = QFileDialog.getSaveFileName(app.window, _("Export XML..."),
                                            recommended_path,
                                            _("Final Cut Pro (*.xml)"))[0]
    if not file_path:
        # User canceled dialog
        return

    # Append .xml if needed
    if not file_path.endswith(".xml"):
        file_path = "%s.xml" % file_path

    # Get filename with no path
    file_name = os.path.basename(file_path)

    # Determine timeline length in seconds (end of the last clip)
    duration = 0.0
    for clip in Clip.filter():
        clip_last_frame = clip.data.get("position") + (clip.data.get("end") -
                                                       clip.data.get("start"))
        duration = max(duration, clip_last_frame)

    # Parse the project XML template
    xmldoc = minidom.parse(
        os.path.join(info.RESOURCES_PATH, 'export-project-template.xml'))

    # Set Project Details
    _set_text(xmldoc, "name", file_name)
    _set_text(xmldoc, "uuid", str(uuid1()))
    _set_text(xmldoc, "duration", duration)
    _set_text(xmldoc, "width", app.project.get("width"))
    _set_text(xmldoc, "height", app.project.get("height"))
    _set_text(xmldoc, "samplerate", app.project.get("sample_rate"))
    xmldoc.getElementsByTagName("sequence")[0].setAttribute(
        "id", app.project.get("id"))
    for childNode in xmldoc.getElementsByTagName("timebase"):
        childNode.childNodes[0].nodeValue = fps_float

    # Get parent audio node
    parentAudioNode = xmldoc.getElementsByTagName("audio")[0]

    # Loop through tracks, lowest layer number first
    all_tracks = app.project.get("layers")
    track_count = 1
    for track in sorted(all_tracks, key=itemgetter('number')):
        existing_track = Track.get(number=track.get("number"))
        if not existing_track:
            # Log error and fail silently, and continue
            log.error('No track object found with number: %s' %
                      track.get("number"))
            continue

        # Track details
        track_locked = track.get("lock", False)
        clips_on_track = Clip.filter(layer=track.get("number"))
        if not clips_on_track:
            continue

        # Create video track node from its template
        trackTemplateDoc = minidom.parse(
            os.path.join(info.RESOURCES_PATH,
                         'export-track-video-template.xml'))
        videoTrackNode = trackTemplateDoc.getElementsByTagName('track')[0]
        xmldoc.getElementsByTagName("video")[0].appendChild(videoTrackNode)

        # Create audio track nodes (1 for each channel)
        trackTemplateDoc = minidom.parse(
            os.path.join(info.RESOURCES_PATH,
                         'export-track-audio-template.xml'))
        audioTrackNode = trackTemplateDoc.getElementsByTagName('track')[0]
        parentAudioNode.appendChild(audioTrackNode)
        _set_text(audioTrackNode, "outputchannelindex", track_count)

        # Is Track Locked?
        if track_locked:
            _set_text(videoTrackNode, "locked", "TRUE")
            _set_text(audioTrackNode, "locked", "TRUE")

        # Loop through clips on this track
        for clip in clips_on_track:
            # Create VIDEO clip node
            clipNode = None
            if clip.data.get("reader", {}).get("has_video"):
                clipTemplateDoc = minidom.parse(
                    os.path.join(info.RESOURCES_PATH,
                                 'export-clip-video-template.xml'))
                clipNode = clipTemplateDoc.getElementsByTagName('clipitem')[0]
                videoTrackNode.appendChild(clipNode)

                # Update clip properties (all times in frames)
                clipNode.setAttribute('id', clip.data.get('id'))
                clipNode.getElementsByTagName("file")[0].setAttribute(
                    'id', clip.data.get('file_id'))
                _set_text(clipNode, "name", clip.data.get('title'))
                _set_text(clipNode, "name", clip.data.get('title'), 1)
                _set_text(clipNode, "pathurl", clip.data.get('title'))
                _set_text(clipNode, "in",
                          clip.data.get('start') * fps_float)
                _set_text(clipNode, "out",
                          clip.data.get('end') * fps_float)
                _set_text(clipNode, "start",
                          clip.data.get('position') * fps_float)
                _set_text(clipNode, "end",
                          (clip.data.get('position') +
                           (clip.data.get('end') -
                            clip.data.get('start'))) * fps_float)
                _set_text(clipNode, "duration",
                          (clip.data.get('end') -
                           clip.data.get('start')) * fps_float)
                _set_text(clipNode, "pproTicksIn",
                          (clip.data.get('start') * fps_float) * ticks)
                _set_text(clipNode, "pproTicksOut",
                          (clip.data.get('end') * fps_float) * ticks)

                # Add Keyframes (if any)
                createEffect(xmldoc, "Opacity", clipNode,
                             clip.data.get('alpha', {}).get('Points', []),
                             100.0)

            # Create AUDIO clip nodes
            if clip.data.get("reader", {}).get("has_audio"):
                clipTemplateDoc = minidom.parse(
                    os.path.join(info.RESOURCES_PATH,
                                 'export-clip-audio-template.xml'))
                clipAudioNode = clipTemplateDoc.getElementsByTagName(
                    'clipitem')[0]
                audioTrackNode.appendChild(clipAudioNode)

                # Update audio characteristics on the linked video clip
                if clipNode:
                    # BUGFIX: samplerate / channelcount were swapped
                    # (samplerate was set to the channel count and the
                    # channel count to the sample rate)
                    _set_text(clipNode, "samplerate",
                              clip.data.get("reader", {}).get("sample_rate"))
                    _set_text(clipNode, "channelcount",
                              clip.data.get("reader", {}).get("channels"))
                    clipAudioNode.getElementsByTagName(
                        "file")[0].childNodes.clear()
                else:
                    _set_text(clipAudioNode, "name",
                              clip.data.get('title'), 1)
                    _set_text(clipAudioNode, "pathurl",
                              clip.data.get('title'))

                # Update audio clip properties (all times in frames)
                clipAudioNode.setAttribute('id',
                                           "%s-audio" % clip.data.get('id'))
                clipAudioNode.getElementsByTagName("file")[0].setAttribute(
                    'id', clip.data.get('file_id'))
                _set_text(clipAudioNode, "trackindex", track_count)
                _set_text(clipAudioNode, "name", clip.data.get('title'))
                _set_text(clipAudioNode, "in",
                          clip.data.get('start') * fps_float)
                _set_text(clipAudioNode, "out",
                          clip.data.get('end') * fps_float)
                _set_text(clipAudioNode, "start",
                          clip.data.get('position') * fps_float)
                _set_text(clipAudioNode, "end",
                          (clip.data.get('position') +
                           (clip.data.get('end') -
                            clip.data.get('start'))) * fps_float)
                _set_text(clipAudioNode, "duration",
                          (clip.data.get('end') -
                           clip.data.get('start')) * fps_float)
                _set_text(clipAudioNode, "pproTicksIn",
                          (clip.data.get('start') * fps_float) * ticks)
                _set_text(clipAudioNode, "pproTicksOut",
                          (clip.data.get('end') * fps_float) * ticks)

                # Add Keyframes (if any)
                createEffect(xmldoc, "Audio Levels", clipAudioNode,
                             clip.data.get('volume', {}).get('Points', []),
                             1.0)
            else:
                # No audio: detach the <audio> characteristics element from
                # the video clip. (BUGFIX: NodeList.pop() only removed the
                # element from the temporary list returned by
                # getElementsByTagName, never from the document tree.)
                if clipNode:
                    for audio_elem in clipNode.getElementsByTagName("audio"):
                        audio_elem.parentNode.removeChild(audio_elem)

        # Update counter
        track_count += 1

    try:
        # "wb" needed for windows support; context manager guarantees close
        with open(os.fsencode(file_path), "wb") as xml_file:
            xml_file.write(bytes(xmldoc.toxml(), 'UTF-8'))
    except IOError as inst:
        log.error("Error writing XML export: {}".format(str(inst)))
Example #2
0
def export_edl():
    """Export the timeline as EDL (Edit Decision List) files.

    The EDL format supports only one track per file, so one
    "<chosen path>-<track name>.edl" file is written per non-empty track.
    Each file contains the CMX-style edit lines (with blank "BL" edits
    filling gaps between clips) plus opacity and audio-level keyframe
    comments.  Prompts the user for the base path; returns silently on
    cancel.
    """
    app = get_app()
    _ = app._tr

    # EDL edit line: index, reel, channel, transition, then 4 timecodes
    edl_string = "%03d  %-9s%-6s%-9s%11s %11s %11s %11s\n"

    # Project frame rate (used to convert seconds -> timecode)
    fps_num = app.project.get("fps").get("num", 24)
    fps_den = app.project.get("fps").get("den", 1)
    fps_float = fps_num / fps_den

    # Suggest a path based on the current project file (HOME for unsaved)
    recommended_path = app.project.current_filepath or ""
    if not recommended_path:
        recommended_path = os.path.join(info.HOME_PATH,
                                        "%s.edl" % _("Untitled Project"))
    else:
        recommended_path = recommended_path.replace(".osp", ".edl")
    file_path = QFileDialog.getSaveFileName(
        app.window, _("Export EDL..."), recommended_path,
        _("Edit Decision Lists (*.edl)"))[0]
    if not file_path:
        # User canceled dialog
        return

    # Append .edl if needed
    if not file_path.endswith(".edl"):
        file_path = "%s.edl" % file_path

    # Get filename with no extension
    file_name_with_ext = os.path.basename(file_path)
    file_name = os.path.splitext(file_name_with_ext)[0]

    all_tracks = get_app().project.get("layers")
    track_count = len(all_tracks)
    # Iterate highest layer first, so the top track defaults to "TRACK <n>"
    for track in reversed(sorted(all_tracks, key=itemgetter('number'))):
        # Consume this track's number before any skip. (BUGFIX: the
        # counter was previously only decremented for exported tracks, so
        # a skipped track shifted the default names of every later track.)
        current_track_number = track_count
        track_count -= 1

        existing_track = Track.get(number=track.get("number"))
        if not existing_track:
            # Log error and fail silently, and continue
            log.error('No track object found with number: %s' %
                      track.get("number"))
            continue

        # Track name (fall back to a generated name for unlabeled tracks)
        track_name = track.get("label") or "TRACK %s" % current_track_number
        clips_on_track = Clip.filter(layer=track.get("number"))
        if not clips_on_track:
            continue

        # Generate EDL File (1 per track - limitation of EDL format)
        # TODO: Improve and move this into its own class
        # (BUGFIX: strip only the ".edl" suffix; str.replace removed every
        # occurrence of ".edl" anywhere in the path)
        with open("%s-%s.edl" % (file_path[:-len(".edl")], track_name),
                  'w',
                  encoding="utf8") as f:
            # Add Header
            f.write("TITLE: %s - %s\n" % (file_name, track_name))
            f.write("FCM: NON-DROP FRAME\n\n")

            # Edit counter and current end-of-timeline position (seconds)
            edit_index = 1
            export_position = 0.0

            # Loop through clips on this track
            for clip in clips_on_track:
                # Insert a blank (BL) edit to fill any gap before this clip
                if clip.data.get('position', 0.0) > export_position:
                    # Blank clip (i.e. 00:00:00:00)
                    clip_start_time = secondsToTimecode(0.0, fps_num, fps_den)
                    clip_end_time = secondsToTimecode(
                        clip.data.get('position') - export_position, fps_num,
                        fps_den)
                    timeline_start_time = secondsToTimecode(
                        export_position, fps_num, fps_den)
                    timeline_end_time = secondsToTimecode(
                        clip.data.get('position'), fps_num, fps_den)

                    # Write blank clip
                    f.write(edl_string %
                            (edit_index, "BL"[:9], "V"[:6], "C",
                             clip_start_time, clip_end_time,
                             timeline_start_time, timeline_end_time))

                # Format clip start/end and timeline start/end values (i.e. 00:00:00:00)
                clip_start_time = secondsToTimecode(clip.data.get('start'),
                                                    fps_num, fps_den)
                clip_end_time = secondsToTimecode(clip.data.get('end'),
                                                  fps_num, fps_den)
                timeline_start_time = secondsToTimecode(
                    clip.data.get('position'), fps_num, fps_den)
                timeline_end_time = secondsToTimecode(
                    clip.data.get('position') +
                    (clip.data.get('end') - clip.data.get('start')), fps_num,
                    fps_den)

                has_video = clip.data.get("reader", {}).get("has_video", False)
                has_audio = clip.data.get("reader", {}).get("has_audio", False)
                if has_video:
                    # Video Track
                    f.write(edl_string %
                            (edit_index, "AX"[:9], "V"[:6], "C",
                             clip_start_time, clip_end_time,
                             timeline_start_time, timeline_end_time))
                if has_audio:
                    # Audio Track
                    f.write(edl_string %
                            (edit_index, "AX"[:9], "A"[:6], "C",
                             clip_start_time, clip_end_time,
                             timeline_start_time, timeline_end_time))
                f.write("* FROM CLIP NAME: %s\n" % clip.data.get('title'))

                # Add opacity keyframes (if any), de-duplicated by time
                alpha_points = clip.data.get('alpha', {}).get('Points', [])
                if len(alpha_points) > 1:
                    keyframes = {}
                    for point in alpha_points:
                        # Keyframe X is a 1-based frame number; Y is 0..1
                        keyframeTime = (point.get('co', {}).get('X', 1.0) -
                                        1) / fps_float
                        keyframeValue = point.get('co', {}).get('Y',
                                                                0.0) * 100.0
                        keyframes[keyframeTime] = keyframeValue
                    # Write keyframe values to EDL
                    for opacity_time in sorted(keyframes.keys()):
                        opacity_value = keyframes.get(opacity_time)
                        f.write(
                            "* OPACITY LEVEL AT %s IS %0.2f%%  (REEL AX)\n" %
                            (secondsToTimecode(opacity_time, fps_num,
                                               fps_den), opacity_value))

                # Add volume keyframes (if any), de-duplicated by time
                volume_points = clip.data.get('volume', {}).get('Points', [])
                if len(volume_points) > 1:
                    keyframes = {}
                    for point in volume_points:
                        # Keyframe X is a 1-based frame number; Y is 0..1
                        keyframeTime = (point.get('co', {}).get('X', 1.0) -
                                        1) / fps_float
                        keyframeValue = (point.get('co', {}).get('Y', 0.0) *
                                         99.0) - 99  # Scaling 0-1 to -99-0
                        keyframes[keyframeTime] = keyframeValue
                    # Write keyframe values to EDL
                    for volume_time in sorted(keyframes.keys()):
                        volume_value = keyframes.get(volume_time)
                        f.write(
                            "* AUDIO LEVEL AT %s IS %0.2f DB  (REEL AX A1)\n" %
                            (secondsToTimecode(volume_time, fps_num,
                                               fps_den), volume_value))

                # Advance the export position to the end of this clip
                export_position = clip.data.get('position') + (
                    clip.data.get('end') - clip.data.get('start'))
                f.write("\n")

                edit_index += 1