Example #1
0
def fill_track_mlt(mlt_track, py_track):
    """
    Replaces py objects in track (MLT Playlist) with mlt objects.

    For every saved python clip in py_track an MLT clip is created and
    attached to mlt_track, with filters and mute state re-applied.

    Args:
        mlt_track: MLT Playlist object that receives the recreated clips.
        py_track: saved python track object whose clips are recreated.

    Raises:
        FileProducerNotFoundError: if a file producer cannot be created
            for a clip's media path.
    """
    # Declared once up front instead of inside the loop body.
    global all_clips, sync_clips

    # Update mlt obj attr values to saved ones
    mlt_track.__dict__.update(py_track.__dict__)

    # Clear py clips from MLT object
    mlt_track.clips = []

    # Create clips
    sequence = mlt_track.sequence
    for i, clip in enumerate(py_track.clips):
        mlt_clip = None
        append_created = True  # blanks get appended at creation time, other clips don't

        # Add color attribute if not found (older project files lack it)
        if not hasattr(clip, "color"):
            clip.color = None

        if not clip.is_blanck_clip and clip.media_type != appconsts.PATTERN_PRODUCER:
            # normal clip
            orig_path = clip.path  # Save the path for error message

            # Re-resolve the media path relative to the loaded project file.
            if clip.media_type != appconsts.IMAGE_SEQUENCE:
                clip.path = get_media_asset_path(clip.path, _load_file_path)
            else:
                clip.path = get_img_seq_media_path(clip.path, _load_file_path)

            mlt_clip = sequence.create_file_producer_clip(clip.path)
            if mlt_clip is None:
                raise FileProducerNotFoundError(orig_path)
            mlt_clip.__dict__.update(clip.__dict__)
            fill_filters_mlt(mlt_clip, sequence)
        elif not clip.is_blanck_clip and clip.media_type == appconsts.PATTERN_PRODUCER:
            # pattern producer
            mlt_clip = sequence.create_pattern_producer(clip.create_data)
            mlt_clip.__dict__.update(clip.__dict__)
            fill_filters_mlt(mlt_clip, sequence)
        elif clip.is_blanck_clip:
            # blank clip; inserted into the track at creation time
            length = clip.clip_out - clip.clip_in + 1
            mlt_clip = sequence.create_and_insert_blank(mlt_track, i, length)
            mlt_clip.__dict__.update(clip.__dict__)
            append_created = False
        else:  # This is just for info, if this ever happens crash will happen.
            # print() form works on both Python 2 and 3.
            print("Could not recognize clip, dict:")
            print(clip.__dict__)

        # This transient state gets saved and we want everything
        # unselected to begin with.
        mlt_clip.selected = False

        # Mute
        if clip.mute_filter is not None:
            mute_filter = mltfilters.create_mute_volume_filter(sequence)
            mltfilters.do_clip_mute(mlt_clip, mute_filter)

        # Add to track in MLT if hasn't already been appended (blank clip has)
        if append_created:
            append_clip(mlt_track, mlt_clip, clip.clip_in, clip.clip_out)

        # Save references to recreate sync relations after all clips loaded
        all_clips[mlt_clip.id] = mlt_clip
        if mlt_clip.sync_data is not None:
            sync_clips.append((mlt_clip, mlt_track))
Example #2
0
def fill_track_mlt(mlt_track, py_track):
    """
    Replaces py objects in track (MLT Playlist) with mlt objects.

    For every saved python clip in py_track an MLT clip is created and
    attached to mlt_track, with filters and mute state re-applied.

    Args:
        mlt_track: MLT Playlist object that receives the recreated clips.
        py_track: saved python track object whose clips are recreated.

    Raises:
        FileProducerNotFoundError: if a file producer cannot be created
            for a clip's media path.
    """
    # Declared once up front instead of inside the loop body.
    global all_clips, sync_clips

    # Update mlt obj attr values to saved ones
    mlt_track.__dict__.update(py_track.__dict__)

    # Clear py clips from MLT object
    mlt_track.clips = []

    # Create clips
    sequence = mlt_track.sequence
    for i, clip in enumerate(py_track.clips):
        mlt_clip = None
        append_created = True  # blanks get appended at creation time, other clips don't

        # Add color attribute if not found
        if not hasattr(clip, "color"):
            clip.color = None

        # Add markers list if not found
        if not hasattr(clip, "markers"):
            clip.markers = []

        # Add img seq ttl value for all clips if not found; it must be present
        # in every clip because other code tests 'clip.ttl == None'.
        if not hasattr(clip, "ttl"):
            clip.ttl = None

        if not clip.is_blanck_clip and clip.media_type != appconsts.PATTERN_PRODUCER:
            # normal clip
            orig_path = clip.path  # Save the path for error message

            # Re-resolve the media path relative to the loaded project file.
            if clip.media_type != appconsts.IMAGE_SEQUENCE:
                clip.path = get_media_asset_path(clip.path, _load_file_path)
            else:
                clip.path = get_img_seq_media_path(clip.path, _load_file_path)

            mlt_clip = sequence.create_file_producer_clip(clip.path, None, False, clip.ttl)
            if mlt_clip is None:
                raise FileProducerNotFoundError(orig_path)
            mlt_clip.__dict__.update(clip.__dict__)
            fill_filters_mlt(mlt_clip, sequence)
        elif not clip.is_blanck_clip and clip.media_type == appconsts.PATTERN_PRODUCER:
            # pattern producer
            mlt_clip = sequence.create_pattern_producer(clip.create_data)
            mlt_clip.__dict__.update(clip.__dict__)
            fill_filters_mlt(mlt_clip, sequence)
        elif clip.is_blanck_clip:
            # blank clip; inserted into the track at creation time
            length = clip.clip_out - clip.clip_in + 1
            mlt_clip = sequence.create_and_insert_blank(mlt_track, i, length)
            mlt_clip.__dict__.update(clip.__dict__)
            append_created = False
        else:  # This is just for info, if this ever happens crash will happen.
            # print() form works on both Python 2 and 3.
            print("Could not recognize clip, dict:")
            print(clip.__dict__)

        # This transient state gets saved and we want everything
        # unselected to begin with.
        mlt_clip.selected = False

        # Mute
        if clip.mute_filter is not None:
            mute_filter = mltfilters.create_mute_volume_filter(sequence)
            mltfilters.do_clip_mute(mlt_clip, mute_filter)

        # Add to track in MLT if hasn't already been appended (blank clip has)
        if append_created:
            append_clip(mlt_track, mlt_clip, clip.clip_in, clip.clip_out)

        # Save references to recreate sync relations after all clips loaded
        all_clips[mlt_clip.id] = mlt_clip
        if mlt_clip.sync_data is not None:
            sync_clips.append((mlt_clip, mlt_track))
Example #3
0
def fill_track_mlt(mlt_track, py_track):
    """
    Replaces py objects in track (MLT Playlist) with mlt objects.

    For every saved python clip in py_track an MLT clip is created and
    attached to mlt_track, with filters and mute state re-applied.
    Several path-recovery strategies are attempted for missing media
    (relative search, proxy/original swap, unrendered container media).

    Raises FileProducerNotFoundError if a file producer cannot be
    created for a clip's media path.
    """
    # Update mlt obj attr values to saved ones
    mlt_track.__dict__.update(py_track.__dict__)
    
    # Clear py clips from MLT object
    mlt_track.clips = []
    
    # Create clips
    sequence = mlt_track.sequence
    for i in range(0, len(py_track.clips)):
        clip = py_track.clips[i]
        # Progress message only for real clips; blank clips have no useful name.
        if clip.is_blanck_clip == False:
            _show_msg(_("Building track ") + str(py_track.id) + " - " + clip.name)
                
        mlt_clip = None
        append_created = True # blanks get appended at creation time, other clips don't

        # Backfill clip attributes that older project files may be missing.
        persistancecompat.FIX_MISSING_CLIP_ATTRS(clip)

        # normal clip
        if (clip.is_blanck_clip == False and (clip.media_type != appconsts.PATTERN_PRODUCER)):
            orig_path = clip.path # Save the path for error message

            # Possibly do a relative file search to all but rendered container clip media, that needs to be re-rendered.
            if not(clip.container_data != None and clip.container_data.rendered_media != None):
                if clip.media_type != appconsts.IMAGE_SEQUENCE:
                    clip.path = get_media_asset_path(clip.path, _load_file_path)
                else:
                    clip.path = get_img_seq_media_path(clip.path, _load_file_path)

            # Try to fix possible missing proxy files for clips if we are in proxy mode.
            if not os.path.isfile(clip.path) and project_proxy_mode == appconsts.USE_PROXY_MEDIA:
                try:
                    try:
                        possible_orig_file_path = proxy_path_dict[clip.path] # This dict was filled with media file data.
                    except:
                        # Both proxy AND original file are missing, can happen if a project file in proxy mode
                        # is opened in another machine.
                        # clip.path was changed by calling  get_media_asset_path() try to use fixed original
                        possible_orig_file_path = proxy_path_dict[orig_path]
                        possible_orig_file_path = get_media_asset_path(possible_orig_file_path, _load_file_path)
                         
                    if os.path.isfile(possible_orig_file_path): # Original media file exists, use it
                        clip.path = possible_orig_file_path
                except:
                    pass # missing proxy file fix has failed; producer creation below reports the error

            # If container clip rendered media is missing try to use unrendered media.
            if not os.path.isfile(clip.path) and clip.container_data != None:
                if clip.path != clip.container_data.unrendered_media:
                    clip.path = clip.container_data.unrendered_media
                    clip.container_data.clear_rendered_media()
                    
            mlt_clip = sequence.create_file_producer_clip(clip.path, None, False, clip.ttl)
            
            if mlt_clip == None:
                raise FileProducerNotFoundError(orig_path)

            mlt_clip.__dict__.update(clip.__dict__)
            fill_filters_mlt(mlt_clip, sequence)
        # pattern producer
        elif (clip.is_blanck_clip == False and (clip.media_type == appconsts.PATTERN_PRODUCER)):
            mlt_clip = sequence.create_pattern_producer(clip.create_data)
            mlt_clip.__dict__.update(clip.__dict__)
            fill_filters_mlt(mlt_clip, sequence)
        # blank clip, inserted into the track at creation time
        elif (clip.is_blanck_clip == True): 
            length = clip.clip_out - clip.clip_in + 1
            mlt_clip = sequence.create_and_insert_blank(mlt_track, i, length)
            mlt_clip.__dict__.update(clip.__dict__)
            append_created = False
        else: # This is just for info, if this ever happens crash will happen.
            print("Could not recognize clip, dict:")
            print(clip.__dict__)

        mlt_clip.selected = False # This transient state gets saved and 
                                   # we want everything unselected to begin with
        # Mute 
        if clip.mute_filter != None:
            mute_filter = mltfilters.create_mute_volume_filter(sequence) 
            mltfilters.do_clip_mute(mlt_clip, mute_filter)
        
        # Add to track in MLT if hasn't already been appended (blank clip has)
        if append_created == True:
            append_clip(mlt_track, mlt_clip, clip.clip_in, clip.clip_out)

        # Save refences to recreate sync relations after all clips loaded
        global all_clips, sync_clips
        all_clips[mlt_clip.id] = mlt_clip
        if mlt_clip.sync_data != None:
            sync_clips.append((mlt_clip, mlt_track))
Example #4
0
def fill_track_mlt(mlt_track, py_track):
    """
    Replaces py objects in track (MLT Playlist) with mlt objects.

    For every saved python clip in py_track an MLT clip is created and
    attached to mlt_track, with filters and mute state re-applied.
    If we are in proxy mode and a proxy file is missing, a best-effort
    swap back to the original media file is attempted.

    Args:
        mlt_track: MLT Playlist object that receives the recreated clips.
        py_track: saved python track object whose clips are recreated.

    Raises:
        FileProducerNotFoundError: if a file producer cannot be created
            for a clip's media path.
    """
    # Declared once up front instead of inside the loop body.
    global all_clips, sync_clips

    # Update mlt obj attr values to saved ones
    mlt_track.__dict__.update(py_track.__dict__)

    # Clear py clips from MLT object
    mlt_track.clips = []

    # Create clips
    sequence = mlt_track.sequence
    for i, clip in enumerate(py_track.clips):
        mlt_clip = None
        append_created = True  # blanks get appended at creation time, other clips don't

        # Add color attribute if not found
        if not hasattr(clip, "color"):
            clip.color = None

        # Add markers list if not found
        if not hasattr(clip, "markers"):
            clip.markers = []

        # Add img seq ttl value for all clips if not found; it must be present
        # in every clip because other code tests 'clip.ttl == None'.
        if not hasattr(clip, "ttl"):
            clip.ttl = None

        if not clip.is_blanck_clip and clip.media_type != appconsts.PATTERN_PRODUCER:
            # normal clip
            orig_path = clip.path  # Save the path for error message

            # Re-resolve the media path relative to the loaded project file.
            if clip.media_type != appconsts.IMAGE_SEQUENCE:
                clip.path = get_media_asset_path(clip.path, _load_file_path)
            else:
                clip.path = get_img_seq_media_path(clip.path, _load_file_path)

            # Try to fix possible missing proxy files for clips if we are in proxy mode.
            if not os.path.isfile(clip.path) and project_proxy_mode == appconsts.USE_PROXY_MEDIA:
                try:
                    # This dict was filled with media file data.
                    possible_orig_file_path = proxy_path_dict[clip.path]
                    if os.path.isfile(possible_orig_file_path):  # Original media file exists, use it
                        clip.path = possible_orig_file_path
                except Exception:
                    # Best-effort only: missing proxy file fix has failed,
                    # producer creation below reports the missing file.
                    # (Exception, not bare except, so Ctrl-C still works.)
                    pass

            mlt_clip = sequence.create_file_producer_clip(clip.path, None, False, clip.ttl)
            if mlt_clip is None:
                raise FileProducerNotFoundError(orig_path)
            mlt_clip.__dict__.update(clip.__dict__)
            fill_filters_mlt(mlt_clip, sequence)
        elif not clip.is_blanck_clip and clip.media_type == appconsts.PATTERN_PRODUCER:
            # pattern producer
            mlt_clip = sequence.create_pattern_producer(clip.create_data)
            mlt_clip.__dict__.update(clip.__dict__)
            fill_filters_mlt(mlt_clip, sequence)
        elif clip.is_blanck_clip:
            # blank clip; inserted into the track at creation time
            length = clip.clip_out - clip.clip_in + 1
            mlt_clip = sequence.create_and_insert_blank(mlt_track, i, length)
            mlt_clip.__dict__.update(clip.__dict__)
            append_created = False
        else:  # This is just for info, if this ever happens crash will happen.
            print("Could not recognize clip, dict:")
            print(clip.__dict__)

        # This transient state gets saved and we want everything
        # unselected to begin with.
        mlt_clip.selected = False

        # Mute
        if clip.mute_filter is not None:
            mute_filter = mltfilters.create_mute_volume_filter(sequence)
            mltfilters.do_clip_mute(mlt_clip, mute_filter)

        # Add to track in MLT if hasn't already been appended (blank clip has)
        if append_created:
            append_clip(mlt_track, mlt_clip, clip.clip_in, clip.clip_out)

        # Save references to recreate sync relations after all clips loaded
        all_clips[mlt_clip.id] = mlt_clip
        if mlt_clip.sync_data is not None:
            sync_clips.append((mlt_clip, mlt_track))
Example #5
0
 def clone_mute_state(self, clip, clone_clip):
     """Apply clip's mute state to clone_clip via a fresh mute volume filter."""
     # Nothing to clone when the source clip is not muted.
     if clip.mute_filter == None:
         return
     cloned_mute_filter = mltfilters.create_mute_volume_filter(self)
     mltfilters.do_clip_mute(clone_clip, cloned_mute_filter)