Exemplo n.º 1
0
def _do_create_sync_compound_clip(dialog, response_id, data):
    """Dialog callback: render a video + audio clip pair as a synced
    MLT XML compound clip.

    dialog      -- Gtk dialog, destroyed on any response
    response_id -- Gtk.ResponseType; anything but ACCEPT cancels
    data        -- ((files_offsets, (video_file, audio_file, idstr)), name_entry)

    The rendered XML goes to a uniquely named file in the hidden render
    folder; the render-done callback in projectaction.py creates the
    media object from it.
    """
    if response_id != Gtk.ResponseType.ACCEPT:
        dialog.destroy()
        return

    sync_data, name_entry = data
    files_offsets, clips = sync_data
    video_file, audio_file, idstr = clips
    media_name = name_entry.get_text()

    dialog.destroy()

    # Create unique file path in hidden render folder.
    # hashlib replaces the Python 2-only md5 module.
    import hashlib
    folder = editorpersistance.prefs.render_folder
    uuid_str = hashlib.md5(os.urandom(32)).hexdigest()
    write_file = folder + "/" + uuid_str + ".xml"

    # Create tractor with a video track and an audio-only track
    # ("hide" == 1 hides video, keeps audio).
    tractor = mlt.Tractor()
    multitrack = tractor.multitrack()
    track_video = mlt.Playlist()
    track_audio = mlt.Playlist()
    track_audio.set("hide", 1)  # video off, audio on as mlt "hide" value
    multitrack.connect(track_audio, 0)
    multitrack.connect(track_video, 0)

    # Create clips
    video_clip = mlt.Producer(PROJECT().profile, str(video_file))
    audio_clip = mlt.Producer(PROJECT().profile, str(audio_file))

    # Offset of the audio relative to the video, in (possibly fractional)
    # frames, as computed by the sync analysis.
    offset = files_offsets[audio_file]
    print(audio_file, offset)

    # Add clips, padding the later-starting track with a blank so the two
    # tracks line up.
    if offset > 0:
        offset_frames = int(float(offset) + 0.5)  # round to nearest frame
        print("plus")
        track_video.append(video_clip, 0, video_clip.get_length() - 1)
        track_audio.insert_blank(0, offset_frames)
        track_audio.append(audio_clip, 0, audio_clip.get_length() - 1)
    elif offset < 0:
        # BUGFIX: blank length must be positive; previously the negative
        # offset was passed straight to insert_blank(). Negate before
        # rounding to get the blank length in frames.
        offset_frames = int(-float(offset) + 0.5)
        print("miinus")
        track_video.insert_blank(0, offset_frames)
        track_video.append(video_clip, 0, video_clip.get_length() - 1)
        track_audio.append(audio_clip, 0, audio_clip.get_length() - 1)
    else:
        track_video.append(video_clip, 0, video_clip.get_length() - 1)
        track_audio.append(audio_clip, 0, audio_clip.get_length() - 1)

    # render MLT XML, callback in projectaction.py creates media object
    render_player = renderconsumer.XMLCompoundRenderPlayer(
        write_file, media_name,
        projectaction._xml_compound_render_done_callback, tractor)
    render_player.start()
Exemplo n.º 2
0
    def add_track(self, track_type, is_hidden=False):
        """
        Create an MLT playlist for a new track, attach project data,
        connect it into the MLT multitrack and register it in self.tracks.
        Returns the new track.
        """
        track = mlt.Playlist()

        self._add_track_attributes(track, track_type)
        track.is_sync_track = False

        # The next free multitrack index is the current track count;
        # the track id mirrors its position in the tracks list.
        index = len(self.tracks)
        self.multitrack.connect(track, index)
        track.id = index
        self.tracks.append(track)

        # Mix all audio down to track 1 by chaining always-active field
        # transitions. Black bg / track 1 itself need no mixing, and a
        # hidden track is deliberately left unmixed so it covers all the
        # audio below it.
        if (track.id > AUDIO_MIX_DOWN_TRACK) and (is_hidden == False):
            self._mix_audio_for_track(track)

        # Expose a name accessor for UI code.
        track.get_name = lambda: utils.get_track_name(track, self)

        return track
Exemplo n.º 3
0
def create_scene(media):
    """Build an MLT producer (or playlist) for one media item.

    GIFs are converted to mp4, with fallbacks for broken conversions and
    very short clips; mp3 audio is converted to wav; http sources are
    remuxed (stream-copied) to a local temp file; anything else is opened
    directly. Temp files are registered in ftodel for later cleanup.
    """
    def _tmp(suffix):
        # Named temp file in outdir; not auto-deleted, caller tracks it.
        return tempfile.NamedTemporaryFile(mode='w+b',
                                           suffix=suffix,
                                           prefix='tmp',
                                           delete=False,
                                           dir=outdir)

    if media.endswith(".gif"):
        tv = _tmp('.mp4')
        subprocess.call(
            ["ffmpeg", "-i", media, "-y", tv.name, "-loglevel", "quiet"])
        ftodel.append(tv.name)
        probe = mlt.Producer(profile, tv.name)
        if probe.get_out() == 0:
            # mp4 conversion produced nothing usable; fall back to a
            # re-encoded gif copy.
            tp = _tmp('.gif')
            ftodel.append(tp.name)
            subprocess.call(
                ["ffmpeg", "-i", media, "-y", "-loglevel", "quiet", tp.name])
            scene = mlt.Producer(profile, tp.name)
        elif probe.get_out() < 150:
            # Clip is too short: loop it until it covers 150 frames.
            scene = mlt.Playlist()
            repeats = int(150 / probe.get_out()) + 1
            for _ in range(0, repeats):
                scene.append(mlt.Producer(profile, tv.name))
            scene.set('out', 150)
        else:
            scene = mlt.Producer(profile, tv.name)
    elif media.endswith(".mp3"):
        tv = _tmp('.wav')
        subprocess.call(
            ["ffmpeg", "-i", media, "-y", tv.name, "-loglevel", "quiet"])
        ftodel.append(tv.name)
        scene = mlt.Producer(profile, tv.name)

    elif media.startswith("http"):
        tv = _tmp(media[media.rfind("."):])
        ftodel.append(tv.name)
        subprocess.call([
            "ffmpeg", "-i", media, "-y", "-c:a", "copy", "-c:v", "copy",
            "-loglevel", "quiet", tv.name
        ])
        #subprocess.call(["ffmpeg", "-i", media, "-y",  tv.name])
        scene = mlt.Producer(profile, tv.name)
    else:
        scene = mlt.Producer(profile, media)
    return scene
Exemplo n.º 4
0
    def run(self):
        """Render self.image_file as placeholder media of self.length
        frames, showing a progress dialog, then pass the rendered file
        to self.callback."""
        # Producer for the still image.
        producer = current_sequence().create_file_producer_clip(
            str(self.image_file)
        )  # , new_clip_name=None, novalidate=False, ttl=None):

        # Wrap the producer in a single-track tractor so the render
        # runs for exactly the requested length.
        tractor = mlt.Tractor()
        playlist = mlt.Playlist()
        tractor.multitrack().connect(playlist, 0)
        playlist.insert(producer, 0, 0, self.length)

        # Render target; remove any stale file left by an earlier run.
        out_path = userfolders.get_cache_dir() + "/unrendered_clip.mp4"
        if os.path.exists(out_path):
            os.remove(out_path)
        consumer = renderconsumer.get_default_render_consumer(
            out_path,
            PROJECT().profile)

        renderer = renderconsumer.FileRenderPlayer(
            out_path, tractor, consumer, 0, self.length)
        renderer.wait_for_producer_end_stop = True
        renderer.start()

        # GUI work below must hold the GDK lock.
        Gdk.threads_enter()

        info_text = _("<b>Rendering Placeholder Media For:</b> "
                      ) + self.data.get_program_name() + ".blend"

        progress_bar = Gtk.ProgressBar()
        dialog = rendergui.clip_render_progress_dialog(
            None, self.window_text, info_text, progress_bar,
            gui.editor_window.window, True)

        progress_thread = renderconsumer.ProgressWindowThread(
            dialog, progress_bar, renderer, self.progress_thread_complete)
        progress_thread.start()

        Gdk.threads_leave()

        # Block this worker thread until the render has finished.
        while renderer.stopped == False:
            time.sleep(0.5)

        Gdk.threads_enter()

        self.callback(out_path, self.data)

        Gdk.threads_leave()
Exemplo n.º 5
0
    def run(self):
        """Render a timewarp (speed-changed) copy of self.source_path to
        self.write_file with the selected encoding/quality, polling for
        abort requests and reporting progress; writes a completed-flag
        file when the render finishes."""
        self.start_time = time.monotonic()

        profile = mltprofiles.get_profile(self.profile_desc)
        # "timewarp:" producer plays the source at self.speed.
        motion_producer = mlt.Producer(
            profile, None,
            str("timewarp:" + str(self.speed) + ":" + str(self.source_path)))

        # Create tractor and track to get right length
        tractor = mlt.Tractor()
        multitrack = tractor.multitrack()
        track0 = mlt.Playlist()
        multitrack.connect(track0, 0)
        track0.insert(motion_producer, 0, 0, motion_producer.get_length() - 1)

        consumer = renderconsumer.get_render_consumer_for_encoding_and_quality(
            self.write_file, profile, self.encoding_option_index,
            self.quality_option_index)

        # Launch render. wait_for_producer_end_stop stays False: the
        # consumer is stopped explicitly, driven by the polling loop below.
        # NOTE(review): a computed-but-unused local that set this flag True
        # for full-range renders (self.render_full_range) was removed here;
        # confirm that hard-coded False is the intended behaviour for
        # full-range renders.
        self.render_player = renderconsumer.FileRenderPlayer(
            self.write_file, tractor, consumer, self.start_frame, self.end_frame)
        self.render_player.wait_for_producer_end_stop = False
        self.render_player.start()

        # Poll until the render stops, honouring abort requests and
        # pushing progress updates.
        while self.render_player.stopped == False:

            self.check_abort_requested()

            if self.abort == True:
                self.render_player.shutdown()
                return

            fraction = self.render_player.get_render_fraction()
            self.render_update(fraction)

            time.sleep(0.3)

        # Write out completed flag file.
        ccrutils.write_completed_message()
Exemplo n.º 6
0
import shutil
import tempfile
import os
import os.path

# Start the mlt system
# NOTE(review): mlt and sys are used below but not imported in this chunk;
# presumably imported earlier in the file -- confirm.
mlt.mlt_log_set_level(40) # verbose
mlt.Factory.init()

# Establish a pipeline: a tractor looping a playlist that holds a single
# color producer, using an ATSC 1080i 59.94 profile.
profile = mlt.Profile("atsc_1080i_5994")
#profile = mlt.Profile('square_ntsc_wide')
profile.set_explicit(1)
tractor = mlt.Tractor()
tractor.set('eof', 'loop')
playlist = mlt.Playlist()
playlist.append(mlt.Producer(profile, 'color:'))

# Setup the consumer: first decklink device by default, or the consumer id
# given as the first command-line argument.
consumer = 'decklink:0'
if len(sys.argv) > 1:
  consumer = sys.argv[1]
consumer = mlt.Consumer(profile, consumer)
consumer.connect(playlist)
#consumer.set("real_time", -2)
consumer.start()
def switch(resource):
  global playlist
  resource = resource
  playlist.lock()
Exemplo n.º 7
0
def get_rendered_transition_tractor(current_sequence, 
                                    orig_from,
                                    orig_to,
                                    action_from_out,
                                    action_from_in,
                                    action_to_out,
                                    action_to_in,
                                    transition_type_selection_index,
                                    wipe_luma_sorted_keys_index,
                                    gdk_color_str):
    """Build an MLT tractor that plays the selected rendered transition
    (dissolve, wipe, color dip, or fade in/out) between two clips.

    current_sequence -- project sequence; supplies the profile and the
        clip factory / clone helpers
    orig_from, orig_to -- existing timeline clips the transition joins
        (orig_to is unused for fades)
    action_from_in/out, action_to_in/out -- frame ranges used from each clip
    transition_type_selection_index -- index into rendered_transitions
    wipe_luma_sorted_keys_index -- which wipe luma resource to use (wipes only)
    gdk_color_str -- color for the color-dip / fade background producer

    Returns the assembled tractor; callers render it out to a file.
    """

    name, transition_type = rendered_transitions[transition_type_selection_index]
    
    # New from clip: clone the producer so rendering does not disturb the
    # timeline clip, then copy its filters onto the clone.
    if orig_from.media_type != appconsts.PATTERN_PRODUCER:
        from_clip = current_sequence.create_file_producer_clip(orig_from.path, None, False, orig_from.ttl)# File producer
    else:
        from_clip = current_sequence.create_pattern_producer(orig_from.create_data) # pattern producer
    current_sequence.clone_clip_and_filters(orig_from, from_clip)

    # New to clip
    if not(transition_type == RENDERED_FADE_IN or transition_type == RENDERED_FADE_OUT): # fades to not use to_clip
        if orig_to.media_type != appconsts.PATTERN_PRODUCER:
            to_clip = current_sequence.create_file_producer_clip(orig_to.path, None, False, orig_to.ttl)# File producer
        else:
            to_clip = current_sequence.create_pattern_producer(orig_to.create_data) # pattern producer
        current_sequence.clone_clip_and_filters(orig_to, to_clip)

    # Create tractor and two tracks; track1 composites over track0.
    tractor = mlt.Tractor()
    multitrack = tractor.multitrack()
    track0 = mlt.Playlist()
    track1 = mlt.Playlist()
    multitrack.connect(track0, 0)
    multitrack.connect(track1, 1)

    # we'll set in and out points for images and pattern producers.
    if not(transition_type == RENDERED_FADE_IN or transition_type == RENDERED_FADE_OUT): # fades to not use to_clip or some other data used here
        if from_clip.media_type == appconsts.IMAGE or from_clip.media_type == appconsts.PATTERN_PRODUCER:
            length = action_from_out - action_from_in
            from_clip.clip_in = 0
            from_clip.clip_out = length

        if to_clip.media_type == appconsts.IMAGE or to_clip.media_type == appconsts.PATTERN_PRODUCER:
            length = action_to_out - action_to_in
            to_clip.clip_in = 0
            to_clip.clip_out = length
    else:
        # Fades: 'length' here is also used below when inserting the
        # color producer and building the keyframe string.
        length = action_from_out
        if from_clip.media_type == appconsts.IMAGE or from_clip.media_type == appconsts.PATTERN_PRODUCER:
            from_clip.clip_in = 0
            from_clip.clip_out = length
            
    # Add clips to tracks and create keyframe string for mixing.
    # kf_str format is composite.geometry keyframes: frame=x/y:WxH:opacity.
    # NOTE(review): kf_str is only set for the transition types handled
    # below -- presumably rendered_transitions contains no others; confirm.
    if transition_type == RENDERED_DISSOLVE or transition_type == RENDERED_WIPE:
        # Add clips. Images and pattern producers always fill full track.
        if from_clip.media_type != appconsts.IMAGE and from_clip.media_type != appconsts.PATTERN_PRODUCER:
            track0.insert(from_clip, 0, action_from_in, action_from_out)
        else:
            track0.insert(from_clip, 0, 0, action_from_out - action_from_in)
            
        if to_clip.media_type != appconsts.IMAGE and to_clip.media_type != appconsts.PATTERN_PRODUCER: 
            track1.insert(to_clip, 0, action_to_in, action_to_out)
        else:
            track1.insert(to_clip, 0, 0,  action_to_out - action_to_in)
        # Opacity of the top track goes 0 -> 100 across the whole tractor.
        kf_str = "0=0/0:100%x100%:0.0;"+ str(tractor.get_length() - 1) + "=0/0:100%x100%:100.0"
    elif transition_type == RENDERED_COLOR_DIP:
        # Color dip: from-clip fades to a solid color at the midpoint,
        # then the to-clip fades back in from it.
        length = action_from_out - action_from_in
        first_clip_length = length // 2
        second_clip_length = length - first_clip_length
        color_clip = patternproducer.create_color_producer(current_sequence.profile, gdk_color_str)
        track0.insert(color_clip, 0, 0, length)
        track1.insert(from_clip, 0, action_from_in, action_from_in + first_clip_length)
        track1.insert(to_clip, 1, action_to_out - second_clip_length, action_to_out)
        kf_str = "0=0/0:100%x100%:100.0;"+ str(first_clip_length) + "=0/0:100%x100%:0.0;" + str(tractor.get_length() - 1) + "=0/0:100%x100%:100.0"
    elif (transition_type == RENDERED_FADE_IN or transition_type == RENDERED_FADE_OUT):
        # Fades: solid color on track0, clip fading in/out on track1.
        color_clip = patternproducer.create_color_producer(current_sequence.profile, gdk_color_str)
        track0.insert(color_clip, 0, 0, length)
        if transition_type ==  RENDERED_FADE_IN:
            track1.insert(from_clip, 0, orig_from.clip_in, orig_from.clip_in + length)
            kf_str = "0=0/0:100%x100%:0.0;"+ str(length) + "=0/0:100%x100%:100.0"
        else: # transition_type ==  RENDERED_FADE_OUT
            track1.insert(from_clip, 0, orig_from.clip_out - length, orig_from.clip_out)
            kf_str = "0=0/0:100%x100%:100.0;"+ str(length) + "=0/0:100%x100%:0.0"

    # Create transition; "region" wraps a composite whose geometry
    # keyframes drive the mix over time.
    transition = mlt.Transition(current_sequence.profile, "region")
    mltrefhold.hold_ref(transition)
    transition.set("composite.geometry", str(kf_str)) # controls mix over time
    transition.set("composite.automatic",1)
    transition.set("composite.aligned", 0)
    transition.set("composite.deinterlace",0)
    transition.set("composite.distort",0)
    transition.set("composite.fill",1)
    transition.set("composite.operator","over")
    transition.set("composite.luma_invert",0)
    transition.set("composite.progressive",1)
    transition.set("composite.softness",0)
    transition.set("in", 0)
    transition.set("out", tractor.get_length() - 1)
    transition.set("a_track", 0)
    transition.set("b_track", 1)

    # Setting luma resource file turns dissolve into wipe
    if transition_type == RENDERED_WIPE:
        wipe_resource_path = get_wipe_resource_path_for_sorted_keys_index(wipe_luma_sorted_keys_index)
        transition.set("composite.luma", str(wipe_resource_path))

    # Add transition
    field = tractor.field()
    field.plant_transition(transition, 0,1)

    return tractor
Exemplo n.º 8
0
    profile.set_explicit(1)
    profile.set_sample_aspect(1, 1)
    profile.set_frame_rate(30000, 1000)
    profile.set_display_aspect(2, 3)
    profile.set_width(720)
    profile.set_height(1080)
    profile.set_colorspace(709)
    logo_max_size = 100
else:
    profile = mlt.Profile("atsc_1080p_30")
    logo_max_size = 160

# Rec.709 colorspace regardless of which profile branch was taken above.
profile.set_colorspace(709)

tractor = mlt.Tractor()
scenes = mlt.Playlist()   # main scene playlist
oscenes = mlt.Playlist()  # second scene playlist -- purpose not visible here; confirm against later code
sindex = 1       # scene counter
slength = 5      # per-scene length (units not visible here -- presumably seconds; confirm)
t_length = 0     # running total length
frame_rate = 30
production = 0   # 0 = test run (output to Temp, no save), 1 = production run
if production:
    save_video = 1
    outdir = s[0:s.rfind("/")]  # NOTE(review): s is defined earlier, outside this chunk
else:
    save_video = 0
    outdir = s[0:s.rfind("/")] + "/Temp"
#outdir="/home/prftp/html/docroot/backend/web/python"

# Whether to show the outro, read from config (defined earlier in the file).
outroshow = int(config["27"][4]["display_outro__27"])