def init_mlt_objects(self):
    """Build the MLT objects that back this multitrack sequence.

    Creates the tractor, attaches the master gain (and, if needed, pan)
    filters, and prepares the vectorscope / RGB-parade monitor filters.
    """
    self.tractor = mlt.Tractor()
    self.tractor.mark_in = -1
    self.tractor.mark_out = -1

    # A pan filter is attached only when a pan value is actually applied.
    # This method runs on project load too, and on load we only want the
    # filter if pan was in use; for initial creation audiomonitoring.py
    # calls add_track_pan_filter() when pan is turned on.
    if self.master_audio_pan != NO_PAN:
        self.add_track_pan_filter(self.tractor, self.master_audio_pan)

    # Master gain filter is always present.
    master_gain = mlt.Filter(self.profile, "volume")
    mltrefhold.hold_ref(master_gain)
    master_gain.set("gain", str(self.master_audio_gain))
    self.tractor.attach(master_gain)
    self.tractor.gain_filter = master_gain

    self.field = self.tractor.field()
    self.multitrack = self.tractor.multitrack()

    # Monitor scope filters; held in mltrefhold, presumably to keep the
    # Python wrappers alive for MLT's C side — TODO confirm.
    self.vectorscope = mlt.Filter(self.profile, "frei0r.vectorscope")
    mltrefhold.hold_ref(self.vectorscope)
    self.vectorscope.set("mix", str(SCOPE_MIX_VALUES[_scope_over_lay_mix]))
    self.vectorscope.set("overlay sides", "0.0")

    self.rgbparade = mlt.Filter(self.profile, "frei0r.rgbparade")
    mltrefhold.hold_ref(self.rgbparade)
    self.rgbparade.set("mix", str(SCOPE_MIX_VALUES[_scope_over_lay_mix]))
    self.rgbparade.set("overlay sides", "0.0")

    self.outputfilter = None
def _do_create_sync_compound_clip(dialog, response_id, data): if response_id != Gtk.ResponseType.ACCEPT: dialog.destroy() return sync_data, name_entry = data files_offsets, clips = sync_data video_file, audio_file, idstr = clips media_name = name_entry.get_text() dialog.destroy() # Create unique file path in hidden render folder folder = editorpersistance.prefs.render_folder uuid_str = md5.new(str(os.urandom(32))).hexdigest() write_file = folder + "/" + uuid_str + ".xml" # Create tractor tractor = mlt.Tractor() multitrack = tractor.multitrack() track_video = mlt.Playlist() track_audio = mlt.Playlist() track_audio.set("hide", 1) # video off, audio on as mlt "hide" value multitrack.connect(track_audio, 0) multitrack.connect(track_video, 0) # Create clips video_clip = mlt.Producer(PROJECT().profile, str(video_file)) audio_clip = mlt.Producer(PROJECT().profile, str(audio_file)) # Get offset offset = files_offsets[audio_file] print audio_file, offset # Add clips if offset > 0: offset_frames = int(float(offset) + 0.5) print "plus" track_video.append(video_clip, 0, video_clip.get_length() - 1) track_audio.insert_blank(0, offset_frames) track_audio.append(audio_clip, 0, audio_clip.get_length() - 1) elif offset < 0: offset_frames = int(float(offset) - 0.5) print "miinus" track_video.insert_blank(0, offset_frames) track_video.append(video_clip, 0, video_clip.get_length() - 1) track_audio.append(audio_clip, 0, audio_clip.get_length() - 1) else: track_video.append(video_clip, 0, video_clip.get_length() - 1) track_audio.append(audio_clip, 0, audio_clip.get_length() - 1) # render MLT XML, callback in projectaction.py creates media object render_player = renderconsumer.XMLCompoundRenderPlayer( write_file, media_name, projectaction._xml_compound_render_done_callback, tractor) render_player.start()
def run(self):
    """Render placeholder media for a Blender program into the cache dir.

    Builds a fixed-length single-track tractor around an image producer,
    renders it to an mp4, shows a progress dialog while rendering, and
    invokes self.callback(write_file, self.data) when the render stops.
    """
    # Image producer for the placeholder frame.
    image_clip = current_sequence().create_file_producer_clip(str(self.image_file))

    # Single-track tractor gives the render the exact desired length.
    tractor = mlt.Tractor()
    multitrack = tractor.multitrack()
    playlist = mlt.Playlist()
    multitrack.connect(playlist, 0)
    playlist.insert(image_clip, 0, 0, self.length)

    # Target file; remove any stale copy from an earlier run.
    write_file = userfolders.get_cache_dir() + "/unrendered_clip.mp4"
    if os.path.exists(write_file):
        os.remove(write_file)

    consumer = renderconsumer.get_default_render_consumer(write_file,
                                                          PROJECT().profile)

    clip_renderer = renderconsumer.FileRenderPlayer(write_file,
                                                    tractor,
                                                    consumer,
                                                    0,
                                                    self.length)
    clip_renderer.wait_for_producer_end_stop = True
    clip_renderer.start()

    # GUI work must happen inside the GDK lock.
    Gdk.threads_enter()

    info_text = _("<b>Rendering Placeholder Media For:</b> ") + self.data.get_program_name() + ".blend"
    progress_bar = Gtk.ProgressBar()
    dialog = rendergui.clip_render_progress_dialog(None,
                                                   self.window_text,
                                                   info_text,
                                                   progress_bar,
                                                   gui.editor_window.window,
                                                   True)

    progress_thread = renderconsumer.ProgressWindowThread(dialog,
                                                          progress_bar,
                                                          clip_renderer,
                                                          self.progress_thread_complete)
    progress_thread.start()

    Gdk.threads_leave()

    # Block this thread until the renderer reports it has stopped.
    while clip_renderer.stopped == False:
        time.sleep(0.5)

    Gdk.threads_enter()
    self.callback(write_file, self.data)
    Gdk.threads_leave()
def run(self):
    """Render a speed-changed (timewarp) clip of the source media.

    Builds a single-track tractor around a timewarp producer, renders the
    requested frame range with the chosen encoding/quality, polls for
    abort requests while rendering, and writes a completion flag file.
    """
    self.start_time = time.monotonic()

    profile = mltprofiles.get_profile(self.profile_desc)
    # "timewarp:<speed>:<path>" is MLT's speed-change producer syntax.
    motion_producer = mlt.Producer(profile,
                                   None,
                                   str("timewarp:" + str(self.speed) + ":" + str(self.source_path)))

    # Create tractor and track to get right length
    tractor = mlt.Tractor()
    multitrack = tractor.multitrack()
    track0 = mlt.Playlist()
    multitrack.connect(track0, 0)
    track0.insert(motion_producer, 0, 0, motion_producer.get_length() - 1)

    consumer = renderconsumer.get_render_consumer_for_encoding_and_quality(self.write_file,
                                                                           profile,
                                                                           self.encoding_option_index,
                                                                           self.quality_option_index)

    # start and end frames, renderer stop behaviour
    start_frame = self.start_frame
    end_frame = self.end_frame
    wait_for_producer_stop = True
    if self.render_full_range == False:
        wait_for_producer_stop = False # consumer wont stop automatically and needs to stopped explicitly

    # Launch render
    self.render_player = renderconsumer.FileRenderPlayer(self.write_file,
                                                         tractor,
                                                         consumer,
                                                         start_frame,
                                                         end_frame)
    # BUGFIX: this was hard-coded to False, leaving wait_for_producer_stop
    # computed but unused; full-range renders must wait for producer end stop.
    self.render_player.wait_for_producer_end_stop = wait_for_producer_stop
    self.render_player.start()

    # Poll render progress, honoring abort requests.
    while self.render_player.stopped == False:
        self.check_abort_requested()
        if self.abort == True:
            self.render_player.shutdown()
            return
        fraction = self.render_player.get_render_fraction()
        self.render_update(fraction)
        time.sleep(0.3)

    # Write out completed flag file.
    ccrutils.write_completed_message()
# Module setup: start MLT, build a looping color pipeline, and launch the
# chosen consumer (default decklink:0, overridable via argv[1]).
import os
import os.path
import shutil
import sys  # BUGFIX: sys.argv is used below but sys was never imported
import tempfile

import tornado.ioloop
import tornado.web

# NOTE(review): `mlt` is used below but not imported in this chunk —
# presumably imported elsewhere in the file; verify.

# Start the mlt system
mlt.mlt_log_set_level(40) # verbose
mlt.Factory.init()

# Establish a pipeline
profile = mlt.Profile("atsc_1080i_5994")
#profile = mlt.Profile('square_ntsc_wide')
profile.set_explicit(1)

tractor = mlt.Tractor()
tractor.set('eof', 'loop')
playlist = mlt.Playlist()
playlist.append(mlt.Producer(profile, 'color:'))

# Setup the consumer
consumer = 'decklink:0'
if len(sys.argv) > 1:
    consumer = sys.argv[1]
consumer = mlt.Consumer(profile, consumer)
consumer.connect(playlist)
#consumer.set("real_time", -2)
consumer.start()

def switch(resource):
    global playlist
def get_rendered_transition_tractor(current_sequence,
                                    orig_from,
                                    orig_to,
                                    action_from_out,
                                    action_from_in,
                                    action_to_out,
                                    action_to_in,
                                    transition_type_selection_index,
                                    wipe_luma_sorted_keys_index,
                                    gdk_color_str):
    """Build a two-track tractor that renders the selected transition.

    Clones the source clip(s), lays them out on two playlists according to
    the transition type, and plants a keyframed "region" transition that
    mixes track 1 over track 0. Returns the ready-to-render tractor.
    """
    name, transition_type = rendered_transitions[transition_type_selection_index]

    # New from clip (file producer or pattern producer), filters cloned over.
    if orig_from.media_type != appconsts.PATTERN_PRODUCER:
        from_clip = current_sequence.create_file_producer_clip(orig_from.path, None, False, orig_from.ttl)
    else:
        from_clip = current_sequence.create_pattern_producer(orig_from.create_data)
    current_sequence.clone_clip_and_filters(orig_from, from_clip)

    # New to clip — fades do not use a to_clip at all.
    if not(transition_type == RENDERED_FADE_IN or transition_type == RENDERED_FADE_OUT):
        if orig_to.media_type != appconsts.PATTERN_PRODUCER:
            to_clip = current_sequence.create_file_producer_clip(orig_to.path, None, False, orig_to.ttl)
        else:
            to_clip = current_sequence.create_pattern_producer(orig_to.create_data)
        current_sequence.clone_clip_and_filters(orig_to, to_clip)

    # Tractor with two tracks: track1 is composited over track0.
    tractor = mlt.Tractor()
    multitrack = tractor.multitrack()
    track0 = mlt.Playlist()
    track1 = mlt.Playlist()
    multitrack.connect(track0, 0)
    multitrack.connect(track1, 1)

    # Set in/out points for images and pattern producers; `length` set here
    # is also reused by the fade branches below.
    if not(transition_type == RENDERED_FADE_IN or transition_type == RENDERED_FADE_OUT):
        if from_clip.media_type == appconsts.IMAGE or from_clip.media_type == appconsts.PATTERN_PRODUCER:
            length = action_from_out - action_from_in
            from_clip.clip_in = 0
            from_clip.clip_out = length
        if to_clip.media_type == appconsts.IMAGE or to_clip.media_type == appconsts.PATTERN_PRODUCER:
            length = action_to_out - action_to_in
            to_clip.clip_in = 0
            to_clip.clip_out = length
    else:
        length = action_from_out
        if from_clip.media_type == appconsts.IMAGE or from_clip.media_type == appconsts.PATTERN_PRODUCER:
            from_clip.clip_in = 0
            from_clip.clip_out = length

    # Add clips to tracks and create the keyframe string for mixing.
    if transition_type == RENDERED_DISSOLVE or transition_type == RENDERED_WIPE:
        # Images and pattern producers always fill the full track.
        if from_clip.media_type != appconsts.IMAGE and from_clip.media_type != appconsts.PATTERN_PRODUCER:
            track0.insert(from_clip, 0, action_from_in, action_from_out)
        else:
            track0.insert(from_clip, 0, 0, action_from_out - action_from_in)
        if to_clip.media_type != appconsts.IMAGE and to_clip.media_type != appconsts.PATTERN_PRODUCER:
            track1.insert(to_clip, 0, action_to_in, action_to_out)
        else:
            track1.insert(to_clip, 0, 0, action_to_out - action_to_in)
        kf_str = "0=0/0:100%x100%:0.0;" + str(tractor.get_length() - 1) + "=0/0:100%x100%:100.0"
    elif transition_type == RENDERED_COLOR_DIP:
        # Dip to color: first half shows from_clip, second half to_clip,
        # with the color clip fully visible at the midpoint.
        length = action_from_out - action_from_in
        first_clip_length = length // 2
        second_clip_length = length - first_clip_length
        color_clip = patternproducer.create_color_producer(current_sequence.profile, gdk_color_str)
        track0.insert(color_clip, 0, 0, length)
        track1.insert(from_clip, 0, action_from_in, action_from_in + first_clip_length)
        track1.insert(to_clip, 1, action_to_out - second_clip_length, action_to_out)
        kf_str = "0=0/0:100%x100%:100.0;" + str(first_clip_length) + "=0/0:100%x100%:0.0;" + str(tractor.get_length() - 1) + "=0/0:100%x100%:100.0"
    elif (transition_type == RENDERED_FADE_IN or transition_type == RENDERED_FADE_OUT):
        color_clip = patternproducer.create_color_producer(current_sequence.profile, gdk_color_str)
        track0.insert(color_clip, 0, 0, length)
        if transition_type == RENDERED_FADE_IN:
            track1.insert(from_clip, 0, orig_from.clip_in, orig_from.clip_in + length)
            kf_str = "0=0/0:100%x100%:0.0;" + str(length) + "=0/0:100%x100%:100.0"
        else: # transition_type == RENDERED_FADE_OUT
            track1.insert(from_clip, 0, orig_from.clip_out - length, orig_from.clip_out)
            kf_str = "0=0/0:100%x100%:100.0;" + str(length) + "=0/0:100%x100%:0.0"

    # Create the compositing transition.
    transition = mlt.Transition(current_sequence.profile, "region")
    mltrefhold.hold_ref(transition)
    transition.set("composite.geometry", str(kf_str)) # controls mix over time
    transition.set("composite.automatic", 1)
    transition.set("composite.aligned", 0)
    transition.set("composite.deinterlace", 0)
    transition.set("composite.distort", 0)
    transition.set("composite.fill", 1)
    transition.set("composite.operator", "over")
    transition.set("composite.luma_invert", 0)
    transition.set("composite.progressive", 1)
    transition.set("composite.softness", 0)
    transition.set("in", 0)
    transition.set("out", tractor.get_length() - 1)
    transition.set("a_track", 0)
    transition.set("b_track", 1)

    # Setting a luma resource file turns the dissolve into a wipe.
    if transition_type == RENDERED_WIPE:
        wipe_resource_path = get_wipe_resource_path_for_sorted_keys_index(wipe_luma_sorted_keys_index)
        transition.set("composite.luma", str(wipe_resource_path))

    # Plant the transition between the two tracks.
    field = tractor.field()
    field.plant_transition(transition, 0, 1)

    return tractor