def _mix_audio_for_track(self, track):
    """Wire a track's audio into the master mix and give it a gain filter.

    Plants a "mix" transition combining this track's audio onto the
    mix-down track, attaches a "volume" filter reflecting the track's
    stored gain, and re-applies centre pan if the track was panorated.
    """
    # Combine this track's audio with the mix-down track.
    # Audio transition objects are thrown away when track count changes,
    # so no reference is kept on the Sequence; the mltrefhold call is
    # defensive programming against MLT deletion-related crashes.
    mix_transition = mlt.Transition(self.profile, "mix")
    mltrefhold.hold_ref(mix_transition)  # look to remove
    for prop, value in (("a_track", int(AUDIO_MIX_DOWN_TRACK)),
                        ("b_track", track.id),
                        ("always_active", 1),
                        ("combine", 1)):
        mix_transition.set(prop, value)
    self.field.plant_transition(mix_transition, int(AUDIO_MIX_DOWN_TRACK), track.id)

    # Per-track volume control.
    volume_filter = mlt.Filter(self.profile, "volume")
    mltrefhold.hold_ref(volume_filter)
    volume_filter.set("gain", str(track.audio_gain))
    track.attach(volume_filter)
    track.gain_filter = volume_filter

    # Re-create the pan filter only for tracks that were panorated.
    if track.audio_pan != NO_PAN:
        self.add_track_pan_filter(track, 0.5)
        track.audio_pan = 0.5
def create_mlt_transition(self, mlt_profile):
    """Create and store the backing mlt.Transition for this object.

    The transition service id comes from ``self.info``; default property
    values are applied immediately after creation.
    """
    mlt_transition = mlt.Transition(mlt_profile, str(self.info.mlt_service_id))
    mltrefhold.hold_ref(mlt_transition)
    self.mlt_transition = mlt_transition
    self.set_default_values()

    # PROP_EXPR values may contain keywords standing in for numbers that
    # depend on the profile; resolve them now that the profile is known
    # and we are ready to connect this transition.
    propertyparse.replace_value_keywords(self.properties, mlt_profile)

    self.update_editable_mlt_properties()
def _mix_audio_for_track(self, track):
    """Combine a track's audio into the mix-down track and add gain/pan filters."""
    # Transition that mixes this track's audio down onto AUDIO_MIX_DOWN_TRACK.
    audio_mix = mlt.Transition(self.profile, "mix")
    mltrefhold.hold_ref(audio_mix)
    audio_mix.set("a_track", int(AUDIO_MIX_DOWN_TRACK))
    audio_mix.set("b_track", track.id)
    audio_mix.set("always_active", 1)
    audio_mix.set("combine", 1)
    self.field.plant_transition(audio_mix, int(AUDIO_MIX_DOWN_TRACK), track.id)

    # Gain filter carrying the track's saved audio gain.
    gain = mlt.Filter(self.profile, "volume")
    mltrefhold.hold_ref(gain)
    gain.set("gain", str(track.audio_gain))
    track.attach(gain)
    track.gain_filter = gain

    # Pan filter, only if this track is panorated.
    if track.audio_pan != NO_PAN:
        self.add_track_pan_filter(track, 0.5)
        track.audio_pan = 0.5
def get_rendered_transition_tractor(current_sequence,
                                    orig_from,
                                    orig_to,
                                    action_from_out,
                                    action_from_in,
                                    action_to_out,
                                    action_to_in,
                                    transition_type_selection_index,
                                    wipe_luma_sorted_keys_index,
                                    gdk_color_str):
    """Build and return an mlt.Tractor that renders the selected transition.

    Creates fresh producer clips cloned from orig_from/orig_to, places them
    on a two-track tractor, and plants a "region" transition whose
    composite geometry keyframe string drives the mix over time.
    Dissolve/wipe use both clips; color dip sandwiches a color clip; fades
    use only from_clip over a color clip.
    """
    # NOTE(review): `name` is unpacked but unused in this function.
    name, transition_type = rendered_transitions[transition_type_selection_index]
    # New from clip
    if orig_from.media_type != appconsts.PATTERN_PRODUCER:
        from_clip = current_sequence.create_file_producer_clip(orig_from.path, None, False, orig_from.ttl)# File producer
    else:
        from_clip = current_sequence.create_pattern_producer(orig_from.create_data) # pattern producer
    current_sequence.clone_clip_and_filters(orig_from, from_clip)
    # New to clip
    if not(transition_type == RENDERED_FADE_IN or transition_type == RENDERED_FADE_OUT): # fades do not use to_clip
        if orig_to.media_type != appconsts.PATTERN_PRODUCER:
            to_clip = current_sequence.create_file_producer_clip(orig_to.path, None, False, orig_to.ttl)# File producer
        else:
            to_clip = current_sequence.create_pattern_producer(orig_to.create_data) # pattern producer
        current_sequence.clone_clip_and_filters(orig_to, to_clip)
    # Create tractor and tracks
    tractor = mlt.Tractor()
    multitrack = tractor.multitrack()
    track0 = mlt.Playlist()
    track1 = mlt.Playlist()
    multitrack.connect(track0, 0)
    multitrack.connect(track1, 1)
    # we'll set in and out points for images and pattern producers.
    if not(transition_type == RENDERED_FADE_IN or transition_type == RENDERED_FADE_OUT): # fades do not use to_clip or some other data used here
        if from_clip.media_type == appconsts.IMAGE or from_clip.media_type == appconsts.PATTERN_PRODUCER:
            length = action_from_out - action_from_in
            from_clip.clip_in = 0
            from_clip.clip_out = length
        if to_clip.media_type == appconsts.IMAGE or to_clip.media_type == appconsts.PATTERN_PRODUCER:
            length = action_to_out - action_to_in
            to_clip.clip_in = 0
            to_clip.clip_out = length
    else:
        # Fades: length is taken from action_from_out; this value is also
        # used below in the fade branch for the color clip.
        length = action_from_out
        if from_clip.media_type == appconsts.IMAGE or from_clip.media_type == appconsts.PATTERN_PRODUCER:
            from_clip.clip_in = 0
            from_clip.clip_out = length
    # Add clips to tracks and create keyframe string for mixing
    if transition_type == RENDERED_DISSOLVE or transition_type == RENDERED_WIPE:
        # Add clips. Images and pattern producers always fill full track.
        if from_clip.media_type != appconsts.IMAGE and from_clip.media_type != appconsts.PATTERN_PRODUCER:
            track0.insert(from_clip, 0, action_from_in, action_from_out)
        else:
            track0.insert(from_clip, 0, 0, action_from_out - action_from_in)
        if to_clip.media_type != appconsts.IMAGE and to_clip.media_type != appconsts.PATTERN_PRODUCER:
            track1.insert(to_clip, 0, action_to_in, action_to_out)
        else:
            track1.insert(to_clip, 0, 0, action_to_out - action_to_in)
        # Mix goes 0% -> 100% over the whole tractor length.
        kf_str = "0=0/0:100%x100%:0.0;"+ str(tractor.get_length() - 1) + "=0/0:100%x100%:100.0"
    elif transition_type == RENDERED_COLOR_DIP:
        length = action_from_out - action_from_in
        first_clip_length = length // 2
        second_clip_length = length - first_clip_length
        color_clip = patternproducer.create_color_producer(current_sequence.profile, gdk_color_str)
        track0.insert(color_clip, 0, 0, length)
        # from_clip fills the first half of track1, to_clip the second half.
        track1.insert(from_clip, 0, action_from_in, action_from_in + first_clip_length)
        track1.insert(to_clip, 1, action_to_out - second_clip_length, action_to_out)
        # Mix dips to the color at the midpoint: 100% -> 0% -> 100%.
        kf_str = "0=0/0:100%x100%:100.0;"+ str(first_clip_length) + "=0/0:100%x100%:0.0;" + str(tractor.get_length() - 1) + "=0/0:100%x100%:100.0"
    elif (transition_type == RENDERED_FADE_IN or transition_type == RENDERED_FADE_OUT):
        color_clip = patternproducer.create_color_producer(current_sequence.profile, gdk_color_str)
        track0.insert(color_clip, 0, 0, length)
        if transition_type == RENDERED_FADE_IN:
            track1.insert(from_clip, 0, orig_from.clip_in, orig_from.clip_in + length)
            kf_str = "0=0/0:100%x100%:0.0;"+ str(length) + "=0/0:100%x100%:100.0"
        else: # transition_type == RENDERED_FADE_OUT
            track1.insert(from_clip, 0, orig_from.clip_out - length, orig_from.clip_out)
            kf_str = "0=0/0:100%x100%:100.0;"+ str(length) + "=0/0:100%x100%:0.0"
    # Create transition
    transition = mlt.Transition(current_sequence.profile, "region")
    mltrefhold.hold_ref(transition)
    transition.set("composite.geometry", str(kf_str)) # controls mix over time
    transition.set("composite.automatic",1)
    transition.set("composite.aligned", 0)
    transition.set("composite.deinterlace",0)
    transition.set("composite.distort",0)
    transition.set("composite.fill",1)
    transition.set("composite.operator","over")
    transition.set("composite.luma_invert",0)
    transition.set("composite.progressive",1)
    transition.set("composite.softness",0)
    transition.set("in", 0)
    transition.set("out", tractor.get_length() - 1)
    transition.set("a_track", 0)
    transition.set("b_track", 1)
    # Setting luma resource file turns dissolve into wipe
    if transition_type == RENDERED_WIPE:
        wipe_resource_path = get_wipe_resource_path_for_sorted_keys_index(wipe_luma_sorted_keys_index)
        transition.set("composite.luma", str(wipe_resource_path))
    # Add transition
    field = tractor.field()
    field.plant_transition(transition, 0,1)
    return tractor
# Establish a pipeline: two Decklink inputs composited and sent to a
# Decklink output, looping at EOF.
profile = mlt.Profile("atsc_1080i_5994")
profile.set_explicit(1)

tractor = mlt.Tractor()
tractor.set("eof", "loop")

# Default sources are the first two Decklink cards; command-line
# arguments 1 and 2 override them.
fg_resource, bg_resource = "decklink:0", "decklink:1"
if len(sys.argv) > 2:
    fg_resource, bg_resource = sys.argv[1], sys.argv[2]

fg = mlt.Producer(profile, fg_resource)
bg = mlt.Producer(profile, bg_resource)
tractor.set_track(bg, 0)
tractor.set_track(fg, 1)

# Composite the foreground over the background, filling the frame.
composite = mlt.Transition(profile, "composite")
composite.set("fill", 1)
tractor.plant_transition(composite)

# Setup the consumer; argument 3 overrides the default output card.
consumer = "decklink:2"
if len(sys.argv) > 3:
    consumer = sys.argv[3]
consumer = mlt.Consumer(profile, consumer)
consumer.connect(tractor)
consumer.set("real_time", -2)
consumer.start()

flip_flop = False
# Embed the credits-outro text into the final scene, if configured.
# NOTE(review): the font-size lookup reads config["26"][21] while every
# other key here reads config["27"] — looks like a possible typo; confirm
# against the config schema.
# NOTE(review): the return value `l` is never used afterwards in this view.
if config["27"][29]["credits_outro__27"] and scene:
    l = textembedder(config["27"][25]["coordinate_credits_outro__27"],
                     config["26"][21]["font_size_credits_outro__27"],
                     config["27"][27]["font_family_credits_outro__27"],
                     config["27"][28]["text_alignment_credits_outro__27"],
                     config["27"][29]["credits_outro__27"],
                     40, scene, 0, 0, 0, 0, 1)
# Append the final scene, clamping its out point.
# NOTE(review): 14999 is presumably the "unset/full length" sentinel for a
# producer here — confirm; such clips are cut to 150 frames.
if scene:
    if scene.get_out() == 14999:
        scene.set('out', 150)
    t_length += scene.get_out()
    scenes.append(scene)
# Two video tracks: main scenes on 0, overlay scenes on 1, combined with a
# sliced composite transition.
tractor.multitrack().connect(scenes, 0)
tractor.multitrack().connect(oscenes, 1)
transition = mlt.Transition(profile, "composite")
transition.set("a_track", 0)
transition.set("b_track", 1)
transition.set("sliced_composite", 1)
tractor.plant_transition(transition, 0, 1)
# Build the music track: loop the music file enough times to cover the
# total video length, otherwise use it as a single producer.
if music and music_or_footage != 'footage_only':
    tsound = mlt.Producer(profile, str(music))
    if tsound.get_out() < t_length:
        sound = mlt.Playlist()
        lt = int(t_length / tsound.get_out()) + 1
        for x in range(0, lt):
            tsound = mlt.Producer(profile, str(music))
            sound.append(tsound)
    else:
        sound = mlt.Producer(profile, str(music))