def check_available_features(repo):
    try:
        print "Detecting environment..."
        global acodecs
        global vcodecs
        global formats
        global services
        global transitions
        global environment_detection_success

        acodecs = []
        vcodecs = []
        formats = []
        services = {}
        transitions = {}

        # video codecs
        cv = mlt.Consumer(mlt.Profile(), "avformat")
        cv.set('vcodec', 'list')
        cv.start()
        codecs = mlt.Properties(cv.get_data('vcodec'))
        for i in range(0, codecs.count()):
            vcodecs.append(codecs.get(i))

        # audio codecs
        ca = mlt.Consumer(mlt.Profile(), "avformat")
        ca.set('acodec', 'list')
        ca.start()
        codecs = mlt.Properties(ca.get_data('acodec'))
        for i in range(0, codecs.count()):
            acodecs.append(codecs.get(i))

        # formats
        cf = mlt.Consumer(mlt.Profile(), "avformat")
        cf.set('f', 'list')
        cf.start()
        codecs = mlt.Properties(cf.get_data('f'))
        for i in range(0, codecs.count()):
            formats.append(codecs.get(i))

        # filters
        envservices = mlt.Repository.filters(repo)
        for i in range(mlt.Properties.count(envservices)):
            services[mlt.Properties.get_name(envservices, i)] = True

        # transitions
        envtransitions = mlt.Repository.transitions(repo)
        for i in range(mlt.Properties.count(envtransitions)):
            transitions[mlt.Properties.get_name(envtransitions, i)] = True

        print "MLT detection succeeded, " + str(len(formats)) + " formats, " \
            + str(len(vcodecs)) + " video codecs and " + str(len(acodecs)) + " audio codecs found."
        print str(len(services)) + " MLT services found."

        environment_detection_success = True
    except:
        print "Environment detection failed, environment unknown."
        GObject.timeout_add(2000, _show_failed_environment_info)
def _get_render_profile(project_profile, render_size, render_folder):
    new_width, new_height = _get_render_dimensions(project_profile, render_size)

    file_contents = "description=" + "proxy render profile" + "\n"
    file_contents += "frame_rate_num=" + str(project_profile.frame_rate_num()) + "\n"
    file_contents += "frame_rate_den=" + str(project_profile.frame_rate_den()) + "\n"
    file_contents += "width=" + str(new_width) + "\n"
    file_contents += "height=" + str(new_height) + "\n"
    file_contents += "progressive=1" + "\n"
    file_contents += "sample_aspect_num=" + str(project_profile.sample_aspect_num()) + "\n"
    file_contents += "sample_aspect_den=" + str(project_profile.sample_aspect_den()) + "\n"
    file_contents += "display_aspect_num=" + str(project_profile.display_aspect_num()) + "\n"
    file_contents += "display_aspect_den=" + str(project_profile.display_aspect_den()) + "\n"

    render_profile_path = render_folder + "/temp_render_profile"
    with atomicfile.AtomicFileWriter(render_profile_path, "w") as afw:
        profile_file = afw.get_file()
        profile_file.write(file_contents)

    render_profile = mlt.Profile(render_profile_path)
    return render_profile
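# A hypothetical usage sketch for _get_render_profile() above; the names
# project, render_size, render_folder and output_path are assumptions, not
# taken from the surrounding code. It shows that the returned mlt.Profile is
# what the render consumer is created against.
render_profile = _get_render_profile(project.profile, render_size, render_folder)
render_consumer = mlt.Consumer(render_profile, "avformat", output_path)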
def _get_proxy_profile(project):
    project_profile = project.profile
    new_width, new_height = _get_proxy_dimensions(project_profile, project.proxy_data.size)

    file_contents = "description=" + "proxy render profile" + "\n"
    file_contents += "frame_rate_num=" + str(project_profile.frame_rate_num()) + "\n"
    file_contents += "frame_rate_den=" + str(project_profile.frame_rate_den()) + "\n"
    file_contents += "width=" + str(new_width) + "\n"
    file_contents += "height=" + str(new_height) + "\n"
    file_contents += "progressive=1" + "\n"
    file_contents += "sample_aspect_num=" + str(project_profile.sample_aspect_num()) + "\n"
    file_contents += "sample_aspect_den=" + str(project_profile.sample_aspect_den()) + "\n"
    file_contents += "display_aspect_num=" + str(project_profile.display_aspect_num()) + "\n"
    file_contents += "display_aspect_den=" + str(project_profile.display_aspect_den()) + "\n"

    proxy_profile_path = utils.get_hidden_user_dir_path() + "temp_proxy_profile"
    profile_file = open(proxy_profile_path, "w")
    profile_file.write(file_contents)
    profile_file.close()

    proxy_profile = mlt.Profile(proxy_profile_path)
    return proxy_profile
def _get_proxy_profile(project):
    project_profile = project.profile
    new_width, new_height = _get_proxy_dimensions(project_profile, project.proxy_data.size)

    file_contents = "description=" + "proxy render profile" + "\n"
    file_contents += "frame_rate_num=" + str(project_profile.frame_rate_num()) + "\n"
    file_contents += "frame_rate_den=" + str(project_profile.frame_rate_den()) + "\n"
    file_contents += "width=" + str(new_width) + "\n"
    file_contents += "height=" + str(new_height) + "\n"
    file_contents += "progressive=1" + "\n"
    file_contents += "sample_aspect_num=" + str(project_profile.sample_aspect_num()) + "\n"
    file_contents += "sample_aspect_den=" + str(project_profile.sample_aspect_den()) + "\n"
    file_contents += "display_aspect_num=" + str(project_profile.display_aspect_num()) + "\n"
    file_contents += "display_aspect_den=" + str(project_profile.display_aspect_den()) + "\n"

    proxy_profile_path = userfolders.get_cache_dir() + "temp_proxy_profile"
    with atomicfile.AtomicFileWriter(proxy_profile_path, "w") as afw:
        profile_file = afw.get_file()
        profile_file.write(file_contents)

    proxy_profile = mlt.Profile(proxy_profile_path)
    return proxy_profile
def get_formats(self, format=None):
    try:
        formats_raw = []

        # Create the consumer
        c = mlt.Consumer(mlt.Profile(), "avformat")

        # Ask for the list of supported formats
        c.set('f', 'list')

        # Start the consumer to generate the list
        c.start()

        # Get the 'f' property
        codecs = mlt.Properties(c.get_data('f'))

        # Collect the list of formats
        for i in range(0, codecs.count()):
            formats_raw.append(codecs.get(i))

        # sort list
        formats_raw.sort()
        return formats_raw

    except:
        # If the above code fails, use an older technique which uses the 'melt'
        # command line, and parses the output
        print "Warning: Could not get list of formats using the MLT API. Falling back to 'melt' executable."
        return self.get_formats_fallback(format)
def _load_profiles_list(dir_path):
    load_profiles = []
    file_list = os.listdir(dir_path)
    for fname in file_list:
        file_path = dir_path + fname
        profile = mlt.Profile(file_path)
        profile.file_path = file_path
        load_profiles.append([profile.description(), profile])
    return load_profiles
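# Hypothetical usage sketch for _load_profiles_list() above; the directory path
# is an assumption (a trailing slash is needed because the function joins
# dir_path + fname directly). Each entry is a [description, mlt.Profile] pair.
for description, profile in _load_profiles_list("/usr/share/mlt/profiles/"):
    print(description, profile.width(), profile.height())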
def __init__(self, loop_filename):
    mlt.Factory().init()
    self.profile = mlt.Profile()

    self.mlt_consumer = mlt.Consumer(self.profile, self.consumer_type_)
    #self.mlt_consumer.set("fullscreen", 1)

    self.loop = mlt.Producer(self.profile, loop_filename)
    #self.loop.set("force_aspect_ratio", 1.0)
    self.loop.set("eof", "loop")

    #self.playing_consumer = None
    self.mlt_consumer.set("rescale", "none")
    #self.overlay_call = None
    self.state = None
    self.pause_screen()
    #self.mlt_consumer.listen("producer-changed", None, self.blah )
    self.mlt_consumer.start()

    print(("MLT profile desc", self.profile.description()))
    print(("Framerate", self.profile.frame_rate_num()))
    print(("Width/Height/Progressive", self.profile.width(), self.profile.height(), self.profile.progressive()))
def _load_profiles_list(dir_path):
    load_profiles = []
    file_list = os.listdir(dir_path)
    for fname in file_list:
        # Feb-2017 - SvdB - Filter out duplicate profiles based on profile name
        found_duplicate = False
        file_path = dir_path + fname
        profile = mlt.Profile(file_path)
        profile.file_path = file_path
        # Feb-2017 - SvdB - Filter out duplicate profiles based on profile name
        for enu_count, prof in enumerate(load_profiles):
            for prof_idx, prof_name in enumerate(prof):
                if prof_name == profile.description():
                    found_duplicate = True
        # Only append the profile if no already-loaded profile has the same description
        if found_duplicate == False:
            load_profiles.append([profile.description(), profile])
    return load_profiles
def run(self):
    """ This is the main method on this thread.  This method should not return
    anything, or the thread will no longer be active... and thus will no longer
    be able to inspect media files. """

    self.amAlive = True
    self.file_name = ""
    self.c = None
    self.p = None

    # init the factory, and load a small video size / profile
    mlt.Factory().init()
    self.profile = mlt.Profile("quarter_ntsc")

    # this loop will continue as long as LibreShot is running
    while self.amAlive:
        time.sleep(1)

    # clear all the MLT objects
    self.p = None
    self.c = None
    self.profile = None
    self.f = None
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Import required modules
from __future__ import print_function
import mlt

# Start the mlt system
mlt.Factory().init()

# Create the consumer
c = mlt.Consumer(mlt.Profile(), "avformat")

# Ask for the list of supported video codecs
c.set('vcodec', 'list')

# Start the consumer to generate the list
c.start()

# Get the vcodec property
codecs = mlt.Properties(c.get_data('vcodec'))

# Print the list of codecs
for i in range(0, codecs.count()):
    print(codecs.get(i))
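# The same consumer-property trick also lists audio codecs (compare
# check_available_features() in the first example above): a minimal sketch,
# swapping 'vcodec' for 'acodec'.
ca = mlt.Consumer(mlt.Profile(), "avformat")
ca.set('acodec', 'list')
ca.start()
acodec_list = mlt.Properties(ca.get_data('acodec'))
for i in range(0, acodec_list.count()):
    print(acodec_list.get(i))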
import mlt
import time
import sys
import tornado.ioloop
import tornado.web
import shutil
import tempfile
import os
import os.path

# Start the mlt system
mlt.mlt_log_set_level(40)  # verbose
mlt.Factory.init()

# Establish a pipeline
profile = mlt.Profile("atsc_1080i_5994")
#profile = mlt.Profile('square_ntsc_wide')
profile.set_explicit(1)
tractor = mlt.Tractor()
tractor.set('eof', 'loop')
playlist = mlt.Playlist()
playlist.append(mlt.Producer(profile, 'color:'))

# Setup the consumer
consumer = 'decklink:0'
if len(sys.argv) > 1:
    consumer = sys.argv[1]
consumer = mlt.Consumer(profile, consumer)
consumer.connect(playlist)
#consumer.set("real_time", -2)
consumer.start()
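# Usage note for the script above: the first command-line argument, when given,
# replaces the default 'decklink:0' consumer, so the same looping playlist can
# be previewed on screen instead of a DeckLink card, e.g. (script name is an
# assumption):
#   python decklink_loop.py sdl2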
ftodel = list()
t_scenes = scene_count(config)
current_scene = 0
pjson = {'Progress': 'True', 'video': ''}
fgcolor = config["23"][9]['font_color__23']
bgcolor = config["23"][4]['highlight_color__23']
vtype = config["0"][0]['format']

mlt.mlt_log_set_level(0)  # verbose
mlt.Factory().init()

logo_max_size = 0
if vtype == 'square':
    profile = mlt.Profile()
    profile.set_explicit(1)
    profile.set_sample_aspect(1, 1)
    profile.set_frame_rate(30000, 1000)
    profile.set_display_aspect(1, 1)
    profile.set_width(1080)
    profile.set_height(1080)
    profile.set_colorspace(709)
    logo_max_size = 125
elif vtype == 'vertical':
    profile = mlt.Profile()
    profile.set_explicit(1)
    profile.set_sample_aspect(1, 1)
    profile.set_frame_rate(30000, 1000)
    profile.set_display_aspect(2, 3)
    profile.set_width(720)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mlt
import sys
from PIL import Image

# setup
mlt.Factory.init()
profile = mlt.Profile('square_pal_wide')
prod = mlt.Producer(profile, sys.argv[1])

# This builds a profile from the attributes of the producer: auto-profile.
profile.from_producer(prod)

# Ensure the image is square pixels - optional.
profile.set_width(int(profile.width() * profile.sar()))
profile.set_sample_aspect(1, 1)

# Seek to 10% and get a Mlt frame.
prod.seek(int(prod.get_length() * 0.1))
frame = prod.get_frame()

# And make sure we deinterlace if input is interlaced - optional.
frame.set("consumer_deinterlace", 1)

# Now we are ready to get the image and save it.
size = (profile.width(), profile.height())
rgb = frame.get_image(mlt.mlt_image_rgb24, *size)
img = Image.fromstring('RGB', size, rgb)
img.save(sys.argv[1] + '.png')
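# Usage note for the frame-grab script above: the clip path is passed as the
# first argument and the still is written next to the input as '<clip>.png',
# e.g. (the script name is an assumption):
#   python framegrab.py clip.mp4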
def GetFile(self, file_location, only_thumbnail=True, new_file_base_name=None, start_time=0.00, end_time=None):
    """ Use this method to generate a LibreShotFile object based on the URL (or file location)
    of a video or audio file. Each time you call this method, it will lock this thread (and
    LibreShot's main thread) until it has finished. """
    """
    file_location: The location of the file on the hard drive, including the name and extension.
    only_thumbnail: True if only a thumbnail should be grabbed from the file, False if image sequence.
    new_file_base_name: The name of the folder and the base for the image sequence name, not including the path.
    start_time: The time to start grabbing frames from the file, in seconds.
    end_time: The time to end grabbing frames from the file, in seconds. None = To the last frame.
    """
    try:
        # determine name and location of thumbnail image
        self.file_name = file_location
        self.thumbnail_path = ""
        self.file_type = "video"
        self.label = ""
        self.unique_id = str(uuid.uuid1())
        project_path = self.project.folder
        (dirName, fileName) = os.path.split(file_location)
        (fileBaseName, fileExtension) = os.path.splitext(fileName)
        fileExtension = fileExtension.replace(".", "")

        uniqueFileBaseName = self.unique_id
        actual_thumbnail_path = project_path + "/thumbnail/" + uniqueFileBaseName + "_" + fileExtension + "_1.png"

        if only_thumbnail:
            # just get 1 thumbnail frame
            self.thumbnail_path = project_path + "/thumbnail/" + uniqueFileBaseName + "_" + fileExtension + "_%d.png"
            # set the profile
            self.profile = mlt.Profile("quarter_ntsc")
        else:
            if new_file_base_name == None or new_file_base_name == fileBaseName:
                # choose the same folder as the name (without extension) as default
                self.thumbnail_path = os.path.join(dirName, fileBaseName, fileBaseName + "_%d.png")
            else:
                # export a part of the video to a folder under the folder with the same name as the file.
                self.thumbnail_path = os.path.join(dirName, fileBaseName, new_file_base_name, new_file_base_name + "_%d.png")

        # re-init the mlt factory
        mlt.Factory.init()

        # Create the producer
        self.p = mlt.Producer(self.profile, '%s' % file_location)

        # Check if clip is valid (otherwise a seg fault)
        if self.p.is_valid() == False:
            return None

        # Check for invalid files - badly generated video files can have
        # a length of 0 or -1, e.g.
        # https://bugs.launchpad.net/ubuntu/+source/libreshot/+bug/927755, https://bugs.launchpad.net/kazam/+bug/925238
        if self.p.get_length() < 1 or self.p.get_length() == 0x7fffffff:
            return None

        # check the 'seekable' property
        # If it is zero, then MLT is likely to have problems with this file.
        if self.p.get("seekable") == '0':
            messagebox.show(_("Warning!"), _("The file %s has properties that may prevent it working properly in LibreShot.\nYou may need to transcode it to another format.") % (self.file_name))

        # create the consumer
        self.c = mlt.Consumer(self.profile, "avformat", self.thumbnail_path)

        # set some consumer properties
        self.c.set("real_time", 0)
        self.c.set("vcodec", "png")

        # determine length of clip in seconds
        producer_fps = float(self.p.get_fps())
        first_frame = int(round(producer_fps * start_time))
        # Whole clip if end_time = None
        if end_time == None:
            last_frame = self.p.get_length()
        else:
            last_frame = int(round(producer_fps * end_time))
        max_frames = last_frame - first_frame

        # determine dimensions
        height = 0
        width = 0
        if self.p.get("height"):
            height = int(self.p.get("height"))
        if self.p.get("width"):
            width = int(self.p.get("width"))

        audio_index = self.p.get_int("audio_index")
        video_index = self.p.get_int("video_index")

        audio_property = "meta.media.%s.codec.long_name" % audio_index
        if self.p.get(audio_property):
            self.audio_codec = self.p.get(audio_property)
        else:
            self.audio_codec = ""

        video_property = "meta.media.%s.codec.long_name" % video_index
        if self.p.get(video_property):
            self.video_codec = self.p.get(video_property)
        else:
            self.video_codec = ""

        if self.p.get_frame():
            frame = self.p.get_frame()
            self.audio_frequency = frame.get_int("frequency")
            self.audio_channels = frame.get_int("channels")

        # determine if this is an image
        is_image = False
        if self.p.get_length() == 15000 and video_index == 0 and audio_index == 0:
            # images always have exactly 15000 frames
            is_image = True
            self.file_type = "image"

            # set the max length of the image to 300 seconds (i.e. 5 minutes)
            max_frames = producer_fps * 300

            # get actual height & width of image (since MLT defaults to 1 x 1)
            width, height = self.get_image_size(file_location)

        # determine length
        if only_thumbnail:
            calculate_length = self.p.get_length() / producer_fps
        else:
            calculate_length = max_frames / producer_fps
        if is_image:
            # set the length to 300 seconds (i.e. 5 minutes)
            calculate_length = float(300)

        # set thumbnail image (if no height & width are detected)
        if (height == False or width == False) and (is_image == False):
            self.thumbnail_path = ""
            self.file_type = "audio"

        # get the 1st frame (if not exporting all frames)
        if only_thumbnail:
            max_frames = float(self.p.get_length()) - 1.0
            self.p = self.p.cut(1, 1)
        # get the frames in an interval
        else:
            self.p = self.p.cut(first_frame, last_frame)
            # mark as image seq
            self.label = "Image Sequence"
            self.file_type = "image sequence"

        # Check if clip is valid (otherwise a seg fault)
        if self.p.is_valid() == False:
            return None

        # connect the producer and consumer
        self.c.connect(self.p)

        # Start the consumer, and lock the thread until it's done (to prevent crazy seg fault errors)
        # Only start if the media item has a thumbnail location (i.e. no audio thumbnails)
        if self.thumbnail_path:
            self.c.run()

        # create a libreshot file object
        newFile = files.LibreShotFile(self.project)

        # thumbnails and image sequences are stored at different locations
        if only_thumbnail:
            newFile.name = file_location
        else:
            newFile.name = self.thumbnail_path
        newFile.length = calculate_length
        newFile.thumb_location = actual_thumbnail_path
        newFile.videorate = (self.p.get_fps(), 0)
        newFile.height = height
        newFile.width = width
        newFile.max_frames = max_frames
        newFile.fps = producer_fps
        newFile.file_type = self.file_type
        newFile.label = self.label
        newFile.audio_channels = self.audio_channels
        newFile.audio_codec = self.audio_codec
        newFile.audio_frequency = self.audio_frequency
        newFile.video_codec = self.video_codec

        # return the LibreShotFile object
        return newFile

    except Exception:
        print "Failed to import file: %s" % file_location
def get_thumb_at_frame(self, filename, frame=1, new_name="", full_size=True):
    """ if new_name = None, it will default to 'name_fileext + "_%d.ext' in the thumbnail folder.
    if full_size is True, a full size frame will be extracted (based on the project profile).
    Else: quarter_ntsc """

    self.file_name = filename
    project_path = self.project.folder
    myPath = self.file_name
    (dirName, fileName) = os.path.split(myPath)
    (fileBaseName, fileExtension) = os.path.splitext(fileName)
    fileExtension = fileExtension.replace(".", "")

    # Init mlt factory
    mlt.Factory.init()

    # set the profile
    if full_size:
        self.profile = profiles.mlt_profiles(self.project).get_profile(self.project.project_type)
    else:
        self.profile = mlt.Profile("quarter_ntsc")

    # Create the producer
    self.p = mlt.Producer(self.profile, '%s' % self.file_name)

    # Check if clip is valid (otherwise a seg fault)
    if self.p.is_valid() == False:
        return None

    if new_name == "":
        # just get 1 thumbnail frame
        self.thumbnail_path = project_path + "/thumbnail/" + fileBaseName + "_" + fileExtension + "_%d.png"
    else:
        # for snapshots, use the new file name
        # don't use the thumbnail path for the new file
        self.thumbnail_path = project_path + "/" + new_name

    # create the consumer
    self.c = mlt.Consumer(self.profile, "avformat", self.thumbnail_path)

    # set some consumer properties
    self.c.set("real_time", 0)
    self.c.set("vcodec", "png")

    # get the frame
    self.p = self.p.cut(frame, frame)

    # Check if clip is valid (otherwise a seg fault)
    if self.p.is_valid() == False:
        return None

    # connect the producer and consumer
    self.c.connect(self.p)

    # Only start if the media item has a thumbnail location (i.e. no audio thumbnails)
    if self.thumbnail_path:
        self.c.run()
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Import required modules
from __future__ import print_function
import mlt
import time
import sys

# Start the mlt system
mlt.Factory().init()

# Establish a profile
profile = mlt.Profile()

# Create the producer
p = mlt.Producer(profile, sys.argv[1])

if p:
    # Create the consumer
    c = mlt.Consumer(profile, "sdl")

    # Turn off the default rescaling
    c.set("rescale", "none")

    # Connect the producer to the consumer
    c.connect(p)

    # Start the consumer
    c.start()

    # Keep the script alive until playback is stopped; without this the
    # consumer would be torn down as soon as the script exits (and this is
    # what the 'time' import is for).
    while c.is_stopped() == 0:
        time.sleep(1)
def run(self):
    self.start_time = time.monotonic()

    if self.lookup_path == "None":
        # Video clips
        proxy_profile = mltprofiles.get_profile(self.proxy_profile_desc)

        # App wrote the temp profile when launching proxy render.
        # NOTE: this needs to be created here for future
        proxy_profile_path = userfolders.get_cache_dir() + "temp_proxy_profile"
        proxy_profile = mlt.Profile(proxy_profile_path)

        renderconsumer.performance_settings_enabled = False
        # uuh...we're obviously disabling something momentarily.
        consumer = renderconsumer.get_render_consumer_for_encoding(
            self.proxy_file_path,
            proxy_profile,
            renderconsumer.proxy_encodings[self.enc_index])
        renderconsumer.performance_settings_enabled = True

        consumer.set("vb", str(int(self.proxy_rate)) + "k")
        consumer.set("rescale", "nearest")

        file_producer = mlt.Producer(proxy_profile, str(self.media_file_path))

        start_frame = 0
        end_frame = file_producer.get_length() - 1

        self.render_player = renderconsumer.FileRenderPlayer(None, file_producer, consumer, 0, end_frame)
        self.render_player.wait_for_producer_end_stop = False
        self.render_player.start()

        while self.render_player.stopped == False:
            self.check_abort_requested()
            if self.abort == True:
                self.render_player.shutdown()
                return

            fraction = self.render_player.get_render_fraction()
            self.render_update(fraction)
            time.sleep(0.3)
    else:
        # Image Sequences
        copyfolder, copyfilename = os.path.split(self.proxy_file_path)
        if not os.path.isdir(copyfolder):
            os.makedirs(copyfolder)

        listing = glob.glob(self.lookup_path)
        size = self.proxy_w, self.proxy_h
        done = 0
        for orig_path in listing:
            orig_folder, orig_file_name = os.path.split(orig_path)
            try:
                im = Image.open(orig_path)
                im.thumbnail(size, Image.ANTIALIAS)
                im.save(copyfolder + "/" + orig_file_name, "PNG")
            except IOError:
                print("proxy img seq frame failed for '%s'" % orig_path)

            done = done + 1

            if done % 5 == 0:
                fraction = float(done) / float(len(listing))
                self.render_update(fraction)

    # Write out completed flag file.
    ccrutils.write_completed_message()