def reset(self):

    """
    desc:
        Initialize/ reset the plug-in.
    """

    # Experimental variables
    self.var.resizeVideo = u"yes"
    self.var.duration = u"keypress"
    self.var.event_handler = u""
    self.var.event_handler_trigger = u"on keypress"
    self.var.video_src = u""
    self.var.playaudio = u"yes"

    # Internal variables
    self.file_loaded = False
    self.vlc_event_handler = None
    self.media = None
    self.hasMediaInfo = False
    self.process_feedback = True

    # Probe for the MediaInfo backend: parsing an empty path raises when
    # the CLI/library is unavailable.
    try:
        MediaInfo.parse(u"")
        self.hasMediaInfo = True
    # Fix: bare ``except:`` also trapped SystemExit/KeyboardInterrupt.
    except Exception:
        debug.msg(u"MediaInfo CLI not found. Frame info might be unavailable.",
            reason=u"warning")
        self.hasMediaInfo = False
def reset(self):

    """
    desc:
        Initialize/ reset the plug-in.
    """

    self.file_loaded = False
    self.resizeVideo = u"yes"
    self.duration = u"keypress"
    self.playaudio = u"yes"
    self.sendInfoToEyelink = u"no"
    self.event_handler = u""
    self.event_handler_trigger = u"on keypress"
    self.vlc_event_handler = None
    self.media = None
    self.framerate = 0
    self.frame_duration = 0
    self.startPlaybackTime = 0
    self.playbackStarted = False
    self.hasMediaInfo = False
    # See if MediaInfo functions are available: parsing an empty path
    # raises when the CLI/library is missing.
    try:
        MediaInfo.parse(u"")
        self.hasMediaInfo = True
    # Fix: bare ``except:`` also trapped SystemExit/KeyboardInterrupt.
    except Exception:
        debug.msg(
            u"MediaInfo CLI not found. Frame info might be unavailable.",
            reason=u"warning")
        self.hasMediaInfo = False
def __init__(self, name, experiment, string=None):

    """
    Constructor. Link to the video can already be specified but this is
    optional

    Arguments:
    name -- the name of the item
    experiment -- the opensesame experiment

    Keyword arguments:
    string -- a definition string for the item (Default=None)
    """

    # The version of the plug-in
    self.version = 0.10

    self.file_loaded = False
    self.paused = False
    self.item_type = "media_player_vlc"
    self.description = "Plays a video from file"
    self.duration = "keypress"
    self.playaudio = "yes"
    self.sendInfoToEyelink = "no"
    self.event_handler = ""
    self.event_handler_trigger = "on keypress"
    self.vlc_event_handler = None
    self.vlcInstance = vlc.Instance()
    self.player = self.vlcInstance.media_player_new()
    self.media = None
    self.framerate = 0
    self.frame_duration = 0
    self.startPlaybackTime = 0
    self.playbackStarted = False
    self.hasMediaInfo = False
    # Probe for the MediaInfo backend: parsing an empty path raises when
    # the CLI/library is unavailable.
    try:
        MediaInfo.parse("")
        self.hasMediaInfo = True
    # Fix: bare ``except:`` also trapped SystemExit/KeyboardInterrupt.
    except Exception:
        debug.msg(
            "MediaInfo CLI not found. Frame info might be unavailable.",
            reason="warning")
        self.hasMediaInfo = False

    # The parent handles the rest of the construction
    item.item.__init__(self, name, experiment, string)
def analysis(video):
    """Fill *video* with technical metadata probed by MediaInfo.

    Returns the (possibly partially updated) video object.

    Fix: the success path previously fell off the end of the function and
    returned None; callers doing ``video = analysis(video)`` would lose
    the object. Now the video is returned on both paths.
    """
    mediainfoobject = MediaInfo.parse(
        str(settings.BASE_DIR) + str(os.path.normpath(video.videofile.url)))
    try:
        for track in mediainfoobject.tracks:
            if track.track_type == 'General':
                video.format = track.format
                video.filesize = track.file_size
                video.duration = track.duration
            if track.track_type == 'Video':
                video.width = track.width
                video.height = track.height
                video.resolution = str(video.width) + 'x' + str(video.height)
                video.vcodec = track.codec
                video.aspect = track.display_aspect_ratio
                video.framerate = track.frame_rate
                video.colorspace = track.color_space
                video.bitdepth = track.bit_depth
                video.vbitrate = track.bit_rate
            if track.track_type == 'Audio':
                video.acodec = track.format
                video.abitrate = track.bit_rate
                video.asamplingrate = track.asampling_rate
                video.abitdepth = track.bit_depth
                video.channels = track.channel_s
    # Best-effort: keep whatever attributes were set before the failure.
    # (Was a bare ``except:``.)
    except Exception:
        return video
    return video
def get_video_info(file_list, output_filename="output.csv"):
    """Get video info for a file(s) and write a CSV summary.

    One row per input file (format, duration, dimensions, bitrate,
    framerate) plus a totals row. The CSV is written next to the *last*
    file in ``file_list``.

    Fix: an empty ``file_list`` previously crashed with NameError because
    ``src_dir`` was never bound; it now defaults to the current directory.
    """
    total_duration = timedelta()
    items = []
    src_dir = ""  # os.path.join("", name) resolves to the CWD
    for filename in file_list:
        media_info = MediaInfo.parse(filename)
        media_dict = OrderedDict()
        src_dir, src_file = os.path.split(filename)
        media_dict['filename'] = src_file
        for t in media_info.tracks:
            if t.track_type == 'General':
                media_dict['format'] = t.format
            elif t.track_type == 'Video':
                # MediaInfo reports durations in milliseconds.
                media_dict['duration'] = timedelta(milliseconds=t.duration)
                media_dict['dimensions'] = "%sx%s" % (t.width, t.height)
                media_dict['video_bitrate'] = t.other_bit_rate[0]
                media_dict['video_framerate'] = t.frame_rate
                total_duration += timedelta(milliseconds=t.duration)
        items.append(media_dict)

    csv_list = []
    if len(items) > 0:
        # Header row from the first item's keys.
        csv_list.append([key for key, _ in items[0].items()])
    for item in items:
        csv_list.append([v for k, v in item.items()])
    totals = ['TOTALS:', len(items), total_duration]
    csv_list.append(totals)
    with open(os.path.join(src_dir, output_filename), 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(csv_list)
def parse_metadata(self, path, id_):
    """
    Parse the MP4 header metadata for bitrate information.

    Specifically, retrieve the maximum encoded bitrate for each quality
    level. Emits player events for start/stop and every error path.
    """
    self.player.event('start', 'parsing metadata ' + str(path))
    found = False
    try:
        info = MediaInfo.parse(path)
    except OSError:
        # MediaInfo backend is missing entirely.
        self._set_maximum_encoded_bitrate(0, id_)
        self.player.event('error', 'MediaInfo not installed')
        return
    for video_track in (t for t in info.tracks if t.track_type == 'Video'):
        peak_bitrate = video_track.maximum_bit_rate
        if not peak_bitrate:
            # A video track without the field aborts the whole parse.
            self.player.event(
                'error', 'maximum bitrate not found in metadata')
            self._set_maximum_encoded_bitrate(0, id_)
            return
        self._set_maximum_encoded_bitrate(peak_bitrate, id_)
        found = True
    if not found:
        self.player.event('error', 'no video track in metadata')
        self._set_maximum_encoded_bitrate(0, id_)
    self.player.event('stop', 'parsing metadata ' + str(path))
def get_duration(self):
    # Extract the duration (in ms, as MediaInfo reports it) of the last
    # video track of self.path and cache it on self.duration.
    # NOTE(review): if MediaInfo is None or the file has no Video track,
    # the final return reads a self.duration that this method never set --
    # confirm callers initialize it or only call this on video files.
    if MediaInfo is not None:
        for track in MediaInfo.parse(self.path).tracks:
            if track.track_type == 'Video':
                log.debug("Found video track with duration %d" % track.duration)
                self.duration = track.duration
    return self.duration
def find_file_extension(root, file_name):
    # Guess an audio file extension from MediaInfo's container/codec ids.
    # Returns "" when neither rule matches. (Python 2 module: print
    # statements.)
    ext = ""
    codec = ""
    container = ""
    abs_file_path = os.path.join(root, file_name)
    print "Analysing file ...", abs_file_path
    media_info = MediaInfo.parse(abs_file_path)
    for track in media_info.tracks:
        if track.track_type == 'General':
            container = track.codec_id
        if track.track_type == 'Audio':
            codec = track.codec
    # codec_id/codec may come back None from MediaInfo; only strip strings.
    if container is not None:
        container = container.strip()
    if codec is not None:
        codec = codec.strip()
    if container is None:
        # No container id: fall back to the raw MPEG audio codec ids.
        if codec in ["MPA2L3", "MPA1L3"]:
            ext = ".mp3"
    elif container == 'M4A':
        ext = ".m4a"
    print "container: {}, codec: {}, ext: {}".format(container, codec, ext)
    return ext
def run(self):
    """Probe the subject's media file with MediaInfo and publish facts.

    Emits "duration" (seconds), one "video_stream"/"audio_stream" dict per
    track, then tags the subject as item.video (any video track) or
    item.audio (audio-only).
    """
    uri = urlparse(self.subject[dc.identifier])
    # Resolve absolute paths for command-line system calls
    if uri.scheme == "" or uri.scheme == "file":
        # Remove leading / from /C:\folder\ URIs
        # Don't use platform.system() here, because we don't want to include Cygwin
        if os.name == "nt" and len(uri.path) >= 3 and uri.path[0] == "/" and uri.path[2] == ":":
            filename = os.path.abspath(uri.path[1:])
        else:
            filename = os.path.abspath(uri.path)
    else:
        filename = self.subject[dc.identifier]
    try:
        media_info = MediaInfo.parse(filename)
        tracks = media_info.tracks
    # Fix: was a bare ``except:`` which also swallowed SystemExit and
    # KeyboardInterrupt; parse failures still degrade to "no tracks".
    except Exception:
        tracks = []
    video_streams = list()
    audio_streams = list()
    for track in tracks:
        if track.track_type == 'General' and track.duration:
            # MediaInfo reports milliseconds; emit seconds.
            self.subject.emit("duration", track.duration / 1000.0)
        elif track.track_type == 'Video':
            v = dict()
            if track.frame_rate:
                v["framerate"] = float(track.frame_rate)
            if track.codec:
                v["codec"] = track.codec
            if track.height:
                v["height"] = int(track.height)
            if track.width:
                v["width"] = int(track.width)
            video_streams.append(v)
        elif track.track_type == "Audio":
            a = dict()
            if track.sampling_rate:
                a["samplerate"] = int(track.sampling_rate)
            if track.codec:
                a["codec"] = track.codec
            if track.channel_s:
                a["channels"] = int(track.channel_s)
            audio_streams.append(a)
    for v in video_streams:
        self.subject.emit("video_stream", v)
    for a in audio_streams:
        self.subject.emit("audio_stream", a)
    if len(video_streams) > 0:
        self.subject.extendClass("item.video")
    elif len(audio_streams) > 0:
        self.subject.extendClass("item.audio")
def handle_media(content):
    """Return a one-line IRC-style summary ("\\x02Media Info:\\x02 ...")
    for a raw media blob: track count, duration, size, plus details of
    the first video and first audio track when present."""
    # Parse from a temp file; MediaInfo needs a real path.
    with tempfile.NamedTemporaryFile() as tmp:
        tmp.write(content)
        media = MediaInfo.parse(tmp.name)
    tracks = media.tracks
    general = tracks[0]
    # Track 0 is the General track, so it is excluded from the count.
    num_tracks = len(tracks) - 1
    duration = timedelta(seconds=general.duration // 1000)
    video = next((t for t in tracks if t.track_type == 'Video'), None)
    audio = next((t for t in tracks if t.track_type == 'Audio'), None)
    info = "\x02Media Info:\x02 {n} track{s}, {duration}, {size}".format(
        size=humanize.naturalsize(general.file_size),
        n=num_tracks,
        s='s' if num_tracks != 1 else '',
        duration=duration
    )
    if video:
        info += "; {w} x {h} {codec}, {bitrate}bps, {framerate}fps".format(
            codec=video.format,
            bitrate=humanize.naturalsize(video.bit_rate, gnu=True).lower(),
            framerate=video.frame_rate,
            w=video.width,
            h=video.height
        )
    if audio:
        info += "; {ch}ch {codec}, {sr}kHz".format(
            codec=audio.format,
            ch=audio.channel_s,
            sr=audio.sampling_rate // 100 / 10
        )
    return info
def parse_media(self):
    """Classify this file (Image/Video/Audio) and read its dimensions and
    duration via MediaInfo.

    Duration stays in the unit MediaInfo reports (milliseconds) and is
    set to -1 for still images or videos without a reported duration.
    Only the first matching track is inspected (loop breaks).
    """
    self.mtime_end = os.path.getmtime(self.file_path)
    # Coarse media type from the file extension.
    if myutil.match_type(self.file_path, ["jpg"]):
        self.media_type = "Image"
    elif myutil.match_type(self.file_path, ["mp4", "mts", "lrv"]):
        self.media_type = "Video"
    elif myutil.match_type(self.file_path, ["wav"]):
        self.media_type = "Audio"
    media_info = MediaInfo.parse(self.file_path)
    for track in media_info.tracks:
        if StreamFile.Debug:
            pprint(track.to_data())
        if track.track_type == "Video":
            # some jpg has a video track
            self.video_width = track.width
            self.video_height = track.height
            if track.duration is None:
                self.duration = -1
            else:
                self.duration = track.duration
            break
        elif track.track_type == "Audio":
            self.duration = track.duration
            break
        elif track.track_type == "Image":
            self.video_width = track.width
            self.video_height = track.height
            self.duration = -1
            break
    # Derive the begin timestamp from mtime_end and duration.
    self.calc_mtime_begin()
def __init__(self, fpath):
    """Build a conversion-queue entry for *fpath*.

    Probes the file with MediaInfo, derives the output filename and a
    crop rectangle (via rendered thumbnails), then advances the status.
    Any failure is logged and swallowed, so a broken file simply never
    reaches status 2.
    """
    try:
        self.added = now()
        self.finished = 0
        self.fullpath = fpath
        self.fileid = getNewFileID()
        tempMediainfo = MediaInfo.parse(self.fullpath)
        # Index tracks by type. A second Audio/Subtitle track promotes the
        # stored dict to a list; further duplicates are appended.
        self.mediainfo = {}
        for track in tempMediainfo.tracks:
            if track.track_type not in self.mediainfo:
                self.mediainfo[track.track_type] = track.to_data()
            else:
                if track.track_type in ['Audio', 'Subtitle']:
                    if not isinstance(self.mediainfo[track.track_type], list):
                        tempTrack = self.mediainfo[track.track_type]
                        self.mediainfo[track.track_type] = []
                        self.mediainfo[track.track_type].append(tempTrack)
                    self.mediainfo[track.track_type].append(track.to_data())
        self.outputfilename = pacvert.CONFIG.OUTPUT_DIRECTORY+'/'+generateOutputFilename(self.fullpath)
        # Crop detection: render thumbnails, analyze them, clean up.
        self.createThumbs()
        self.crop = self.analyzeThumbs()
        self.deleteThumbs()
        # NOTE(review): status code 2 presumably means "ready/queued" --
        # confirm against updateStatus's definition.
        self.updateStatus(2)
    except Exception as e:
        logger.error(e)
def set_techmd_values(self):
    """Populate technical-metadata attributes from the file's General
    track (name, extension, format, size, creation date, duration and
    codecs). Raises via raise_AMIFileError when no General track exists."""
    parsed = MediaInfo.parse(self.filepath)
    general_tracks = [t for t in parsed.tracks if t.track_type == "General"]
    # Keep the original semantics: the last General track wins.
    md_track = general_tracks[-1] if general_tracks else None
    if not md_track:
        self.raise_AMIFileError('Could not find General track')

    self.base_filename = md_track.file_name
    self.extension = md_track.file_extension
    self.format = md_track.format
    self.size = md_track.file_size

    # Prefer the embedded encode date, then the recorded date, and fall
    # back to the filesystem mtime.
    if md_track.encoded_date:
        self.date_created = parse_date(md_track.encoded_date)
    elif md_track.recorded_date:
        self.date_created = parse_date(md_track.recorded_date)
    elif md_track.file_last_modification_date:
        self.date_created = parse_date(md_track.file_last_modification_date)

    self.duration_milli = md_track.duration
    self.duration_human = parse_duration(self.duration_milli)
    self.audio_codec = md_track.audio_codecs
    if md_track.codecs_video:
        self.video_codec = md_track.codecs_video
def get_video_size(input_file):
    """Return "width:height" of the first video track of *input_file*.

    Raises AssertionError when the file has no video track.
    """
    for track in MediaInfo.parse(input_file).tracks:
        if track.track_type != 'Video':
            continue
        return str(track.width)+":"+str(track.height)
    raise AssertionError("failed to read video info from " + input_file)
def un(pathname):
    # files from the UN.
    # date is in the file name
    # time is in other: time_code_of_first_frame
    # 1672828_DMOICT Open Camps CR7 8AM-12PM 16 JULY 16.mov
    """Return the start datetime of a UN recording.

    Day of month comes from the filename (token before "JULY"); the
    time of day comes from the embedded timecode of the first frame.
    Year and month are hard coded to JULY 2016.
    """
    year = "2016"
    month = 'JULY'
    day = pathname.split(month)[0].split()[-1]
    media_info = MediaInfo.parse(pathname)
    # NOTE(review): assumes the timecode lives on track index 3 --
    # confirm this holds for every UN .mov source.
    t3=media_info.tracks[3]
    time = t3.time_code_of_first_frame
    dt = "{year}-{month}-{day} {time}".format(
        year=year, month=month, day=day, time=time)
    # start = datetime.datetime.strptime("16 JULY 2016 07:50:00;00", "%d %B %Y %H:%M:%S;00")
    start = datetime.datetime.strptime(dt, "%Y-%B-%d %H:%M:%S;00")
    print( start )
    return start

    # NOTE(review): everything below is unreachable dead code (an older
    # mediainfo-CLI implementation left in place after the return above).
    # d+_DMOICT...move stuff so it errors if it finds something else
    start_date_re = r".*/" + date_re + ".*/\d+_DMOICT.*\.mov"
    start_date_o = re.match(start_date_re, pathname)
    dt_parts = start_date_o.groupdict()
    print("date_parts:", dt_parts)
    cmd = ['mediainfo', '--Inform=Video;%TimeCode_FirstFrame%', pathname ]
    p = subprocess.Popen( cmd, stdout = subprocess.PIPE )
    stdout = p.stdout.read()
    # '07:50:00:00\n'
    # time_code = stdout.strip().split(':')
    start_time_re = time_re + rb":\d\d\n"
    start_time_o = re.match(start_time_re, stdout)
    start_time_d = start_time_o.groupdict()
    print("start_time_d:",start_time_d)
    dt_parts.update(start_time_d)
    pprint.pprint(dt_parts)
    dt_parts = {k:int(v) for k,v in list(dt_parts.items())}
    print(dt_parts)
    start=datetime.datetime( **dt_parts )
    print(start)
    return start
def get_cbr(video_file, t_type):
    """Return (as a string) the bit rate of the last track of *video_file*
    whose track_type equals *t_type*, or "0" when none matches."""
    matching_rates = [
        track.bit_rate
        for track in MediaInfo.parse(video_file).tracks
        if track.track_type == t_type
    ]
    # Last match wins, mirroring the original assign-in-loop behavior.
    return str(matching_rates[-1]) if matching_rates else str(0)
def _get_rotation(self, input_file):
    """Return the rotation of the first video track of *input_file* as an
    int (degrees); 0 when there is no video track or no rotation field."""
    for track in MediaInfo.parse(input_file).tracks:
        if track.track_type == 'Video':
            raw_rotation = track.rotation
            # MediaInfo reports rotation as a decimal string, e.g. "90.000".
            return int(float(raw_rotation)) if raw_rotation else 0
    return 0
def process(fname):
    """Dump every MediaInfo track of *fname*, then print the container
    duration (from the General track) in seconds."""
    info = MediaInfo.parse(fname)
    for entry in info.tracks:
        print_frame(entry.track_type)
        pprint(entry.to_data())
        # print()
    for entry in info.tracks:
        if entry.track_type != 'General':
            continue
        # MediaInfo reports milliseconds.
        print("Duration: {} sec.".format(entry.duration / 1000.0))
def _get_metadata(self):
    """Collect iTunes-style tags for this episode.

    Returns (tags, xml): *tags* is a dict of tag-atom values built from
    the metadata source, *xml* is the cast XML payload or None when
    add_people is disabled or no actors are listed. (Python 2 module.)
    """
    source = Metadata_Source(self.config)
    metadata = source.get_metadata(self.tvdb_id, self.filename)
    print ' - Setting options'
    tags = dict()
    # Metadata that's always present
    tags['TVSeasonNum'] = metadata['seasonnumber']
    tags['TVEpisodeNum'] = metadata['episodenumber']
    tags['TVShowName'] = tags['artist'] = metadata['seriesname']
    tags['title'] = metadata['episodename']
    tags['album'] = metadata['seriesname'] + ', Season ' + metadata['seasonnumber']
    tags['disk'] = '1/1'
    tags['stik'] = 'TV Show'
    # Metadata that may not be present
    if 'poster' in metadata:
        tags['artwork'] = self._get_artwork(metadata['poster'])
    else:
        # _get_artwork(None) supplies the fallback artwork.
        tags['artwork'] = self._get_artwork(None)
    if 'network' in metadata:
        tags['TVNetwork'] = metadata['network']
    if 'airdate' in metadata:
        tags['year'] = metadata['airdate']
        self.airdate = metadata['airdate']
    if 'certificate' in metadata:
        tags['contentRating'] = metadata['certificate']
    if 'genre' in metadata:
        tags['genre'] = metadata['genre']
    if 'episodecount' in metadata:
        tags['tracknum'] = metadata['episodenumber'] + '/' + metadata['episodecount']
    if 'id' in metadata:
        tags['cnID'] = metadata['id']
    if 'description' in metadata:
        tags['description'] = metadata['description']
    if 'description' in metadata:
        tags['longdesc'] = metadata['description']
    # Check file for HDness
    print ' - Scanning video to check HDness'
    mi = MediaInfo.parse(self.filename)
    video_hd = False
    for track in mi.tracks:
        if track.track_type == 'Video':
            # NOTE(review): with multiple video tracks the last one wins.
            video_hd = True if (track.height >= 700 or track.width >= 1260) else False
    tags['hdvideo'] = '1' if video_hd else '0'
    xml = None
    if self.config.get('tagMP4', 'add_people') == 'True' and 'actors' in metadata:
        xml = self._gen_XML(metadata['actors'])
    return tags, xml
def get_movie_date(path):
    """Return parse_movie_date() of the first track of *path* that carries
    an encoded_date; otherwise the last (falsy) value seen, normally None."""
    stamp = None
    for track in MediaInfo.parse(path).tracks:
        stamp = track.encoded_date
        if stamp:
            return parse_movie_date(stamp)
    return stamp
def open( self, fd ):
    """Print summary information about the media in *fd*.

    NOTE(review): relies on self.__test_music() having set self.file,
    self.music and the name-mangled self.__audio flag; ``Media`` is
    presumably an alias for pymediainfo's MediaInfo -- confirm the
    import. (Python 2 module.)
    """
    self.__test_music()
    if self.file :
        a = Media.parse( fd )
        for track in a.tracks:
            # General track: performer/album/date, only for music files.
            if track.track_type == 'General' and self.__audio == True and self.music:
                #print track.bit_rate, track.bit_rate_mode, track.codec, track.format
                #print track.complete_name,
                print str(track.performer) + ' ' + str(track.album) + ' ' + str(track.recorded_date)
            # Audio track: codec format plus rate/channel details.
            if track.track_type == 'Audio' and self.__audio == True:
                print track.format , ' ' , track.bit_rate, ' ' , track.sampling_rate , ' ', track.channel_s
def set_from_media_info(self, directory_path):
    """ Set attributes by extracting them from media info
        input:
            directory_path <str> parent directory of the file
    """
    full_path = os.path.join(directory_path, self.file_name)
    # Track 0 is MediaInfo's General track.
    general_track = MediaInfo.parse(full_path).tracks[0]
    # Missing/None duration falls back to 0 milliseconds.
    millis = getattr(general_track, "duration", 0) or 0
    self.song.duration = timedelta(milliseconds=int(millis))
def parse(self):
    """Populate self.tracklist (plus uid, dimensions and fps) from the
    MediaInfo probe of self.path."""
    self.tracklist = []
    mediainfo = MediaInfo.parse(self.path)
    # UID
    if self.ext == '.mkv' and mediainfo.tracks[0].other_unique_id:
        uid = mediainfo.tracks[0].other_unique_id[0]
        # Extract the hex digits from e.g. "... (0xABCDEF)".
        uid = re.findall('0x[^)]*', uid)[0].replace('0x', '')
        # Mediainfo strips leading zeroes
        self.uid = uid.rjust(32, '0')
    # Track: [track_id, track_type]
    for t in mediainfo.tracks:
        if t.track_type == 'Video':
            tr = VideoTrack()
            self.dimensions[0] = t.width
            self.dimensions[2] = t.width
            self.dimensions[1] = t.height
            self.dimensions[3] = t.height
            if t.frame_rate_mode == 'CFR':
                # Store NTSC rates as exact rationals: [num, den] pairs.
                if t.frame_rate == '23.976':
                    self.fps[0] = 24000
                    self.fps[2] = 24000
                    self.fps[1] = 1001
                    self.fps[3] = 1001
                elif t.frame_rate == '29.970':
                    self.fps[0] = 30000
                    self.fps[2] = 30000
                    self.fps[1] = 1001
                    self.fps[3] = 1001
        elif t.track_type == 'Audio':
            tr = AudioTrack()
            tr.channel = t.channel_s
            tr.rate = t.sampling_rate
            tr.depth = t.bit_depth
        elif t.track_type == 'Text':
            tr = TextTrack()
        elif t.track_type == 'Menu':
            # tr = MenuTrack()
            pass
        # General/Menu tracks are not added to the tracklist.
        if t.track_type not in ['General', 'Menu']:
            tr.file = self
            # MediaInfo track ids are 1-based; ours are 0-based.
            tr.id = t.track_id - 1
            tr.default = True if t.default == 'Yes' else False
            tr.type = t.track_type
            tr.format = t.format
            tr.title = t.title if t.title else ''
            # We want the 3 letter code
            tr.lang = t.other_language[3] if t.other_language else ''
            self.tracklist.append(tr)
def get_episode_number(file):
    """Return the episode number from the SYNOPSIS field in the file metadata.

    Fix: when the file had no General track (or no synopsis) the old code
    crashed with an unbound-variable NameError/TypeError; it now raises a
    descriptive ValueError instead.
    """
    media_info = MediaInfo.parse(file)
    for track in media_info.tracks:
        if track.track_type != 'General':
            continue
        synopsis = track.synopsis
        if not synopsis:
            break
        """We assume that there won't be more than 99 episodes in a season
        here, so just trim the last two characters and what remains must be
        our season number. There has to be a smarter way."""
        return int(synopsis[-2:])
    raise ValueError("no SYNOPSIS metadata found in %r" % (file,))
def main():
    """CLI entry point: cut *infile* per *edlfile* and render *outfile*.

    Bitrates default to those probed from the source via MediaInfo
    (track 1 = video, track 2 = audio) when not given on the command line.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("infile", help="Original input video file.")
    parser.add_argument("edlfile", help="EDL file with edit definitions.")
    parser.add_argument("outfile", help="Edited video file path/name.")
    parser.add_argument("-t", "--threads", type=int,
                        help="Number of CPU threads to use.")
    parser.add_argument("-p", "--preset",
                        choices=["ultrafast", "superfast", "fast", "medium",
                                 "slow", "superslow"],
                        help="FFMPEG preset to use for optimizing the compression. Defaults to 'medium'.")
    parser.add_argument("-vb", "--videobitrate",
                        help="Video bitrate setting. Auto-detected from original video unless specified.")
    parser.add_argument("-ab", "--audiobitrate",
                        help="Audio bitrate setting. Auto-detected from original video unless specified.")
    parser.add_argument("-vc", "--vcodec", help="Video codec to use.")
    parser.add_argument("-ac", "--acodec", help="Audio codec to use.")
    parser.add_argument("-fp", "--ffmpegparams",
                        help="Additional FFMpeg parameters to use. Example: '-crf=24 -s=640x480'.")
    args = parser.parse_args()

    estruct = edl.EDL(args.edlfile)
    videoBitrate = ""
    audioBitrate = ""
    # Fix: compare against None with `is`, not `==`.
    threadNum = 2 if args.threads is None else args.threads
    ffmpegPreset = "medium" if args.preset is None else args.preset

    mi = MediaInfo.parse(args.infile)
    if args.videobitrate is None:
        videoBitrate = str(int(mi.tracks[1].bit_rate / 1000)) + "k"
        print("Using original video bitrate: "+videoBitrate)
    else:
        videoBitrate = args.videobitrate
    if videoBitrate[-1] != 'k':
        videoBitrate = videoBitrate+'k'

    if args.audiobitrate is None:
        try:
            audioBitrate = str(int(mi.tracks[2].bit_rate / 1000)) + "k"
        except TypeError:
            # Some containers report a compound "a / b" bit-rate string;
            # fall back to its second component.
            audioBitrate = str(int(int(mi.tracks[2].bit_rate.split(' / ')[1]) / 1000)) + "k"
        print("Using original audio bitrate: "+audioBitrate)
    else:
        audioBitrate = args.audiobitrate
    if audioBitrate[-1] != 'k':
        audioBitrate = audioBitrate+'k'

    render(args.infile, estruct, args.outfile, videoBitrate, audioBitrate,
           threadNum=threadNum, vcodec=args.vcodec, acodec=args.acodec,
           ffmpeg_params=args.ffmpegparams, ffmpegPreset=ffmpegPreset)
def get_video_size(*args, **kwargs):
    ''' use mediainfo to compute the video size '''
    # Pipeline-style task: the first positional argument is the context
    # dict; the width/height of the first video track are written back
    # into it. (Python 2 module.)
    print args, kwargs
    context = args[0]
    media_info = MediaInfo.parse(context["original_file"])
    for track in media_info.tracks:
        if track.track_type == 'Video':
            # NOTE(review): message prints height before width -- confirm
            # the intended order of the log line.
            print "video is %d, %d" % (track.height, track.width)
            context["track_width"] = track.width
            context["track_height"] = track.height
            return context
    raise AssertionError("failed to read video info from " + context["original_file"])
def getMediaInfoForFile(file):
    """Return {'audio': codec, 'video': codec} for *file*; when several
    tracks of a type exist, the last one wins. Missing types yield ''."""
    video_codec = ''
    audio_codec = ''
    for track in MediaInfo.parse(file).tracks:
        kind = track.track_type
        if kind == 'Video':
            video_codec = track.codec
        elif kind == 'Audio':
            audio_codec = track.codec
    return {'audio': audio_codec, 'video': video_codec}
def get_duration(f):
    """Return the duration of media file *f*, memoized through a local
    memcached instance (key derived from the path); 0 when no track
    reports a duration."""
    cache = memcache.Client(['127.0.0.1:11211'], debug=0)
    cache_key = escape_generic(f)
    cached = cache.get(cache_key)
    if cached is not None:
        return cached
    duration = 0
    # First track with a duration wins.
    for track in MediaInfo.parse(f).tracks:
        if getattr(track, 'duration') is not None:
            duration = track.duration
            break
    cache.set(cache_key, duration)
    return duration
def run(self):
    """Probe the subject's media file with MediaInfo and publish facts.

    Emits "duration" (seconds), one "video_stream"/"audio_stream" dict
    per track, then tags the subject as item.video (any video track) or
    item.audio (audio-only). Does nothing when the identifier URI has an
    empty path.
    """
    uri = urlparse(self.subject[dc.identifier]).path
    # NOTE(review): mime_type is read but never used in this method.
    mime_type = self.subject[dc.format]
    if uri:
        media_info = MediaInfo.parse(uri)
        video_streams = list()
        audio_streams = list()
        for track in media_info.tracks:
            if track.track_type == 'General' and track.duration:
                # MediaInfo reports milliseconds; emit seconds.
                self.subject.emit("duration", track.duration / 1000.0)
            elif track.track_type == 'Video':
                # Only include fields MediaInfo actually reported.
                v = dict()
                if track.frame_rate:
                    v["framerate"] = float(track.frame_rate)
                if track.codec:
                    v["codec"] = track.codec
                if track.height:
                    v["height"] = int(track.height)
                if track.width:
                    v["width"] = int(track.width)
                video_streams.append(v)
            elif track.track_type == "Audio":
                a = dict()
                if track.sampling_rate:
                    a["samplerate"] = int(track.sampling_rate)
                if track.codec:
                    a["codec"] = track.codec
                if track.channel_s:
                    a["channels"] = int(track.channel_s)
                audio_streams.append(a)
        for v in video_streams:
            self.subject.emit("video_stream", v)
        for a in audio_streams:
            self.subject.emit("audio_stream", a)
        if len(video_streams) > 0:
            self.subject.extendClass("item.video")
        elif len(audio_streams) > 0:
            self.subject.extendClass("item.audio")
def discover_forcedsubs(self, dbvideo):
    """
    Attempts to find foreign subtitle track
    Input:
        dbvideo (Obj): Video database object
    Output:
        If successful, track number of forced subtitle
        Else, None

    Fix: the length checks used ``is``, which compares object identity
    and only worked by accident of CPython's small-int caching; they now
    use ``==``.
    """
    MEDIADIR = os.path.join(dbvideo.path, dbvideo.filename)
    # wrapper class for mediainfo tool
    media_info = MediaInfo.parse(MEDIADIR.encode('unicode-escape'))
    subs = []
    # Iterates though tracks and finds subtitles in preferred language, creates
    # list of dictionaries
    for track in media_info.tracks:
        data = track.to_data()
        if data['track_type'] == 'Text' and data['language'] == self.lang:
            subs.append(data)
    if len(subs) == 0:
        self.log.info("No subtitle found, cannot determine foreign language track.")
        return None
    if len(subs) == 1:
        self.log.info("Only one {} subtitle found, cannot determine foreign language track."
                      .format(self.lang))
        return None
    # Sort list by size of track file
    subs.sort(key=lambda sub: sub['stream_size'], reverse=True)
    # Main language subtitle assumed to be largest
    main_sub = subs[0]
    main_subsize = main_sub['stream_size']
    main_sublen = float(main_sub['duration'])
    # Checks other subs for size, duration, and if forced flag is set
    secondary_sub = None
    for sub in subs[1:]:
        if (sub['stream_size'] <= main_subsize*self.secsub_ratio and
                main_sublen*.9 <= float(sub['duration']) <= main_sublen*1.1 and
                sub['forced'] == 'No'):
            secondary_sub = sub
        else:
            self.log.info("No foreign language subtitle found, try adjusting ratio.")
            return None
    return secondary_sub['track_id']
def setUp(self):
    # Full parse speed is required for this VBR sample's duration.
    sample_path = os.path.join(data_dir, "vbr_requires_parsespeed_1.mp4")
    self.mi = MediaInfo.parse(sample_path, parse_speed=1)
def setUp(self):
    # Build a MediaInfo object directly from a canned XML dump.
    xml_path = os.path.join(data_dir, 'sample.xml')
    with open(xml_path, 'r') as f:
        self.xml_data = f.read()
    self.mi = MediaInfo(self.xml_data)
def test_can_parse_true(self):
    # The native library must be available in the test environment.
    parse_available = MediaInfo.can_parse()
    self.assertTrue(parse_available)
def setUp(self):
    # Parse the Matroska fixture once per test.
    mkv_path = os.path.join(data_dir, "sample.mkv")
    self.mi = MediaInfo.parse(mkv_path)
def setUp(self):
    # Fixture name contains a non-ASCII character on purpose.
    accented_path = os.path.join(data_dir, "accentué.txt")
    self.mi = MediaInfo.parse(accented_path)
def lambda_handler(event, context):
    """MIE operator: run MediaInfo against the workflow's S3 media object.

    Reads the Video (or Image) S3 location from the event, parses it via
    a presigned URL, stores the resulting metadata on the dataplane and
    forwards the audio-track count to downstream operators. Invalid media
    is deleted from S3 and the workflow is failed.
    """
    print("We got the following event:\n", event)
    operator_object = MediaInsightsOperationHelper(event)
    bucket = ''
    key = ''
    try:
        if "Video" in event["Input"]["Media"]:
            bucket = event["Input"]["Media"]["Video"]["S3Bucket"]
            key = event["Input"]["Media"]["Video"]["S3Key"]
        elif "Image" in event["Input"]["Media"]:
            bucket = event["Input"]["Media"]["Image"]["S3Bucket"]
            key = event["Input"]["Media"]["Image"]["S3Key"]
        workflow_id = str(operator_object.workflow_execution_id)
    except KeyError as e:
        operator_object.update_workflow_status("Error")
        operator_object.add_workflow_metadata(
            MediaconvertError="Missing a required metadata key {e}".format(
                e=e))
        raise MasExecutionError(operator_object.return_output_object())
    # Adding in exception block for now since we aren't guaranteed an asset id will be present, should remove later
    try:
        asset_id = operator_object.asset_id
    except KeyError as e:
        print("No asset id passed in with this workflow", e)
        asset_id = ''
    # Get metadata
    s3_cli = boto3.client("s3", region_name=region,
                          config=Config(signature_version='s3v4',
                                        s3={'addressing_style': 'virtual'}))
    metadata_json = {}
    try:
        # The number of seconds that the Signed URL is valid:
        signed_url_expiration = 300
        # Generate a signed URL for reading a file from S3 via HTTPS
        signed_url = s3_cli.generate_presigned_url(
            'get_object',
            Params={
                'Bucket': bucket,
                'Key': key
            },
            ExpiresIn=signed_url_expiration)
        # Launch MediaInfo
        media_info = MediaInfo.parse(signed_url)
        # Save the result
        metadata_json = json.loads(media_info.to_json())
        # If there's no Video, Audio, Image, or Text data then delete the file.
        track_types = [
            track['track_type'] for track in metadata_json['tracks']
        ]
        if ('Video' not in track_types and 'Audio' not in track_types
                and 'Image' not in track_types and 'Text' not in track_types):
            print(
                "ERROR: File does not contain valid video, audio, image, or text content"
            )
            print("Deleting file s3://" + bucket + "/" + key)
            s3_cli.delete_object(Bucket=bucket, Key=key)
            operator_object.update_workflow_status("Error")
            operator_object.add_workflow_metadata(
                MediainfoError=
                "File does not contain valid video, audio, image, or text content"
            )
            raise MasExecutionError(operator_object.return_output_object())
    except RuntimeError as e:
        # If MediaInfo could not run then we assume it is not a valid
        # media file and delete it
        print("Exception:\n", e)
        print(
            "ERROR: File does not contain valid video, audio, image, or text content"
        )
        print("Deleting file s3://" + bucket + "/" + key)
        s3_cli.delete_object(Bucket=bucket, Key=key)
        operator_object.update_workflow_status("Error")
        operator_object.add_workflow_metadata(
            MediainfoError=
            "File does not contain valid video, audio, image, or text content")
        raise MasExecutionError(operator_object.return_output_object())
    except Exception as e:
        print("Exception:\n", e)
        operator_object.update_workflow_status("Error")
        operator_object.add_workflow_metadata(
            MediainfoError="Unable to get Mediainfo results. " + str(e))
        raise MasExecutionError(operator_object.return_output_object())
    # Verify that the metadata is a dict, as required by the dataplane
    if type(metadata_json) != dict:
        operator_object.update_workflow_status("Error")
        operator_object.add_workflow_metadata(
            MediainfoError="Metadata must be of type dict. Found " +
            str(type(metadata_json)) + " instead.")
        raise MasExecutionError(operator_object.return_output_object())
    # Pass metadata to downstream operators
    # Number of audio tracks is used by the Transcribe operator
    num_audio_tracks = len(
        list(
            filter(lambda i: i['track_type'] == 'Audio',
                   metadata_json['tracks'])))
    operator_object.add_workflow_metadata(
        Mediainfo_num_audio_tracks=str(num_audio_tracks))
    # Save metadata to dataplane
    operator_object.add_workflow_metadata(AssetId=asset_id,
                                          WorkflowExecutionId=workflow_id)
    dataplane = DataPlane()
    metadata_upload = dataplane.store_asset_metadata(asset_id,
                                                     operator_object.name,
                                                     workflow_id,
                                                     metadata_json)
    # Validate that the metadata was saved to the dataplane
    if "Status" not in metadata_upload:
        operator_object.add_workflow_metadata(
            MediainfoError="Unable to upload metadata for asset: {asset}".
            format(asset=asset_id))
        operator_object.update_workflow_status("Error")
        raise MasExecutionError(operator_object.return_output_object())
    else:
        # Update the workflow status
        if metadata_upload["Status"] == "Success":
            print(
                "Uploaded metadata for asset: {asset}".format(asset=asset_id))
            operator_object.update_workflow_status("Complete")
            return operator_object.return_output_object()
        else:
            operator_object.add_workflow_metadata(
                MediainfoError="Unable to upload metadata for asset: {asset}".
                format(asset=asset_id))
            operator_object.update_workflow_status("Error")
            raise MasExecutionError(operator_object.return_output_object())
def test_parse_pathlib_path(self):
    # MediaInfo.parse must accept a pathlib.Path, not only str.
    sample = self.pathlib.Path(data_dir) / "sample.mp4"
    parsed = MediaInfo.parse(sample)
    self.assertEqual(len(parsed.tracks), 3)
async def video_catfile(event):
    """Telethon command handler: convert the given path or the replied-to
    media into a round Telegram video note and send it.

    Videos/GIFs/stickers are square-cropped with ffmpeg; audio files are
    paired with a thumbnail into a still-image video. Temp files are
    cleaned up along each path.
    """
    reply = await event.get_reply_message()
    # Everything after the command word is treated as a local path.
    input_str = "".join(event.text.split(maxsplit=1)[1:])
    if input_str:
        path = Path(input_str)
        if not os.path.exists(path):
            await edit_or_reply(
                event,
                f"`there is no such directory/file with the name {path} to upload`",
            )
            return
        catevent = await edit_or_reply(event, "`Converting to video note..........`")
        filename = os.path.basename(path)
        catfile = os.path.join("./temp", filename)
        copyfile(path, catfile)
    else:
        if not reply:
            await edit_delete(event, "`Reply to supported media`", 5)
            return
        if not (reply and (reply.media)):
            await edit_delete(event, "`Reply to supported Media...`", 5)
            return
        catevent = await edit_or_reply(event, "`Converting to video note..........`")
        catfile = await reply.download_media(file="./temp/")
    # Only these container/extension types are supported.
    if not catfile.endswith((".mp4", ".tgs", ".mp3", ".mov", ".gif", ".opus")):
        os.remove(catfile)
        await edit_delete(catevent, "```Supported Media not found...```", 5)
        return
    if catfile.endswith((".mp4", ".tgs", ".mov", ".gif")):
        if catfile.endswith((".tgs")):
            # Animated stickers are converted to gif/mp4 via a helper bot.
            hmm = await make_gif(catevent, catfile)
            if hmm.endswith(("@tgstogifbot")):
                os.remove(catfile)
                return await catevent.edit(hmm)
            os.rename(hmm, "./temp/circle.mp4")
            catfile = "./temp/circle.mp4"
        media_info = MediaInfo.parse(catfile)
        aspect_ratio = 1
        # NOTE(review): with several video tracks the last one wins.
        for track in media_info.tracks:
            if track.track_type == "Video":
                aspect_ratio = track.display_aspect_ratio
                height = track.height
                width = track.width
        if aspect_ratio != 1:
            # Square-crop to the smaller dimension for the round note.
            crop_by = width if (height > width) else height
            await runcmd(
                f'ffmpeg -i {catfile} -vf "crop={crop_by}:{crop_by}" {PATH}')
        else:
            copyfile(catfile, PATH)
        if str(catfile) != str(PATH):
            os.remove(catfile)
    else:
        # Audio path: build a video from a thumbnail + the audio stream.
        thumb_loc = os.path.join(Config.TMP_DOWNLOAD_DIRECTORY, "thumb_image.jpg")
        catthumb = None
        try:
            catthumb = await reply.download_media(thumb=-1)
        # NOTE(review): bare except left as-is; failures fall back to a
        # thumbnail extracted from the audio itself.
        except:
            catthumb = os.path.join("./temp", "thumb.jpg")
            await thumb_from_audio(catfile, catthumb)
        if catthumb is None:
            catthumb = os.path.join("./temp", "thumb.jpg")
            copyfile(thumb_loc, catthumb)
        if (catthumb is not None and not os.path.exists(catthumb)
                and os.path.exists(thumb_loc)):
            catthumb = os.path.join("./temp", "thumb.jpg")
            copyfile(thumb_loc, catthumb)
        if catthumb is not None and os.path.exists(catthumb):
            await runcmd(
                f"ffmpeg -loop 1 -i {catthumb} -i {catfile} -c:v libx264 -tune stillimage -c:a aac -b:a 192k -vf \"scale='iw-mod (iw,2)':'ih-mod(ih,2)',format=yuv420p\" -shortest -movflags +faststart {PATH}"
            )
            os.remove(catfile)
        else:
            os.remove(catfile)
            return await edit_delete(catevent, "`No thumb found to make it video note`", 5)
    if os.path.exists(PATH):
        catid = event.reply_to_msg_id
        c_time = time.time()
        await event.client.send_file(
            event.chat_id,
            PATH,
            allow_cache=False,
            reply_to=catid,
            video_note=True,
            attributes=[
                DocumentAttributeVideo(
                    duration=60,
                    w=1,
                    h=1,
                    round_message=True,
                    supports_streaming=True,
                )
            ],
            progress_callback=lambda d, t: asyncio.get_event_loop(
            ).create_task(
                progress(d, t, catevent, c_time, "Uploading...", PATH)),
        )
        os.remove(PATH)
        await catevent.delete()
def __init__(self, filename): self.mi = MediaInfo.parse(filename)
def test_track_parsing(self): mi = MediaInfo.parse(os.path.join(data_dir, "issue55.flv")) self.assertEqual(len(mi.tracks), 2)
def setUp(self): self.mi = MediaInfo.parse( os.path.join(data_dir, "sample_with_cover.mp3"), cover_data=True )
def main():
    """Import MP4/JPG files from a source directory into a destination
    directory, renaming each file after its creation timestamp.

    The timestamp is read from MediaInfo's encoded_date for .mp4 files and
    from the EXIF DateTimeOriginal tag for .jpg files.  Files without usable
    metadata (or with unknown extensions) are counted and listed as failures.
    """
    # Resolve source and destination directories from the command line.
    dirslist = getDirs(sys.argv, DEFAULT_OUTPUT)
    # Show where the import reads from and writes to.
    print('Источник импорта: ', dirslist[0])
    print('Дирректория назначения: ', dirslist[1])
    # Confirmation prompt, intentionally disabled for now.
    #if not confirm():
    #    sys.exit("Задание отменено")
    # Counters for successful and failed operations.
    fTotal = okOp = badOp = 0
    badFiles = []
    # The import itself.
    for root, dirs, files in os.walk(dirslist[0]):
        for i, name in enumerate(files, start=1):
            fTotal += 1
            filePath = os.path.join(root, name)
            fileExtension = os.path.splitext(name)[1]
            # Progress report for the current directory.
            print('Обработка файла {} из {} (дир. -= {} =-)'
                  .format(i, len(files), os.path.basename(root).upper()))
            if fileExtension.upper() == '.MP4' and MediaInfo.can_parse():
                media_info = MediaInfo.parse(filePath)
                # encoded_date looks like "UTC 2020-01-02 03:04:05"; normalize
                # the date separators to ':' to match the EXIF format below.
                encodedDate = media_info.tracks[0].encoded_date.replace('-', ':')
                exifDateTime = encodedDate.replace('UTC ', '')
            # Case-insensitive, consistent with the .MP4 branch above
            # (previously only upper-case '.JPG' was accepted).
            elif fileExtension.upper() == '.JPG':
                with open(filePath, 'rb') as f:
                    tags = exifread.process_file(f, details=False)
                exifDateTime = tags.get('EXIF DateTimeOriginal')
                # Skip photos without EXIF data, recording them as failures.
                if not exifDateTime:
                    print('Отсутствуют EXIF данные. Файл пропущен!!!', name)
                    badOp += 1
                    badFiles.append(filePath)
                    continue
            else:
                print('Неизвестный файл', name)
                badOp += 1
                badFiles.append(filePath)
                continue
            # EXIF timestamps look like "YYYY:MM:DD HH:MM:SS".
            exifDate, exifTime = str(exifDateTime).split(' ')
            year, month, day = exifDate.split(':')
            hour, minute, sec = exifTime.split(':')
            # Build the imported file's name: YYYYMMDD-HH-MM.
            newFileName = '{}-{}-{}'.format(year + month + day, hour, minute)
            path = dirslist[1]
            if not os.path.exists(path):
                print('Создаем новую папку')
                os.makedirs(path, exist_ok=True)
            newPath = os.path.join(path, newFileName)
            # Find a free name by appending _1, _2, ... on collisions.
            tmpPath = newPath
            fileindex = 1
            while os.path.exists(tmpPath + fileExtension):
                tmpPath = newPath + '_{}'.format(fileindex)
                fileindex += 1
            newPath = tmpPath + fileExtension
            try:
                shutil.copy2(filePath, newPath)
                print('OK')
                okOp += 1
            # (IOError, Exception) was redundant — IOError is an Exception
            # subclass — and 'BAD ' + e raised TypeError, hiding the error.
            except Exception as e:
                print('BAD ' + str(e))
                badOp += 1
    # Final statistics.
    print('Обработано файлов всего:', fTotal)
    print('Успешных операций:', okOp)
    print('Завершенных с ошибками:', badOp)
    # And the names of the failed files, if any.
    if len(badFiles):
        print('Необработанные файлы: ')
        for bf in badFiles:
            print(bf)
async def _send_file(
    self, item_uuid: UUID, room_id: str, path: Union[Path, str],
) -> None:
    """Upload and monitor a file + thumbnail and send the built event."""

    # TODO: this function is way too complex, and most of it should be
    # refactored into nio.

    transaction_id = uuid4()
    path = Path(path)
    # Encrypted rooms require uploading the payload as an encrypted file.
    encrypt = room_id in self.encrypted_rooms

    try:
        size = path.resolve().stat().st_size
    except (PermissionError, FileNotFoundError):
        # This error will be caught again by the try block later below
        size = 0

    monitor = nio.TransferMonitor(size)
    upload_item = Upload(item_uuid, path, total_size=size)
    # Register the upload so the UI can display progress and cancel it.
    self.models[room_id, "uploads"][str(item_uuid)] = upload_item

    self.upload_monitors[item_uuid] = monitor
    self.upload_tasks[item_uuid] = asyncio.current_task()  # type: ignore

    # Progress callbacks feed the UI model as bytes are transferred.
    def on_transferred(transferred: int) -> None:
        upload_item.uploaded = transferred

    def on_speed_changed(speed: float) -> None:
        upload_item.speed = speed
        upload_item.time_left = monitor.remaining_time or timedelta(0)

    monitor.on_transferred = on_transferred
    monitor.on_speed_changed = on_speed_changed

    try:
        url, mime, crypt_dict = await self.upload(
            lambda *_: path,
            filename=path.name,
            filesize=size,
            encrypt=encrypt,
            monitor=monitor,
        )

        # FIXME: nio might not catch the cancel in time
        if monitor.cancel:
            raise nio.TransferCancelledError()

    except (MatrixError, OSError) as err:
        upload_item.status = UploadStatus.Error
        upload_item.error = type(err)
        upload_item.error_args = err.args

        # Wait for cancellation from UI, see parent send_file() method
        while True:
            await asyncio.sleep(0.1)

    upload_item.status = UploadStatus.Caching
    await Media.from_existing_file(self.backend.media_cache, url, path)

    # Coarse media kind from the MIME type: "image", "audio", "video", etc.
    kind = (mime or "").split("/")[0]

    thumb_url: str = ""
    thumb_info: Optional[MatrixImageInfo] = None

    # Base Matrix event content, extended per media kind below.
    content: dict = {
        f"{__app_name__}.transaction_id": str(transaction_id),
        "body": path.name,
        "info": {
            "mimetype": mime,
            "size": upload_item.total_size,
        },
    }

    # Encrypted rooms put the URL + keys under "file"; plain rooms use "url".
    if encrypt:
        content["file"] = {"url": url, **crypt_dict}
    else:
        content["url"] = url

    if kind == "image":
        is_svg = mime == "image/svg+xml"

        event_type = \
            nio.RoomEncryptedImage if encrypt else nio.RoomMessageImage

        content["msgtype"] = "m.image"

        # SVGs have no raster size; compute it instead of asking PIL.
        content["info"]["w"], content["info"]["h"] = (
            await utils.svg_dimensions(path) if is_svg else
            PILImage.open(path).size)

        try:
            thumb_data, thumb_info = await self.generate_thumbnail(
                path, is_svg=is_svg,
            )
        except UneededThumbnail:
            # Image is small enough to not need a thumbnail.
            pass
        except Exception:
            trace = traceback.format_exc().rstrip()
            log.warning("Failed thumbnailing %s:\n%s", path, trace)
        else:
            thumb_ext = "png" if thumb_info.mime == "image/png" else "jpg"
            thumb_name = f"{path.stem}_thumbnail.{thumb_ext}"

            upload_item.status = UploadStatus.Uploading
            upload_item.filepath = Path(thumb_name)
            upload_item.total_size = len(thumb_data)

            try:
                upload_item.total_size = thumb_info.size

                # Fresh monitor for the thumbnail transfer, reusing the
                # same UI callbacks.
                monitor = nio.TransferMonitor(thumb_info.size)
                monitor.on_transferred = on_transferred
                monitor.on_speed_changed = on_speed_changed

                self.upload_monitors[item_uuid] = monitor

                thumb_url, _, thumb_crypt_dict = await self.upload(
                    lambda *_: thumb_data,
                    filename=f"{path.stem}_sample{path.suffix}",
                    filesize=thumb_info.size,
                    encrypt=encrypt,
                    monitor=monitor,
                )

                # FIXME: nio might not catch the cancel in time
                if monitor.cancel:
                    raise nio.TransferCancelledError()
            except MatrixError as err:
                # Thumbnail failures are non-fatal; send without one.
                log.warning(f"Failed uploading thumbnail {path}: {err}")
            else:
                upload_item.status = UploadStatus.Caching

                await Thumbnail.from_bytes(
                    self.backend.media_cache,
                    thumb_url,
                    path.name,
                    thumb_data,
                    wanted_size=(content["info"]["w"], content["info"]["h"]),
                )

                if encrypt:
                    content["info"]["thumbnail_file"] = {
                        "url": thumb_url,
                        **thumb_crypt_dict,
                    }
                else:
                    content["info"]["thumbnail_url"] = thumb_url

                content["info"]["thumbnail_info"] = thumb_info.as_dict()

    elif kind == "audio":
        event_type = \
            nio.RoomEncryptedAudio if encrypt else nio.RoomMessageAudio

        content["msgtype"] = "m.audio"
        # Duration read from the container's General track; 0 if absent.
        content["info"]["duration"] = getattr(
            MediaInfo.parse(path).tracks[0], "duration", 0,
        ) or 0

    elif kind == "video":
        event_type = \
            nio.RoomEncryptedVideo if encrypt else nio.RoomMessageVideo

        content["msgtype"] = "m.video"

        tracks = MediaInfo.parse(path).tracks

        content["info"]["duration"] = \
            getattr(tracks[0], "duration", 0) or 0

        # Max over all tracks, since width/height live on the video track.
        content["info"]["w"] = max(
            getattr(t, "width", 0) or 0 for t in tracks)
        content["info"]["h"] = max(
            getattr(t, "height", 0) or 0 for t in tracks)

    else:
        event_type = \
            nio.RoomEncryptedFile if encrypt else nio.RoomMessageFile

        content["msgtype"] = "m.file"
        content["filename"] = path.name

    # Upload finished: drop the tracking entries before sending the event.
    del self.upload_monitors[item_uuid]
    del self.upload_tasks[item_uuid]
    del self.models[room_id, "uploads"][str(upload_item.id)]

    # Show the message locally right away; the server echo replaces it.
    await self._local_echo(
        room_id,
        transaction_id,
        event_type,
        inline_content=path.name,
        media_url=url,
        media_title=path.name,
        media_width=content["info"].get("w", 0),
        media_height=content["info"].get("h", 0),
        media_duration=content["info"].get("duration", 0),
        media_size=content["info"]["size"],
        media_mime=content["info"]["mimetype"],
        thumbnail_url=thumb_url,
        thumbnail_width=content["info"].get("thumbnail_info", {}).get("w", 0),
        thumbnail_height=content["info"].get("thumbnail_info", {}).get("h", 0),
        thumbnail_mime=content["info"].get("thumbnail_info", {}).get("mimetype", ""),
    )

    await self._send_message(room_id, content)
def setUp(self): self.mi = MediaInfo.parse("https://github.com/sbraz/pymediainfo/blob/master/tests/data/sample.mkv?raw=true")