def runjob(jobid=None, chanid=None, starttime=None):
    """Locate a recording and, if it has a cutlist, losslessly strip it.

    Invoked either as a MythTV user job (jobid given, chanid/starttime
    read from the Job) or directly with chanid + starttime.  Exits the
    process on failure rather than raising.
    """
    db = MythDB()
    if jobid:
        # Running as a queued user job: pull the recording keys from it.
        job = Job(jobid, db=db)
        chanid = job.chanid
        starttime = job.starttime
    rec = Recorded((chanid, starttime), db=db)
    # findfile() resolves the storage-group directory holding the file;
    # None means the file is not locally accessible from this host.
    sg = findfile('/' + rec.basename, rec.storagegroup, db=db)
    if sg is None:
        print 'Local access to recording not found.'
        sys.exit(1)
    infile = os.path.join(sg.dirname, rec.basename)
    # Derived work/output paths share the input's stem.
    tmpfile = '%s.tmp' % infile.rsplit('.', 1)[0]
    outfile = '%s.mp4' % infile.rsplit('.', 1)[0]
    # reformat 'starttime' for use with mythtranscode/ffmpeg/mythcommflag
    # (collapse ISO text down to the bare YYYYMMDDHHMMSS digits)
    starttime = str(starttime.utcisoformat().replace(u':', '').replace(
        u' ', '').replace(u'T', '').replace('-', ''))
    # Lossless transcode to strip cutlist
    if rec.cutlist == 1:
        if jobid:
            # status 4 — presumably Job.RUNNING; TODO confirm against
            # the MythTV Job status constants
            job.update({'status': 4, 'comment': 'Removing Cutlist'})
        task = System(path='mythtranscode', db=db)
        try:
            output = task('--chanid "%s"' % chanid,
                          '--starttime "%s"' % starttime,
                          '--mpeg2',
                          '--honorcutlist',
                          '-o "%s"' % tmpfile,
                          '2> /dev/null')
        except MythError, e:
            print 'Command failed with output:\n%s' % e.stderr
            if jobid:
                # status 304 (0x130) — presumably the "errored" job flag;
                # TODO confirm against the MythTV Job status constants
                job.update({'status': 304,
                            'comment': 'Removing Cutlist failed'})
            sys.exit(e.retcode)
class VIDEO:
    """Export a MythTV recording into the MythVideo storage group.

    Drives the whole export: resolves the recording, grabs metadata,
    builds the destination filename from user format strings, streams
    the file through the backend, and optionally verifies and deletes
    the source.
    """

    def __init__(self, opts, jobid=None):
        """Run a full export.

        opts  -- parsed command-line options (chanid/starttime, format
                 overrides, seekdata/skiplist/cutlist/delete/safe flags).
        jobid -- optional MythTV job id; when given, chanid/starttime
                 come from the job and progress is reported to it.
        """
        if jobid:
            self.job = Job(jobid)
            self.chanid = self.job.chanid
            self.starttime = self.job.starttime
            self.job.update(status=Job.STARTING)
        else:
            self.job = None
            self.chanid = opts.chanid
            self.starttime = opts.starttime
        self.opts = opts
        self.db = MythDB()
        self.log = MythLog(module='mythvidexport.py', db=self.db)

        # load setting strings
        self.get_format()

        # prep objects
        self.rec = Recorded((self.chanid, self.starttime), db=self.db)
        self.log(MythLog.GENERAL, MythLog.INFO, 'Using recording',
                 '%s - %s' % (self.rec.title, self.rec.subtitle))
        self.vid = Video(db=self.db).create({'title': '', 'filename': '',
                                             'host': gethostname()})

        # process data
        self.get_meta()
        self.get_dest()

        # bug fix to work around limitation in the bindings where DBDataRef
        # classes are mapped to the filename at time of Video element
        # creation. since the filename is specified as blank when the video
        # is created, the markup handler is not properly initialized
        self.vid.markup._refdat = (self.vid.filename,)

        # save file
        self.copy()
        if opts.seekdata:
            self.copy_seek()
        if opts.skiplist:
            self.copy_markup(static.MARKUP.MARK_COMM_START,
                             static.MARKUP.MARK_COMM_END)
        if opts.cutlist:
            self.copy_markup(static.MARKUP.MARK_CUT_START,
                             static.MARKUP.MARK_CUT_END)
        self.vid.update()

        # delete old file
        if opts.delete:
            self.rec.delete()

    def get_format(self):
        """Load the TV/Movie/Generic filename format strings.

        Precedence: command-line option, then per-host database setting,
        then a built-in default.
        """
        host = self.db.gethostname()
        # TV Format
        if self.opts.tformat:
            self.tfmt = self.opts.tformat
        elif self.db.settings[host]['mythvideo.TVexportfmt']:
            self.tfmt = self.db.settings[host]['mythvideo.TVexportfmt']
        else:
            self.tfmt = 'Television/%TITLE%/Season %SEASON%/'+\
                        '%TITLE% - S%SEASON%E%EPISODEPAD% - %SUBTITLE%'
        # Movie Format
        if self.opts.mformat:
            self.mfmt = self.opts.mformat
        elif self.db.settings[host]['mythvideo.MOVIEexportfmt']:
            self.mfmt = self.db.settings[host]['mythvideo.MOVIEexportfmt']
        else:
            self.mfmt = 'Movies/%TITLE%'
        # Generic Format
        if self.opts.gformat:
            self.gfmt = self.opts.gformat
        elif self.db.settings[host]['mythvideo.GENERICexportfmt']:
            self.gfmt = self.db.settings[host]['mythvideo.GENERICexportfmt']
        else:
            self.gfmt = 'Videos/%TITLE%'

    def get_meta(self):
        """Determine export type (TV/MOVIE/GENERIC) and import metadata.

        Prefers local recording data when an inetref exists or when
        --listingonly is set; otherwise queries a VideoGrabber and
        raises MythError on ambiguous matches.
        """
        self.vid.hostname = self.db.gethostname()
        if self.rec.inetref:
            # good data is available, use it
            if self.rec.season is not None:
                self.log(self.log.GENERAL, self.log.INFO,
                         'Performing TV export with local data.')
                self.type = 'TV'
            else:
                self.log(self.log.GENERAL, self.log.INFO,
                         'Performing Movie export with local data.')
                self.type = 'MOVIE'
            metadata = self.rec.exportMetadata()
        elif self.opts.listingonly:
            # force use of local data
            if self.rec.subtitle:
                self.log(self.log.GENERAL, self.log.INFO,
                         'Forcing TV export with local data.')
                self.type = 'TV'
            else:
                self.log(self.log.GENERAL, self.log.INFO,
                         'Forcing Movie export with local data.')
                self.type = 'MOVIE'
            metadata = self.rec.exportMetadata()
        else:
            if self.rec.subtitle:
                # subtitle exists, assume tv show
                self.type = 'TV'
                self.log(self.log.GENERAL, self.log.INFO,
                         'Attempting TV export.')
                grab = VideoGrabber(self.type)
                match = grab.sortedSearch(self.rec.title, self.rec.subtitle)
            else:
                # assume movie
                self.type = 'MOVIE'
                self.log(self.log.GENERAL, self.log.INFO,
                         'Attempting Movie export.')
                grab = VideoGrabber(self.type)
                match = grab.sortedSearch(self.rec.title)
            if len(match) == 0:
                # no match found
                self.log(self.log.GENERAL, self.log.INFO,
                         'Falling back to generic export.')
                self.type = 'GENERIC'
                metadata = self.rec.exportMetadata()
            elif (len(match) > 1) & (match[0].levenshtein > 0):
                # multiple matches found, and closest is not exact
                self.vid.delete()
                raise MythError('Multiple metadata matches found: '
                                + self.rec.title)
            else:
                self.log(self.log.GENERAL, self.log.INFO,
                         'Importing content from', match[0].inetref)
                metadata = grab.grabInetref(match[0])
        self.vid.importMetadata(metadata)
        self.log(self.log.GENERAL, self.log.INFO, 'Import complete')

    def get_dest(self):
        """Set the destination filename from the format for self.type."""
        if self.type == 'TV':
            self.vid.filename = self.process_fmt(self.tfmt)
        elif self.type == 'MOVIE':
            self.vid.filename = self.process_fmt(self.mfmt)
        elif self.type == 'GENERIC':
            self.vid.filename = self.process_fmt(self.gfmt)

    def process_fmt(self, fmt):
        """Expand %TAG% placeholders in *fmt* and append the extension.

        Tags come first from the Video metadata (title/season/...),
        then from the Recorded program data (hostname/storagegroup),
        then the first genre if any.  Unresolvable tags become ''.
        """
        # replace fields from viddata
        ext = '.' + self.rec.basename.rsplit('.', 1)[1]
        rep = (('%TITLE%', 'title', '%s'),
               ('%SUBTITLE%', 'subtitle', '%s'),
               ('%SEASON%', 'season', '%d'),
               ('%SEASONPAD%', 'season', '%02d'),
               ('%EPISODE%', 'episode', '%d'),
               ('%EPISODEPAD%', 'episode', '%02d'),
               ('%YEAR%', 'year', '%s'),
               ('%DIRECTOR%', 'director', '%s'))
        for tag, data, format in rep:
            if self.vid[data]:
                fmt = fmt.replace(tag, format % self.vid[data])
            else:
                fmt = fmt.replace(tag, '')
        # replace fields from program data
        rep = (('%HOSTNAME%', 'hostname', '%s'),
               ('%STORAGEGROUP%', 'storagegroup', '%s'))
        for tag, data, format in rep:
            data = getattr(self.rec, data)
            fmt = fmt.replace(tag, format % data)
        if len(self.vid.genre):
            fmt = fmt.replace('%GENRE%', self.vid.genre[0].genre)
        else:
            fmt = fmt.replace('%GENRE%', '')
        return fmt + ext

    def copy(self):
        """Stream the recording into MythVideo, reporting job progress.

        Optionally verifies the transfer afterwards: --reallysafe
        compares full file hashes, --safe compares backend-reported
        sizes.  Raises MythError on a verification mismatch.
        """
        stime = time.time()
        srcsize = self.rec.filesize
        # sliding window of 4 timestamps -> transfer rate over the last
        # four 16 MB chunks
        htime = [stime, stime, stime, stime]
        self.log(MythLog.GENERAL|MythLog.FILE, MythLog.INFO,
                 "Copying myth://%s@%s/%s"
                 % (self.rec.storagegroup, self.rec.hostname,
                    self.rec.basename)
                 + " to myth://Videos@%s/%s"
                 % (self.vid.host, self.vid.filename))
        srcfp = self.rec.open('r')
        dstfp = self.vid.open('w')
        if self.job:
            self.job.setStatus(Job.RUNNING)
        tsize = 2**24
        # loop ends on the first short (final) chunk
        while tsize == 2**24:
            tsize = min(tsize, srcsize - dstfp.tell())
            dstfp.write(srcfp.read(tsize))
            htime.append(time.time())
            rate = float(tsize*4)/(time.time()-htime.pop(0))
            remt = (srcsize-dstfp.tell())/rate
            if self.job:
                self.job.setComment(
                    "%02d%% complete - %d seconds remaining"
                    % (dstfp.tell()*100/srcsize, remt))
        srcfp.close()
        dstfp.close()
        self.vid.hash = self.vid.getHash()
        self.log(MythLog.GENERAL|MythLog.FILE, MythLog.INFO,
                 "Transfer Complete",
                 "%d seconds elapsed" % int(time.time()-stime))
        if self.opts.reallysafe:
            if self.job:
                self.job.setComment("Checking file hashes")
            self.log(MythLog.GENERAL|MythLog.FILE, MythLog.INFO,
                     "Checking file hashes.")
            srchash = hashfile(self.rec.open('r'))
            # BUGFIX: previously hashed self.rec a second time, so the
            # comparison could never detect a corrupted destination.
            # Hash the copied Video file instead.
            dsthash = hashfile(self.vid.open('r'))
            if srchash != dsthash:
                raise MythError(
                    'Source hash (%s) does not match destination hash (%s)'
                    % (srchash, dsthash))
        elif self.opts.safe:
            self.log(MythLog.GENERAL|MythLog.FILE, MythLog.INFO,
                     "Checking file sizes.")
            be = MythBE(db=self.vid._db)
            try:
                srcsize = be.getSGFile(self.rec.hostname,
                                       self.rec.storagegroup,
                                       self.rec.basename)[1]
                dstsize = be.getSGFile(self.vid.host, 'Videos',
                                       self.vid.filename)[1]
            except:
                raise MythError('Could not query file size from backend')
            if srcsize != dstsize:
                raise MythError(
                    'Source size (%d) does not match destination size (%d)'
                    % (srcsize, dstsize))
        if self.job:
            self.job.setComment("Complete - %d seconds elapsed"
                                % (int(time.time()-stime)))
            self.job.setStatus(Job.FINISHED)

    def copy_seek(self):
        """Copy seek-table entries from the recording to the video."""
        for seek in self.rec.seek:
            self.vid.markup.add(seek.mark, seek.offset, seek.type)

    def copy_markup(self, start, stop):
        """Copy markup entries of the given start/stop types."""
        for mark in self.rec.markup:
            if mark.type in (start, stop):
                self.vid.markup.add(mark.mark, 0, mark.type)
class VIDEO:
    """Export a MythTV recording into the MythVideo storage group.

    Resolves the recording, gathers metadata (grabber or local listing
    data), expands the destination filename format, and streams the
    file through the backend while reporting job progress.
    """

    def __init__(self, opts, jobid=None):
        """Run a full export.

        opts  -- parsed command-line options.
        jobid -- optional MythTV job id; when given, chanid/starttime
                 come from the job and progress is reported to it.
        """
        if jobid:
            self.job = Job(jobid)
            self.chanid = self.job.chanid
            self.starttime = self.job.starttime
            # status 3 — presumably the "starting" job flag; TODO confirm
            self.job.update(status=3)
        else:
            self.job = None
            self.chanid = opts.chanid
            self.starttime = opts.starttime
        self.opts = opts
        self.db = MythDB()
        self.log = MythLog(module='mythvidexport.py', db=self.db)

        # load setting strings
        self.get_format()

        # prep objects
        self.rec = Recorded((self.chanid, self.starttime), db=self.db)
        self.log(MythLog.IMPORTANT, 'Using recording',
                 '%s - %s' % (self.rec.title, self.rec.subtitle))
        self.vid = Video(db=self.db).create({'title': '', 'filename': '',
                                             'host': gethostname()})

        # process data
        self.get_meta()
        self.get_dest()
        # kludgy fix for issue with altered filenames in the bindings
        self.vid.markup._refdat = (self.vid.filename,)

        # save file
        self.copy()
        if opts.seekdata:
            self.copy_seek()
        if opts.skiplist:
            self.copy_markup(static.MARKUP.MARK_COMM_START,
                             static.MARKUP.MARK_COMM_END)
        if opts.cutlist:
            self.copy_markup(static.MARKUP.MARK_CUT_START,
                             static.MARKUP.MARK_CUT_END)
        self.vid.update()

    def get_format(self):
        """Load the TV/Movie/Generic filename format strings.

        Precedence: command-line option, then per-host database setting,
        then a built-in default.
        """
        host = self.db.gethostname()
        # TV Format
        if self.opts.tformat:
            self.tfmt = self.opts.tformat
        elif self.db.settings[host]['mythvideo.TVexportfmt']:
            self.tfmt = self.db.settings[host]['mythvideo.TVexportfmt']
        else:
            self.tfmt = 'Television/%TITLE%/Season %SEASON%/' + \
                        '%TITLE% - S%SEASON%E%EPISODEPAD% - %SUBTITLE%'
        # Movie Format
        if self.opts.mformat:
            self.mfmt = self.opts.mformat
        elif self.db.settings[host]['mythvideo.MOVIEexportfmt']:
            self.mfmt = self.db.settings[host]['mythvideo.MOVIEexportfmt']
        else:
            self.mfmt = 'Movies/%TITLE%'
        # Generic Format
        if self.opts.gformat:
            self.gfmt = self.opts.gformat
        elif self.db.settings[host]['mythvideo.GENERICexportfmt']:
            self.gfmt = self.db.settings[host]['mythvideo.GENERICexportfmt']
        else:
            self.gfmt = 'Videos/%TITLE%'

    def get_meta(self):
        """Determine export type (TV/MOVIE/GENERIC) and import metadata.

        A present subtitle implies a TV episode, otherwise a movie.
        --listingonly or a failed grabber search falls back to local
        listing data via get_generic(); ambiguous grabber matches raise
        MythError.
        """
        self.vid.hostname = self.db.gethostname()
        if self.rec.subtitle:
            # subtitle exists, assume tv show
            self.type = 'TV'
            self.log(self.log.IMPORTANT, 'Attempting TV export.')
            if self.opts.listingonly:
                self.log(self.log.IMPORTANT, 'Forcing listing data only.')
                self.get_generic(False)
                return
            grab = VideoGrabber(self.type)
            match = grab.sortedSearch(self.rec.title, self.rec.subtitle)
        else:
            # assume movie
            self.type = 'MOVIE'
            self.log(self.log.IMPORTANT, 'Attempting Movie export.')
            if self.opts.listingonly:
                self.log(self.log.IMPORTANT, 'Forcing listing data only.')
                self.get_generic(False)
                return
            grab = VideoGrabber(self.type)
            match = grab.sortedSearch(self.rec.title)
        if len(match) == 0:
            # no match found
            self.log(self.log.IMPORTANT, 'Falling back to generic export.')
            self.get_generic()
        elif (len(match) > 1) & (match[0].levenshtein > 0):
            # multiple matches found, and closest is not exact
            self.vid.delete()
            raise MythError('Multiple metadata matches found: '
                            + self.rec.title)
        else:
            self.log(self.log.IMPORTANT, 'Importing content from',
                     match[0].inetref)
            self.vid.importMetadata(grab.grabInetref(match[0]))

    def get_generic(self, name_as_generic=True):
        """Populate video metadata from the recording's listing data.

        name_as_generic -- when True, also mark the export type GENERIC
        (False keeps the TV/MOVIE type chosen by the caller).
        """
        self.vid.title = self.rec.title
        if self.rec.subtitle:
            self.vid.subtitle = self.rec.subtitle
        if self.rec.description:
            self.vid.plot = self.rec.description
        if self.rec.originalairdate:
            self.vid.year = self.rec.originalairdate.year
            self.vid.releasedate = self.rec.originalairdate
        lsec = (self.rec.endtime - self.rec.starttime).seconds
        self.vid.length = str(lsec / 60)
        for member in self.rec.cast:
            if member.role == 'director':
                self.vid.director = member.name
            elif member.role == 'actor':
                self.vid.cast.append(member.name)
        if name_as_generic:
            self.type = 'GENERIC'

    def get_dest(self):
        """Set the destination filename from the format for self.type."""
        if self.type == 'TV':
            self.vid.filename = self.process_fmt(self.tfmt)
        elif self.type == 'MOVIE':
            self.vid.filename = self.process_fmt(self.mfmt)
        elif self.type == 'GENERIC':
            self.vid.filename = self.process_fmt(self.gfmt)

    def process_fmt(self, fmt):
        """Expand %TAG% placeholders in *fmt* and append the extension.

        Tags come first from the Video metadata, then from the Recorded
        program data, then the first genre if any.  Unresolvable tags
        become ''.
        """
        # replace fields from viddata
        ext = '.' + self.rec.basename.rsplit('.', 1)[1]
        rep = (('%TITLE%', 'title', '%s'),
               ('%SUBTITLE%', 'subtitle', '%s'),
               ('%SEASON%', 'season', '%d'),
               ('%SEASONPAD%', 'season', '%02d'),
               ('%EPISODE%', 'episode', '%d'),
               ('%EPISODEPAD%', 'episode', '%02d'),
               ('%YEAR%', 'year', '%s'),
               ('%DIRECTOR%', 'director', '%s'))
        for tag, data, format in rep:
            if self.vid[data]:
                fmt = fmt.replace(tag, format % self.vid[data])
            else:
                fmt = fmt.replace(tag, '')
        # replace fields from program data
        # BUGFIX: the hostname tag was written '%HOSTNAME' (missing the
        # closing '%'), which left a stray '%' in every expanded
        # filename; other versions of this class use '%HOSTNAME%'.
        rep = (('%HOSTNAME%', 'hostname', '%s'),
               ('%STORAGEGROUP%', 'storagegroup', '%s'))
        for tag, data, format in rep:
            data = getattr(self.rec, data)
            fmt = fmt.replace(tag, format % data)
        if len(self.vid.genre):
            fmt = fmt.replace('%GENRE%', self.vid.genre[0].genre)
        else:
            fmt = fmt.replace('%GENRE%', '')
        return fmt + ext

    def copy(self):
        """Stream the recording into MythVideo, reporting job progress."""
        stime = time.time()
        srcsize = self.rec.filesize
        # sliding window of 4 timestamps -> rate over the last 4 chunks
        htime = [stime, stime, stime, stime]
        self.log.log(MythLog.IMPORTANT | MythLog.FILE,
                     "Copying myth://%s@%s/%s"
                     % (self.rec.storagegroup, self.rec.hostname,
                        self.rec.basename)
                     + " to myth://Videos@%s/%s"
                     % (self.vid.host, self.vid.filename))
        srcfp = self.rec.open('r')
        dstfp = self.vid.open('w')
        if self.job:
            # status 4 — presumably the "running" job flag; TODO confirm
            self.job.setStatus(4)
        tsize = 2 ** 24
        # loop ends on the first short (final) chunk
        while tsize == 2 ** 24:
            tsize = min(tsize, srcsize - dstfp.tell())
            dstfp.write(srcfp.read(tsize))
            htime.append(time.time())
            rate = float(tsize * 4) / (time.time() - htime.pop(0))
            remt = (srcsize - dstfp.tell()) / rate
            if self.job:
                self.job.setComment(
                    "%02d%% complete - %d seconds remaining"
                    % (dstfp.tell() * 100 / srcsize, remt))
        srcfp.close()
        dstfp.close()
        self.vid.hash = self.vid.getHash()
        self.log(MythLog.IMPORTANT | MythLog.FILE, "Transfer Complete",
                 "%d seconds elapsed" % int(time.time() - stime))
        if self.job:
            self.job.setComment("Complete - %d seconds elapsed"
                                % (int(time.time() - stime)))
            # status 256 (0x100) — presumably the "done" job flag;
            # TODO confirm against the MythTV Job status constants
            self.job.setStatus(256)

    def copy_seek(self):
        """Copy seek-table entries from the recording to the video."""
        for seek in self.rec.seek:
            self.vid.markup.add(seek.mark, seek.offset, seek.type)

    def copy_markup(self, start, stop):
        """Copy markup entries of the given start/stop types."""
        for mark in self.rec.markup:
            if mark.type in (start, stop):
                self.vid.markup.add(mark.mark, 0, mark.type)
def runjob(jobid=None, chanid=None, starttime=None, tzoffset=None):
    """Prepare a recording for transcode, ensuring commercials are flagged.

    Invoked either as a MythTV user job (jobid given) or directly with
    chanid + a "%Y%m%d%H%M%S" starttime string plus a tzoffset in hours.
    If the recording is already commflagged, waits for any RUNNING
    commercial-flagging job to finish; otherwise (when the module-level
    require_commflagged flag is set) cancels queued flagging jobs and
    runs mythcommflag itself.  Relies on module-level settings: debug,
    POLL_INTERVAL, require_commflagged, estimateBitrate.
    """
    global estimateBitrate
    db = MythDB()
    if jobid:
        # Running as a queued user job: pull the recording keys from it.
        job = Job(jobid, db=db)
        chanid = job.chanid
        utcstarttime = job.starttime
    else:
        job=None;
        # Command-line invocation: parse the local starttime string and
        # shift it to UTC by the supplied hour offset.
        utcstarttime = datetime.strptime(starttime, "%Y%m%d%H%M%S")
        utcstarttime = utcstarttime + timedelta(hours=tzoffset)
    if debug:
        print 'chanid "%s"' % chanid
        print 'utcstarttime "%s"' % utcstarttime
    rec = Recorded((chanid, utcstarttime), db=db);
    # normalize to the authoritative starttime stored on the recording
    utcstarttime = rec.starttime;
    starttime_datetime = utcstarttime
    # reformat 'starttime' for use with mythtranscode/ffmpeg/mythcommflag
    # (collapse ISO text down to the bare YYYYMMDDHHMMSS digits)
    starttime = str(utcstarttime.utcisoformat().replace(u':', '').replace(u' ', '').replace(u'T', '').replace('-', ''))
    if debug:
        print 'mythtv format starttime "%s"' % starttime
    input_filesize = rec.filesize
    if rec.commflagged:
        if debug:
            print 'Recording has been scanned to detect commerical breaks.'
        # Poll until no commercial-flagging job on this recording is
        # still RUNNING; pauses this job with a progress comment while
        # waiting.
        waititer=1
        keepWaiting = True
        while keepWaiting == True:
            keepWaiting=False;
            for index,jobitem in reversed(list(enumerate(db.searchJobs(chanid=chanid, starttime=starttime_datetime)))):
                if jobitem.type == jobitem.COMMFLAG:  # Commercial flagging job
                    if debug:
                        print 'Commercial flagging job detected with status %s' % jobitem.status
                    if jobitem.status == jobitem.RUNNING:  # status = RUNNING?
                        # NOTE(review): 'job' is None when invoked without
                        # jobid — presumably this path only runs as a user
                        # job; confirm before relying on it
                        job.update({'status':job.PAUSED, 'comment':'Waited %d secs for the commercial flagging job' % (waititer*POLL_INTERVAL) \
                            + ' currently running on this recording to complete.'})
                        if debug:
                            print 'Waited %d secs for the commercial flagging job' % (waititer*POLL_INTERVAL) \
                                + ' currently running on this recording to complete.'
                        time.sleep(POLL_INTERVAL);
                        keepWaiting=True
                        waititer = waititer + 1
                        break
    else:
        if debug:
            print 'Recording has not been scanned to detect/remove commercial breaks.'
        if require_commflagged:
            if jobid:
                job.update({'status':job.RUNNING, 'comment':'Required commercial flagging for this file is not found.'
                    + 'Flagging commercials and cancelling any queued commercial flagging.'})
            # cancel any queued job to flag commercials for this recording and run commercial flagging in this script
            for index,jobitem in reversed(list(enumerate(db.searchJobs(chanid=chanid,starttime=starttime_datetime)))):
                if debug:
                    if index==0:
                        print jobitem.keys()
                    print index,jobitem.id,jobitem.chanid
                if jobitem.type == jobitem.COMMFLAG:  # Commercial flagging job
                    if jobitem.status == jobitem.RUNNING:  # status = RUNNING?
                        jobitem.cmds = jobitem.STOP  # stop command from the frontend to stop the commercial flagging job
                    #jobitem.setStatus(jobitem.CANCELLED)
                    #jobitem.setComment('Cancelled: Transcode command ran commercial flagging for this recording.')
                    jobitem.update({'status':jobitem.CANCELLED, 'comment':'A user transcode job ran commercial flagging for'
                        + ' this recording and cancelled this job.'})
            if debug:
                print 'Flagging Commercials...'
            # Call "mythcommflag --chanid $CHANID --starttime $STARTTIME"
            task = System(path='mythcommflag', db=db)
            try:
                output = task('--chanid "%s"' % chanid,
                              '--starttime "%s"' % starttime,
                              '2> /dev/null')
            except MythError, e:
                # it seems mythcommflag always exits with an decoding error
                # "eno: Unknown error 541478725 (541478725)" — deliberately
                # ignored as best-effort
                pass
class VIDEO:
    """Export a MythTV recording into the MythVideo storage group.

    Resolves the recording, gathers metadata (grabber or local listing
    data), expands the destination filename format, and streams the
    file through the backend while reporting job progress.
    """

    def __init__(self, opts, jobid=None):
        """Run a full export.

        opts  -- parsed command-line options.
        jobid -- optional MythTV job id; when given, chanid/starttime
                 come from the job and progress is reported to it.
        """
        if jobid:
            self.job = Job(jobid)
            self.chanid = self.job.chanid
            self.starttime = self.job.starttime
            # status 3 — presumably the "starting" job flag; TODO confirm
            self.job.update(status=3)
        else:
            self.job = None
            self.chanid = opts.chanid
            self.starttime = opts.starttime
        self.opts = opts
        self.db = MythDB()
        self.log = MythLog(module='mythvidexport.py', db=self.db)

        # load setting strings
        self.get_format()

        # prep objects
        self.rec = Recorded((self.chanid, self.starttime), db=self.db)
        self.log(MythLog.IMPORTANT, 'Using recording',
                 '%s - %s' % (self.rec.title, self.rec.subtitle))
        self.vid = Video(db=self.db).create({
            'title': '',
            'filename': '',
            'host': gethostname()
        })

        # process data
        self.get_meta()
        self.get_dest()

        # save file
        self.copy()
        if opts.seekdata:
            self.copy_seek()
        if opts.skiplist:
            self.copy_markup(static.MARKUP.MARK_COMM_START,
                             static.MARKUP.MARK_COMM_END)
        if opts.cutlist:
            self.copy_markup(static.MARKUP.MARK_CUT_START,
                             static.MARKUP.MARK_CUT_END)
        self.vid.update()

    def get_format(self):
        """Load the TV/Movie/Generic filename format strings.

        Precedence: command-line option, then per-host database setting,
        then a built-in default.
        """
        host = self.db.gethostname()
        # TV Format
        if self.opts.tformat:
            self.tfmt = self.opts.tformat
        elif self.db.settings[host]['mythvideo.TVexportfmt']:
            self.tfmt = self.db.settings[host]['mythvideo.TVexportfmt']
        else:
            self.tfmt = 'Television/%TITLE%/Season %SEASON%/'+\
                        '%TITLE% - S%SEASON%E%EPISODEPAD% - %SUBTITLE%'
        # Movie Format
        if self.opts.mformat:
            self.mfmt = self.opts.mformat
        elif self.db.settings[host]['mythvideo.MOVIEexportfmt']:
            self.mfmt = self.db.settings[host]['mythvideo.MOVIEexportfmt']
        else:
            self.mfmt = 'Movies/%TITLE%'
        # Generic Format
        if self.opts.gformat:
            self.gfmt = self.opts.gformat
        elif self.db.settings[host]['mythvideo.GENERICexportfmt']:
            self.gfmt = self.db.settings[host]['mythvideo.GENERICexportfmt']
        else:
            self.gfmt = 'Videos/%TITLE%'

    def get_meta(self):
        """Determine export type (TV/MOVIE/GENERIC) and import metadata.

        A present subtitle implies a TV episode, otherwise a movie.
        --listingonly or a failed grabber search falls back to local
        listing data via get_generic(); ambiguous grabber matches raise
        MythError.
        """
        self.vid.hostname = self.db.gethostname()
        if self.rec.subtitle:
            # subtitle exists, assume tv show
            self.type = 'TV'
            self.log(self.log.IMPORTANT, 'Attempting TV export.')
            if self.opts.listingonly:
                self.log(self.log.IMPORTANT, 'Forcing listing data only.')
                self.get_generic(False)
                return
            grab = VideoGrabber(self.type)
            match = grab.sortedSearch(self.rec.title, self.rec.subtitle)
        else:
            # assume movie
            self.type = 'MOVIE'
            self.log(self.log.IMPORTANT, 'Attempting Movie export.')
            if self.opts.listingonly:
                self.log(self.log.IMPORTANT, 'Forcing listing data only.')
                self.get_generic(False)
                return
            grab = VideoGrabber(self.type)
            match = grab.sortedSearch(self.rec.title)
        if len(match) == 0:
            # no match found
            self.log(self.log.IMPORTANT, 'Falling back to generic export.')
            self.get_generic()
        elif (len(match) > 1) & (match[0].levenshtein > 0):
            # multiple matches found, and closest is not exact
            self.vid.delete()
            raise MythError('Multiple metadata matches found: '
                            + self.rec.title)
        else:
            self.log(self.log.IMPORTANT, 'Importing content from',
                     match[0].inetref)
            self.vid.importMetadata(grab.grabInetref(match[0]))

    def get_generic(self, name_as_generic=True):
        """Populate video metadata from the recording's listing data.

        name_as_generic -- when True, also mark the export type GENERIC
        (False keeps the TV/MOVIE type chosen by the caller).
        """
        self.vid.title = self.rec.title
        if self.rec.subtitle:
            self.vid.subtitle = self.rec.subtitle
        if self.rec.description:
            self.vid.plot = self.rec.description
        if self.rec.originalairdate:
            self.vid.year = self.rec.originalairdate.year
            self.vid.releasedate = self.rec.originalairdate
        lsec = (self.rec.endtime - self.rec.starttime).seconds
        self.vid.length = str(lsec / 60)
        for member in self.rec.cast:
            if member.role == 'director':
                self.vid.director = member.name
            elif member.role == 'actor':
                self.vid.cast.append(member.name)
        if name_as_generic:
            self.type = 'GENERIC'

    def get_dest(self):
        """Set the destination filename from the format for self.type."""
        if self.type == 'TV':
            self.vid.filename = self.process_fmt(self.tfmt)
        elif self.type == 'MOVIE':
            self.vid.filename = self.process_fmt(self.mfmt)
        elif self.type == 'GENERIC':
            self.vid.filename = self.process_fmt(self.gfmt)

    def process_fmt(self, fmt):
        """Expand %TAG% placeholders in *fmt* and append the extension.

        Tags come first from the Video metadata, then from the Recorded
        program data, then the first genre if any.  Unresolvable tags
        become ''.
        """
        # replace fields from viddata
        ext = '.' + self.rec.basename.rsplit('.', 1)[1]
        rep = (('%TITLE%', 'title', '%s'),
               ('%SUBTITLE%', 'subtitle', '%s'),
               ('%SEASON%', 'season', '%d'),
               ('%SEASONPAD%', 'season', '%02d'),
               ('%EPISODE%', 'episode', '%d'),
               ('%EPISODEPAD%', 'episode', '%02d'),
               ('%YEAR%', 'year', '%s'),
               ('%DIRECTOR%', 'director', '%s'))
        for tag, data, format in rep:
            if self.vid[data]:
                fmt = fmt.replace(tag, format % self.vid[data])
            else:
                fmt = fmt.replace(tag, '')
        # replace fields from program data
        # BUGFIX: the hostname tag was written '%HOSTNAME' (missing the
        # closing '%'), which left a stray '%' in every expanded
        # filename; other versions of this class use '%HOSTNAME%'.
        rep = (('%HOSTNAME%', 'hostname', '%s'),
               ('%STORAGEGROUP%', 'storagegroup', '%s'))
        for tag, data, format in rep:
            data = getattr(self.rec, data)
            fmt = fmt.replace(tag, format % data)
        if len(self.vid.genre):
            fmt = fmt.replace('%GENRE%', self.vid.genre[0].genre)
        else:
            fmt = fmt.replace('%GENRE%', '')
        return fmt + ext

    def copy(self):
        """Stream the recording into MythVideo, reporting job progress."""
        stime = time.time()
        srcsize = self.rec.filesize
        # sliding window of 4 timestamps -> rate over the last 4 chunks
        htime = [stime, stime, stime, stime]
        self.log.log(MythLog.IMPORTANT|MythLog.FILE,
                     "Copying myth://%s@%s/%s"
                     % (self.rec.storagegroup, self.rec.hostname,
                        self.rec.basename)
                     + " to myth://Videos@%s/%s"
                     % (self.vid.host, self.vid.filename))
        srcfp = self.rec.open('r')
        dstfp = self.vid.open('w')
        if self.job:
            # status 4 — presumably the "running" job flag; TODO confirm
            self.job.setStatus(4)
        tsize = 2**24
        # loop ends on the first short (final) chunk
        while tsize == 2**24:
            tsize = min(tsize, srcsize - dstfp.tell())
            dstfp.write(srcfp.read(tsize))
            htime.append(time.time())
            rate = float(tsize * 4) / (time.time() - htime.pop(0))
            remt = (srcsize - dstfp.tell()) / rate
            if self.job:
                self.job.setComment(
                    "%02d%% complete - %d seconds remaining"
                    % (dstfp.tell()*100/srcsize, remt))
        srcfp.close()
        dstfp.close()
        self.vid.hash = self.vid.getHash()
        self.log(MythLog.IMPORTANT | MythLog.FILE, "Transfer Complete",
                 "%d seconds elapsed" % int(time.time() - stime))
        if self.job:
            self.job.setComment("Complete - %d seconds elapsed"
                                % (int(time.time()-stime)))
            # status 256 (0x100) — presumably the "done" job flag;
            # TODO confirm against the MythTV Job status constants
            self.job.setStatus(256)

    def copy_seek(self):
        """Copy seek-table entries from the recording to the video."""
        for seek in self.rec.seek:
            self.vid.markup.add(seek.mark, seek.offset, seek.type)

    def copy_markup(self, start, stop):
        """Copy markup entries of the given start/stop types."""
        for mark in self.rec.markup:
            if mark.type in (start, stop):
                self.vid.markup.add(mark.mark, 0, mark.type)
class VIDEO:
    """Export a MythTV recording into the MythVideo storage group.

    Latest variant: prefers local inetref data (feeding it back through
    a VideoGrabber), refuses to overwrite an existing destination, and
    supports --safe (size check) and --reallysafe (hash check)
    verification after the copy.
    """

    def __init__(self, opts, jobid=None):
        # Run a full export.  opts: parsed command-line options; jobid:
        # optional MythTV job id — when given, chanid/starttime come
        # from the job and progress is reported back to it.
        if jobid:
            self.job = Job(jobid)
            self.chanid = self.job.chanid
            self.starttime = self.job.starttime
            self.job.update(status=Job.STARTING)
        else:
            self.job = None
            self.chanid = opts.chanid
            self.starttime = opts.starttime
        self.opts = opts
        self.db = MythDB()
        self.log = MythLog(module='mythvidexport.py', db=self.db)

        # load setting strings
        self.get_format()

        # prep objects
        self.rec = Recorded((self.chanid, self.starttime), db=self.db)
        self.log(
            MythLog.GENERAL, MythLog.INFO, 'Using recording',
            '%s - %s' % (self.rec.title.encode('utf-8'),
                         self.rec.subtitle.encode('utf-8')))
        self.vid = Video(db=self.db).create({
            'title': '',
            'filename': '',
            'host': gethostname()
        })

        # process data
        self.get_meta()
        self.get_dest()

        # bug fix to work around limitation in the bindings where DBDataRef classes
        # are mapped to the filename at time of Video element creation. since the
        # filename is specified as blank when the video is created, the markup
        # handler is not properly initialized
        self.vid.markup._refdat = (self.vid.filename, )

        # save file
        self.copy()
        if opts.seekdata:
            self.copy_seek()
        if opts.skiplist:
            self.copy_markup(static.MARKUP.MARK_COMM_START,
                             static.MARKUP.MARK_COMM_END)
        if opts.cutlist:
            self.copy_markup(static.MARKUP.MARK_CUT_START,
                             static.MARKUP.MARK_CUT_END)
        self.vid.update()

        # delete old file
        if opts.delete:
            self.rec.delete()

    def get_format(self):
        # Load the TV/Movie/Generic filename format strings.
        # Precedence: command-line option, then per-host database
        # setting, then a built-in default.
        host = self.db.gethostname()
        # TV Format
        if self.opts.tformat:
            self.tfmt = self.opts.tformat
        elif self.db.settings[host]['mythvideo.TVexportfmt']:
            self.tfmt = self.db.settings[host]['mythvideo.TVexportfmt']
        else:
            self.tfmt = 'Television/%TITLE%/Season %SEASON%/'+\
                        '%TITLE% - S%SEASON%E%EPISODEPAD% - %SUBTITLE%'
        # Movie Format
        if self.opts.mformat:
            self.mfmt = self.opts.mformat
        elif self.db.settings[host]['mythvideo.MOVIEexportfmt']:
            self.mfmt = self.db.settings[host]['mythvideo.MOVIEexportfmt']
        else:
            self.mfmt = 'Movies/%TITLE%'
        # Generic Format
        if self.opts.gformat:
            self.gfmt = self.opts.gformat
        elif self.db.settings[host]['mythvideo.GENERICexportfmt']:
            self.gfmt = self.db.settings[host]['mythvideo.GENERICexportfmt']
        else:
            self.gfmt = 'Videos/%TITLE%'

    def get_meta(self):
        # Determine export type (TV/MOVIE/GENERIC) and import metadata.
        # A stored inetref takes priority and is refreshed through a
        # grabber; --listingonly forces local data; otherwise a grabber
        # search is used, raising MythError on ambiguous matches.
        self.vid.hostname = self.db.gethostname()
        if self.rec.inetref:
            # good data is available, use it
            if self.rec.season > 0 or self.rec.episode > 0:
                self.log(self.log.GENERAL, self.log.INFO,
                         'Performing TV export with local data.')
                self.type = 'TV'
                grab = VideoGrabber(self.type)
                metadata = grab.grabInetref(self.rec.inetref,
                                            self.rec.season,
                                            self.rec.episode)
            else:
                self.log(self.log.GENERAL, self.log.INFO,
                         'Performing Movie export with local data.')
                self.type = 'MOVIE'
                grab = VideoGrabber(self.type)
                metadata = grab.grabInetref(self.rec.inetref)
        elif self.opts.listingonly:
            # force use of local data
            if self.rec.subtitle:
                self.log(self.log.GENERAL, self.log.INFO,
                         'Forcing TV export with local data.')
                self.type = 'TV'
            else:
                self.log(self.log.GENERAL, self.log.INFO,
                         'Forcing Movie export with local data.')
                self.type = 'MOVIE'
            metadata = self.rec.exportMetadata()
        else:
            if self.rec.subtitle:
                # subtitle exists, assume tv show
                self.type = 'TV'
                self.log(self.log.GENERAL, self.log.INFO,
                         'Attempting TV export.')
                grab = VideoGrabber(self.type)
                match = grab.sortedSearch(self.rec.title, self.rec.subtitle)
            else:
                # assume movie
                self.type = 'MOVIE'
                self.log(self.log.GENERAL, self.log.INFO,
                         'Attempting Movie export.')
                grab = VideoGrabber(self.type)
                match = grab.sortedSearch(self.rec.title)
            if len(match) == 0:
                # no match found
                self.log(self.log.GENERAL, self.log.INFO,
                         'Falling back to generic export.')
                self.type = 'GENERIC'
                metadata = self.rec.exportMetadata()
            elif (len(match) > 1) & (match[0].levenshtein > 0):
                # multiple matches found, and closest is not exact
                self.vid.delete()
                raise MythError('Multiple metadata matches found: '\
                        +self.rec.title)
            else:
                self.log(self.log.GENERAL, self.log.INFO,
                         'Importing content from', match[0].inetref)
                metadata = grab.grabInetref(match[0])
        self.vid.importMetadata(metadata)
        self.log(self.log.GENERAL, self.log.INFO, 'Import complete')

    def get_dest(self):
        # Set the destination filename from the format for self.type.
        if self.type == 'TV':
            self.vid.filename = self.process_fmt(self.tfmt)
        elif self.type == 'MOVIE':
            self.vid.filename = self.process_fmt(self.mfmt)
        elif self.type == 'GENERIC':
            self.vid.filename = self.process_fmt(self.gfmt)

    def process_fmt(self, fmt):
        # Expand %TAG% placeholders in fmt and append the source file's
        # extension.  Tags resolve from Video metadata first, then the
        # Recorded program data, then the first genre; unresolvable
        # tags become ''.
        # replace fields from viddata
        #print self.vid.data
        ext = '.' + self.rec.basename.rsplit('.', 1)[1]
        rep = (('%TITLE%', 'title', '%s'),
               ('%SUBTITLE%', 'subtitle', '%s'),
               ('%SEASON%', 'season', '%d'),
               ('%SEASONPAD%', 'season', '%02d'),
               ('%EPISODE%', 'episode', '%d'),
               ('%EPISODEPAD%', 'episode', '%02d'),
               ('%YEAR%', 'year', '%s'),
               ('%DIRECTOR%', 'director', '%s'))
        for tag, data, format in rep:
            if self.vid[data]:
                fmt = fmt.replace(tag, format % self.vid[data])
            else:
                fmt = fmt.replace(tag, '')
        # replace fields from program data
        rep = (('%HOSTNAME%', 'hostname', '%s'),
               ('%STORAGEGROUP%', 'storagegroup', '%s'))
        for tag, data, format in rep:
            data = getattr(self.rec, data)
            fmt = fmt.replace(tag, format % data)
        # fmt = fmt.replace('%CARDID%',self.rec.cardid)
        # fmt = fmt.replace('%CARDNAME%',self.rec.cardid)
        # fmt = fmt.replace('%SOURCEID%',self.rec.cardid)
        # fmt = fmt.replace('%SOURCENAME%',self.rec.cardid)
        # fmt = fmt.replace('%CHANNUM%',self.rec.channum)
        # fmt = fmt.replace('%CHANNAME%',self.rec.cardid)
        if len(self.vid.genre):
            fmt = fmt.replace('%GENRE%', self.vid.genre[0].genre)
        else:
            fmt = fmt.replace('%GENRE%', '')
        # if len(self.country):
        #     fmt = fmt.replace('%COUNTRY%',self.country[0])
        # else:
        #     fmt = fmt.replace('%COUNTRY%','')
        return fmt + ext

    def copy(self):
        # Stream the recording into MythVideo (refusing to overwrite an
        # existing destination), reporting job progress; then verify
        # the transfer per --reallysafe (hashes) or --safe (sizes),
        # raising MythError on mismatch.
        stime = time.time()
        srcsize = self.rec.filesize
        # sliding window of 4 timestamps -> rate over the last 4 chunks
        htime = [stime, stime, stime, stime]
        self.log(MythLog.GENERAL|MythLog.FILE, MythLog.INFO,
                 "Copying myth://%s@%s/%s"\
                     % (self.rec.storagegroup, self.rec.hostname,
                        self.rec.basename)\
                 +" to myth://Videos@%s/%s"\
                     % (self.vid.host, self.vid.filename))
        srcfp = self.rec.open('r')
        dstfp = self.vid.open('w', nooverwrite=True)
        if self.job:
            self.job.setStatus(Job.RUNNING)
        tsize = 2**24
        # loop ends on the first short (final) chunk
        while tsize == 2**24:
            tsize = min(tsize, srcsize - dstfp.tell())
            dstfp.write(srcfp.read(tsize))
            htime.append(time.time())
            rate = float(tsize * 4) / (time.time() - htime.pop(0))
            remt = (srcsize - dstfp.tell()) / rate
            if self.job:
                self.job.setComment("%02d%% complete - %d seconds remaining" %\
                                    (dstfp.tell()*100/srcsize, remt))
        srcfp.close()
        dstfp.close()
        self.vid.hash = self.vid.getHash()
        self.log(MythLog.GENERAL | MythLog.FILE, MythLog.INFO,
                 "Transfer Complete",
                 "%d seconds elapsed" % int(time.time() - stime))
        if self.opts.reallysafe:
            if self.job:
                self.job.setComment("Checking file hashes")
            self.log(MythLog.GENERAL | MythLog.FILE, MythLog.INFO,
                     "Checking file hashes.")
            srchash = hashfile(self.rec.open('r'))
            dsthash = hashfile(self.vid.open('r'))
            if srchash != dsthash:
                raise MythError('Source hash (%s) does not match destination hash (%s)' \
                        % (srchash, dsthash))
        elif self.opts.safe:
            self.log(MythLog.GENERAL | MythLog.FILE, MythLog.INFO,
                     "Checking file sizes.")
            be = MythBE(db=self.vid._db)
            try:
                srcsize = be.getSGFile(self.rec.hostname,
                                       self.rec.storagegroup, \
                                       self.rec.basename)[1]
                dstsize = be.getSGFile(self.vid.host, 'Videos',
                                       self.vid.filename)[1]
            except:
                # NOTE(review): broad except deliberately collapses any
                # backend failure into one error
                raise MythError('Could not query file size from backend')
            if srcsize != dstsize:
                raise MythError('Source size (%d) does not match destination size (%d)' \
                        % (srcsize, dstsize))
        if self.job:
            self.job.setComment("Complete - %d seconds elapsed" % \
                                (int(time.time()-stime)))
            self.job.setStatus(Job.FINISHED)

    def copy_seek(self):
        # Copy seek-table entries from the recording to the video.
        for seek in self.rec.seek:
            self.vid.markup.add(seek.mark, seek.offset, seek.type)

    def copy_markup(self, start, stop):
        # Copy markup entries of the given start/stop types.
        for mark in self.rec.markup:
            if mark.type in (start, stop):
                self.vid.markup.add(mark.mark, 0, mark.type)
def runjob(jobid=None, chanid=None, starttime=None, tzoffset=None):
    """Transcode a MythTV recording to H.264/MP4 in place.

    Either *jobid* (job-queue run) or *chanid* + *starttime* + *tzoffset*
    (command-line run) identifies the recording.  Steps: wait for / run
    commercial flagging, optionally generate and apply a cutlist with
    mythtranscode, estimate the bitrate with ffmpeg, run the threaded
    encode while polling its status file for progress, then update the
    recording's metadata/markup/seek table and delete the source file.

    NOTE(review): relies on module-level configuration globals (debug,
    require_commflagged, generate_commcutlist, flush_commskip,
    build_seektable, transcoder, crf, preset_HD/preset_nonHD, bitrate
    limits, POLL_INTERVAL, CleanExit, wrapper, encode, get_duration) —
    assumed to be defined earlier in this file; confirm against the full
    script.
    """
    global estimateBitrate
    db = MythDB()

    if jobid:
        job = Job(jobid, db=db)
        chanid = job.chanid
        utcstarttime = job.starttime
    else:
        job=None;
        #utcstarttime = datetime.strptime(starttime, "%Y%m%d%H%M%S%z")
        utcstarttime = parse(starttime)
        # caller-supplied tzoffset converts local time to UTC
        utcstarttime = utcstarttime + timedelta(hours=tzoffset)
    if debug:
        print('chanid "%s"' % chanid)
        print('utcstarttime "%s"' % utcstarttime)

    rec = Recorded((chanid, utcstarttime), db=db);
    utcstarttime = rec.starttime;
    starttime_datetime = utcstarttime

    # reformat 'starttime' for use with mythtranscode/ffmpeg/mythcommflag
    starttime = str(utcstarttime.utcisoformat().replace(':', '').replace(' ', '').replace('T', '').replace('-', ''))
    if debug:
        print('mythtv format starttime "%s"' % starttime)

    input_filesize = rec.filesize

    if rec.commflagged:
        if debug:
            print('Recording has been scanned to detect commerical breaks.')
        # Poll until no commercial-flagging job is running on this recording.
        waititer=1
        keepWaiting = True
        while keepWaiting == True:
            keepWaiting=False;
            for index,jobitem in reversed(list(enumerate(db.searchJobs(chanid=chanid, starttime=starttime_datetime)))):
                if jobitem.type == jobitem.COMMFLAG:  # Commercial flagging job
                    if debug:
                        print('Commercial flagging job detected with status %s' % jobitem.status)
                    if jobitem.status == jobitem.RUNNING:  # status = RUNNING?
                        # NOTE(review): 'job' is None on a command-line run, so
                        # this update would raise AttributeError — verify the
                        # script is only reached here from the job queue.
                        job.update({'status':job.PAUSED,
                                    'comment':'Waited %d secs for the commercial flagging job' % (waititer*POLL_INTERVAL) \
                                    + ' currently running on this recording to complete.'})
                        if debug:
                            print('Waited %d secs for the commercial flagging job' % (waititer*POLL_INTERVAL) \
                                  + ' currently running on this recording to complete.')
                        time.sleep(POLL_INTERVAL);
                        keepWaiting=True
                        waititer = waititer + 1
                        break
    else:
        if debug:
            print('Recording has not been scanned to detect/remove commercial breaks.')
        if require_commflagged:
            if jobid:
                job.update({'status':job.RUNNING,
                            'comment':'Required commercial flagging for this file is not found.'
                            + 'Flagging commercials and cancelling any queued commercial flagging.'})
            # cancel any queued job to flag commercials for this recording and run commercial flagging in this script
            for index,jobitem in reversed(list(enumerate(db.searchJobs(chanid=chanid,starttime=starttime_datetime)))):
                if debug:
                    if index==0:
                        print(list(jobitem.keys()))
                    print(index,jobitem.id,jobitem.chanid)
                if jobitem.type == jobitem.COMMFLAG:  # Commercial flagging job
                    if jobitem.status == jobitem.RUNNING:  # status = RUNNING?
                        jobitem.cmds = jobitem.STOP  # stop command from the frontend to stop the commercial flagging job
                    #jobitem.setStatus(jobitem.CANCELLED)
                    #jobitem.setComment('Cancelled: Transcode command ran commercial flagging for this recording.')
                    jobitem.update({'status':jobitem.CANCELLED,
                                    'comment':'A user transcode job ran commercial flagging for'
                                    + ' this recording and cancelled this job.'})
            if debug:
                print('Flagging Commercials...')
            # Call "mythcommflag --chanid $CHANID --starttime $STARTTIME"
            task = System(path='mythcommflag', db=db)
            try:
                output = task('--chanid "%s"' % chanid,
                              '--starttime "%s"' % starttime,
                              '2> /dev/null')
            except MythError as e:
                # it seems mythcommflag always exits with an decoding error "eno: Unknown error 541478725 (541478725)"
                pass
                #print 'Command failed with output:\n%s' % e.stderr
                #if jobid:
                #    job.update({'status':304, 'comment':'Flagging commercials failed'})
                #sys.exit(e.retcode)

    # NOTE(review): other runjob variants in this file pass '/' + rec.basename
    # to findfile — confirm which form the bindings expect here.
    sg = findfile(rec.basename, rec.storagegroup, db=db)
    if sg is None:
        print('Local access to recording not found.')
        sys.exit(1)

    infile = os.path.join(sg.dirname, rec.basename)
    tmpfile = '%s.tmp' % infile.rsplit('.',1)[0]
#    tmpfile = infile
    outfile = '%s.mp4' % infile.rsplit('.',1)[0]
    if debug:
        print('tmpfile "%s"' % tmpfile)

    clipped_bytes=0;
    # If selected, create a cutlist to remove commercials via mythtranscode by running:
    # mythutil --gencutlist --chanid $CHANID --starttime $STARTTIME
    if generate_commcutlist:
        if jobid:
            job.update({'status':job.RUNNING, 'comment':'Generating Cutlist for commercial removal'})
        task = System(path='mythutil', db=db)
        try:
            output = task('--gencutlist',
                          '--chanid "%s"' % chanid,
                          '--starttime "%s"' % starttime)
#                          '--loglevel debug',
#                          '2> /dev/null')
        except MythError as e:
            print('Command "mythutil --gencutlist" failed with output:\n%s' % e.stderr)
            if jobid:
                job.update({'status':job.ERRORED, 'comment':'Generation of commercial Cutlist failed'})
            sys.exit(e.retcode)

    # Lossless transcode to strip cutlist
    if generate_commcutlist or rec.cutlist==1:
        if jobid:
            job.update({'status':job.RUNNING, 'comment':'Removing Cutlist'})
        task = System(path='mythtranscode', db=db)
        try:
            output = task('--chanid "%s"' % chanid,
                          '--starttime "%s"' % starttime,
                          '--mpeg2',
                          '--honorcutlist',
                          '-o "%s"' % tmpfile,
                          '1>&2')
#                          '2> /dev/null')
            clipped_filesize = os.path.getsize(tmpfile)
            clipped_bytes = input_filesize - clipped_filesize
            clipped_compress_pct = float(clipped_bytes)/input_filesize
            # cut points removed, so the flagging data no longer applies
            rec.commflagged = 0
        except MythError as e:
            print('Command "mythtranscode --honorcutlist" failed with output:\n%s' % e.stderr)
            if jobid:
                job.update({'status':job.ERRORED, 'comment':'Removing Cutlist failed. Copying file instead.'})
#            sys.exit(e.retcode)
            # fall back to a straight copy of the original recording
            copyfile('%s' % infile, '%s' % tmpfile)
            clipped_filesize = input_filesize
            clipped_bytes = 0
            clipped_compress_pct = 0
            pass
    else:
        if jobid:
            job.update({'status':job.RUNNING, 'comment':'Creating temporary file for transcoding.'})
        copyfile('%s' % infile, '%s' % tmpfile)
        clipped_filesize = input_filesize
        clipped_bytes = 0
        clipped_compress_pct = 0

    duration_secs = 0
    # Estimate bitrate, and detect duration and number of frames
    if estimateBitrate:
        if jobid:
            job.update({'status':job.RUNNING, 'comment':'Estimating bitrate; detecting frames per second, and resolution.'})
        duration_secs, e = get_duration(db, rec, transcoder, tmpfile);
        if duration_secs>0:
            # average input bitrate in kbps
            bitrate = int(clipped_filesize*8/(1024*duration_secs))
        else:
            print('Estimate bitrate failed falling back to constant rate factor encoding.\n')
            estimateBitrate = False
            duration_secs = 0
            print(e.stderr.decode('utf-8'))
        # get framerate of mpeg2 video stream and detect if stream is HD
        r = re.compile('mpeg2video (.*?) fps,')
        m = r.search(e.stderr.decode('utf-8'))
        strval = m.group(1)
        if debug:
            print(strval)
        isHD = False
        if "1920x1080" in strval or "1280x720" in strval or "2560x1440" in strval:
            if debug:
                print('Stream is HD')
            isHD = True
        else:
            if debug:
                print('Stream is not HD')
        framerate = float(m.group(1).split(' ')[-1])
        if debug:
            print('Framerate %s' % framerate)

    # Setup transcode video bitrate and quality parameters
    # if estimateBitrate is true and the input content is HD:
    #     encode 'medium' preset and vbitrate = inputfile_bitrate*compressionRatio
    # else:
    #     encode at user default preset and constant rate factor ('slow' and 20)
    preset = preset_nonHD
    if estimateBitrate:
        if isHD:
            h264_bitrate = int(bitrate*compressionRatio)
            # HD coding with specified target bitrate (CRB encoding)
            if hdvideo_tgt_bitrate > 0 and h264_bitrate > hdvideo_tgt_bitrate:
                h264_bitrate = hdvideo_tgt_bitrate;
                vbitrate_param = '-b:v %dk' % h264_bitrate
            else:
                # HD coding with disabled or acceptable target bitrate (CRF encoding)
                vbitrate_param = '-crf:v %s' % crf
            preset = preset_HD
        else:
            # non-HD encoding (CRF encoding)
            vbitrate_param = '-crf:v %s' % crf
    else:
        vbitrate_param = '-crf:v %s' % crf
    if hdvideo_min_bitrate > 0:
        vbitrate_param = vbitrate_param + ' -minrate %sk' % hdvideo_min_bitrate
    if hdvideo_max_bitrate > 0:
        vbitrate_param = vbitrate_param + ' -maxrate %sk' % hdvideo_max_bitrate
    if hdvideo_max_bitrate > 0 or hdvideo_min_bitrate > 0:
        vbitrate_param = vbitrate_param + ' -bufsize %sk' % device_bufsize
    if debug:
        print('Video bitrate parameter "%s"' % vbitrate_param)
        print('Video h264 preset parameter "%s"' % preset)

    # Setup transcode audio bitrate and quality parameters
    # Right now, the setup is as follows:
    # if input is HD:
    #     copy audio streams to output, i.e., input=output audio
    # else:
    #     output is libfdk_aac encoded at 128kbps
    # NOTE(review): 'isHD' is only assigned when estimateBitrate is true —
    # a CRF-only run would hit a NameError here; confirm against the full
    # script.
    if isHD:
        abitrate_param = abitrate_param_HD  # preserve 5.1 audio
    else:
        abitrate_param = abitrate_param_nonHD
    if debug:
        print('Audio bitrate parameter "%s"' % abitrate_param)

    # Transcode to mp4
    # if jobid:
    #     job.update({'status':4, 'comment':'Transcoding to mp4'})

    # ffmpeg output is redirected to the temporary file tmpstatusfile and
    # a second thread continuously reads this file while
    # the transcode is in-process. see while loop below for the monitoring thread
    tf = tempfile.NamedTemporaryFile()
    tmpstatusfile = tf.name
    # tmpstatusfile = '/tmp/ffmpeg-transcode.txt'
    if debug:
        print('Using temporary file "%s" for ffmpeg status updates.' % tmpstatusfile)
    res = []
    # create a thread to perform the encode
    ipq = queue.Queue()
    t = threading.Thread(target=wrapper,
                         args=(encode,
                               (jobid, db, job, ipq, preset, vbitrate_param,
                                abitrate_param, tmpfile, outfile, tmpstatusfile,),
                               res))
    t.start()
    # wait for ffmpeg to open the file and emit its initialization information
    # before we start the monitoring process
    time.sleep(1)
    # open the temporary file having the ffmeg output text and process it to generate status updates
    hangiter=0;
    with open(tmpstatusfile) as f:
        # read all the opening ffmpeg status/analysis lines
        lines = f.readlines()
        # set initial progress to -1
        prev_progress=-1
        framenum=0
        fps=1.0
        while t.is_alive():
            # read all output since last readline() call
            lines = f.readlines()
            if len(lines) > 0:
                # every ffmpeg output status line ends with a carriage return '\r'
                # split the last read line at these locations
                lines=lines[-1].split('\r')
#                if debug:
#                    print lines;
                hangiter=0
                if len(lines) > 1 and lines[-2].startswith('frame'):
                    # since typical reads will have the last line ending with \r the last status
                    # message is at index=[-2] start processing this line
                    # replace multiple spaces with one space
                    lines[-2] = re.sub(' +',' ',lines[-2])
                    # remove any spaces after equals signs
                    lines[-2] = re.sub('= +','=',lines[-2])
                    # split the fields at the spaces the first two fields for typical
                    # status lines will be framenum=XXXX and fps=YYYY parse the values
                    values = lines[-2].split(' ')
                    if len(values) > 1:
                        if debug:
                            print('values %s' % values)
                        prev_framenum = framenum
                        prev_fps = fps
                        try:
                            # framenum = current frame number being encoded
                            framenum = int(values[0].split('=')[1])
                            # fps = frames per second for the encoder
                            fps = float(values[1].split('=')[1])
                        except ValueError as e:
                            print('ffmpeg status parse exception: "%s"' % e)
                            framenum = prev_framenum
                            fps = prev_fps
                            pass
                        # progress = 0-100 represent percent complete for the transcode
                        progress = int((100*framenum)/(duration_secs*framerate))
                        # eta_secs = estimated number of seconds until transcoding is complete
                        eta_secs = int((float(duration_secs*framerate)-framenum)/fps)
                        # pct_realtime = how many real seconds it takes to encode 1 second of video
                        pct_realtime = float(fps/framerate)
                        if debug:
                            print('framenum = %d fps = %.2f' % (framenum, fps))
                        if progress != prev_progress:
                            if debug:
                                print('Progress %d%% encoding %.1f frames per second ETA %d mins' \
                                      % ( progress, fps, float(eta_secs)/60))
                            if jobid:
                                progress_str = 'Transcoding to mp4 %d%% complete ETA %d mins fps=%.1f.' \
                                               % ( progress, float(eta_secs)/60, fps)
                                job.update({'status':job.RUNNING, 'comment': progress_str})
                            prev_progress = progress
                elif len(lines) > 1:
                    if debug:
                        print('Read pathological output %s' % lines[-2])
            else:
                if debug:
                    print('Read no lines of ffmpeg output for %s secs. Possible hang?' % (POLL_INTERVAL*hangiter))
                hangiter = hangiter + 1
                if jobid:
                    progress_str = 'Read no lines of ffmpeg output for %s secs. Possible hang?' % (POLL_INTERVAL*hangiter)
                    job.update({'status':job.RUNNING, 'comment': progress_str})
            time.sleep(POLL_INTERVAL)
        if debug:
            print('res = "%s"' % res)
    t.join(1)
    # the encode thread signals a user-requested abort through the queue
    try:
        if ipq.get_nowait() == CleanExit:
            sys.exit()
    except queue.Empty:
        pass

    if flush_commskip:
        task = System(path='mythutil')
        task.command('--chanid %s' % chanid,
                     '--starttime %s' % starttime,
                     '--clearcutlist',
                     '2> /dev/null')
        task = System(path='mythutil')
        task.command('--chanid %s' % chanid,
                     '--starttime %s' % starttime,
                     '--clearskiplist',
                     '2> /dev/null')
    if flush_commskip:
        # also remove commercial begin/end markers from the markup table
        for index,mark in reversed(list(enumerate(rec.markup))):
            if mark.type in (rec.markup.MARK_COMM_START, rec.markup.MARK_COMM_END):
                del rec.markup[index]
        rec.bookmark = 0
        rec.cutlist = 0
        rec.markup.commit()

#    tf.close();
#    os.remove(tmpstatusfile);

    # point the recording at the new mp4 and refresh its metadata
    rec.basename = os.path.basename(outfile)
    rec.filesize = os.path.getsize(outfile)
#    rec.commflagged = 0
    rec.transcoded = 1
    rec.seek.clean()
    rec.update()

    os.remove(infile)
    # Cleanup the old *.png files
    for filename in glob('%s*.png' % infile):
        os.remove(filename)
    os.remove(tmpfile)
    try:
        os.remove('%s.map' % tmpfile)
    except OSError:
        pass

    output_filesize = rec.filesize
    if duration_secs > 0:
        output_bitrate = int(output_filesize*8/(1024*duration_secs))  # kbps
    actual_compression_ratio = 1 - float(output_filesize)/clipped_filesize
    compressed_pct = 1 - float(output_filesize)/input_filesize

    if build_seektable:
        if jobid:
            job.update({'status':job.RUNNING, 'comment':'Rebuilding seektable'})
        task = System(path='mythcommflag')
        task.command('--chanid %s' % chanid,
                     '--starttime %s' % starttime,
                     '--rebuild',
                     '2> /dev/null')

    # fix during in the recorded markup table this will be off if commercials are removed
    duration_msecs, e = get_duration(db, rec, transcoder, outfile)
    duration_msecs = 1000*duration_msecs
    for index,mark in reversed(list(enumerate(rec.markup))):
        # find the duration markup entry and correct any error in the video duration that might be there
        if mark.type == 33:
            if debug:
                print('Markup Duration in milliseconds "%s"' % mark.data)
            error = mark.data - duration_msecs
            if error != 0:
                if debug:
                    print('Markup Duration error is "%s"msecs' % error)
                mark.data = duration_msecs
    #rec.bookmark = 0
    #rec.cutlist = 0
    rec.markup.commit()

    if jobid:
        # NOTE(review): 'output_bitrate' is only bound when duration_secs > 0;
        # otherwise this reference raises NameError — confirm against the full
        # script.
        if output_bitrate:
            job.update({'status':job.FINISHED, 'comment':'Transcode Completed @ %dkbps, compressed file by %d%% (clipped %d%%, transcoder compressed %d%%)' % (output_bitrate,int(compressed_pct*100),int(clipped_compress_pct*100),int(actual_compression_ratio*100))})
        else:
            job.update({'status':job.FINISHED, 'comment':'Transcode Completed'})
def runjob(jobid=None, chanid=None, starttime=None, tzoffset=None):
    """Transcode a recording to an mp4 'proxy' copy (Python 2 variant).

    Requires a job-queue *jobid*; command-line identification is not
    supported by this variant (it exits when jobid is missing).  The
    encode runs on a worker thread while this function tails the ffmpeg
    status file to publish progress updates on the job.

    NOTE(review): 'preset' and 'vbitrate_param' are not assigned in this
    function — assumed to be module-level globals configured elsewhere in
    this script; confirm.  The framerate is hard-coded to 59.94 rather
    than detected.
    """
    global estimateBitrate
    db = MythDB()

    if jobid:
        job = Job(jobid, db=db)
        chanid = job.chanid
        utcstarttime = job.starttime
    else:
        print 'Job id not found.'
        sys.exit(1)
    if debug:
        print 'chanid "%s"' % chanid
        print 'utcstarttime "%s"' % utcstarttime

    rec = Recorded((chanid, utcstarttime), db=db)
    utcstarttime = rec.starttime
    starttime_datetime = utcstarttime
    # reformat 'starttime' for use with mythtranscode/ffmpeg/mythcommflag
    starttime = str(utcstarttime.utcisoformat().replace(u':', '').replace(
        u' ', '').replace(u'T', '').replace('-', ''))
    if debug:
        print 'mythtv format starttime "%s"' % starttime
    input_filesize = rec.filesize
    #sys.exit(e.retcode)
    print 'Transcoding. ' + rec.basename + " " + rec.storagegroup
    sg = findfile('/' + rec.basename, rec.storagegroup, db=db)
    if sg is None:
        print 'Local access to recording not found.'
        sys.exit(1)

    infile = os.path.join(sg.dirname, rec.basename)
    # proxy copy is written to a 'proxy' subdirectory of the storage group
    outfile = '%s.mp4' % os.path.join(sg.dirname, "proxy",
                                      rec.basename).rsplit('.', 1)[0]
    # outfile = '%s.mp4' % infile.rsplit('.',1)[0]
    framerate = 59.94
    clipped_bytes = 0
    duration_secs, e = get_duration(db, rec, transcoder, infile)
    abitrate_param = abitrate_param_HD  # preserve 5.1 audio
    if debug:
        print 'Audio bitrate parameter "%s"' % abitrate_param

    # Transcode to mp4
    #if jobid:
    #    job.update({'status':4, 'comment':'Transcoding to mp4'})

    # ffmpeg output is redirected to the temporary file tmpstatusfile and
    # a second thread continuously reads this file while
    # the transcode is in-process. see while loop below for the monitoring thread
    tf = tempfile.NamedTemporaryFile()
    tmpstatusfile = tf.name
    # tmpstatusfile = '/tmp/ffmpeg-transcode.txt'
    if debug:
        print 'Using temporary file "%s" for ffmpeg status updates.' % tmpstatusfile
    res = []
    # create a thread to perform the encode
    ipq = Queue.Queue()
    t = threading.Thread(target=wrapper,
                         args=(encode, (
                             jobid,
                             db,
                             job,
                             ipq,
                             preset,
                             vbitrate_param,
                             abitrate_param,
                             infile,
                             outfile,
                             tmpstatusfile,
                         ), res))
    t.start()
    # wait for ffmpeg to open the file and emit its initialization information
    # before we start the monitoring process
    time.sleep(1)
    # open the temporary file having the ffmeg output text and process it to generate status updates
    hangiter = 0
    with open(tmpstatusfile) as f:
        # read all the opening ffmpeg status/analysis lines
        lines = f.readlines()
        # set initial progress to -1
        prev_progress = -1
        framenum = 0
        fps = 1.0
        while t.is_alive():
            # read all output since last readline() call
            lines = f.readlines()
            if len(lines) > 0:
                # every ffmpeg output status line ends with a carriage return '\r'
                # split the last read line at these locations
                lines = lines[-1].split('\r')
                hangiter = 0
                if len(lines) > 1 and lines[-2].startswith('frame'):
                    # since typical reads will have the last line ending with \r the last status
                    # message is at index=[-2] start processing this line
                    # replace multiple spaces with one space
                    lines[-2] = re.sub(' +', ' ', lines[-2])
                    # remove any spaces after equals signs
                    lines[-2] = re.sub('= +', '=', lines[-2])
                    # split the fields at the spaces the first two fields for typical
                    # status lines will be framenum=XXXX and fps=YYYY parse the values
                    values = lines[-2].split(' ')
                    if len(values) > 1:
                        if debug:
                            print 'values %s' % values
                        prev_framenum = framenum
                        prev_fps = fps
                        try:
                            # framenum = current frame number being encoded
                            framenum = int(values[0].split('=')[1])
                            # fps = frames per second for the encoder
                            fps = float(values[1].split('=')[1])
                        except ValueError, e:
                            print 'ffmpeg status parse exception: "%s"' % e
                            framenum = prev_framenum
                            fps = prev_fps
                            pass
                        # progress = 0-100 represent percent complete for the transcode
                        progress = int(
                            (100 * framenum) / (duration_secs * framerate))
                        # eta_secs = estimated number of seconds until transcoding is complete
                        eta_secs = int(
                            (float(duration_secs * framerate) - framenum) / fps)
                        # pct_realtime = how many real seconds it takes to encode 1 second of video
                        pct_realtime = float(fps / framerate)
                        if debug:
                            print 'framenum = %d fps = %.2f' % (framenum, fps)
                        if progress != prev_progress:
                            if debug:
                                print 'Progress %d%% encoding %.1f frames per second ETA %d mins' \
                                      % ( progress, fps, float(eta_secs)/60)
                            if jobid:
                                progress_str = 'Transcoding to mp4 %d%% complete ETA %d mins fps=%.1f.' \
                                               % ( progress, float(eta_secs)/60, fps)
                                job.update({
                                    'status': job.RUNNING,
                                    'comment': progress_str
                                })
                            prev_progress = progress
                elif len(lines) > 1:
                    if debug:
                        print 'Read pathological output %s' % lines[-2]
            else:
                if debug:
                    print 'Read no lines of ffmpeg output for %s secs. Possible hang?' % (
                        POLL_INTERVAL * hangiter)
                hangiter = hangiter + 1
                if jobid:
                    progress_str = 'Read no lines of ffmpeg output for %s secs. Possible hang?' % (
                        POLL_INTERVAL * hangiter)
                    job.update({
                        'status': job.RUNNING,
                        'comment': progress_str
                    })
            time.sleep(POLL_INTERVAL)
        if debug:
            print 'res = "%s"' % res
class VIDEO:
    """Migrate a MythTV recording into the MythVideo library.

    Wraps the source recording (``Recorded``), the new ``Video`` entry
    and the backend connection, and provides the individual migration
    steps (copy, metadata import, markup/seek transfer, duplicate and
    hash checking) used by the job driver.
    """

    def __init__(self, opts, jobid=None):
        # Setup for the job to run
        if jobid:
            self.thisJob = Job(jobid)
            self.chanID = self.thisJob.chanid
            self.startTime = self.thisJob.starttime
            self.thisJob.update(status=Job.STARTING)
        # If no job ID given, must be a command line run
        else:
            self.thisJob = jobid
            self.chanID = opts.chanid
            self.startTime = opts.startdate + " " + opts.starttime + opts.offset
        self.opts = opts
        self.type = "none"
        self.db = MythDB()
        self.log = MythLog(module='Myth-Rec-to-Vid.py', db=self.db)

        # Capture the backend host name
        self.host = self.db.gethostname()

        # prep objects
        self.rec = Recorded((self.chanID, self.startTime), db=self.db)
        self.log(MythLog.GENERAL, MythLog.INFO, 'Using recording',
                 '%s - %s' % (self.rec.title.encode('utf-8'),
                              self.rec.subtitle.encode('utf-8')))
        self.vid = Video(db=self.db).create({'title': '', 'filename': '',
                                             'host': self.host})
        self.bend = MythBE(db=self.db)

    def check_hash(self):
        """Return True when the backend hashes of the source recording
        and the copied video file match."""
        self.log(self.log.GENERAL, self.log.INFO,
                 'Performing copy validation.')
        srchash = self.bend.getHash(self.rec.basename, self.rec.storagegroup)
        dsthash = self.bend.getHash(self.vid.filename, 'Videos')
        # direct comparison instead of the old if/else returning True/False
        return srchash == dsthash

    def copy(self):
        """Stream the recording into the Videos storage group in 16 MiB
        chunks, updating the job comment with a progress/ETA estimate."""
        stime = time.time()
        srcsize = self.rec.filesize
        # rolling window of 4 timestamps used for the transfer-rate estimate
        htime = [stime, stime, stime, stime]
        self.log(MythLog.GENERAL|MythLog.FILE, MythLog.INFO, "Copying myth://%s@%s/%s"\
               % (self.rec.storagegroup, self.rec.hostname, self.rec.basename)\
          +" to myth://Videos@%s/%s"\
               % (self.host, self.vid.filename))
        srcfp = self.rec.open('r')
        dstfp = self.vid.open('w')
        if self.thisJob:
            self.set_job_status(Job.RUNNING)
        tsize = 2**24
        # a short read (tsize < 2**24) means the source is exhausted
        while tsize == 2**24:
            tsize = min(tsize, srcsize - dstfp.tell())
            dstfp.write(srcfp.read(tsize))
            htime.append(time.time())
            # rate over the last 4 chunks (4 * tsize bytes per window)
            rate = float(tsize * 4) / (time.time() - htime.pop(0))
            remt = (srcsize - dstfp.tell()) / rate
            if self.thisJob:
                self.thisJob.setComment("%02d%% complete - %d seconds remaining" %\
                                        (dstfp.tell()*100/srcsize, remt))
        srcfp.close()
        dstfp.close()
        self.vid.hash = self.vid.getHash()
        self.log(MythLog.GENERAL | MythLog.FILE, MythLog.INFO,
                 "Transfer Complete",
                 "%d seconds elapsed" % int(time.time() - stime))
        if self.thisJob:
            self.thisJob.setComment("Complete - %d seconds elapsed" % \
                                    (int(time.time()-stime)))

    def copy_markup(self, start, stop):
        """Copy markup entries whose type is *start* or *stop* (e.g.
        commercial or cut begin/end pairs) into the video markup."""
        for mark in self.rec.markup:
            if mark.type in (start, stop):
                self.vid.markup.add(mark.mark, 0, mark.type)

    def copy_seek(self):
        """Copy the recording's seek table entries into the video markup."""
        for seek in self.rec.seek:
            self.vid.markup.add(seek.mark, seek.offset, seek.type)

    def delete_vid(self):
        """Remove the (partially created) video entry."""
        self.vid.delete()

    def delete_rec(self):
        """Remove the source recording."""
        self.rec.delete()

    def dup_check(self):
        """Return True when the target filename already exists in the
        Videos storage group (migration would create a duplicate)."""
        self.log(MythLog.GENERAL, MythLog.INFO,
                 'Processing new file name ',
                 '%s' % (self.vid.filename))
        self.log(MythLog.GENERAL, MythLog.INFO,
                 'Checking for duplication of ',
                 '%s - %s' % (self.rec.title.encode('utf-8'),
                              self.rec.subtitle.encode('utf-8')))
        if self.bend.fileExists(self.vid.filename, 'Videos'):
            self.log(MythLog.GENERAL, MythLog.INFO,
                     'Recording already exists in Myth Videos')
            if self.thisJob:
                self.thisJob.setComment("Action would result in duplicate entry")
            return True
        # early return above makes the old 'else' branch unnecessary
        self.log(MythLog.GENERAL, MythLog.INFO,
                 'No duplication found for ',
                 '%s - %s' % (self.rec.title.encode('utf-8'),
                              self.rec.subtitle.encode('utf-8')))
        return False

    def get_dest(self):
        """Compute the destination filename from the TV or movie format
        string and re-seed the markup handler with it (workaround for
        DBDataRef binding to the filename at Video creation time)."""
        if self.type == 'TV':
            self.vid.filename = self.process_fmt(TVFMT)
        elif self.type == 'MOVIE':
            self.vid.filename = self.process_fmt(MVFMT)
        self.vid.markup._refdat = (self.vid.filename, )

    def get_meta(self):
        """Import metadata from the recording and, when a grabber result
        matches it, full metadata (cast, director) from the TV or movie
        metadata grabber."""
        import_info = 'Listing only MetaData import complete'
        metadata = self.rec.exportMetadata()
        yrInfo = self.rec.getProgram()
        metadata['year'] = yrInfo.get('year')
        self.vid.importMetadata(metadata)
        if self.type == 'MOVIE':
            grab = VideoGrabber('Movie')
            results = grab.sortedSearch(self.rec.title)
            if len(results) > 0:
                for i in results:
                    if i.year == yrInfo.get(
                            'year') and i.title == self.rec.get('title'):
                        self.vid.importMetadata(i)
                        match = grab.grabInetref(i.get('inetref'))
                        length = len(match.people)
                        # last entry in 'people' is the director; the
                        # preceding ones (minus one) are cast members
                        for p in range(length - 2):
                            self.vid.cast.add(match.people[p].get('name'))
                        self.vid.director = match.people[length - 1].get('name')
                        import_info = 'Full MetaData Import complete'
        else:
            grab = VideoGrabber('TV')
            results = grab.sortedSearch(self.rec.title, self.rec.subtitle)
            if len(results) > 0:
                for i in results:
                    if i.title == self.rec.get(
                            'title') and i.subtitle == self.rec.get(
                                'subtitle'):
                        self.vid.importMetadata(i)
                        match = grab.grabInetref(grab.grabInetref(i.get('inetref'), \
                                season=i.get('season'),episode=i.get('episode')))
                        length = len(match.people)
                        for p in range(length - 2):
                            self.vid.cast.add(match.people[p].get('name'))
                        self.vid.director = match.people[length - 1].get('name')
                        import_info = 'Full MetaData Import complete'
        self.vid.category = self.rec.get('category')
        self.log(self.log.GENERAL, self.log.INFO, import_info)

    def get_type(self):
        """Classify the recording: TV when it has a series id and is not
        a movie program id ('MV' prefix), otherwise MOVIE."""
        if self.rec.seriesid is not None and self.rec.programid[:2] != 'MV':
            self.type = 'TV'
            self.log(self.log.GENERAL, self.log.INFO,
                     'Performing TV type migration.')
        else:
            self.type = 'MOVIE'
            self.log(self.log.GENERAL, self.log.INFO,
                     'Performing Movie type migration.')

    def process_fmt(self, fmt):
        """Expand %TOKEN% placeholders in *fmt* and append the source
        file's extension.  Missing fields expand to an empty string."""
        # replace fields from viddata
        ext = '.' + self.rec.basename.rsplit('.', 1)[1]
        rep = (('%TITLE%', 'title', '%s'), ('%SUBTITLE%', 'subtitle', '%s'),
               ('%SEASON%', 'season', '%d'), ('%SEASONPAD%', 'season', '%02d'),
               ('%EPISODE%', 'episode', '%d'), ('%EPISODEPAD%', 'episode', '%02d'),
               ('%YEAR%', 'year', '%s'), ('%DIRECTOR%', 'director', '%s'))
        # loop variable renamed from 'format' to avoid shadowing the builtin
        for tag, field, fmtspec in rep:
            if self.vid[field]:
                fmt = fmt.replace(tag, fmtspec % self.vid[field])
            else:
                fmt = fmt.replace(tag, '')
        # replace fields from program data
        rep = (('%HOSTNAME%', 'hostname', '%s'),
               ('%STORAGEGROUP%', 'storagegroup', '%s'))
        for tag, field, fmtspec in rep:
            value = getattr(self.rec, field)
            fmt = fmt.replace(tag, fmtspec % value)
        if len(self.vid.genre):
            fmt = fmt.replace('%GENRE%', self.vid.genre[0].genre)
        else:
            fmt = fmt.replace('%GENRE%', '')
        return fmt + ext

    def set_job_status(self, status):
        """Set the job status (job-queue runs only)."""
        self.thisJob.setStatus(status)

    def set_vid_hash(self):
        """Store the destination file hash on the video entry."""
        self.vid.hash = self.vid.getHash()

    def update_vid(self):
        """Commit pending changes to the video record."""
        self.vid.update()
def runjob(jobid=None, chanid=None, starttime=None, tzoffset=None, maxWidth=maxWidth, maxHeight=maxHeight, sdonly=0, burncc=0, usemkv=0, overwrite=1): global estimateBitrate db = MythDB() try: if jobid: job = Job(jobid, db=db) chanid = job.chanid utcstarttime = job.starttime else: job = None utcstarttime = datetime.strptime(starttime, "%Y%m%d%H%M%S") utcstarttime = utcstarttime + timedelta(hours=tzoffset) if debug: print 'chanid "%s"' % chanid print 'utcstarttime "%s"' % utcstarttime rec = Recorded((chanid, utcstarttime), db=db) utcstarttime = rec.starttime starttime_datetime = utcstarttime # reformat 'starttime' for use with mythtranscode/HandBrakeCLI/mythcommflag starttime = str(utcstarttime.utcisoformat().replace(u':', '').replace( u' ', '').replace(u'T', '').replace('-', '')) if debug: print 'mythtv format starttime "%s"' % starttime input_filesize = rec.filesize if rec.commflagged: if debug: print 'Recording has been scanned to detect commerical breaks.' waititer = 1 keepWaiting = True while keepWaiting == True: keepWaiting = False for index, jobitem in reversed( list( enumerate( db.searchJobs(chanid=chanid, starttime=starttime_datetime)))): if jobitem.type == jobitem.COMMFLAG: # Commercial flagging job if debug: print 'Commercial flagging job detected with status %s' % jobitem.status if jobitem.status == jobitem.RUNNING: # status = RUNNING? job.update({'status':job.PAUSED, 'comment':'Waited %d secs for the commercial flagging job' % (waititer*POLL_INTERVAL) \ + ' currently running on this recording to complete.'}) if debug: print 'Waited %d secs for the commercial flagging job' % (waititer*POLL_INTERVAL) \ + ' currently running on this recording to complete.' time.sleep(POLL_INTERVAL) keepWaiting = True waititer = waititer + 1 break else: if debug: print 'Recording has not been scanned to detect/remove commercial breaks.' if require_commflagged: if jobid: job.update({ 'status': job.RUNNING, 'comment': 'Required commercial flagging for this file is not found.' 
+ 'Flagging commercials and cancelling any queued commercial flagging.' }) # cancel any queued job to flag commercials for this recording and run commercial flagging in this script for index, jobitem in reversed( list( enumerate( db.searchJobs(chanid=chanid, starttime=starttime_datetime)))): if debug: if index == 0: print jobitem.keys() print index, jobitem.id, jobitem.chanid if jobitem.type == jobitem.COMMFLAG: # Commercial flagging job if jobitem.status == jobitem.RUNNING: # status = RUNNING? jobitem.cmds = jobitem.STOP # stop command from the frontend to stop the commercial flagging job #jobitem.setStatus(jobitem.CANCELLED) #jobitem.setComment('Cancelled: Transcode command ran commercial flagging for this recording.') jobitem.update({ 'status': jobitem.CANCELLED, 'comment': 'A user transcode job ran commercial flagging for' + ' this recording and cancelled this job.' }) if debug: print 'Flagging Commercials...' # Call "mythcommflag --chanid $CHANID --starttime $STARTTIME" task = System(path='mythcommflag', db=db) try: output = task('--chanid "%s"' % chanid, '--starttime "%s"' % starttime, '2> /dev/null') except MythError, e: # it seems mythcommflag always exits with an decoding error "eno: Unknown error 541478725 (541478725)" pass #print 'Command failed with output:\n%s' % e.stderr #if jobid: # job.update({'status':304, 'comment':'Flagging commercials failed'}) #sys.exit(e.retcode) sg = findfile('/' + rec.basename, rec.storagegroup, db=db) if sg is None: print 'Local access to recording not found.' 
sys.exit(1) infile = os.path.join(sg.dirname, rec.basename) #TODO: set overWrite to 0 if infile is m4v or mkv (already converted) #tmpfile = '%s.tmp' % infile.rsplit('.',1)[0] outtitle = rec.title.replace("&", "and") outtitle = re.sub('[^A-Za-z0-9 ]+', '', outtitle) filetype = 'm4v' #DEBUG CODE TO FIND OBJECT STRUCT: #print '{}'.format(dir(rec.getProgram())) #print '{}'.format(rec.getProgram().year) if usemkv == 1: filetype = 'mkv' #print '!{}!'.format(rec.programid[0:2]) if rec.season > 0 and rec.episode > 0: #if there are seasons and episode numbers in the recording data outtitle = '{0:s} S{1:d} E{2:02d}'.format(outtitle, rec.season, rec.episode) elif rec.programid[0:2] == 'MV' and str(rec.getProgram().year).isdigit( ): #if it's a movie and has an original air date for when it came out outtitle = '{} ({})'.format(outtitle, rec.getProgram().year) elif rec.programid[ 0: 2] == 'MV' and rec.originalairdate != None and rec.originalairdate > datetime.date( datetime(1, 1, 1, 0, 0) ): #if it's a movie and has an original air date for when it came out outtitle = '{} ({})'.format(outtitle, rec.originalairdate.year) elif 'Sports' in rec.category: #if it's sports outtitle = '{}-{}-{}'.format( outtitle, re.sub('[^A-Za-z0-9 ]+', '', rec.subtitle), str(rec.starttime.strftime("%Y%m%d"))) elif rec.programid[0:2] == 'SH' and (' News ' in rec.title or rec.category == 'News'): #if it's a news show outtitle = '{}-{}'.format(outtitle, str(rec.starttime.strftime("%Y%m%d"))) elif rec.originalairdate != None and rec.originalairdate > datetime.date( datetime(1, 1, 1, 0, 0)): #if it has an original air date outtitle = '{} {}'.format( outtitle, str(rec.originalairdate.strftime("%Y%m%d"))) else: outtitle = '{} {}'.format(outtitle, str(rec.starttime.strftime("%Y%m%d"))) outtitle = '{}.{}'.format(outtitle, filetype) outfile = os.path.join(sg.dirname, outtitle) tmpfile = '{}.{}'.format( outfile.rsplit('.', 1)[0], infile.rsplit('.', 1)[1]) if tmpfile == infile: tmpfile = 
'{}.tmp'.format(infile.rsplit('.', 1)[0]) if (overwrite == 0): # If not overwritting the file, use the export folder outfile = os.path.join(exportFolder, outtitle) if debug: print 'overwrite is 0. outfile "{}"'.format(outfile) if os.path.isfile(outfile) or infile == outfile: # If outfile exists already, create a new name for the file. outfile = '{}-{}.{}'.format( outfile.rsplit('.', 1)[0], str(rec.starttime.strftime("%Y%m%d")), filetype) if os.path.isfile(tmpfile): # If the infile and tmpfile are the same, create a new name for the tmpfile tmpfile = '{}-{}.tmp'.format( outfile.rsplit('.', 1)[0], str(rec.starttime.strftime("%Y%m%d"))) if os.path.isfile(tmpfile): # If tmp exists already, create a new name for the file. outfile = '{}-{}.tmp'.format( tmpfile.rsplit('.', 1)[0], str(rec.starttime.strftime("%Y%m%d"))) if debug: print 'tmp exists. outfile "{}"'.format(outfile) if debug: print 'infile "{}"'.format(infile) print 'tmpfile "{}"'.format(tmpfile) print 'outfile "{}"'.format(outfile) #add_metadata(db, jobid, debug, job, rec, filetype, tmpfile) clipped_bytes = 0 # If selected, create a cutlist to remove commercials via mythtranscode by running: # mythutil --gencutlist --chanid $CHANID --starttime $STARTTIME if generate_commcutlist: if jobid: job.update({ 'status': job.RUNNING, 'comment': 'Generating Cutlist for commercial removal' }) task = System(path='mythutil', db=db) try: output = task('--gencutlist', '--chanid "%s"' % chanid, '--starttime "%s"' % starttime) except MythError, e: print 'Command "mythutil --gencutlist" failed with output:\n%s' % e.stderr if jobid: job.update({ 'status': job.ERRORED, 'comment': 'Generation of commercial Cutlist failed' }) sys.exit(e.retcode)
class VIDEO:
    """Migrate a MythTV recording into the MythVideo 'Videos' storage group.

    Wraps the full export pipeline: identify the recording (from a job ID
    when run as a user job, or from chanid/starttime options on the command
    line), classify it as TV or movie, copy the file through the backend
    file-transfer protocol, import grabber metadata, and copy the seek and
    markup tables across to the new Video entry.
    """

    def __init__(self, opts, jobid=None):
        # Setup for the job to run
        if jobid:
            self.thisJob = Job(jobid)
            self.chanID = self.thisJob.chanid
            self.startTime = self.thisJob.starttime
            self.thisJob.update(status=Job.STARTING)
        # If no job ID given, must be a command line run
        else:
            self.thisJob = jobid
            self.chanID = opts.chanid
            self.startTime = opts.startdate + " " + opts.starttime + opts.offset

        self.opts = opts
        self.type = "none"      # set by get_type(): 'TV' or 'MOVIE'
        self.db = MythDB()
        self.log = MythLog(module='Myth-Rec-to-Vid.py', db=self.db)

        # Capture the backend host name
        self.host = self.db.gethostname()

        # prep objects
        self.rec = Recorded((self.chanID, self.startTime), db=self.db)
        self.log(MythLog.GENERAL, MythLog.INFO, 'Using recording',
                 '%s - %s' % (self.rec.title.encode('utf-8'),
                              self.rec.subtitle.encode('utf-8')))
        # Placeholder Video entry; title and filename are filled in later by
        # get_meta()/get_dest() and committed by update_vid().
        self.vid = Video(db=self.db).create({'title': '', 'filename': '',
                                             'host': self.host})
        self.bend = MythBE(db=self.db)

    def check_hash(self):
        """Return True when the source and destination file hashes match."""
        self.log(self.log.GENERAL, self.log.INFO,
                 'Performing copy validation.')
        srchash = self.bend.getHash(self.rec.basename, self.rec.storagegroup)
        dsthash = self.bend.getHash(self.vid.filename, 'Videos')
        return srchash == dsthash

    def copy(self):
        """Stream the recording file into the 'Videos' storage group.

        Copies in 16 MB chunks, updating the job comment with a progress
        percentage and ETA, then stores the destination hash on the Video
        entry. The rate estimate averages over the last four chunks.
        """
        stime = time.time()
        srcsize = self.rec.filesize
        # Rolling window of four timestamps for the transfer-rate estimate.
        htime = [stime, stime, stime, stime]
        self.log(MythLog.GENERAL | MythLog.FILE, MythLog.INFO,
                 "Copying myth://%s@%s/%s"
                 % (self.rec.storagegroup, self.rec.hostname,
                    self.rec.basename)
                 + " to myth://Videos@%s/%s"
                 % (self.host, self.vid.filename))
        srcfp = self.rec.open('r')
        dstfp = self.vid.open('w')
        if self.thisJob:
            self.set_job_status(Job.RUNNING)
        tsize = 2**24
        # A short read (tsize < 2**24) means the final chunk was written.
        while tsize == 2**24:
            tsize = min(tsize, srcsize - dstfp.tell())
            dstfp.write(srcfp.read(tsize))
            htime.append(time.time())
            # Bytes moved over the last four chunks divided by elapsed time.
            rate = float(tsize * 4) / (time.time() - htime.pop(0))
            remt = (srcsize - dstfp.tell()) / rate
            # BUGFIX: guard srcsize == 0 so an empty recording file cannot
            # raise ZeroDivisionError computing the percentage.
            if self.thisJob and srcsize:
                self.thisJob.setComment(
                    "%02d%% complete - %d seconds remaining" %
                    (dstfp.tell() * 100 / srcsize, remt))
        srcfp.close()
        dstfp.close()
        self.vid.hash = self.vid.getHash()
        self.log(MythLog.GENERAL | MythLog.FILE, MythLog.INFO,
                 "Transfer Complete",
                 "%d seconds elapsed" % int(time.time() - stime))
        if self.thisJob:
            self.thisJob.setComment("Complete - %d seconds elapsed" %
                                    (int(time.time() - stime)))

    def copy_markup(self, start, stop):
        """Copy markup points of the given start/stop types onto the video."""
        for mark in self.rec.markup:
            if mark.type in (start, stop):
                self.vid.markup.add(mark.mark, 0, mark.type)

    def copy_seek(self):
        """Copy the recording's seek table entries onto the video."""
        for seek in self.rec.seek:
            self.vid.markup.add(seek.mark, seek.offset, seek.type)

    def delete_vid(self):
        """Remove the Video entry (e.g. to back out a failed migration)."""
        self.vid.delete()

    def delete_rec(self):
        """Remove the source recording after a successful migration."""
        self.rec.delete()

    def dup_check(self):
        """Return True if the destination filename already exists in MythVideo."""
        self.log(MythLog.GENERAL, MythLog.INFO, 'Processing new file name ',
                 '%s' % (self.vid.filename))
        self.log(MythLog.GENERAL, MythLog.INFO,
                 'Checking for duplication of ',
                 '%s - %s' % (self.rec.title.encode('utf-8'),
                              self.rec.subtitle.encode('utf-8')))
        if self.bend.fileExists(self.vid.filename, 'Videos'):
            self.log(MythLog.GENERAL, MythLog.INFO,
                     'Recording already exists in Myth Videos')
            if self.thisJob:
                self.thisJob.setComment("Action would result in duplicate entry")
            return True
        self.log(MythLog.GENERAL, MythLog.INFO,
                 'No duplication found for ',
                 '%s - %s' % (self.rec.title.encode('utf-8'),
                              self.rec.subtitle.encode('utf-8')))
        return False

    def get_dest(self):
        """Build the destination filename from the type-specific format."""
        if self.type == 'TV':
            self.vid.filename = self.process_fmt(TVFMT)
        elif self.type == 'MOVIE':
            self.vid.filename = self.process_fmt(MVFMT)
        # Re-point the markup handler at the final filename; the bindings
        # bound it to the empty filename used at Video creation time.
        self.vid.markup._refdat = (self.vid.filename,)

    def get_meta(self):
        """Import metadata from the recording listing and the online grabbers.

        Falls back to listing-only metadata when no grabber result matches
        the recording's title (and subtitle, for TV episodes).
        """
        import_info = 'Listing only MetaData import complete'
        metadata = self.rec.exportMetadata()
        yrInfo = self.rec.getProgram()
        metadata['year'] = yrInfo.get('year')
        self.vid.importMetadata(metadata)
        if self.type == 'MOVIE':
            grab = VideoGrabber('Movie')
            results = grab.sortedSearch(self.rec.title)
            if len(results) > 0:
                for i in results:
                    if i.year == yrInfo.get('year') and \
                            i.title == self.rec.get('title'):
                        self.vid.importMetadata(i)
                        match = grab.grabInetref(i.get('inetref'))
                        length = len(match.people)
                        # people[:length-2] are treated as cast and the final
                        # entry as director; people[length-2] is skipped —
                        # NOTE(review): presumably a writer slot, confirm
                        # against the grabber's people ordering.
                        for p in range(length - 2):
                            self.vid.cast.add(match.people[p].get('name'))
                        self.vid.director = match.people[length - 1].get('name')
                        import_info = 'Full MetaData Import complete'
        else:
            grab = VideoGrabber('TV')
            results = grab.sortedSearch(self.rec.title, self.rec.subtitle)
            if len(results) > 0:
                for i in results:
                    if i.title == self.rec.get('title') and \
                            i.subtitle == self.rec.get('subtitle'):
                        self.vid.importMetadata(i)
                        # BUGFIX: the grabber was invoked twice here, feeding
                        # the Metadata result of the inner grabInetref() back
                        # into grabInetref(). A single call with season and
                        # episode is sufficient and matches the movie branch.
                        match = grab.grabInetref(i.get('inetref'),
                                                 season=i.get('season'),
                                                 episode=i.get('episode'))
                        length = len(match.people)
                        for p in range(length - 2):
                            self.vid.cast.add(match.people[p].get('name'))
                        self.vid.director = match.people[length - 1].get('name')
                        import_info = 'Full MetaData Import complete'
        self.vid.category = self.rec.get('category')
        self.log(self.log.GENERAL, self.log.INFO, import_info)

    def get_type(self):
        """Classify the recording as 'TV' or 'MOVIE'.

        Anything with a series ID whose program ID is not a movie ('MV')
        is treated as television; everything else migrates as a movie.
        """
        if self.rec.seriesid is not None and self.rec.programid[:2] != 'MV':
            self.type = 'TV'
            self.log(self.log.GENERAL, self.log.INFO,
                     'Performing TV type migration.')
        else:
            self.type = 'MOVIE'
            self.log(self.log.GENERAL, self.log.INFO,
                     'Performing Movie type migration.')

    def process_fmt(self, fmt):
        """Expand %TAG% placeholders in *fmt* and append the file extension.

        Tags are filled from the video metadata first, then from the
        recording (hostname/storagegroup), then the first genre; tags with
        no value are replaced by the empty string.
        """
        # replace fields from viddata
        ext = '.' + self.rec.basename.rsplit('.', 1)[1]
        rep = (('%TITLE%', 'title', '%s'),
               ('%SUBTITLE%', 'subtitle', '%s'),
               ('%SEASON%', 'season', '%d'),
               ('%SEASONPAD%', 'season', '%02d'),
               ('%EPISODE%', 'episode', '%d'),
               ('%EPISODEPAD%', 'episode', '%02d'),
               ('%YEAR%', 'year', '%s'),
               ('%DIRECTOR%', 'director', '%s'))
        for tag, data, spec in rep:
            if self.vid[data]:
                fmt = fmt.replace(tag, spec % self.vid[data])
            else:
                fmt = fmt.replace(tag, '')
        # replace fields from program data
        rep = (('%HOSTNAME%', 'hostname', '%s'),
               ('%STORAGEGROUP%', 'storagegroup', '%s'))
        for tag, data, spec in rep:
            fmt = fmt.replace(tag, spec % getattr(self.rec, data))
        if len(self.vid.genre):
            fmt = fmt.replace('%GENRE%', self.vid.genre[0].genre)
        else:
            fmt = fmt.replace('%GENRE%', '')
        return fmt + ext

    def set_job_status(self, status):
        """Forward a status code to the MythTV job queue entry."""
        self.thisJob.setStatus(status)

    def set_vid_hash(self):
        """Recompute and store the destination file's hash."""
        self.vid.hash = self.vid.getHash()

    def update_vid(self):
        """Commit pending Video metadata changes to the database."""
        self.vid.update()