    def test_Dataheap_Job_002_01(self):
        """Test classmethod Job.fromProgram() from 'dataheap'.
        This test triggers a job to rebuild the seek table of a program.
        Note: I did not realize that this was possible; it is not mentioned
        in the wiki:
        You have to grep the source code for the flag 'JOB_REBUILD'.
        Additional note: The log does not say anything about rebuilding the seek table.
        """
        chanid        = self.testenv['DOWNCHANID']
        starttimemyth = self.testenv['DOWNSTARTTIME']

        hostname = self.mydb.getMasterBackend()
        rec = Recorded((chanid, starttimemyth), db = self.mydb)
        prgrm = rec.getProgram()
        self.assertEqual(RECSTATUS.rsRecorded, prgrm.recstatus)

        myjob = Job.fromProgram(prgrm, JOBTYPE.COMMFLAG, hostname=hostname,
                                flags=JOBFLAG.REBUILD)

        loopnr = 0
        while (myjob.status < JOBSTATUS.FINISHED):
            time.sleep(10)
            myjob._pull()     # this re-reads the jobqueue table
            loopnr += 1
            if (loopnr > 60):
                break

        self.assertEqual(myjob.status, JOBSTATUS.FINISHED)

        # test '__repr__' and '__str__'
        print()
        print(repr(myjob))
        print(str(myjob))
    def test_repr_002_01(self):
        """
        Test '__repr__' and '__str__' methods from dataheap.py
        """

        chanid = self.testenv['RECCHANID']
        starttimemyth = self.testenv['RECSTARTTIMEMYTH']
        title = self.testenv['RECTITLE']

        c_s = (chanid, starttimemyth)

        db = MythDB()
        # Recorded class
        r = Recorded(c_s, db=db)
        print()
        print(repr(r))
        print(str(r))

        # Markup table
        m = r.markup
        print()
        print(repr(m))
        print(str(m))
        # one entry of the markup table:
        print()
        print(repr(m[0]))
        print(str(m[0]))
        # one value of a single entry:
        print()
        print(repr(m[0].mark))
        print(str(m[0].mark))
        print()
        print(repr(m[0].data))
        print(str(m[0].data))

        # Artwork
        a = r.artwork
        print()
        print(repr(a))
        print(str(a))

        # Coverart of an Artwork
        ac = a.coverart
        print()
        print(repr(ac))
        print(str(ac))

        # Program of the recorded entry:
        prgrm = r.getRecordedProgram()
        print()
        print(repr(prgrm))
        print(str(prgrm))

        # OldRecorded of the recorded entry:
        oldrecs = db.searchOldRecorded(chanid=chanid, title=title)
        oldrec = next(oldrecs)
        print()
        print(repr(oldrec))
        print(str(oldrec))
    def __init__(self, opts, jobid=None):
        if jobid:
            self.job = Job(jobid)
            self.chanid = self.job.chanid
            self.starttime = self.job.starttime
            self.job.update(status=Job.STARTING)
        else:
            self.job = None
            self.chanid = opts.chanid
            self.starttime = opts.starttime

        self.opts = opts
        self.db = MythDB()
        self.log = MythLog(module='mythvidexport.py', db=self.db)

        # load setting strings
        self.get_format()

        # prep objects
        self.rec = Recorded((self.chanid, self.starttime), db=self.db)
        self.log(
            MythLog.GENERAL, MythLog.INFO, 'Using recording',
            '%s - %s' % (self.rec.title.encode('utf-8'),
                         self.rec.subtitle.encode('utf-8')))
        self.vid = Video(db=self.db).create({
            'title': '',
            'filename': '',
            'host': gethostname()
        })

        # process data
        self.get_meta()
        self.get_dest()
        # bug fix to work around limitation in the bindings where DBDataRef classes
        # are mapped to the filename at time of Video element creation. since the
        # filename is specified as blank when the video is created, the markup
        # handler is not properly initialized
        self.vid.markup._refdat = (self.vid.filename, )

        # save file
        self.copy()
        if opts.seekdata:
            self.copy_seek()
        if opts.skiplist:
            self.copy_markup(static.MARKUP.MARK_COMM_START,
                             static.MARKUP.MARK_COMM_END)
        if opts.cutlist:
            self.copy_markup(static.MARKUP.MARK_CUT_START,
                             static.MARKUP.MARK_CUT_END)
        self.vid.update()

        # delete old file
        if opts.delete:
            self.rec.delete()
Example #4
    def test_Dataheap_Recorded_001_03(self):
        """Test method 'getRecordedProgram()' in class 'Recorded' from 'dataheap'.
        """
        chanid        = self.testenv['RECCHANID']
        starttimeutc  = self.testenv['RECSTARTTIMEUTC']
        starttimemyth = self.testenv['RECSTARTTIMEMYTH']
        title         = self.testenv['RECTITLE']
        basename      = self.testenv['RECBASENAME']
        recordedid    = self.testenv['RECRECORDID']
        inetref       = self.testenv['RECINETREF']

        rec = Recorded((chanid, starttimemyth), db = self.mydb)
        prgrm = rec.getRecordedProgram()
        self.assertEqual(prgrm.chanid, int(chanid))
Example #5
    def test_Dataheap_Recorded_001_04(self):
        """Test method 'formatPath()' in class 'Recorded' from 'dataheap'.
        """
        chanid        = self.testenv['RECCHANID']
        starttimeutc  = self.testenv['RECSTARTTIMEUTC']
        starttimemyth = self.testenv['RECSTARTTIMEMYTH']
        title         = self.testenv['RECTITLE']
        basename      = self.testenv['RECBASENAME']
        recordedid    = self.testenv['RECRECORDID']
        inetref       = self.testenv['RECINETREF']

        rec = Recorded((chanid, starttimemyth), db = self.mydb)
        formatpath = rec.formatPath("%U/%T/%pY-%pm-%pd %pH.%pi %T")
        ###print(rec.formatPath("%U/%T/%pY-%pm-%pd %pH.%pi %T"))
        self.assertTrue(title in formatpath)
Example #6
    def test_Dataheap_Recorded_001_05(self):
        """Test method 'exportMetadata()' in class 'Recorded' from 'dataheap'.
        """
        chanid        = self.testenv['RECCHANID']
        starttimeutc  = self.testenv['RECSTARTTIMEUTC']
        starttimemyth = self.testenv['RECSTARTTIMEMYTH']
        title         = self.testenv['RECTITLE']
        basename      = self.testenv['RECBASENAME']
        recordedid    = self.testenv['RECRECORDID']
        inetref       = self.testenv['RECINETREF']

        rec = Recorded((chanid, starttimemyth), db = self.mydb)
        metadata = rec.exportMetadata()
        self.assertTrue(isinstance(metadata, VideoMetadata))
        self.assertEqual(metadata.inetref, inetref)
    def __init__(self, opts, jobid=None):                           
        
        # Setup for the job to run
        if jobid:
            self.thisJob = Job(jobid)
            self.chanID = self.thisJob.chanid
            self.startTime = self.thisJob.starttime
            self.thisJob.update(status=Job.STARTING)
        
        # If no job ID given, must be a command line run
        else:
            self.thisJob = jobid
            self.chanID = opts.chanid
            self.startTime = opts.startdate + " " + opts.starttime + opts.offset
        self.opts = opts
        self.type = "none"
        self.db = MythDB()
        self.log = MythLog(module='Myth-Rec-to-Vid.py', db=self.db)
        

        # Capture the backend host name
        self.host = self.db.gethostname()

        # prep objects
        self.rec = Recorded((self.chanID,self.startTime), db=self.db)
        self.log(MythLog.GENERAL, MythLog.INFO, 'Using recording',
                        '%s - %s' % (self.rec.title.encode('utf-8'), 
                                     self.rec.subtitle.encode('utf-8')))

        self.vid = Video(db=self.db).create({'title':'', 'filename':'',
                                             'host':self.host})

        self.bend = MythBE(db=self.db)
Example #8
def transcode(jobid=None, chanid=None, starttime=None):
    # connect to mythtv database
    db = MythDB()
    be = MythBE(db=db)

    logging.info("start transcode")

    if jobid:
        job = Job(jobid, db=db)
        chanid = job.chanid
        starttime = job.starttime
        logging.info("%s" % jobid)
        logging.info("%s" % chanid)
        logging.info("%s" % starttime)
    rec = Recorded((chanid, starttime), db=db)
    # TODO: get the starttime into the correct format (from 20150827014200 to 2015-08-27 00:42:00-06:00)
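    # A minimal sketch for the TODO above (an illustration, not part of the
    # original script): when 'starttime' arrives from the command line as a
    # MythTV 'YYYYMMDDHHMMSS' value rather than from the Job, it could be
    # parsed with the standard library and shifted by a fixed UTC offset,
    # as another example in this collection does.  'starttime_local' is a
    # hypothetical name, the -6 hour offset is taken from the TODO's example,
    # and 'datetime'/'timedelta' are assumed to be imported from 'datetime'.
    if not jobid:
        starttime_local = (datetime.strptime(str(starttime), "%Y%m%d%H%M%S")
                           + timedelta(hours=-6))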

	' loop all files in lib_dir that are symlinks and find the one that matches this recording '
        for ld in LIBDIR:
                for dp, dn, files in os.walk(ld):
                        for file in files:
                                filepath = os.path.join(dp,file)
                                if (os.path.islink(filepath)):
                                        logging.debug("%s -> %s" % (filepath, os.readlink(filepath)))

	' do the transode '

	' update the database for the new file name '

	' update the symlink for the new file name '
    def test_Dataheap_Job_002_02(self):
        """Test exception of classmethod Job.fromProgram() from 'dataheap'.
        """
        chanid        = self.testenv['DOWNCHANID']
        starttimemyth = self.testenv['DOWNSTARTTIME']

        hostname = self.mydb.getMasterBackend()
        rec = Recorded((chanid, starttimemyth), db = self.mydb)
        prgrm = rec.getProgram()
        self.assertEqual(RECSTATUS.rsRecorded, prgrm.recstatus)

        # set recstatus to 'rsUnknown' and capture the error:
        prgrm.recstatus = RECSTATUS.rsUnknown
        with self.assertRaises(MythError):
            myjob = Job.fromProgram(prgrm, JOBTYPE.COMMFLAG,
                                    hostname=hostname, flags=JOBFLAG.REBUILD)
def runjob(jobid=None, chanid=None, starttime=None):
    db = MythDB()
    if jobid:
        job = Job(jobid, db=db)
        chanid = job.chanid
        starttime = job.starttime
    rec = Recorded((chanid, starttime), db=db)

    sg = findfile(rec.basename, rec.storagegroup, db=db)
    if sg is None:
        print('Local access to recording not found.')
        sys.exit(1)

    infile = os.path.join(sg.dirname, rec.basename)
    outfile = '%s.mkv' % infile.rsplit('.',1)[0]

    task = Grabber(path=transcoder)
    try:
##############################################
#### probably need to adjust this one too ####
        task.command('"%s"' % infile,
                     '"%s"' % outfile)
##############################################
    except MythError as e:
        print('Command failed with output:\n%s' % e.stderr)
        sys.exit(e.retcode)
Example #11
def getjob(jobid=None, chanid=None, starttime=None):
    db = MythDB()
    if jobid:
        job = Job(jobid, db=db)
        chanid = job.chanid
        starttime = job.starttime
    return Recorded((chanid, starttime), db=db)
    def parse_args(self):
        """
        Parse the command line arguments, then load the Job and the
        corresponding Recorded entry.
        """
        self.args = self.parser.parse_args()
        self.jobid = self.args.jobid
        self.job = Job(self.jobid)
        self.verify_job()
        self.recorded = Recorded((self.job.chanid, self.job.starttime))
Example #13
    def __init__(self, opts, jobid=None):
        if jobid:
            self.job = Job(jobid)
            self.chanid = self.job.chanid
            self.starttime = self.job.starttime
            self.job.update(status=3)
        else:
            self.job = None
            self.chanid = opts.chanid
            self.starttime = opts.starttime

        self.opts = opts
        self.db = MythDB()
        self.log = MythLog(module='mythvidexport.py', db=self.db)

        # load setting strings
        self.get_format()

        # prep objects
        self.rec = Recorded((self.chanid, self.starttime), db=self.db)
        self.log(MythLog.IMPORTANT, 'Using recording',
                 '%s - %s' % (self.rec.title, self.rec.subtitle))
        self.vid = Video(db=self.db).create({
            'title': '',
            'filename': '',
            'host': gethostname()
        })

        # process data
        self.get_meta()
        self.get_dest()

        # save file
        self.copy()
        if opts.seekdata:
            self.copy_seek()
        if opts.skiplist:
            self.copy_markup(static.MARKUP.MARK_COMM_START,
                             static.MARKUP.MARK_COMM_END)
        if opts.cutlist:
            self.copy_markup(static.MARKUP.MARK_CUT_START,
                             static.MARKUP.MARK_CUT_END)
        self.vid.update()
    def __init__(self, opts, jobid=None):
        if jobid:
            self.job = Job(jobid)
            self.chanid = self.job.chanid
            self.starttime = self.job.starttime
            self.job.update(status=Job.STARTING)
        else:
            self.job = None
            self.chanid = opts.chanid
            self.starttime = opts.starttime

        self.opts = opts
        self.db = MythDB()
        self.log = MythLog(module='mythvidexport.py', db=self.db)

        # load setting strings
        self.get_format()

        # prep objects
        self.rec = Recorded((self.chanid,self.starttime), db=self.db)
        self.log(MythLog.GENERAL, MythLog.INFO, 'Using recording',
                        '%s - %s' % (self.rec.title.encode('utf-8'), 
                                     self.rec.subtitle.encode('utf-8')))
        self.vid = Video(db=self.db).create({'title':'', 'filename':'',
                                             'host':gethostname()})

        # process data
        self.get_meta()
        self.get_dest()
        # bug fix to work around limitation in the bindings where DBDataRef classes
        # are mapped to the filename at time of Video element creation. since the
        # filename is specified as blank when the video is created, the markup
        # handler is not properly initialized
        self.vid.markup._refdat = (self.vid.filename,)

        # save file
        self.copy()
        if opts.seekdata:
            self.copy_seek()
        if opts.skiplist:
            self.copy_markup(static.MARKUP.MARK_COMM_START,
                             static.MARKUP.MARK_COMM_END)
        if opts.cutlist:
            self.copy_markup(static.MARKUP.MARK_CUT_START,
                             static.MARKUP.MARK_CUT_END)
        self.vid.update()

        # delete old file
        if opts.delete:
            self.rec.delete()
Example #15
def runjob(jobid=None, chanid=None, starttime=None):
    db = MythDB()
    if jobid:
        job = Job(jobid, db=db)
        chanid = job.chanid
        starttime = job.starttime
    rec = Recorded((chanid, starttime), db=db)

    timestr = time.strftime("%m-%d-%y %H:%M:%S")
    title_san = re.sub(r"\s", ".", rec.title)
    print(title_san)
    try:
        os.mkdir(log_dir)
    except OSError as e:
        pass
Example #16
    def __init__(self, opts, jobid=None):

        # Setup for the job to run
        if jobid:
            self.thisJob = Job(jobid)
            self.chanID = self.thisJob.chanid
            self.startTime = self.thisJob.starttime
            self.thisJob.update(status=Job.STARTING)

        # If no job ID given, must be a command line run
        else:
            self.thisJob = jobid
            self.chanID = opts.chanid
            self.startTime = opts.startdate + " " + opts.starttime + opts.offset
        self.opts = opts
        self.type = "none"
        self.db = MythDB()
        self.log = MythLog(module='Myth-Rec-to-Vid.py', db=self.db)

        # Capture the backend host name
        self.host = self.db.gethostname()

        # prep objects
        self.rec = Recorded((self.chanID, self.startTime), db=self.db)
        self.log(
            MythLog.GENERAL, MythLog.INFO, 'Using recording',
            '%s - %s' % (self.rec.title.encode('utf-8'),
                         self.rec.subtitle.encode('utf-8')))

        self.vid = Video(db=self.db).create({
            'title': '',
            'filename': '',
            'host': self.host
        })

        self.bend = MythBE(db=self.db)
Example #17
    def test_Dataheap_Recorded_001_01(self):
        """Test property 'artwork' in class 'Recorded' from 'dataheap'.
        """
        chanid        = self.testenv['RECCHANID']
        starttimeutc  = self.testenv['RECSTARTTIMEUTC']
        starttimemyth = self.testenv['RECSTARTTIMEMYTH']
        title         = self.testenv['RECTITLE']
        basename      = self.testenv['RECBASENAME']
        recordedid    = self.testenv['RECRECORDID']
        inetref       = self.testenv['RECINETREF']

        rec = Recorded((chanid, starttimemyth), db = self.mydb)
        artwork = rec.artwork
        self.assertTrue(isinstance(artwork, RecordedArtwork))
        self.assertTrue(isinstance(artwork.coverart, Artwork))
    def test_Methodheap_MythDB_002_searchOldRecorded_03(self):
        """Test 'searchOldRecorded' method from MythTV.MythDB()
           via 'chanid' and 'starttime'.
        """

        # get the 'progstart' time of the 'recorded' table:
        rec = Recorded((int(self.testenv['RECCHANID']), int(self.testenv['RECSTARTTIMEMYTH'])), db = self.mydb)
        starttime_utc_iso = rec.progstart.utcisoformat() +"Z"
        # use 'progstart' as 'starttime' in the oldrecorded table, like '2019-03-05T12:55:00Z':
        recs  =  self.mydb.searchOldRecorded( chanid    = self.testenv['RECCHANID']
                                            , starttime = starttime_utc_iso
                                            )
        rec02 =  next(recs)
        self.assertTrue(isinstance(rec02, OldRecorded))
        # check if accessing a property works
        self.assertTrue(rec02.recordid == int(self.testenv['RECRECORDID']))
Example #19
def runjob(jobid=None, chanid=None, starttime=None):
    db = MythDB()
    if jobid:
        job = Job(jobid, db=db)
        chanid = job.chanid
        starttime = job.starttime
    else:
        starttime = datetime.strptime(str(starttime),
                                      "%Y%m%d%H%M%S") + timedelta(hours=-5)
    rec = Recorded((chanid, starttime), db=db)

    timestr = time.strftime("%m-%d-%y %H:%M:%S")
    title_san = re.sub(r"\s", ".", rec.title)
    print(title_san)
    try:
        os.mkdir(log_dir)
    except OSError as e:
        pass
Example #20
    def __init__(self, opts, jobid=None):
        if jobid:
            self.job = Job(jobid)
            self.chanid = self.job.chanid
            self.starttime = self.job.starttime
            self.job.update(status=3)
        else:
            self.job = None
            self.chanid = opts.chanid
            self.starttime = opts.starttime

        self.opts = opts
        self.db = MythDB()
        self.log = MythLog(module='mythvidexport.py', db=self.db)

        # load setting strings
        self.get_format()

        # prep objects
        self.rec = Recorded((self.chanid, self.starttime), db=self.db)
        self.log(MythLog.IMPORTANT, 'Using recording',
                        '%s - %s' % (self.rec.title, self.rec.subtitle))
        self.vid = Video(db=self.db).create({'title':'', 'filename':'', 'host':gethostname()})

        # process data
        self.get_meta()
        self.get_dest()
        # kludgy fix for issue with altered filenames in the bindings
        self.vid.markup._refdat = (self.vid.filename,)

        # save file
        self.copy()
        if opts.seekdata:
            self.copy_seek()
        if opts.skiplist:
            self.copy_markup(static.MARKUP.MARK_COMM_START,
                             static.MARKUP.MARK_COMM_END)
        if opts.cutlist:
            self.copy_markup(static.MARKUP.MARK_CUT_START,
                             static.MARKUP.MARK_CUT_END)
        self.vid.update()
def runjob(jobid=None, chanid=None, starttime=None):
    db = MythDB()
    if jobid:
        job = Job(jobid, db=db)
        chanid = job.chanid
        starttime = job.starttime
    rec = Recorded((chanid, starttime), db=db)

    sg = findfile('/' + rec.basename, rec.storagegroup, db=db)
    if sg is None:
        print('Local access to recording not found.')
        sys.exit(1)

    infile = os.path.join(sg.dirname, rec.basename)
    tmpfile = '%s.tmp' % infile.rsplit('.', 1)[0]
    outfile = '%s.mp4' % infile.rsplit('.', 1)[0]

    # reformat 'starttime' for use with mythtranscode/ffmpeg/mythcommflag
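    # the chained replaces below strip ':', 'T', '-' and spaces,
    # e.g. '2015-08-27T00:42:00' -> '20150827004200'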
    starttime = str(starttime.utcisoformat().replace(u':', '').replace(
        u' ', '').replace(u'T', '').replace('-', ''))

    # Lossless transcode to strip cutlist
    if rec.cutlist == 1:
        if jobid:
            job.update({'status': 4, 'comment': 'Removing Cutlist'})

        task = System(path='mythtranscode', db=db)
        try:
            output = task('--chanid "%s"' % chanid,
                          '--starttime "%s"' % starttime, '--mpeg2',
                          '--honorcutlist', '-o "%s"' % tmpfile,
                          '2> /dev/null')
        except MythError as e:
            print('Command failed with output:\n%s' % e.stderr)
            if jobid:
                job.update({
                    'status': 304,
                    'comment': 'Removing Cutlist failed'
                })
            sys.exit(e.retcode)
#!/usr/bin/env python

"""
Provides user job base class to be used to build user job scripts.
db = MythDB()
j = Job(2353)
Recorded((j.get('chanid'),j.get('starttime')))
"""

from MythTV import MythDB
from MythTV import Job
from MythTV import Recorded
from argparse import ArgumentParser


class UserJob(object):
    """
    When subclassing, always call super(SubClassName,self).method() early in any overridden methods.
    
    Subclasses must provide: 
        <something>
            <purpose of something>
        <something2>
            <purpose of something2>

    Subclasses may provide:
        <something_else>
            <purpose of something_else>
        <something_else2>
            <purpose of something_else2>
Example #23
    def test_Dataheap_Recorded_003_01(self):
        """Test creation of a Recoreded and
           writing/reading to the 'recordedcredits' table.
           it tests the entries of the 'people' table as well.
           UUT: class DBDataCRef
        """
        class People(DBDataWrite):
            """
            People(data=None, db=None) --> People object to
            database table 'people', data is a `name` string.

            - get information about the table:
              $ mysql -h <master-backend-ip> -u mythtv -p<password-from-config.xml> mythconverg

              MariaDB [mythconverg]> describe people;
                +-------------+-----------------------+------+-----+---------+----------------+
                | Field       | Type                  | Null | Key | Default | Extra          |
                +-------------+-----------------------+------+-----+---------+----------------+
                | person      | mediumint(8) unsigned | NO   | PRI | NULL    | auto_increment |
                | name        | varchar(128)          | NO   | UNI |         |                |
                +-------------+-----------------------+------+-----+---------+----------------+
                2 rows in set (0.00 sec)

            """
            _table = 'people'
            _key = ['name']

            _defaults = {u'name': ''}

            ### end class People

        # a recording with french accents in the cast

        title = self.testenv[
            'RECFRTITLE']  # "Le Dernier Métro", "Die letzte Metro"
        chanid = self.testenv['RECFRCHANID']
        starttimemyth = self.testenv['RECFRSTARTTIMEMYTH']

        #print(title)

        castlist = [(u'Catherine Deneuve', u'actor'),
                    (u"Gérard Depardieu", u'actor'),
                    (u"Andréa Ferréol", u'actor'), (u"Jean Poiret", 'actor'),
                    (u"François Truffaut", u'director')]

        # ensure that "Jean Poiret" is already in the 'people' table
        try:
            p = People(u"Jean Poiret", db=self.mydb)
        except:
            p = People(db=self.mydb).create({'name': u"Jean Poiret"})
            p.update()

        # get a recording, search for the title
        recs = self.mydb.searchRecorded(title=title)
        rec = next(recs)
        self.assertEqual(rec.chanid, int(chanid))

        # backup the cast of this recording
        org_cast = rec.cast

        # add castlist to cast
        for c in castlist:
            rec.cast.add(*c)  # need to de-reference the tuple
        print(rec.cast)
        #sys.exit(1)
        rec.update()

        # check again if the members of the cast are listed
        # in the 'people' table
        cast_found = False
        for c in castlist:
            try:
                cname = People(c[0])
                cast_found = True
            except:
                pass
        # now cast should be listed in the people table
        self.assertTrue(cast_found)

        # get the len of the rec.casts
        c1_length = len(rec.cast)
        # delete one entry
        rec.cast.delete(*castlist[2])
        rec.update()
        self.assertEqual(c1_length - 1, len(rec.cast))

        # delete all entries
        rec.cast.clean()  # this does a commit as well
        self.assertEqual(len(rec.cast), 0)

        # add the previously saved cast back
        # to a new instance of that recording
        recn = Recorded((rec.chanid, rec.starttime), db=self.mydb)
        for cs in org_cast:
            recn.cast.add(cs)
        recn.update()

        self.assertEqual(len(recn.cast), len(org_cast))

        p.delete()
        p.update()
Example #24
    def test_Dataheap_Recorded_001_06(self):
        """Test method 'Recorded.importMetadata()' and 'Recorded.update()'
           in class 'Recorded' from 'dataheap'.
           Test Case:
           - get a recording
           - save the 'stars' value of the recording for later use
           - save the dictdata of the recording for later use
           - export the metadata to xml and save for later use
           - check xml metadata structure for the 'stars' i.e. 'userrating' value
           - change the 'stars' value and save it for later use
           - update (save to database) the recording with the new 'stars' value
           - get the recording again to a new instance
           - check the updated 'stars' value
           - import the saved metadata back to the recording
           - check the reverted 'stars' value
           - check that the dictionary from the new Recorded instance is compatible with the original one:
           - update Recorded.stars to the original value
           - check for correct value of stars in final instance of Recorded
        """
        chanid        = self.testenv['RECCHANID']
        starttimeutc  = self.testenv['RECSTARTTIMEUTC']
        starttimemyth = self.testenv['RECSTARTTIMEMYTH']
        title         = self.testenv['RECTITLE']
        basename      = self.testenv['RECBASENAME']
        recordedid    = self.testenv['RECRECORDID']
        inetref       = self.testenv['RECINETREF']
        stars         = self.testenv['RECSTARS']

        # Update database in case of any errors from previous test runs
        reczero = Recorded((chanid, starttimemyth), db = self.mydb)
        reczero.stars = stars
        reczero.update()

        rec = Recorded((chanid, starttimemyth), db = self.mydb)
        # save the 'stars' value i.e. 'userrating'
        recstars = rec.stars
        self.assertEqual("%.1f" %recstars, stars)
        # Recorded._origdata holds the dictionary pulled from database
        recdict = {}
        for key, value in rec._origdata.items():
            if isinstance(value, datetime):
                recdict[key] = value.mythformat()
            else:
                recdict[key] = value
        # export the metadata to xml and save for later use
        recmd = rec.exportMetadata()
        recmdxml = recmd.toXML()
        # check xml metadata structure for the 'stars' i.e. 'userrating' value
        # see https://www.mythtv.org/wiki/MythTV_Universal_Metadata_Format
        tree = recmdxml.getroottree()
        ### pprint(tree)    # lxml stuff
        recmdxml_stars = next(tree.iter('userrating')).text
        self.assertEqual("%.1f" %float(recmdxml_stars), stars)
        # change the 'stars' value and save it for later use
        rec.stars += 0.1
        recstars_updated = rec.stars
        # update (save to database) the recording with the new 'stars' value
        rec.update()
        # get the recording again to a new instance
        recnew = Recorded((chanid, starttimemyth), db = self.mydb)
        # check the updated 'stars' value
        self.assertEqual(recnew.stars, recstars_updated)
        # import the saved metadata back to the recording
        # Note: Recorded.importMetadata() makes an implicit Recorded.update()
        recnew.importMetadata(recmd, overwrite=True)
        # check the reverted 'stars' value
        self.assertEqual("%.1f" %recnew.stars, stars)
        # check that the dictionary from the new Recorded instance is compatible with the original one:
        for key, value in recdict.items():
            if isinstance(recnew._origdata[key], datetime):
                # don't act on 'lastmodified' entry, because we changed the rec in between:
                if key != 'lastmodified':
                    self.assertEqual(recdict[key], recnew._origdata[key].mythformat())

        self.assertEqual(len(recdict), len(recnew._origdata))
        # update Recorded.stars to the original value
        recnew.stars = recstars
        recnew.update()
        # check for correct value of stars in final instance of Recorded
        reclast = Recorded((chanid, starttimemyth), db = self.mydb)
        self.assertEqual("%.1f" %reclast.stars, stars)
Example #25
    def test_Dataheap_Recorded_002_02(self):
        """Test creation of a Recoreded and
           writing/reading to the 'recordedcredits' table.
           it tests the entries of the 'people' table as well.
           UUT: class DBDataCRef
        """
        """
        $ python - --nodblog --loglevel debug --verbose all --logfile /tmp/my_logfile
        Python 2.7.15+ (default, Oct  7 2019, 17:39:04)
        [GCC 7.4.0] on linux2
        Type "help", "copyright", "credits" or "license" for more information.
        >>> from MythTV import MythDB, Video, Recorded
        >>> d = MythDB()
        _initlogger call
        _parseinput call
        >>> rec =d.searchRecorded(title = 'Die letzte Metro')
        >>> r = next(rec)
        >>> r
        b'<Recorded 'Die letzte Metro','2014-10-16 22:16:00+02:00' at 0x7f96bde242a0>'
        >>> r.cast
        []
        >>> r.cast._refdat
        [11301L, datetime(2014, 10, 16, 20, 18, 21)]
        >>> r.cast._datfields
        [u'name', u'role']

        >>> r.cast.add('Catherine Deneuve', 'actor')
        >>> r.cast
        [[(u'name', 'Catherine Deneuve'), (u'role', 'actor')]]
        >>> r.cast.add(u"Gérard Depardieu", 'actor')
        >>> r.cast.add(u"Andréa Ferréol", 'actor')
        >>> r.cast.add(u"François Truffaut", 'director')
        >>> r.cast
        [[(u'name', 'Catherine Deneuve'), (u'role', 'actor')], [(u'name', u'G\xe9rard Depardieu'), (u'role', 'actor')], [(u'name', u'Andr\xe9a Ferr\xe9ol'), (u'role', 'actor')], [(u'name', u'Fran\xe7ois Truffaut'), (u'role', 'director')]]
        >>> r.update()

        >>> print(r.cast[1]['name'])
        Gérard Depardieu


        >>> r.cast.add(u"Jean Poiret", 'actor')
        >>> r.cast.add(u"Jean-Louis Richard", 'actor')
        >>> r.cast
        [[(u'name', 'Catherine Deneuve'), (u'role', 'actor')], [(u'name', u'G\xe9rard Depardieu'), (u'role', 'actor')], [(u'name', u'Andr\xe9a Ferr\xe9ol'), (u'role', 'actor')], [(u'name', u'Fran\xe7ois Truffaut'), (u'role', 'director')], [(u'name', u'Jean Poiret'), (u'role', 'actor')], [(u'name', u'Jean-Louis Richard'), (u'role', 'actor')]]
        >>> r.update()


        >>> r1 = Recorded((r.chanid, r.starttime), db =d)
        >>> r1
        b'<Recorded 'Die letzte Metro','2014-10-16 22:16:00+02:00' at 0x7f96bde2d868>'
        >>> r1.cast
        [[(u'name', u'Catherine Deneuve'), (u'role', u'actor')], [(u'name', u'G\xe9rard Depardieu'), (u'role', u'actor')], [(u'name', u'Andr\xe9a Ferr\xe9ol'), (u'role', u'actor')], [(u'name', u'Fran\xe7ois Truffaut'), (u'role', u'director')], [(u'name', u'Jean Poiret'), (u'role', u'actor')], [(u'name', u'Jean-Louis Richard'), (u'role', u'actor')]]
        >>> r1.cast.delete(u'Jean-Louis Richard', u'actor')
        >>> r1.cast
        [[(u'name', u'Catherine Deneuve'), (u'role', u'actor')], [(u'name', u'G\xe9rard Depardieu'), (u'role', u'actor')], [(u'name', u'Andr\xe9a Ferr\xe9ol'), (u'role', u'actor')], [(u'name', u'Fran\xe7ois Truffaut'), (u'role', u'director')], [(u'name', u'Jean Poiret'), (u'role', u'actor')]]
        >>> r1.update()
        >>> r1.cast
        [[(u'name', u'Catherine Deneuve'), (u'role', u'actor')], [(u'name', u'G\xe9rard Depardieu'), (u'role', u'actor')], [(u'name', u'Andr\xe9a Ferr\xe9ol'), (u'role', u'actor')], [(u'name', u'Fran\xe7ois Truffaut'), (u'role', u'director')], [(u'name', u'Jean Poiret'), (u'role', u'actor')]]


        Attention: Recorded.cast.delete() deletes the entries in the 'people' table as well!

        >>> r1.cast.delete(u"Jean Poiret", 'actor')
        >>> r1.update()
        >>> r1.cast.delete(u"François Truffaut", 'director')
        >>> r1.update()
        >>> r1.cast
        [[(u'name', u'Catherine Deneuve'), (u'role', u'actor')], [(u'name', u'G\xe9rard Depardieu'), (u'role', u'actor')], [(u'name', u'Andr\xe9a Ferr\xe9ol'), (u'role', u'actor')]]


        """
        class People(DBData):
            """
            People(data=None, db=None) --> People object to
            database table 'people', data is a `name` string.

            - get information about the table:
              $ mysql -h <master-backend-ip> -u mythtv -p<password-from-config.xml> mythconverg

              MariaDB [mythconverg]> describe people;
                +-------------+-----------------------+------+-----+---------+----------------+
                | Field       | Type                  | Null | Key | Default | Extra          |
                +-------------+-----------------------+------+-----+---------+----------------+
                | person      | mediumint(8) unsigned | NO   | PRI | NULL    | auto_increment |
                | name        | varchar(128)          | NO   | UNI |         |                |
                +-------------+-----------------------+------+-----+---------+----------------+
                2 rows in set (0.00 sec)

            """
            _table = 'people'
            _key = ['name']

            ### end class People

        # a recording with french accents in the cast

        title = self.testenv['RECFRTITLE']  # "Le Dernier Métro"
        chanid = self.testenv['RECFRCHANID']
        starttimemyth = self.testenv['RECFRSTARTTIMEMYTH']

        print(title)

        castlist = [(u'Catherine Deneuve', u'actor'),
                    (u"Gérard Depardieu", u'actor'),
                    (u"Andréa Ferréol", u'actor'),
                    (u"François Truffaut", u'director')]

        # get a recording, search for the title
        recs = self.mydb.searchRecorded(title=title)
        rec = next(recs)
        self.assertEqual(rec.chanid, int(chanid))

        # backup the cast of this recording
        org_cast = rec.cast

        #         ## backup the people table
        #         #org_people = People(db=self.mydb)

        #         # check if entries in castlist does not occur in cast
        #         for name,role in castlist:
        #             print(name)
        #             print(role)

        # #            if  in rec.cast:
        # #                rec.cast.delete(*c)      # need to dereference the tuple
        #         sys.exit(1)
        #         rec.update()
        #         # remember length
        #         cast_length = len(rec.cast)
        #         # check if the members of the cast are listed
        #         # in the 'people' table
        #         cast_found = False
        #         for c in castlist:
        #             try:
        #                 cname = People(c[0])
        #                 cast_found = True
        #             except:
        #                 pass
        #         # cast should not be listed in the people table
        #         self.assertFalse(cast_found)

        # add castlist to cast
        for c in castlist:
            rec.cast.add(*c)  # need to dereference the tuple
        print(rec.cast)
        #sys.exit(1)
        rec.update()

        # check again if the members of the cast are listed
        # in the 'people' table
        cast_found = False
        for c in castlist:
            try:
                cname = People(c[0])
                cast_found = True
            except:
                pass
        # now cast should be listed in the people table
        self.assertTrue(cast_found)

        # get the len of the rec.casts
        c1_length = len(rec.cast)
        # delete one entry
        rec.cast.delete(*castlist[2])
        rec.update()
        self.assertEqual(c1_length - 1, len(rec.cast))

        # delete all entries
        rec.cast.clean()  # this does a commit as well
        self.assertEqual(len(rec.cast), 0)

        # add the previously saved cast back
        # to a new instance of that recording
        recn = Recorded((rec.chanid, rec.starttime), db=self.mydb)
        for cs in org_cast:
            recn.cast.add(cs)
        recn.update()

        self.assertEqual(len(recn.cast), len(org_cast))
Example #26
    def test_Dataheap_Recorded_002_01(self):
        """Test creation of a Recoreded and
           writing/reading to the 'recordedrating' table.
           UUT: class DBDataRef
           Caution: recn.update() does not delete a removed entry from the 'recordedrating' table !
           Only recn.rating.clean() removes all entries.
        """

        chanid = self.testenv['DOWNCHANID']
        starttimemyth = self.testenv['DOWNSTARTTIME']

        rec = Recorded((chanid, starttimemyth), db=self.mydb)

        # Recorded.rating is a list of lists of tuples
        # [[(u'system', u'ABCD'), (u'rating', '08.15')], [(u'system', u'WXYZ'), (u'rating', u'0.11')]]

        # add ratings to the recorded instance:
        rec.rating.add(u'ABCD', u'41.98')
        rec.rating.add(u'WXYZ', u'0.11')

        # check the ratings:
        #print(rec.rating)
        s0_found = s1_found = False
        r0_found = r1_found = False
        for (s, r) in rec.rating:
            # print(s)
            # print(r)
            if s == u'ABCD':
                s0_found = True
            if s == u'WXYZ':
                s1_found = True
            if r == u'41.98':
                r0_found = True
            if r == u'0.11':
                r1_found = True
        self.assertTrue(s0_found)
        self.assertTrue(s1_found)
        self.assertTrue(r0_found)
        self.assertTrue(r1_found)

        # revert last changes:
        rec.rating.revert()
        # check for an empty list:
        #print(rec.rating)
        self.assertEqual(len(rec.rating), 0)

        # add ratings again:
        rec.rating.add('ABCD', '41.98')
        rec.rating.add('QWERTZ', 'blah')
        rec.rating.add('WXYZ', '0.11')
        # commit these updates:
        rec.update()

        # get the recorded data again:
        recn = Recorded((chanid, starttimemyth), db=self.mydb)
        # edit existing rating data:
        idx = None
        for i, (s, r) in enumerate(recn.rating):
            if s == 'ABCD':
                idx = i
                break
        if idx is not None:
            recn.rating[idx]['rating'] = u'08.15'
        # commit that change:
        recn.update()
        # check the changed value:
        #print(rec.rating)
        rn_found = False
        for (s, r) in recn.rating:
            if r == u'08.15':
                rn_found = True
        self.assertTrue(rn_found)

        # delete a rating:
        recn.rating.delete(u'WXYZ', u'0.11')
        recn.update()
        #print(recn.rating)
        sn_found = False
        for (s, r) in recn.rating:
            if s == u'WXYZ':
                sn_found = True
        self.assertFalse(sn_found)

        # clean all ratings for this recorded instance:
        recn.rating.clean()
        recn.update()
        self.assertEqual(len(recn.rating), 0)
class VIDEO:
    def __init__(self, opts, jobid=None):
        if jobid:
            self.job = Job(jobid)
            self.chanid = self.job.chanid
            self.starttime = self.job.starttime
            self.job.update(status=Job.STARTING)
        else:
            self.job = None
            self.chanid = opts.chanid
            self.starttime = opts.starttime

        self.opts = opts
        self.db = MythDB()
        self.log = MythLog(module='mythvidexport.py', db=self.db)

        # load setting strings
        self.get_format()

        # prep objects
        self.rec = Recorded((self.chanid, self.starttime), db=self.db)
        self.log(
            MythLog.GENERAL, MythLog.INFO, 'Using recording',
            '%s - %s' % (self.rec.title.encode('utf-8'),
                         self.rec.subtitle.encode('utf-8')))
        self.vid = Video(db=self.db).create({
            'title': '',
            'filename': '',
            'host': gethostname()
        })

        # process data
        self.get_meta()
        self.get_dest()
        # bug fix to work around limitation in the bindings where DBDataRef classes
        # are mapped to the filename at time of Video element creation. since the
        # filename is specified as blank when the video is created, the markup
        # handler is not properly initialized
        self.vid.markup._refdat = (self.vid.filename, )

        # save file
        self.copy()
        if opts.seekdata:
            self.copy_seek()
        if opts.skiplist:
            self.copy_markup(static.MARKUP.MARK_COMM_START,
                             static.MARKUP.MARK_COMM_END)
        if opts.cutlist:
            self.copy_markup(static.MARKUP.MARK_CUT_START,
                             static.MARKUP.MARK_CUT_END)
        self.vid.update()

        # delete old file
        if opts.delete:
            self.rec.delete()

    def get_format(self):
        host = self.db.gethostname()
        # TV Format
        if self.opts.tformat:
            self.tfmt = self.opts.tformat
        elif self.db.settings[host]['mythvideo.TVexportfmt']:
            self.tfmt = self.db.settings[host]['mythvideo.TVexportfmt']
        else:
            self.tfmt = 'Television/%TITLE%/Season %SEASON%/'+\
                            '%TITLE% - S%SEASON%E%EPISODEPAD% - %SUBTITLE%'

        # Movie Format
        if self.opts.mformat:
            self.mfmt = self.opts.mformat
        elif self.db.settings[host]['mythvideo.MOVIEexportfmt']:
            self.mfmt = self.db.settings[host]['mythvideo.MOVIEexportfmt']
        else:
            self.mfmt = 'Movies/%TITLE%'

        # Generic Format
        if self.opts.gformat:
            self.gfmt = self.opts.gformat
        elif self.db.settings[host]['mythvideo.GENERICexportfmt']:
            self.gfmt = self.db.settings[host]['mythvideo.GENERICexportfmt']
        else:
            self.gfmt = 'Videos/%TITLE%'

    def get_meta(self):
        self.vid.hostname = self.db.gethostname()
        if self.rec.inetref:
            # good data is available, use it
            if self.rec.season > 0 or self.rec.episode > 0:
                self.log(self.log.GENERAL, self.log.INFO,
                         'Performing TV export with local data.')
                self.type = 'TV'
                grab = VideoGrabber(self.type)
                metadata = grab.grabInetref(self.rec.inetref, self.rec.season,
                                            self.rec.episode)
            else:
                self.log(self.log.GENERAL, self.log.INFO,
                         'Performing Movie export with local data.')
                self.type = 'MOVIE'
                grab = VideoGrabber(self.type)
                metadata = grab.grabInetref(self.rec.inetref)
        elif self.opts.listingonly:
            # force use of local data
            if self.rec.subtitle:
                self.log(self.log.GENERAL, self.log.INFO,
                         'Forcing TV export with local data.')
                self.type = 'TV'
            else:
                self.log(self.log.GENERAL, self.log.INFO,
                         'Forcing Movie export with local data.')
                self.type = 'MOVIE'
            metadata = self.rec.exportMetadata()
        else:
            if self.rec.subtitle:
                # subtitle exists, assume tv show
                self.type = 'TV'
                self.log(self.log.GENERAL, self.log.INFO,
                         'Attempting TV export.')
                grab = VideoGrabber(self.type)
                match = grab.sortedSearch(self.rec.title, self.rec.subtitle)
            else:  # assume movie
                self.type = 'MOVIE'
                self.log(self.log.GENERAL, self.log.INFO,
                         'Attempting Movie export.')
                grab = VideoGrabber(self.type)
                match = grab.sortedSearch(self.rec.title)

            if len(match) == 0:
                # no match found
                self.log(self.log.GENERAL, self.log.INFO,
                         'Falling back to generic export.')
                self.type = 'GENERIC'
                metadata = self.rec.exportMetadata()
            elif (len(match) > 1) & (match[0].levenshtein > 0):
                # multiple matches found, and closest is not exact
                self.vid.delete()
                raise MythError('Multiple metadata matches found: '\
                                                   +self.rec.title)
            else:
                self.log(self.log.GENERAL, self.log.INFO,
                         'Importing content from', match[0].inetref)
                metadata = grab.grabInetref(match[0])

        self.vid.importMetadata(metadata)
        self.log(self.log.GENERAL, self.log.INFO, 'Import complete')

    def get_dest(self):
        if self.type == 'TV':
            self.vid.filename = self.process_fmt(self.tfmt)
        elif self.type == 'MOVIE':
            self.vid.filename = self.process_fmt(self.mfmt)
        elif self.type == 'GENERIC':
            self.vid.filename = self.process_fmt(self.gfmt)

    def process_fmt(self, fmt):
        # replace fields from viddata
        #print self.vid.data
        ext = '.' + self.rec.basename.rsplit('.', 1)[1]
        rep = (('%TITLE%', 'title', '%s'), ('%SUBTITLE%', 'subtitle', '%s'),
               ('%SEASON%', 'season', '%d'), ('%SEASONPAD%', 'season', '%02d'),
               ('%EPISODE%', 'episode', '%d'), ('%EPISODEPAD%', 'episode',
                                                '%02d'),
               ('%YEAR%', 'year', '%s'), ('%DIRECTOR%', 'director', '%s'))
        for tag, data, format in rep:
            if self.vid[data]:
                fmt = fmt.replace(tag, format % self.vid[data])
            else:
                fmt = fmt.replace(tag, '')

        # replace fields from program data
        rep = (('%HOSTNAME%', 'hostname', '%s'), ('%STORAGEGROUP%',
                                                  'storagegroup', '%s'))
        for tag, data, format in rep:
            data = getattr(self.rec, data)
            fmt = fmt.replace(tag, format % data)

#       fmt = fmt.replace('%CARDID%',self.rec.cardid)
#       fmt = fmt.replace('%CARDNAME%',self.rec.cardid)
#       fmt = fmt.replace('%SOURCEID%',self.rec.cardid)
#       fmt = fmt.replace('%SOURCENAME%',self.rec.cardid)
#       fmt = fmt.replace('%CHANNUM%',self.rec.channum)
#       fmt = fmt.replace('%CHANNAME%',self.rec.cardid)

        if len(self.vid.genre):
            fmt = fmt.replace('%GENRE%', self.vid.genre[0].genre)
        else:
            fmt = fmt.replace('%GENRE%', '')


#       if len(self.country):
#           fmt = fmt.replace('%COUNTRY%',self.country[0])
#       else:
#           fmt = fmt.replace('%COUNTRY%','')
        return fmt + ext

    def copy(self):
        stime = time.time()
        srcsize = self.rec.filesize
        htime = [stime, stime, stime, stime]

        self.log(MythLog.GENERAL|MythLog.FILE, MythLog.INFO, "Copying myth://%s@%s/%s"\
               % (self.rec.storagegroup, self.rec.hostname, self.rec.basename)\
                                                    +" to myth://Videos@%s/%s"\
                                          % (self.vid.host, self.vid.filename))
        srcfp = self.rec.open('r')
        dstfp = self.vid.open('w', nooverwrite=True)

        if self.job:
            self.job.setStatus(Job.RUNNING)
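        # Copy in 16 MB (2**24 byte) chunks; the loop ends on the final, shorter
        # read.  'htime' holds the last four timestamps, so 'rate' below is
        # roughly a moving average of the transfer rate over the last four chunks.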
        tsize = 2**24
        while tsize == 2**24:
            tsize = min(tsize, srcsize - dstfp.tell())
            dstfp.write(srcfp.read(tsize))
            htime.append(time.time())
            rate = float(tsize * 4) / (time.time() - htime.pop(0))
            remt = (srcsize - dstfp.tell()) / rate
            if self.job:
                self.job.setComment("%02d%% complete - %d seconds remaining" %\
                            (dstfp.tell()*100/srcsize, remt))
        srcfp.close()
        dstfp.close()

        self.vid.hash = self.vid.getHash()

        self.log(MythLog.GENERAL | MythLog.FILE, MythLog.INFO,
                 "Transfer Complete",
                 "%d seconds elapsed" % int(time.time() - stime))

        if self.opts.reallysafe:
            if self.job:
                self.job.setComment("Checking file hashes")
            self.log(MythLog.GENERAL | MythLog.FILE, MythLog.INFO,
                     "Checking file hashes.")
            srchash = hashfile(self.rec.open('r'))
            dsthash = hashfile(self.vid.open('r'))
            if srchash != dsthash:
                raise MythError('Source hash (%s) does not match destination hash (%s)' \
                            % (srchash, dsthash))
        elif self.opts.safe:
            self.log(MythLog.GENERAL | MythLog.FILE, MythLog.INFO,
                     "Checking file sizes.")
            be = MythBE(db=self.vid._db)
            try:
                srcsize = be.getSGFile(self.rec.hostname, self.rec.storagegroup, \
                                       self.rec.basename)[1]
                dstsize = be.getSGFile(self.vid.host, 'Videos',
                                       self.vid.filename)[1]
            except:
                raise MythError('Could not query file size from backend')
            if srcsize != dstsize:
                raise MythError('Source size (%d) does not match destination size (%d)' \
                            % (srcsize, dstsize))

        if self.job:
            self.job.setComment("Complete - %d seconds elapsed" % \
                            (int(time.time()-stime)))
            self.job.setStatus(Job.FINISHED)

    def copy_seek(self):
        for seek in self.rec.seek:
            self.vid.markup.add(seek.mark, seek.offset, seek.type)

    def copy_markup(self, start, stop):
        for mark in self.rec.markup:
            if mark.type in (start, stop):
                self.vid.markup.add(mark.mark, 0, mark.type)
def runjob(jobid=None, chanid=None, starttime=None, tzoffset=None):
    global estimateBitrate
    db = MythDB()

    if jobid:
        job = Job(jobid, db=db)
        chanid = job.chanid
        utcstarttime = job.starttime
    else:
        job = None
        #utcstarttime = datetime.strptime(starttime, "%Y%m%d%H%M%S%z")
        utcstarttime = parse(starttime)
        utcstarttime = utcstarttime + timedelta(hours=tzoffset)

    if debug:
        print('chanid "%s"' % chanid)
        print('utcstarttime "%s"' % utcstarttime)

    rec = Recorded((chanid, utcstarttime), db=db)
    utcstarttime = rec.starttime
    starttime_datetime = utcstarttime
   
    # reformat 'starttime' for use with mythtranscode/ffmpeg/mythcommflag
    starttime = str(utcstarttime.utcisoformat().replace(':', '').replace(' ', '').replace('T', '').replace('-', ''))
    if debug:
        print('mythtv format starttime "%s"' % starttime)
    input_filesize = rec.filesize
    
    if rec.commflagged:
        if debug:
            print('Recording has been scanned to detect commercial breaks.')
        waititer = 1
        keepWaiting = True
        while keepWaiting:
            keepWaiting = False
            for index,jobitem in reversed(list(enumerate(db.searchJobs(chanid=chanid, starttime=starttime_datetime)))):
                if jobitem.type == jobitem.COMMFLAG:  # Commercial flagging job
                    if debug:
                        print('Commercial flagging job detected with status %s' % jobitem.status)
                    if jobitem.status == jobitem.RUNNING: # status = RUNNING?
                        job.update({'status':job.PAUSED, 
                                    'comment':'Waited %d secs for the commercial flagging job' % (waititer*POLL_INTERVAL) \
                                     + ' currently running on this recording to complete.'})
                        if debug:
                            print('Waited %d secs for the commercial flagging job' % (waititer*POLL_INTERVAL) \
                                  + ' currently running on this recording to complete.')
                        time.sleep(POLL_INTERVAL)
                        keepWaiting = True
                        waititer += 1
                        break
    else:
        if debug:
            print('Recording has not been scanned to detect/remove commercial breaks.')
        if require_commflagged:
            if jobid:
                job.update({'status':job.RUNNING, 'comment':'Required commercial flagging for this file is not found. '
                            + 'Flagging commercials and cancelling any queued commercial flagging.'})
            # cancel any queued job to flag commercials for this recording and run commercial flagging in this script
            for index,jobitem in reversed(list(enumerate(db.searchJobs(chanid=chanid,starttime=starttime_datetime)))):
                if debug:
                    if index==0:
                        print(list(jobitem.keys()))
                    print(index,jobitem.id,jobitem.chanid)

                if jobitem.type == jobitem.COMMFLAG:  # Commercial flagging job
                    if jobitem.status == jobitem.RUNNING: # status = RUNNING?
                        jobitem.cmds = jobitem.STOP # stop command from the frontend to stop the commercial flagging job
                    #jobitem.setStatus(jobitem.CANCELLED)
                    #jobitem.setComment('Cancelled: Transcode command ran commercial flagging for this recording.')
                    jobitem.update({'status':jobitem.CANCELLED, 
                                    'comment':'A user transcode job ran commercial flagging for'
                                    + ' this recording and cancelled this job.'})
            if debug:
                print('Flagging Commercials...')
            # Call "mythcommflag --chanid $CHANID --starttime $STARTTIME"
            task = System(path='mythcommflag', db=db)
            try:
                output = task('--chanid "%s"' % chanid,
                              '--starttime "%s"' % starttime,
                              '2> /dev/null')
            except MythError as e:
                # it seems mythcommflag always exits with a decoding error "eno: Unknown error 541478725 (541478725)"
                pass
                #print 'Command failed with output:\n%s' % e.stderr
                #if jobid:
                #    job.update({'status':304, 'comment':'Flagging commercials failed'})
                #sys.exit(e.retcode)


    sg = findfile(rec.basename, rec.storagegroup, db=db)
    if sg is None:
        print('Local access to recording not found.')
        sys.exit(1)

    infile = os.path.join(sg.dirname, rec.basename)
    tmpfile = '%s.tmp' % infile.rsplit('.',1)[0]
 #   tmpfile = infile
    outfile = '%s.mp4' % infile.rsplit('.',1)[0]
    if debug:
        print('tmpfile "%s"' % tmpfile)


    clipped_bytes = 0
    # If selected, create a cutlist to remove commercials via mythtranscode by running:
    # mythutil --gencutlist --chanid $CHANID --starttime $STARTTIME
    if generate_commcutlist:
        if jobid:
            job.update({'status':job.RUNNING, 'comment':'Generating Cutlist for commercial removal'})
        task = System(path='mythutil', db=db)
        try:
            output = task('--gencutlist',
                          '--chanid "%s"' % chanid,
                          '--starttime "%s"' % starttime)
#                          '--loglevel debug',
#                          '2> /dev/null')
        except MythError as e:
            print('Command "mythutil --gencutlist" failed with output:\n%s' % e.stderr)
            if jobid:
                job.update({'status':job.ERRORED, 'comment':'Generation of commercial Cutlist failed'})
            sys.exit(e.retcode)

    # Lossless transcode to strip cutlist
    if generate_commcutlist or rec.cutlist==1:
        if jobid:
            job.update({'status':job.RUNNING, 'comment':'Removing Cutlist'})
        task = System(path='mythtranscode', db=db)
        try:
            output = task('--chanid "%s"' % chanid,
                          '--starttime "%s"' % starttime,
                          '--mpeg2',
                          '--honorcutlist',
                          '-o "%s"' % tmpfile,
                          '1>&2')
#                          '2> /dev/null')
            clipped_filesize = os.path.getsize(tmpfile)
            clipped_bytes = input_filesize - clipped_filesize
            clipped_compress_pct = float(clipped_bytes)/input_filesize 
            rec.commflagged = 0
        except MythError as e:
            print('Command "mythtranscode --honorcutlist" failed with output:\n%s' % e.stderr)
            if jobid:
                job.update({'status':job.ERRORED, 'comment':'Removing Cutlist failed. Copying file instead.'})
#            sys.exit(e.retcode)
            copyfile('%s' % infile, '%s' % tmpfile)
            clipped_filesize = input_filesize
            clipped_bytes = 0
            clipped_compress_pct = 0
            pass
    else:
        if jobid:
            job.update({'status':job.RUNNING, 'comment':'Creating temporary file for transcoding.'})
        copyfile('%s' % infile, '%s' % tmpfile)
        clipped_filesize = input_filesize
        clipped_bytes = 0
        clipped_compress_pct = 0

    duration_secs = 0
    isHD = False    # default when bitrate estimation below is skipped or fails
    # Estimate bitrate, and detect duration and number of frames
    if estimateBitrate:
        if jobid:
            job.update({'status':job.RUNNING, 'comment':'Estimating bitrate; detecting frames per second, and resolution.'})

        duration_secs, e = get_duration(db, rec, transcoder, tmpfile)
        if duration_secs>0:
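            # average bitrate in kbit/s: file size in bytes * 8 bits, divided by (1024 * duration in seconds)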
            bitrate = int(clipped_filesize*8/(1024*duration_secs))
        else:
            print('Bitrate estimation failed; falling back to constant rate factor encoding.')
            estimateBitrate = False
            duration_secs = 0
        print(e.stderr.decode('utf-8'))
        # get framerate of mpeg2 video stream and detect if stream is HD
        r = re.compile('mpeg2video (.*?) fps,')
        m = r.search(e.stderr.decode('utf-8'))
        strval = m.group(1)
        if debug:
            print(strval)
        isHD = False
        if "1920x1080" in strval or "1280x720" in strval or "2560x1440" in strval:
            if debug:
                print('Stream is HD')
            isHD = True
        else:
            if debug:
                print('Stream is not HD')
        framerate = float(m.group(1).split(' ')[-1])
        if debug:
            print('Framerate %s' % framerate)

    # Setup transcode video bitrate and quality parameters
    # if estimateBitrate is true and the input content is HD:
    #     encode 'medium' preset and vbitrate = inputfile_bitrate*compressionRatio
    # else:
    #     encode at user default preset and constant rate factor ('slow' and 20) 
    preset = preset_nonHD
    if estimateBitrate:
        if isHD:
            h264_bitrate = int(bitrate*compressionRatio)
            # HD coding with specified target bitrate (CBR-style cap)
            if hdvideo_tgt_bitrate > 0 and h264_bitrate > hdvideo_tgt_bitrate:
                h264_bitrate = hdvideo_tgt_bitrate
                vbitrate_param = '-b:v %dk' % h264_bitrate
            else:   # HD coding with disabled or acceptable target bitrate (CRF encoding)
                vbitrate_param = '-crf:v %s' % crf
            preset = preset_HD
        else: # non-HD encoding (CRF encoding)
            vbitrate_param = '-crf:v %s' % crf            
    else:
        vbitrate_param = '-crf:v %s' % crf
    if hdvideo_min_bitrate > 0:
        vbitrate_param = vbitrate_param + ' -minrate %sk' % hdvideo_min_bitrate
    if hdvideo_max_bitrate > 0:
        vbitrate_param = vbitrate_param + ' -maxrate %sk' % hdvideo_max_bitrate
    if hdvideo_max_bitrate > 0 or hdvideo_min_bitrate > 0:
        vbitrate_param = vbitrate_param + ' -bufsize %sk' % device_bufsize
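    # e.g. with crf=20 and only a maximum bitrate cap configured this yields something like
    # '-crf:v 20 -maxrate 5500k -bufsize 2048k' (illustrative values, not defaults from this script)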

    if debug:
        print('Video bitrate parameter "%s"' % vbitrate_param)
        print('Video h264 preset parameter "%s"' % preset)

    # Setup transcode audio bitrate and quality parameters
    # Right now, the setup is as follows:
    # if input is HD: 
    #    copy audio streams to output, i.e., input=output audio
    # else:
    #    output is libfdk_aac encoded at 128kbps 
    if isHD:
        abitrate_param = abitrate_param_HD  # preserve 5.1 audio
    else:
        abitrate_param = abitrate_param_nonHD
    if debug:
        print('Audio bitrate parameter "%s"' % abitrate_param)

    # Transcode to mp4
#    if jobid:
#        job.update({'status':4, 'comment':'Transcoding to mp4'})

    # ffmpeg output is redirected to the temporary file tmpstatusfile; the encode runs in a
    # worker thread while the main thread continuously reads this file to report progress.
    # see the while loop below for the monitoring code
    tf = tempfile.NamedTemporaryFile()
    tmpstatusfile = tf.name
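    # keep tf in scope: NamedTemporaryFile removes the file from disk once it is closed or
    # garbage collected (note the commented-out tf.close() near the end of the job)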
#    tmpstatusfile = '/tmp/ffmpeg-transcode.txt'
    if debug:
        print('Using temporary file "%s" for ffmpeg status updates.' % tmpstatusfile)
    res = []
    # create a thread to perform the encode
    ipq = queue.Queue()
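    # queue used by the encode thread to request a clean exit; checked after t.join() below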
    t = threading.Thread(target=wrapper, args=(encode, 
                        (jobid, db, job, ipq, preset, vbitrate_param, abitrate_param,
                         tmpfile, outfile, tmpstatusfile,), res))
    t.start()
    # wait for ffmpeg to open the file and emit its initialization information 
    # before we start the monitoring process
    time.sleep(1) 
    # open the temporary file holding the ffmpeg output text and process it to generate status updates
    hangiter = 0
    with open(tmpstatusfile) as f:
        # read all the opening ffmpeg status/analysis lines
        lines = f.readlines()
        # set initial progress to -1
        prev_progress=-1
        framenum=0
        fps=1.0
        while t.is_alive():
            # read all output since last readline() call
            lines = f.readlines()
            if len(lines) > 0:
                # every ffmpeg output status line ends with a carriage return '\r'
                # split the last read line at these locations
                lines=lines[-1].split('\r')
#                if debug:
#                    print lines;
                hangiter=0
                if len(lines) > 1 and lines[-2].startswith('frame'):
                    # since typical reads have the last line ending with '\r', the last complete
                    # status message is at index [-2]; start processing this line
                    # replace multiple spaces with one space
                    lines[-2] = re.sub(' +',' ',lines[-2])
                    # remove any spaces after equals signs
                    lines[-2] = re.sub('= +','=',lines[-2])
                    # split the fields at the spaces; for typical status lines the first two
                    # fields are frame=XXXX and fps=YYYY; parse their values
                    values = lines[-2].split(' ')
                    if len(values) > 1:
                        if debug:
                            print('values %s' % values)
                        prev_framenum = framenum
                        prev_fps = fps
                        try:
                            # framenum = current frame number being encoded
                            framenum = int(values[0].split('=')[1])
                            # fps = frames per second for the encoder
                            fps = float(values[1].split('=')[1])
                        except ValueError as e:
                            print('ffmpeg status parse exception: "%s"' % e)
                            framenum = prev_framenum
                            fps = prev_fps
                            pass
                    # progress = 0-100 represent percent complete for the transcode
                    progress = int((100*framenum)/(duration_secs*framerate))
                    # eta_secs = estimated number of seconds until transcoding is complete
                    eta_secs = int((float(duration_secs*framerate)-framenum)/fps)
                    # pct_realtime = how many real seconds it takes to encode 1 second of video
                    pct_realtime = float(fps/framerate) 
                    if debug:
                        print('framenum = %d fps = %.2f' % (framenum, fps))                
                    if progress != prev_progress:
                        if debug:
                            print('Progress %d%% encoding %.1f frames per second ETA %d mins' \
                                  % ( progress, fps, float(eta_secs)/60))
                        if jobid:
                            progress_str = 'Transcoding to mp4 %d%% complete ETA %d mins fps=%.1f.' \
                                  % ( progress, float(eta_secs)/60, fps)
                            job.update({'status':job.RUNNING, 'comment': progress_str})
                        prev_progress = progress
                elif len(lines) > 1:
                    if debug:
                        print('Read pathological output %s' % lines[-2])
            else:
                if debug:
                    print('Read no lines of ffmpeg output for %s secs. Possible hang?' % (POLL_INTERVAL*hangiter))
                hangiter = hangiter + 1
                if jobid:
                    progress_str = 'Read no lines of ffmpeg output for %s secs. Possible hang?' % (POLL_INTERVAL*hangiter)
                    job.update({'status':job.RUNNING, 'comment': progress_str})
            time.sleep(POLL_INTERVAL)
        if debug:
            print('res = "%s"' % res)

    t.join(1)
    try:
        if ipq.get_nowait() == CleanExit:
            sys.exit()
    except queue.Empty:
        pass

    if flush_commskip:
        task = System(path='mythutil')
        task.command('--chanid %s' % chanid,
                     '--starttime %s' % starttime,
                     '--clearcutlist',
                     '2> /dev/null')
        task = System(path='mythutil')
        task.command('--chanid %s' % chanid,
                     '--starttime %s' % starttime,
                     '--clearskiplist',
                     '2> /dev/null')

    if flush_commskip:
        for index,mark in reversed(list(enumerate(rec.markup))):
            if mark.type in (rec.markup.MARK_COMM_START, rec.markup.MARK_COMM_END):
                del rec.markup[index]
        rec.bookmark = 0
        rec.cutlist = 0
        rec.markup.commit()

#    tf.close();
#    os.remove(tmpstatusfile);
    rec.basename = os.path.basename(outfile)
    rec.filesize = os.path.getsize(outfile)
#    rec.commflagged = 0
    rec.transcoded = 1
    rec.seek.clean()
    rec.update()

    os.remove(infile)
    # Cleanup the old *.png files
    for filename in glob('%s*.png' % infile):
        os.remove(filename)
    os.remove(tmpfile)
    try:
        os.remove('%s.map' % tmpfile)
    except OSError:
        pass

    output_filesize = rec.filesize
    output_bitrate = 0
    if duration_secs > 0:
        output_bitrate = int(output_filesize*8/(1024*duration_secs)) # kbps
    actual_compression_ratio = 1 - float(output_filesize)/clipped_filesize
    compressed_pct = 1 - float(output_filesize)/input_filesize

    if build_seektable:
        if jobid:
            job.update({'status':job.RUNNING, 'comment':'Rebuilding seektable'})
        task = System(path='mythcommflag')
        task.command('--chanid %s' % chanid,
                     '--starttime %s' % starttime,
                     '--rebuild',
                     '2> /dev/null')

    # fix the duration in the recorded markup table; it will be off if commercials are removed
    duration_msecs, e = get_duration(db, rec, transcoder, outfile)
    duration_msecs = 1000*duration_msecs
    for index,mark in reversed(list(enumerate(rec.markup))):
        # find the duration markup entry and correct any error in the video duration that might be there
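        # type 33 appears to correspond to MARK_DURATION_MS in MythTV's recordedmarkup table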
        if mark.type == 33:
            if debug:
                print('Markup Duration in milliseconds "%s"' % mark.data)
            error = mark.data - duration_msecs
            if error != 0:
                if debug:
                    print('Markup Duration error is "%s"msecs' % error)
                mark.data = duration_msecs
                #rec.bookmark = 0
                #rec.cutlist = 0
                rec.markup.commit()

    if jobid:
        if output_bitrate:
            job.update({'status':job.FINISHED, 'comment':'Transcode Completed @ %dkbps, compressed file by %d%% (clipped %d%%, transcoder compressed %d%%)' % (output_bitrate,int(compressed_pct*100),int(clipped_compress_pct*100),int(actual_compression_ratio*100))})
        else:
            job.update({'status':job.FINISHED, 'comment':'Transcode Completed'})
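
# Note: the runjob() listing above relies on a wrapper() helper, an encode() function and a
# CleanExit sentinel that are defined elsewhere in the script and are not shown here. Below is
# a minimal sketch of what the thread wrapper might look like, assuming it only needs to collect
# the target's return value and to forward a clean-exit request through the queue; the names,
# argument layout and behaviour are assumptions, not the script's actual implementation.

class CleanExit:
    # sentinel placed on the queue to ask the main thread to exit after joining the worker
    pass

def wrapper(func, args, res):
    # run func(*args) in the worker thread and keep its return value in res;
    # if the encode raises, ask the main thread to exit by queueing CleanExit
    try:
        res.append(func(*args))
    except Exception:
        args[3].put(CleanExit)   # args[3] is the Queue passed at the call site (ipq)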
Example #29
        task = System(path=TRANSCODER, db=self.db())
        try:
            outfile = task(infile).strip()
            if not os.path.exists(outfile):
                raise OSError('output file %s not found' % repr(outfile))
        except MythError as e:
            self.log('Transcode failed with output:', LOGLEVEL.ERR,
                     task.stderr)
            sys.exit(task.returncode)
        except OSError as e:
            self.log('Transcode failed to produce an output file:',
                     LOGLEVEL.ERR, repr(e))
            sys.exit(1)

        rec = Recorded((chanid, starttime), db=self.db())
        rec.basename = os.path.basename(outfile)
        os.remove(infile)
        rec.filesize = os.path.getsize(outfile)
        rec.transcoded = 1
        rec.seek.clean()

        if flush_commskip:
            for index,mark in reversed(list(enumerate(rec.markup))):
                if mark.type in (rec.markup.MARK_COMM_START, 
                                 rec.markup.MARK_COMM_END):
                    del rec.markup[index]
            rec.bookmark = 0
            rec.cutlist = 0
            rec.markup.commit()
Example #30
def runjob(jobid=None, chanid=None, starttime=None, tzoffset=None):
    global estimateBitrate
    db = MythDB()

    if jobid:
        job = Job(jobid, db=db)
        chanid = job.chanid
        utcstarttime = job.starttime
    else:
        job=None;
        utcstarttime = datetime.strptime(starttime, "%Y%m%d%H%M%S")
        utcstarttime = utcstarttime + timedelta(hours=tzoffset)

    if debug:
        print 'chanid "%s"' % chanid
        print 'utcstarttime "%s"' % utcstarttime

    rec = Recorded((chanid, utcstarttime), db=db);
    utcstarttime = rec.starttime;
    starttime_datetime = utcstarttime
   
    # reformat 'starttime' for use with mythtranscode/ffmpeg/mythcommflag
    starttime = str(utcstarttime.utcisoformat().replace(u':', '').replace(u' ', '').replace(u'T', '').replace('-', ''))
    if debug:
        print 'mythtv format starttime "%s"' % starttime
    input_filesize = rec.filesize
    
    if rec.commflagged:
        if debug:
            print 'Recording has been scanned to detect commercial breaks.'
        waititer=1
        keepWaiting = True
        while keepWaiting == True:
            keepWaiting=False;
            for index,jobitem in reversed(list(enumerate(db.searchJobs(chanid=chanid, starttime=starttime_datetime)))):
                if jobitem.type == jobitem.COMMFLAG:  # Commercial flagging job
                    if debug:
                        print 'Commercial flagging job detected with status %s' % jobitem.status
                    if jobitem.status == jobitem.RUNNING: # status = RUNNING?
                        job.update({'status':job.PAUSED, 
                                    'comment':'Waited %d secs for the commercial flagging job' % (waititer*POLL_INTERVAL) \
                                     + ' currently running on this recording to complete.'})
                        if debug:
                            print 'Waited %d secs for the commercial flagging job' % (waititer*POLL_INTERVAL) \
                                  + ' currently running on this recording to complete.'
                        time.sleep(POLL_INTERVAL);
                        keepWaiting=True
                        waititer = waititer + 1
                        break
    else:
        if debug:
            print 'Recording has not been scanned to detect/remove commercial breaks.'
        if require_commflagged:
            if jobid:
                job.update({'status':job.RUNNING, 'comment':'Required commercial flagging for this file is not found. '
                            + 'Flagging commercials and cancelling any queued commercial flagging.'})
            # cancel any queued job to flag commercials for this recording and run commercial flagging in this script
            for index,jobitem in reversed(list(enumerate(db.searchJobs(chanid=chanid,starttime=starttime_datetime)))):
                if debug:
                    if index==0:
                        print jobitem.keys()
                    print index,jobitem.id,jobitem.chanid

                if jobitem.type == jobitem.COMMFLAG:  # Commercial flagging job
                    if jobitem.status == jobitem.RUNNING: # status = RUNNING?
                        jobitem.cmds = jobitem.STOP # stop command from the frontend to stop the commercial flagging job
                    #jobitem.setStatus(jobitem.CANCELLED)
                    #jobitem.setComment('Cancelled: Transcode command ran commercial flagging for this recording.')
                    jobitem.update({'status':jobitem.CANCELLED, 
                                    'comment':'A user transcode job ran commercial flagging for'
                                    + ' this recording and cancelled this job.'})
            if debug:
                print 'Flagging Commercials...'
            # Call "mythcommflag --chanid $CHANID --starttime $STARTTIME"
            task = System(path='mythcommflag', db=db)
            try:
                output = task('--chanid "%s"' % chanid,
                              '--starttime "%s"' % starttime,
                              '2> /dev/null')
            except MythError, e:
                # it seems mythcommflag always exits with a decoding error "eno: Unknown error 541478725 (541478725)"
                pass
Example #31
class VIDEO:
    def __init__(self, opts, jobid=None):
        if jobid:
            self.job = Job(jobid)
            self.chanid = self.job.chanid
            self.starttime = self.job.starttime
            self.job.update(status=3)
        else:
            self.job = None
            self.chanid = opts.chanid
            self.starttime = opts.starttime

        self.opts = opts
        self.db = MythDB()
        self.log = MythLog(module='mythvidexport.py', db=self.db)

        # load setting strings
        self.get_format()

        # prep objects
        self.rec = Recorded((self.chanid, self.starttime), db=self.db)
        self.log(MythLog.IMPORTANT, 'Using recording',
                 '%s - %s' % (self.rec.title, self.rec.subtitle))
        self.vid = Video(db=self.db).create({
            'title': '',
            'filename': '',
            'host': gethostname()
        })

        # process data
        self.get_meta()
        self.get_dest()

        # save file
        self.copy()
        if opts.seekdata:
            self.copy_seek()
        if opts.skiplist:
            self.copy_markup(static.MARKUP.MARK_COMM_START,
                             static.MARKUP.MARK_COMM_END)
        if opts.cutlist:
            self.copy_markup(static.MARKUP.MARK_CUT_START,
                             static.MARKUP.MARK_CUT_END)
        self.vid.update()

    def get_format(self):
        host = self.db.gethostname()
        # TV Format
        if self.opts.tformat:
            self.tfmt = self.opts.tformat
        elif self.db.settings[host]['mythvideo.TVexportfmt']:
            self.tfmt = self.db.settings[host]['mythvideo.TVexportfmt']
        else:
            self.tfmt = 'Television/%TITLE%/Season %SEASON%/'+\
                            '%TITLE% - S%SEASON%E%EPISODEPAD% - %SUBTITLE%'

        # Movie Format
        if self.opts.mformat:
            self.mfmt = self.opts.mformat
        elif self.db.settings[host]['mythvideo.MOVIEexportfmt']:
            self.mfmt = self.db.settings[host]['mythvideo.MOVIEexportfmt']
        else:
            self.mfmt = 'Movies/%TITLE%'

        # Generic Format
        if self.opts.gformat:
            self.gfmt = self.opts.gformat
        elif self.db.settings[host]['mythvideo.GENERICexportfmt']:
            self.gfmt = self.db.settings[host]['mythvideo.GENERICexportfmt']
        else:
            self.gfmt = 'Videos/%TITLE%'

    def get_meta(self):
        self.vid.hostname = self.db.gethostname()

        if self.rec.subtitle:  # subtitle exists, assume tv show
            self.type = 'TV'
            self.log(self.log.IMPORTANT, 'Attempting TV export.')
            if self.opts.listingonly:
                self.log(self.log.IMPORTANT, 'Forcing listing data only.')
                self.get_generic(False)
                return
            grab = VideoGrabber(self.type)
            match = grab.sortedSearch(self.rec.title, self.rec.subtitle)
        else:  # assume movie
            self.type = 'MOVIE'
            self.log(self.log.IMPORTANT, 'Attempting Movie export.')
            if self.opts.listingonly:
                self.log(self.log.IMPORTANT, 'Forcing listing data only.')
                self.get_generic(False)
                return
            grab = VideoGrabber(self.type)
            match = grab.sortedSearch(self.rec.title)

        if len(match) == 0:
            # no match found
            self.log(self.log.IMPORTANT, 'Falling back to generic export.')
            self.get_generic()
        elif (len(match) > 1) & (match[0].levenshtein > 0):
            # multiple matches found, and closest is not exact
            self.vid.delete()
            raise MythError('Multiple metadata matches found: '\
                                                   +self.rec.title)
        else:
            self.log(self.log.IMPORTANT, 'Importing content from',
                     match[0].inetref)
            self.vid.importMetadata(grab.grabInetref(match[0]))

    def get_generic(self, name_as_generic=True):
        self.vid.title = self.rec.title
        if self.rec.subtitle:
            self.vid.subtitle = self.rec.subtitle
        if self.rec.description:
            self.vid.plot = self.rec.description
        if self.rec.originalairdate:
            self.vid.year = self.rec.originalairdate.year
            self.vid.releasedate = self.rec.originalairdate
        lsec = (self.rec.endtime - self.rec.starttime).seconds
        self.vid.length = str(lsec / 60)
        for member in self.rec.cast:
            if member.role == 'director':
                self.vid.director = member.name
            elif member.role == 'actor':
                self.vid.cast.append(member.name)
        if name_as_generic:
            self.type = 'GENERIC'

    def get_dest(self):
        if self.type == 'TV':
            self.vid.filename = self.process_fmt(self.tfmt)
        elif self.type == 'MOVIE':
            self.vid.filename = self.process_fmt(self.mfmt)
        elif self.type == 'GENERIC':
            self.vid.filename = self.process_fmt(self.gfmt)

    def process_fmt(self, fmt):
        # replace fields from viddata
        #print self.vid.data
        ext = '.' + self.rec.basename.rsplit('.', 1)[1]
        rep = (('%TITLE%', 'title', '%s'), ('%SUBTITLE%', 'subtitle', '%s'),
               ('%SEASON%', 'season', '%d'), ('%SEASONPAD%', 'season', '%02d'),
               ('%EPISODE%', 'episode', '%d'), ('%EPISODEPAD%', 'episode',
                                                '%02d'),
               ('%YEAR%', 'year', '%s'), ('%DIRECTOR%', 'director', '%s'))
        for tag, data, format in rep:
            if self.vid[data]:
                fmt = fmt.replace(tag, format % self.vid[data])
            else:
                fmt = fmt.replace(tag, '')

        # replace fields from program data
        rep = (('%HOSTNAME', 'hostname', '%s'), ('%STORAGEGROUP%',
                                                 'storagegroup', '%s'))
        for tag, data, format in rep:
            data = getattr(self.rec, data)
            fmt = fmt.replace(tag, format % data)

#       fmt = fmt.replace('%CARDID%',self.rec.cardid)
#       fmt = fmt.replace('%CARDNAME%',self.rec.cardid)
#       fmt = fmt.replace('%SOURCEID%',self.rec.cardid)
#       fmt = fmt.replace('%SOURCENAME%',self.rec.cardid)
#       fmt = fmt.replace('%CHANNUM%',self.rec.channum)
#       fmt = fmt.replace('%CHANNAME%',self.rec.cardid)

        if len(self.vid.genre):
            fmt = fmt.replace('%GENRE%', self.vid.genre[0].genre)
        else:
            fmt = fmt.replace('%GENRE%', '')


#       if len(self.country):
#           fmt = fmt.replace('%COUNTRY%',self.country[0])
#       else:
#           fmt = fmt.replace('%COUNTRY%','')
        return fmt + ext

    def copy(self):
        stime = time.time()
        srcsize = self.rec.filesize
        htime = [stime, stime, stime, stime]

        self.log.log(MythLog.IMPORTANT|MythLog.FILE, "Copying myth://%s@%s/%s"\
               % (self.rec.storagegroup, self.rec.hostname, self.rec.basename)\
                                                    +" to myth://Videos@%s/%s"\
                                          % (self.vid.host, self.vid.filename))
        srcfp = self.rec.open('r')
        dstfp = self.vid.open('w')

        if self.job:
            self.job.setStatus(4)
        tsize = 2**24
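        # copy in 16 MiB chunks; the loop ends after the final, shorter (possibly empty) read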
        while tsize == 2**24:
            tsize = min(tsize, srcsize - dstfp.tell())
            dstfp.write(srcfp.read(tsize))
            htime.append(time.time())
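            # throughput estimate over the last four chunks: 4*tsize bytes divided by the
            # elapsed time since the oldest timestamp in the sliding window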
            rate = float(tsize * 4) / (time.time() - htime.pop(0))
            remt = (srcsize - dstfp.tell()) / rate
            if self.job:
                self.job.setComment("%02d%% complete - %d seconds remaining" %\
                            (dstfp.tell()*100/srcsize, remt))
        srcfp.close()
        dstfp.close()

        self.vid.hash = self.vid.getHash()

        self.log(MythLog.IMPORTANT | MythLog.FILE, "Transfer Complete",
                 "%d seconds elapsed" % int(time.time() - stime))
        if self.job:
            self.job.setComment("Complete - %d seconds elapsed" % \
                            (int(time.time()-stime)))
            self.job.setStatus(256)

    def copy_seek(self):
        for seek in self.rec.seek:
            self.vid.markup.add(seek.mark, seek.offset, seek.type)

    def copy_markup(self, start, stop):
        for mark in self.rec.markup:
            if mark.type in (start, stop):
                self.vid.markup.add(mark.mark, 0, mark.type)
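
# process_fmt() above builds the export path by expanding %TAG% placeholders from the video and
# recording metadata. A standalone illustration of the same substitution idea on hypothetical
# values (not part of the class; names and sample data are made up for clarity only):
def expand_template(fmt, values):
    # values maps tag -> (data, printf-style format); missing/empty data collapses to ''
    for tag, (data, spec) in values.items():
        fmt = fmt.replace(tag, spec % data if data else '')
    return fmt

# hypothetical recording: season 2, episode 5 of a show titled 'Nova'
sample = expand_template(
    'Television/%TITLE%/Season %SEASON%/%TITLE% - S%SEASON%E%EPISODEPAD% - %SUBTITLE%',
    {'%TITLE%': ('Nova', '%s'),
     '%SUBTITLE%': ('Oceans', '%s'),
     '%SEASON%': (2, '%d'),
     '%EPISODEPAD%': (5, '%02d')})
# sample == 'Television/Nova/Season 2/Nova - S2E05 - Oceans'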
Example #32
def runjob(jobid=None,
           chanid=None,
           starttime=None,
           tzoffset=None,
           maxWidth=maxWidth,
           maxHeight=maxHeight,
           sdonly=0,
           burncc=0,
           usemkv=0,
           overwrite=1):
    global estimateBitrate
    db = MythDB()

    try:
        if jobid:
            job = Job(jobid, db=db)
            chanid = job.chanid
            utcstarttime = job.starttime
        else:
            job = None
            utcstarttime = datetime.strptime(starttime, "%Y%m%d%H%M%S")
            utcstarttime = utcstarttime + timedelta(hours=tzoffset)

        if debug:
            print 'chanid "%s"' % chanid
            print 'utcstarttime "%s"' % utcstarttime

        rec = Recorded((chanid, utcstarttime), db=db)

        utcstarttime = rec.starttime
        starttime_datetime = utcstarttime

        # reformat 'starttime' for use with mythtranscode/HandBrakeCLI/mythcommflag
        starttime = str(utcstarttime.utcisoformat().replace(u':', '').replace(
            u' ', '').replace(u'T', '').replace('-', ''))
        if debug:
            print 'mythtv format starttime "%s"' % starttime
        input_filesize = rec.filesize

        if rec.commflagged:
            if debug:
                print 'Recording has been scanned to detect commercial breaks.'
            waititer = 1
            keepWaiting = True
            while keepWaiting == True:
                keepWaiting = False
                for index, jobitem in reversed(
                        list(
                            enumerate(
                                db.searchJobs(chanid=chanid,
                                              starttime=starttime_datetime)))):
                    if jobitem.type == jobitem.COMMFLAG:  # Commercial flagging job
                        if debug:
                            print 'Commercial flagging job detected with status %s' % jobitem.status
                        if jobitem.status == jobitem.RUNNING:  # status = RUNNING?
                            job.update({'status':job.PAUSED,
                                        'comment':'Waited %d secs for the commercial flagging job' % (waititer*POLL_INTERVAL) \
                                         + ' currently running on this recording to complete.'})
                            if debug:
                                print 'Waited %d secs for the commercial flagging job' % (waititer*POLL_INTERVAL) \
                                      + ' currently running on this recording to complete.'
                            time.sleep(POLL_INTERVAL)
                            keepWaiting = True
                            waititer = waititer + 1
                            break
        else:
            if debug:
                print 'Recording has not been scanned to detect/remove commercial breaks.'
            if require_commflagged:
                if jobid:
                    job.update({
                        'status':
                        job.RUNNING,
                        'comment':
                        'Required commercial flagging for this file is not found. '
                        +
                        'Flagging commercials and cancelling any queued commercial flagging.'
                    })
                # cancel any queued job to flag commercials for this recording and run commercial flagging in this script
                for index, jobitem in reversed(
                        list(
                            enumerate(
                                db.searchJobs(chanid=chanid,
                                              starttime=starttime_datetime)))):
                    if debug:
                        if index == 0:
                            print jobitem.keys()
                        print index, jobitem.id, jobitem.chanid

                    if jobitem.type == jobitem.COMMFLAG:  # Commercial flagging job
                        if jobitem.status == jobitem.RUNNING:  # status = RUNNING?
                            jobitem.cmds = jobitem.STOP  # stop command from the frontend to stop the commercial flagging job
                        #jobitem.setStatus(jobitem.CANCELLED)
                        #jobitem.setComment('Cancelled: Transcode command ran commercial flagging for this recording.')
                        jobitem.update({
                            'status':
                            jobitem.CANCELLED,
                            'comment':
                            'A user transcode job ran commercial flagging for'
                            + ' this recording and cancelled this job.'
                        })
                if debug:
                    print 'Flagging Commercials...'
                # Call "mythcommflag --chanid $CHANID --starttime $STARTTIME"
                task = System(path='mythcommflag', db=db)
                try:
                    output = task('--chanid "%s"' % chanid,
                                  '--starttime "%s"' % starttime,
                                  '2> /dev/null')
                except MythError, e:
                    # it seems mythcommflag always exits with a decoding error "eno: Unknown error 541478725 (541478725)"
                    pass
                    #print 'Command failed with output:\n%s' % e.stderr
                    #if jobid:
                    #    job.update({'status':304, 'comment':'Flagging commercials failed'})
                    #sys.exit(e.retcode)

        sg = findfile('/' + rec.basename, rec.storagegroup, db=db)
        if sg is None:
            print 'Local access to recording not found.'
            sys.exit(1)

        infile = os.path.join(sg.dirname, rec.basename)
        #TODO: set overWrite to 0 if infile is m4v or mkv (already converted)
        #tmpfile = '%s.tmp' % infile.rsplit('.',1)[0]
        outtitle = rec.title.replace("&", "and")
        outtitle = re.sub('[^A-Za-z0-9 ]+', '', outtitle)
        filetype = 'm4v'
        #DEBUG CODE TO FIND OBJECT STRUCT:
        #print '{}'.format(dir(rec.getProgram()))
        #print '{}'.format(rec.getProgram().year)
        if usemkv == 1:
            filetype = 'mkv'
        #print '!{}!'.format(rec.programid[0:2])
        if rec.season > 0 and rec.episode > 0:  #if there are seasons and episode numbers in the recording data
            outtitle = '{0:s} S{1:d} E{2:02d}'.format(outtitle, rec.season,
                                                      rec.episode)
        elif rec.programid[0:2] == 'MV' and str(rec.getProgram().year).isdigit(
        ):  #if it's a movie and has an original air date for when it came out
            outtitle = '{} ({})'.format(outtitle, rec.getProgram().year)
        elif rec.programid[
                0:
                2] == 'MV' and rec.originalairdate != None and rec.originalairdate > datetime.date(
                    datetime(1, 1, 1, 0, 0)
                ):  #if it's a movie and has an original air date for when it came out
            outtitle = '{} ({})'.format(outtitle, rec.originalairdate.year)
        elif 'Sports' in rec.category:  #if it's sports
            outtitle = '{}-{}-{}'.format(
                outtitle, re.sub('[^A-Za-z0-9 ]+', '', rec.subtitle),
                str(rec.starttime.strftime("%Y%m%d")))
        elif rec.programid[0:2] == 'SH' and (' News ' in rec.title
                                             or rec.category
                                             == 'News'):  #if it's a news show
            outtitle = '{}-{}'.format(outtitle,
                                      str(rec.starttime.strftime("%Y%m%d")))
        elif rec.originalairdate != None and rec.originalairdate > datetime.date(
                datetime(1, 1, 1, 0, 0)):  #if it has an original air date
            outtitle = '{} {}'.format(
                outtitle, str(rec.originalairdate.strftime("%Y%m%d")))
        else:
            outtitle = '{} {}'.format(outtitle,
                                      str(rec.starttime.strftime("%Y%m%d")))
        outtitle = '{}.{}'.format(outtitle, filetype)

        outfile = os.path.join(sg.dirname, outtitle)
        tmpfile = '{}.{}'.format(
            outfile.rsplit('.', 1)[0],
            infile.rsplit('.', 1)[1])
        if tmpfile == infile:
            tmpfile = '{}.tmp'.format(infile.rsplit('.', 1)[0])

        if (overwrite == 0):
            # If not overwriting the file, use the export folder
            outfile = os.path.join(exportFolder, outtitle)
            if debug:
                print 'overwrite is 0. outfile "{}"'.format(outfile)
        if os.path.isfile(outfile) or infile == outfile:
            # If outfile exists already, create a new name for the file.
            outfile = '{}-{}.{}'.format(
                outfile.rsplit('.', 1)[0],
                str(rec.starttime.strftime("%Y%m%d")), filetype)
        if os.path.isfile(tmpfile):
            # If tmpfile already exists, create a new name for the tmpfile
            tmpfile = '{}-{}.tmp'.format(
                outfile.rsplit('.', 1)[0],
                str(rec.starttime.strftime("%Y%m%d")))
        if os.path.isfile(tmpfile):
            # If tmp exists already, create a new name for the file.
            outfile = '{}-{}.tmp'.format(
                tmpfile.rsplit('.', 1)[0],
                str(rec.starttime.strftime("%Y%m%d")))
            if debug:
                print 'tmp exists. outfile "{}"'.format(outfile)
        if debug:
            print 'infile  "{}"'.format(infile)
            print 'tmpfile "{}"'.format(tmpfile)
            print 'outfile "{}"'.format(outfile)

        #add_metadata(db, jobid, debug, job, rec, filetype, tmpfile)

        clipped_bytes = 0
        # If selected, create a cutlist to remove commercials via mythtranscode by running:
        # mythutil --gencutlist --chanid $CHANID --starttime $STARTTIME
        if generate_commcutlist:
            if jobid:
                job.update({
                    'status':
                    job.RUNNING,
                    'comment':
                    'Generating Cutlist for commercial removal'
                })
            task = System(path='mythutil', db=db)
            try:
                output = task('--gencutlist', '--chanid "%s"' % chanid,
                              '--starttime "%s"' % starttime)
            except MythError, e:
                print 'Command "mythutil --gencutlist" failed with output:\n%s' % e.stderr
                if jobid:
                    job.update({
                        'status':
                        job.ERRORED,
                        'comment':
                        'Generation of commercial Cutlist failed'
                    })
                sys.exit(e.retcode)
Example #33
def runjob(jobid=None, chanid=None, starttime=None, tzoffset=None):
    global estimateBitrate
    db = MythDB()

    if jobid:
        job = Job(jobid, db=db)
        chanid = job.chanid
        utcstarttime = job.starttime
    else:
        print 'Job id not found.'
        sys.exit(1)

    if debug:
        print 'chanid "%s"' % chanid
        print 'utcstarttime "%s"' % utcstarttime

    rec = Recorded((chanid, utcstarttime), db=db)
    utcstarttime = rec.starttime
    starttime_datetime = utcstarttime

    # reformat 'starttime' for use with mythtranscode/ffmpeg/mythcommflag
    starttime = str(utcstarttime.utcisoformat().replace(u':', '').replace(
        u' ', '').replace(u'T', '').replace('-', ''))
    if debug:
        print 'mythtv format starttime "%s"' % starttime
    input_filesize = rec.filesize
    #sys.exit(e.retcode)

    print 'Transcoding. ' + rec.basename + " " + rec.storagegroup

    sg = findfile('/' + rec.basename, rec.storagegroup, db=db)
    if sg is None:
        print 'Local access to recording not found.'
        sys.exit(1)

    infile = os.path.join(sg.dirname, rec.basename)
    outfile = '%s.mp4' % os.path.join(sg.dirname, "proxy",
                                      rec.basename).rsplit('.', 1)[0]
    # outfile = '%s.mp4' % infile.rsplit('.',1)[0]

    framerate = 59.94

    clipped_bytes = 0

    duration_secs, e = get_duration(db, rec, transcoder, infile)
    abitrate_param = abitrate_param_HD  # preserve 5.1 audio

    if debug:
        print 'Audio bitrate parameter "%s"' % abitrate_param

    # Transcode to mp4
    #if jobid:
    #     job.update({'status':4, 'comment':'Transcoding to mp4'})

    # ffmpeg output is redirected to the temporary file tmpstatusfile and
    # a second thread continuously reads this file while
    # the transcode is in-process. see while loop below for the monitoring thread
    tf = tempfile.NamedTemporaryFile()
    tmpstatusfile = tf.name
    # tmpstatusfile = '/tmp/ffmpeg-transcode.txt'
    if debug:
        print 'Using temporary file "%s" for ffmpeg status updates.' % tmpstatusfile
    res = []
    # create a thread to perform the encode
    ipq = Queue.Queue()
    t = threading.Thread(target=wrapper,
                         args=(encode, (
                             jobid,
                             db,
                             job,
                             ipq,
                             preset,
                             vbitrate_param,
                             abitrate_param,
                             infile,
                             outfile,
                             tmpstatusfile,
                         ), res))
    t.start()
    # wait for ffmpeg to open the file and emit its initialization information
    # before we start the monitoring process
    time.sleep(1)
    # open the temporary file holding the ffmpeg output text and process it to generate status updates
    hangiter = 0
    with open(tmpstatusfile) as f:
        # read all the opening ffmpeg status/analysis lines
        lines = f.readlines()
        # set initial progress to -1
        prev_progress = -1
        framenum = 0
        fps = 1.0
        while t.is_alive():
            # read all output since last readline() call
            lines = f.readlines()
            if len(lines) > 0:
                # every ffmpeg output status line ends with a carriage return '\r'
                # split the last read line at these locations
                lines = lines[-1].split('\r')
                hangiter = 0
                if len(lines) > 1 and lines[-2].startswith('frame'):
                    # since typical reads will have the last line ending with \r the last status
                    # message is at index=[-2] start processing this line
                    # replace multiple spaces with one space
                    lines[-2] = re.sub(' +', ' ', lines[-2])
                    # remove any spaces after equals signs
                    lines[-2] = re.sub('= +', '=', lines[-2])
                    # split the fields at the spaces the first two fields for typical
                    # status lines will be framenum=XXXX and fps=YYYY parse the values
                    values = lines[-2].split(' ')
                    if len(values) > 1:
                        if debug:
                            print 'values %s' % values
                        prev_framenum = framenum
                        prev_fps = fps
                        try:
                            # framenum = current frame number being encoded
                            framenum = int(values[0].split('=')[1])
                            # fps = frames per second for the encoder
                            fps = float(values[1].split('=')[1])
                        except ValueError, e:
                            print 'ffmpeg status parse exception: "%s"' % e
                            framenum = prev_framenum
                            fps = prev_fps
                            pass
                    # progress = 0-100 represent percent complete for the transcode
                    progress = int(
                        (100 * framenum) / (duration_secs * framerate))
                    # eta_secs = estimated number of seconds until transcoding is complete
                    eta_secs = int(
                        (float(duration_secs * framerate) - framenum) / fps)
                    # pct_realtime = how many real seconds it takes to encode 1 second of video
                    pct_realtime = float(fps / framerate)
                    if debug:
                        print 'framenum = %d fps = %.2f' % (framenum, fps)
                    if progress != prev_progress:
                        if debug:
                            print 'Progress %d%% encoding %.1f frames per second ETA %d mins' \
                                  % ( progress, fps, float(eta_secs)/60)
                        if jobid:
                            progress_str = 'Transcoding to mp4 %d%% complete ETA %d mins fps=%.1f.' \
                                  % ( progress, float(eta_secs)/60, fps)
                            job.update({
                                'status': job.RUNNING,
                                'comment': progress_str
                            })
                        prev_progress = progress
                elif len(lines) > 1:
                    if debug:
                        print 'Read pathological output %s' % lines[-2]
            else:
                if debug:
                    print 'Read no lines of ffmpeg output for %s secs. Possible hang?' % (
                        POLL_INTERVAL * hangiter)
                hangiter = hangiter + 1
                if jobid:
                    progress_str = 'Read no lines of ffmpeg output for %s secs. Possible hang?' % (
                        POLL_INTERVAL * hangiter)
                    job.update({
                        'status': job.RUNNING,
                        'comment': progress_str
                    })
            time.sleep(POLL_INTERVAL)
        if debug:
            print 'res = "%s"' % res
Example #34
class VIDEO:
    def __init__(self, opts, jobid=None):
        if jobid:
            self.job = Job(jobid)
            self.chanid = self.job.chanid
            self.starttime = self.job.starttime
            self.job.update(status=3)
        else:
            self.job = None
            self.chanid = opts.chanid
            self.starttime = opts.starttime

        self.opts = opts
        self.db = MythDB()
        self.log = MythLog(module='mythvidexport.py', db=self.db)

        # load setting strings
        self.get_format()

        # prep objects
        self.rec = Recorded((self.chanid, self.starttime), db=self.db)
        self.log(MythLog.IMPORTANT, 'Using recording',
                        '%s - %s' % (self.rec.title, self.rec.subtitle))
        self.vid = Video(db=self.db).create({'title':'', 'filename':'', 'host':gethostname()})

        # process data
        self.get_meta()
        self.get_dest()
        # kludgy fix for issue with altered filenames in the bindings
        self.vid.markup._refdat = (self.vid.filename,)

        # save file
        self.copy()
        if opts.seekdata:
            self.copy_seek()
        if opts.skiplist:
            self.copy_markup(static.MARKUP.MARK_COMM_START,
                             static.MARKUP.MARK_COMM_END)
        if opts.cutlist:
            self.copy_markup(static.MARKUP.MARK_CUT_START,
                             static.MARKUP.MARK_CUT_END)
        self.vid.update()

    def get_format(self):
        host = self.db.gethostname()
        # TV Format
        if self.opts.tformat:
            self.tfmt = self.opts.tformat
        elif self.db.settings[host]['mythvideo.TVexportfmt']:
            self.tfmt = self.db.settings[host]['mythvideo.TVexportfmt']
        else:
            self.tfmt = 'Television/%TITLE%/Season %SEASON%/' + \
                            '%TITLE% - S%SEASON%E%EPISODEPAD% - %SUBTITLE%'

        # Movie Format
        if self.opts.mformat:
            self.mfmt = self.opts.mformat
        elif self.db.settings[host]['mythvideo.MOVIEexportfmt']:
            self.mfmt = self.db.settings[host]['mythvideo.MOVIEexportfmt']
        else:
            self.mfmt = 'Movies/%TITLE%'

        # Generic Format
        if self.opts.gformat:
            self.gfmt = self.opts.gformat
        elif self.db.settings[host]['mythvideo.GENERICexportfmt']:
            self.gfmt = self.db.settings[host]['mythvideo.GENERICexportfmt']
        else:
            self.gfmt = 'Videos/%TITLE%'

    def get_meta(self):
        self.vid.hostname = self.db.gethostname()

        if self.rec.subtitle:  # subtitle exists, assume tv show
            self.type = 'TV'
            self.log(self.log.IMPORTANT, 'Attempting TV export.')
            if self.opts.listingonly:
                self.log(self.log.IMPORTANT, 'Forcing listing data only.')
                self.get_generic(False)
                return
            grab = VideoGrabber(self.type)
            match = grab.sortedSearch(self.rec.title, self.rec.subtitle)
        else:  # assume movie
            self.type = 'MOVIE'
            self.log(self.log.IMPORTANT, 'Attempting Movie export.')
            if self.opts.listingonly:
                self.log(self.log.IMPORTANT, 'Forcing listing data only.')
                self.get_generic(False)
                return
            grab = VideoGrabber(self.type)
            match = grab.sortedSearch(self.rec.title)

        if len(match) == 0:
            # no match found
            self.log(self.log.IMPORTANT, 'Falling back to generic export.')
            self.get_generic()
        elif (len(match) > 1) & (match[0].levenshtein > 0):
            # multiple matches found, and closest is not exact
            self.vid.delete()
            raise MythError('Multiple metadata matches found: '\
                                                   + self.rec.title)
        else:
            self.log(self.log.IMPORTANT, 'Importing content from', match[0].inetref)
            self.vid.importMetadata(grab.grabInetref(match[0]))

    def get_generic(self, name_as_generic=True):
        self.vid.title = self.rec.title
        if self.rec.subtitle:
            self.vid.subtitle = self.rec.subtitle
        if self.rec.description:
            self.vid.plot = self.rec.description
        if self.rec.originalairdate:
            self.vid.year = self.rec.originalairdate.year
            self.vid.releasedate = self.rec.originalairdate
        lsec = (self.rec.endtime - self.rec.starttime).seconds
        self.vid.length = str(lsec / 60)
        for member in self.rec.cast:
            if member.role == 'director':
                self.vid.director = member.name
            elif member.role == 'actor':
                self.vid.cast.append(member.name)
        if name_as_generic:
            self.type = 'GENERIC'

    def get_dest(self):
        if self.type == 'TV':
            self.vid.filename = self.process_fmt(self.tfmt)
        elif self.type == 'MOVIE':
            self.vid.filename = self.process_fmt(self.mfmt)
        elif self.type == 'GENERIC':
            self.vid.filename = self.process_fmt(self.gfmt)

    def process_fmt(self, fmt):
        # replace fields from viddata
        # print self.vid.data
        ext = '.' + self.rec.basename.rsplit('.', 1)[1]
        rep = (('%TITLE%', 'title', '%s'), ('%SUBTITLE%', 'subtitle', '%s'),
            ('%SEASON%', 'season', '%d'), ('%SEASONPAD%', 'season', '%02d'),
            ('%EPISODE%', 'episode', '%d'), ('%EPISODEPAD%', 'episode', '%02d'),
            ('%YEAR%', 'year', '%s'), ('%DIRECTOR%', 'director', '%s'))
        for tag, data, format in rep:
            if self.vid[data]:
                fmt = fmt.replace(tag, format % self.vid[data])
            else:
                fmt = fmt.replace(tag, '')

        # replace fields from program data
        rep = (('%HOSTNAME', 'hostname', '%s'), ('%STORAGEGROUP%', 'storagegroup', '%s'))
        for tag, data, format in rep:
            data = getattr(self.rec, data)
            fmt = fmt.replace(tag, format % data)

#       fmt = fmt.replace('%CARDID%',self.rec.cardid)
#       fmt = fmt.replace('%CARDNAME%',self.rec.cardid)
#       fmt = fmt.replace('%SOURCEID%',self.rec.cardid)
#       fmt = fmt.replace('%SOURCENAME%',self.rec.cardid)
#       fmt = fmt.replace('%CHANNUM%',self.rec.channum)
#       fmt = fmt.replace('%CHANNAME%',self.rec.cardid)

        if len(self.vid.genre):
            fmt = fmt.replace('%GENRE%', self.vid.genre[0].genre)
        else:
            fmt = fmt.replace('%GENRE%', '')
#       if len(self.country):
#           fmt = fmt.replace('%COUNTRY%',self.country[0])
#       else:
#           fmt = fmt.replace('%COUNTRY%','')
        return fmt + ext

    def copy(self):
        stime = time.time()
        srcsize = self.rec.filesize
        htime = [stime, stime, stime, stime]

        self.log.log(MythLog.IMPORTANT | MythLog.FILE, "Copying myth://%s@%s/%s"\
               % (self.rec.storagegroup, self.rec.hostname, self.rec.basename)\
                                                    + " to myth://Videos@%s/%s"\
                                          % (self.vid.host, self.vid.filename))
        srcfp = self.rec.open('r')
        dstfp = self.vid.open('w')

        if self.job:
            self.job.setStatus(4)
        tsize = 2 ** 24
        while tsize == 2 ** 24:
            tsize = min(tsize, srcsize - dstfp.tell())
            dstfp.write(srcfp.read(tsize))
            htime.append(time.time())
            rate = float(tsize * 4) / (time.time() - htime.pop(0))
            remt = (srcsize - dstfp.tell()) / rate
            if self.job:
                self.job.setComment("%02d%% complete - %d seconds remaining" % \
                            (dstfp.tell() * 100 / srcsize, remt))
        srcfp.close()
        dstfp.close()

        self.vid.hash = self.vid.getHash()

        self.log(MythLog.IMPORTANT | MythLog.FILE, "Transfer Complete",
                            "%d seconds elapsed" % int(time.time() - stime))
        if self.job:
            self.job.setComment("Complete - %d seconds elapsed" % \
                            (int(time.time() - stime)))
            self.job.setStatus(256)

    def copy_seek(self):
        for seek in self.rec.seek:
            self.vid.markup.add(seek.mark, seek.offset, seek.type)

    def copy_markup(self, start, stop):
        for mark in self.rec.markup:
            if mark.type in (start, stop):
                self.vid.markup.add(mark.mark, 0, mark.type)
Example #35
class VIDEO:
    def __init__(self, opts, jobid=None):

        # Setup for the job to run
        if jobid:
            self.thisJob = Job(jobid)
            self.chanID = self.thisJob.chanid
            self.startTime = self.thisJob.starttime
            self.thisJob.update(status=Job.STARTING)

        # If no job ID given, must be a command line run
        else:
            self.thisJob = jobid
            self.chanID = opts.chanid
            self.startTime = opts.startdate + " " + opts.starttime + opts.offset
        self.opts = opts
        self.type = "none"
        self.db = MythDB()
        self.log = MythLog(module='Myth-Rec-to-Vid.py', db=self.db)

        # Capture the backend host name
        self.host = self.db.gethostname()

        # prep objects
        self.rec = Recorded((self.chanID, self.startTime), db=self.db)
        self.log(
            MythLog.GENERAL, MythLog.INFO, 'Using recording',
            '%s - %s' % (self.rec.title.encode('utf-8'),
                         self.rec.subtitle.encode('utf-8')))

        self.vid = Video(db=self.db).create({
            'title': '',
            'filename': '',
            'host': self.host
        })

        self.bend = MythBE(db=self.db)

    def check_hash(self):
        self.log(self.log.GENERAL, self.log.INFO,
                 'Performing copy validation.')
        srchash = self.bend.getHash(self.rec.basename, self.rec.storagegroup)
        dsthash = self.bend.getHash(self.vid.filename, 'Videos')
        if srchash != dsthash:
            return False
        else:
            return True

    def copy(self):
        stime = time.time()
        srcsize = self.rec.filesize
        htime = [stime, stime, stime, stime]

        self.log(MythLog.GENERAL|MythLog.FILE, MythLog.INFO, "Copying myth://%s@%s/%s"\
               % (self.rec.storagegroup, self.rec.hostname, self.rec.basename)\
                                                    +" to myth://Videos@%s/%s"\
                                          % (self.host, self.vid.filename))

        srcfp = self.rec.open('r')
        dstfp = self.vid.open('w')

        if self.thisJob:
            self.set_job_status(Job.RUNNING)
        tsize = 2**24
        while tsize == 2**24:
            tsize = min(tsize, srcsize - dstfp.tell())
            dstfp.write(srcfp.read(tsize))
            htime.append(time.time())
            rate = float(tsize * 4) / (time.time() - htime.pop(0))
            remt = (srcsize - dstfp.tell()) / rate
            if self.thisJob:
                self.thisJob.setComment("%02d%% complete - %d seconds remaining" %\
                                      (dstfp.tell()*100/srcsize, remt))
        srcfp.close()
        dstfp.close()

        self.vid.hash = self.vid.getHash()

        self.log(MythLog.GENERAL | MythLog.FILE, MythLog.INFO,
                 "Transfer Complete",
                 "%d seconds elapsed" % int(time.time() - stime))

        if self.thisJob:
            self.thisJob.setComment("Complete - %d seconds elapsed" % \
               (int(time.time()-stime)))

    def copy_markup(self, start, stop):
        for mark in self.rec.markup:
            if mark.type in (start, stop):
                self.vid.markup.add(mark.mark, 0, mark.type)

    def copy_seek(self):
        for seek in self.rec.seek:
            self.vid.markup.add(seek.mark, seek.offset, seek.type)

    def delete_vid(self):
        self.vid.delete()

    def delete_rec(self):
        self.rec.delete()

    def dup_check(self):
        self.log(MythLog.GENERAL, MythLog.INFO, 'Processing new file name ',
                 '%s' % (self.vid.filename))
        self.log(
            MythLog.GENERAL, MythLog.INFO, 'Checking for duplication of ',
            '%s - %s' % (self.rec.title.encode('utf-8'),
                         self.rec.subtitle.encode('utf-8')))
        if self.bend.fileExists(self.vid.filename, 'Videos'):
            self.log(MythLog.GENERAL, MythLog.INFO,
                     'Recording already exists in Myth Videos')
            if self.thisJob:
                self.thisJob.setComment(
                    "Action would result in duplicate entry")
            return True

        else:
            self.log(
                MythLog.GENERAL, MythLog.INFO, 'No duplication found for ',
                '%s - %s' % (self.rec.title.encode('utf-8'),
                             self.rec.subtitle.encode('utf-8')))
            return False

    def get_dest(self):
        if self.type == 'TV':
            self.vid.filename = self.process_fmt(TVFMT)
        elif self.type == 'MOVIE':
            self.vid.filename = self.process_fmt(MVFMT)

        self.vid.markup._refdat = (self.vid.filename, )

    def get_meta(self):
        import_info = 'Listing only MetaData import complete'
        metadata = self.rec.exportMetadata()
        yrInfo = self.rec.getProgram()
        metadata['year'] = yrInfo.get('year')
        self.vid.importMetadata(metadata)
        if self.type == 'MOVIE':
            grab = VideoGrabber('Movie')
            results = grab.sortedSearch(self.rec.title)
            if len(results) > 0:
                for i in results:
                    if i.year == yrInfo.get(
                            'year') and i.title == self.rec.get('title'):
                        self.vid.importMetadata(i)
                        match = grab.grabInetref(i.get('inetref'))
                        length = len(match.people)
                        for p in range(length - 2):
                            self.vid.cast.add(match.people[p].get('name'))
                        self.vid.director = match.people[length -
                                                         1].get('name')
                        import_info = 'Full MetaData Import complete'
        else:
            grab = VideoGrabber('TV')
            results = grab.sortedSearch(self.rec.title, self.rec.subtitle)
            if len(results) > 0:
                for i in results:
                    if i.title == self.rec.get(
                            'title') and i.subtitle == self.rec.get(
                                'subtitle'):
                        self.vid.importMetadata(i)
                        match = grab.grabInetref(grab.grabInetref(i.get('inetref'), \
                                season=i.get('season'),episode=i.get('episode')))
                        length = len(match.people)
                        for p in range(length - 2):
                            self.vid.cast.add(match.people[p].get('name'))
                        self.vid.director = match.people[length -
                                                         1].get('name')
                        import_info = 'Full MetaData Import complete'

        self.vid.category = self.rec.get('category')

        self.log(self.log.GENERAL, self.log.INFO, import_info)

    def get_type(self):
        if self.rec.seriesid is not None and self.rec.programid[:2] != 'MV':
            self.type = 'TV'
            self.log(self.log.GENERAL, self.log.INFO,
                     'Performing TV type migration.')
        else:
            self.type = 'MOVIE'
            self.log(self.log.GENERAL, self.log.INFO,
                     'Performing Movie type migration.')

    def process_fmt(self, fmt):
        # replace fields from viddata

        ext = '.' + self.rec.basename.rsplit('.', 1)[1]
        rep = (('%TITLE%', 'title', '%s'), ('%SUBTITLE%', 'subtitle', '%s'),
               ('%SEASON%', 'season', '%d'), ('%SEASONPAD%', 'season', '%02d'),
               ('%EPISODE%', 'episode', '%d'), ('%EPISODEPAD%', 'episode',
                                                '%02d'),
               ('%YEAR%', 'year', '%s'), ('%DIRECTOR%', 'director', '%s'))
        for tag, data, format in rep:
            if self.vid[data]:
                fmt = fmt.replace(tag, format % self.vid[data])
            else:
                fmt = fmt.replace(tag, '')

        # replace fields from program data
        rep = (('%HOSTNAME%', 'hostname', '%s'), ('%STORAGEGROUP%',
                                                  'storagegroup', '%s'))
        for tag, data, format in rep:
            data = getattr(self.rec, data)
            fmt = fmt.replace(tag, format % data)

        if len(self.vid.genre):
            fmt = fmt.replace('%GENRE%', self.vid.genre[0].genre)
        else:
            fmt = fmt.replace('%GENRE%', '')
        return fmt + ext

    def set_job_status(self, status):
        self.thisJob.setStatus(status)

    def set_vid_hash(self):
        self.vid.hash = self.vid.getHash()

    def update_vid(self):
        self.vid.update()
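
# A minimal driver sketch (not part of the original script): one plausible
# call order for an instance of the migration helper defined above. The
# 'exporter' argument is assumed to be VIDEO(opts, jobid) built from an opts
# object carrying chanid/startdate/starttime/offset, with a reachable backend.
def _example_migrate(exporter):
    exporter.get_type()            # TV vs MOVIE, decided from seriesid/programid
    if exporter.dup_check():       # bail out rather than create a duplicate entry
        exporter.delete_vid()
        return
    exporter.get_meta()            # grabber-based metadata import
    exporter.get_dest()            # build the destination filename (TVFMT/MVFMT)
    exporter.copy()                # chunked copy into the 'Videos' storage group
    if not exporter.check_hash():  # validate the copy before committing
        exporter.delete_vid()
        return
    exporter.copy_seek()           # carry the seek table across
    exporter.set_vid_hash()
    exporter.update_vid()
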
class VIDEO:
    def __init__(self, opts, jobid=None):
        if jobid:
            self.job = Job(jobid)
            self.chanid = self.job.chanid
            self.starttime = self.job.starttime
            self.job.update(status=Job.STARTING)
        else:
            self.job = None
            self.chanid = opts.chanid
            self.starttime = opts.starttime

        self.opts = opts
        self.db = MythDB()
        self.log = MythLog(module='mythvidexport.py', db=self.db)

        # load setting strings
        self.get_format()

        # prep objects
        self.rec = Recorded((self.chanid,self.starttime), db=self.db)
        self.log(MythLog.GENERAL, MythLog.INFO, 'Using recording',
                        '%s - %s' % (self.rec.title, self.rec.subtitle))
        self.vid = Video(db=self.db).create({'title':'', 'filename':'',
                                             'host':gethostname()})

        # process data
        self.get_meta()
        self.get_dest()
        # Work around a limitation in the bindings: DBDataRef classes are
        # bound to the filename at the time the Video element is created.
        # Since the filename is blank at creation, the markup handler is not
        # properly initialized until it is re-pointed here.
        self.vid.markup._refdat = (self.vid.filename,)

        # save file
        self.copy()
        if opts.seekdata:
            self.copy_seek()
        if opts.skiplist:
            self.copy_markup(static.MARKUP.MARK_COMM_START,
                             static.MARKUP.MARK_COMM_END)
        if opts.cutlist:
            self.copy_markup(static.MARKUP.MARK_CUT_START,
                             static.MARKUP.MARK_CUT_END)
        self.vid.update()

        # delete old file
        if opts.delete:
            self.rec.delete()

    def get_format(self):
        host = self.db.gethostname()
        # TV Format
        if self.opts.tformat:
            self.tfmt = self.opts.tformat
        elif self.db.settings[host]['mythvideo.TVexportfmt']:
            self.tfmt = self.db.settings[host]['mythvideo.TVexportfmt']
        else:
            self.tfmt = 'Television/%TITLE%/Season %SEASON%/'+\
                            '%TITLE% - S%SEASON%E%EPISODEPAD% - %SUBTITLE%'

        # Movie Format
        if self.opts.mformat:
            self.mfmt = self.opts.mformat
        elif self.db.settings[host]['mythvideo.MOVIEexportfmt']:
            self.mfmt = self.db.settings[host]['mythvideo.MOVIEexportfmt']
        else:
            self.mfmt = 'Movies/%TITLE%'

        # Generic Format
        if self.opts.gformat:
            self.gfmt = self.opts.gformat
        elif self.db.settings[host]['mythvideo.GENERICexportfmt']:
            self.gfmt = self.db.settings[host]['mythvideo.GENERICexportfmt']
        else:
            self.gfmt = 'Videos/%TITLE%'

    def get_meta(self):
        self.vid.hostname = self.db.gethostname()
        if self.rec.inetref:
            # good data is available, use it
            if self.rec.season is not None:
                self.log(self.log.GENERAL, self.log.INFO,
                        'Performing TV export with local data.')
                self.type = 'TV'
            else:
                self.log(self.log.GENERAL, self.log.INFO,
                        'Performing Movie export with local data.')
                self.type = 'MOVIE'
            metadata = self.rec.exportMetadata()
        elif self.opts.listingonly:
            # force use of local data
            if self.rec.subtitle:
                self.log(self.log.GENERAL, self.log.INFO,
                        'Forcing TV export with local data.')
                self.type = 'TV'
            else:
                self.log(self.log.GENERAL, self.log.INFO,
                        'Forcing Movie export with local data.')
                self.type = 'MOVIE'
            metadata = self.rec.exportMetadata()
        else:
            if self.rec.subtitle:
                # subtitle exists, assume tv show
                self.type = 'TV'
                self.log(self.log.GENERAL, self.log.INFO,
                        'Attempting TV export.')
                grab = VideoGrabber(self.type)
                match = grab.sortedSearch(self.rec.title, self.rec.subtitle)
            else:                   # assume movie
                self.type = 'MOVIE'
                self.log(self.log.GENERAL, self.log.INFO,
                        'Attempting Movie export.')
                grab = VideoGrabber(self.type)
                match = grab.sortedSearch(self.rec.title)

            if len(match) == 0:
                # no match found
                self.log(self.log.GENERAL, self.log.INFO,
                        'Falling back to generic export.')
                self.type = 'GENERIC'
                metadata = self.rec.exportMetadata()
            elif (len(match) > 1) and (match[0].levenshtein > 0):
                # multiple matches found, and closest is not exact
                self.vid.delete()
                raise MythError('Multiple metadata matches found: '\
                                                   +self.rec.title)
            else:
                self.log(self.log.GENERAL, self.log.INFO,
                        'Importing content from', match[0].inetref)
                metadata = grab.grabInetref(match[0])

        self.vid.importMetadata(metadata)
        self.log(self.log.GENERAL, self.log.INFO, 'Import complete')

    def get_dest(self):
        if self.type == 'TV':
            self.vid.filename = self.process_fmt(self.tfmt)
        elif self.type == 'MOVIE':
            self.vid.filename = self.process_fmt(self.mfmt)
        elif self.type == 'GENERIC':
            self.vid.filename = self.process_fmt(self.gfmt)

    def process_fmt(self, fmt):
        # replace fields from viddata
        #print self.vid.data
        ext = '.'+self.rec.basename.rsplit('.',1)[1]
        rep = ( ('%TITLE%','title','%s'),   ('%SUBTITLE%','subtitle','%s'),
            ('%SEASON%','season','%d'),     ('%SEASONPAD%','season','%02d'),
            ('%EPISODE%','episode','%d'),   ('%EPISODEPAD%','episode','%02d'),
            ('%YEAR%','year','%s'),         ('%DIRECTOR%','director','%s'))
        for tag, data, format in rep:
            if self.vid[data]:
                fmt = fmt.replace(tag,format % self.vid[data])
            else:
                fmt = fmt.replace(tag,'')

        # replace fields from program data
        rep = ( ('%HOSTNAME%',    'hostname',    '%s'),
                ('%STORAGEGROUP%','storagegroup','%s'))
        for tag, data, format in rep:
            data = getattr(self.rec, data)
            fmt = fmt.replace(tag,format % data)

#       fmt = fmt.replace('%CARDID%',self.rec.cardid)
#       fmt = fmt.replace('%CARDNAME%',self.rec.cardid)
#       fmt = fmt.replace('%SOURCEID%',self.rec.cardid)
#       fmt = fmt.replace('%SOURCENAME%',self.rec.cardid)
#       fmt = fmt.replace('%CHANNUM%',self.rec.channum)
#       fmt = fmt.replace('%CHANNAME%',self.rec.cardid)

        if len(self.vid.genre):
            fmt = fmt.replace('%GENRE%',self.vid.genre[0].genre)
        else:
            fmt = fmt.replace('%GENRE%','')
#       if len(self.country):
#           fmt = fmt.replace('%COUNTRY%',self.country[0])
#       else:
#           fmt = fmt.replace('%COUNTRY%','')
        return fmt+ext

    def copy(self):
        stime = time.time()
        srcsize = self.rec.filesize
        htime = [stime,stime,stime,stime]

        self.log(MythLog.GENERAL|MythLog.FILE, MythLog.INFO, "Copying myth://%s@%s/%s"\
               % (self.rec.storagegroup, self.rec.hostname, self.rec.basename)\
                                                    +" to myth://Videos@%s/%s"\
                                          % (self.vid.host, self.vid.filename))
        srcfp = self.rec.open('r')
        dstfp = self.vid.open('w')

        if self.job:
            self.job.setStatus(Job.RUNNING)
        tsize = 2**24
        while tsize == 2**24:
            tsize = min(tsize, srcsize - dstfp.tell())
            dstfp.write(srcfp.read(tsize))
            htime.append(time.time())
            rate = float(tsize*4)/(time.time()-htime.pop(0))
            remt = (srcsize-dstfp.tell())/rate
            if self.job:
                self.job.setComment("%02d%% complete - %d seconds remaining" %\
                            (dstfp.tell()*100/srcsize, remt))
        srcfp.close()
        dstfp.close()

        self.vid.hash = self.vid.getHash()

        self.log(MythLog.GENERAL|MythLog.FILE, MythLog.INFO, "Transfer Complete",
                            "%d seconds elapsed" % int(time.time()-stime))

        if self.opts.reallysafe:
            if self.job:
                self.job.setComment("Checking file hashes")
            self.log(MythLog.GENERAL|MythLog.FILE, MythLog.INFO, "Checking file hashes.")
            srchash = hashfile(self.rec.open('r'))
            dsthash = hashfile(self.vid.open('r'))
            if srchash != dsthash:
                raise MythError('Source hash (%s) does not match destination hash (%s)' \
                            % (srchash, dsthash))
        elif self.opts.safe:
            self.log(MythLog.GENERAL|MythLog.FILE, MythLog.INFO, "Checking file sizes.")
            be = MythBE(db=self.vid._db)
            try:
                srcsize = be.getSGFile(self.rec.hostname, self.rec.storagegroup, \
                                       self.rec.basename)[1]
                dstsize = be.getSGFile(self.vid.host, 'Videos', self.vid.filename)[1]
            except Exception:
                raise MythError('Could not query file size from backend')
            if srcsize != dstsize:
                raise MythError('Source size (%d) does not match destination size (%d)' \
                            % (srcsize, dstsize))

        if self.job:
            self.job.setComment("Complete - %d seconds elapsed" % \
                            (int(time.time()-stime)))
            self.job.setStatus(Job.FINISHED)

    def copy_seek(self):
        for seek in self.rec.seek:
            self.vid.markup.add(seek.mark, seek.offset, seek.type)

    def copy_markup(self, start, stop):
        for mark in self.rec.markup:
            if mark.type in (start, stop):
                self.vid.markup.add(mark.mark, 0, mark.type)
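
# A standalone sketch of the %TAG% substitution technique used by
# process_fmt() above (hypothetical helper and sample values, independent of
# the MythTV bindings): each tag expands through its printf-style pattern,
# and a missing value collapses to an empty string.
def _example_expand(fmt, values):
    for tag, (value, pattern) in values.items():
        fmt = fmt.replace('%%%s%%' % tag,
                          pattern % value if value is not None else '')
    return fmt

# _example_expand('Television/%TITLE%/Season %SEASON%/'
#                 '%TITLE% - S%SEASON%E%EPISODEPAD% - %SUBTITLE%',
#                 {'TITLE': ('Nova', '%s'), 'SEASON': (45, '%d'),
#                  'EPISODEPAD': (7, '%02d'),
#                  'SUBTITLE': ('Black Hole Apocalypse', '%s')})
# -> 'Television/Nova/Season 45/Nova - S45E07 - Black Hole Apocalypse'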