Example #1
0
    def test_post_render_actions(self):
        """post_render_actions() for a '3d' chain should produce exactly three
        post jobs: an image debuger, a report merger and a movie maker.
        """
        self.hbatch_farm = Houdini.HbatchFarm(self.farm, self.rop)
        job_name         = self.hbatch_farm.parms['job_name'] + "_mantra"
        self.mantra_farm = Houdini.MantraFarm(self.farm, self.rop, job_name)

        posts = Houdini.post_render_actions(self.farm, [self.hbatch_farm, self.mantra_farm], '3d')
        # BUGFIX: was assertTrue(len(posts), 3) which always passes -- the
        # second argument of assertTrue is only the failure message.
        self.assertEqual(len(posts), 3)
        debuger, merger, moviem = posts

        # mp4: the proxy jpg path must appear in the ffmpeg command:
        image       = utils.padding(self.rop.parm('vm_picture').eval(), 'nuke')[0]
        base, ext   = os.path.splitext(image)
        path, file  = os.path.split(base)
        path        = os.path.join(path,  const.PROXY_POSTFIX)
        proxy       = os.path.join(path, file +'.jpg')

        self.assertTrue(proxy in moviem.parms['command_arg'][0])
        self.assertEqual('ffmpeg ', moviem.parms['command'])
        self.assertTrue(moviem.parms['start_frame'] == moviem.parms['end_frame'] == 1)

        # merger: consumes the single json report from the debug folder:
        image      = utils.padding(self.rop.parm('vm_picture').eval(), 'shell')[0]
        path, file = os.path.split(image)
        path       = os.path.join(path, const.DEBUG_POSTFIX)
        report     = os.path.join(path, file + '.json')

        self.assertEqual(report, merger.parms['scene_file'])
        self.assertTrue('$HAFARM_HOME/scripts/generate_render_report.py' in merger.parms['command'])
        self.assertTrue(merger.parms['start_frame'] == merger.parms['end_frame'] == 1)

        # debuger: runs per frame over the whole ROP range:
        self.assertTrue('$HAFARM_HOME/scripts/debug_images.py' in debuger.parms['command'])
        self.assertTrue(const.TASK_ID_PADDED in debuger.parms['scene_file'])
        self.assertEqual(debuger.parms['start_frame'], self.rop.parm('f1').eval())
        self.assertEqual(debuger.parms['end_frame'], self.rop.parm('f2').eval())
Example #2
0
 def make_movie(self, filename):
     '''Configure this job to encode the proxy jpg sequence of 'filename'
        into an mp4 movie with ffmpeg.
     '''
     # Proxy jpgs live in a PROXY_POSTFIX subfolder next to the renders:
     seq = utils.padding(filename, 'nuke')
     directory, name = os.path.split(seq[0])
     name, _ext = os.path.splitext(name)
     inputfile = os.path.join(directory, const.PROXY_POSTFIX, name + '.jpg')
     # Output mp4 sits next to the rendered frames:
     outputfile = os.path.join(directory, utils.padding(filename)[0] + 'mp4')
     self.parms['command'] = 'ffmpeg '
     self.parms['command_arg'] = [
         "-y -r 25 -i %s -an -vcodec libx264 -vpre slow -crf 26 -threads 1 %s" % (
             inputfile, outputfile)
     ]
     # Encoding is a single task, not a per-frame one:
     self.parms['start_frame'] = 1
     self.parms['end_frame'] = 1
def merge_reports(db, reports):
    """ Instead of images to analize, use previously generated data in *.json format,
        and merge them to produce single report.

        db      -- report database (dict); updated in place and returned.
        reports -- list of paths to per-segment *.json report files.
    """
    # TODO Partial report should have freedom to keep single or group of frames...
    # Whole report database should be little more generic, not hard coded.
    keys = []
    # Make sure aggregate fields exist before extending them:
    db.setdefault('file_sizes', [])
    db.setdefault('missing_frames', [])

    data = None
    for report in reports:
        # 'with' closes the handle (the original leaked open files):
        with open(report) as fh:
            data = json.load(fh)
        # BUGFIX: file_sizes/missing_frames were extended inside the per-key
        # loop below, duplicating every segment's entries len(frames) times.
        db['file_sizes'] += data['file_sizes']
        db['missing_frames'] += data['missing_frames']
        for key in data['frames']:
            # Json turns any key into string.
            db['frames'][int(key)] = data['frames'][key]
            keys.append(int(key))

    keys.sort()
    db['first_frame'] = keys[0]
    db['last_frame'] = keys[-1]
    # 'data' is the last report parsed; pattern/job_name are assumed to be
    # identical across all segments -- TODO confirm.
    db['pattern'] = utils.padding(data['pattern'], 'shell')[0]
    db['job_name'] = data['job_name']

    return db
Example #4
0
 def iinfo_images(self, filename):
     '''Configure this job to inspect rendered images matching 'filename'
        with the iinfo utility.
     '''
     seq = utils.padding(filename, 'shell')
     self.parms['command'] = const.IINFO
     # Shell-expand the frame pattern, drop tiled partials, keep File lines:
     arg = '`ls %s | grep -v "%s" ` | grep File ' % (seq[0], const.TILE_ID)
     self.parms['command_arg'] = [arg]
     # Inspection runs as a single task over the whole sequence:
     self.parms['start_frame'] = 1
     self.parms['end_frame'] = 1
     # Send the gathered output back to the user:
     self.parms['email_stdout'] = True
Example #5
0
def mantra_render_from_ifd(node, frames, job_name=None):
    """Separated path for renderig directly from provided ifd files."""
    import glob
    mantra_frames = []

    # Params from hafarm node:
    ifds  = node.parm("ifd_files").eval()
    start = node.parm("ifd_range1").eval() #TODO make automatic range detection
    end   = node.parm("ifd_range2").eval() #TODO as above

    # Rediscover ifds:
    # FIXME: should be simple unexpandedString()
    seq_details = utils.padding(ifds)

    #job name = ifd file name + current ROP name.
    if not job_name:
        job_name = os.path.split(seq_details[0])[1] + "from" + node.name()

    # Find real file sequence on disk. Param could have $F4...
    real_ifds = glob.glob(seq_details[0] + "*" + seq_details[-1])

    # No ifds found:
    if not real_ifds: 
        print "Can't find ifds files: %s" % ifds
        return []

    if not frames:
        mantra_farm = MantraFarm(node, '', job_name)
        mantra_farm.parms['start_frame'] = node.parm("ifd_range1").eval() #TODO make automatic range detection
        mantra_farm.parms['end_frame']   = node.parm("ifd_range2").eval() #TODO as above
        mantra_farm.parms['step_frame']  = node.parm("ifd_range3").eval()
        mantra_farm.parms['scene_file']  = seq_details[0] + const.TASK_ID + '.ifd'
        mantra_frames.append(mantra_farm)

    # Proceed with farme list:
    else:
        for frame in frames:
            mantra_farm = MantraFarm(node, '', job_name+str(frame))
            mantra_farm.parms['start_frame']  = frame
            mantra_farm.parms['end_frame']    = frame
            mantra_farm.parms['scene_file']  = seq_details[0] + const.TASK_ID + '.ifd'
            mantra_frames.append(mantra_farm)


    # Detect output image. Uses grep ray_image on ifd file:
    image = utils.get_ray_image_from_ifd(real_ifds[0])
    for frame in mantra_frames:
        frame.parms['output_picture'] = image 

    return mantra_frames
def get_ifd_stats(job_name, ifd_path):
    """Collect per-frame *.ifd file sizes for a mantra job.

       job_name -- job name, possibly carrying the '_mantra' suffix.
       ifd_path -- directory holding the ifd files.

       Returns {'frames': {frame_number: {'ifd_size': bytes}}}.
    """
    stats = {'frames': {}}
    # BUGFIX: rstrip("_mantra") strips any trailing run of the characters
    # '_', 'm', 'a', 'n', 't', 'r' (mangling e.g. 'scene_art'); strip the
    # literal suffix instead.
    if job_name.endswith("_mantra"):
        job_name = job_name[:-len("_mantra")]
    # Lets find our ifd files:
    pattern = os.path.join(ifd_path, job_name) + ".*.ifd"
    for ifd in sorted(glob.glob(pattern)):
        seq = utils.padding(ifd)
        # seq[1] is the frame number extracted from the padded name:
        stats['frames'][seq[1]] = {'ifd_size': os.path.getsize(ifd)}
    return stats
Example #7
0
    def join_tiles(self, filename, start, end, ntiles):
        '''Creates a command (for oiiotool) that composites tiled renders
           back into full frames.

           filename -- output picture path, possibly containing const.TILE_ID.
           start    -- first frame of the sequence.
           end      -- last frame of the sequence.
           ntiles   -- number of tiles every frame was split into (assumed >= 2).

           NOTE(review): the 'command' string built here is never assigned to
           self.parms within this excerpt -- presumably done just below;
           confirm against the full source.
        '''

        # Retrive full frame name (without _tile%i)
        if const.TILE_ID in filename:
            base, rest = filename.split(const.TILE_ID)
            tmp, ext = os.path.splitext(filename)
            filename = base + ext
        else:
            base, ext = os.path.splitext(filename)

        # Nuke-style padded pattern of the final (untiled) frame:
        details = utils.padding(filename, format='nuke')
        base = os.path.splitext(details[0])[0]
        base, file = os.path.split(base)
        # Tiles live in a TILES_POSTFIX subfolder next to the final frames:
        base = os.path.join(base, const.TILES_POSTFIX, file)
        reads = [
            base + const.TILE_ID + '%s' % str(tile) + ext
            for tile in range(ntiles)
        ]

        # Reads: seed the stack with the first two tiles...
        command = ' '
        command += '%s ' % reads[0]
        command += '%s ' % reads[1]
        command += '--over '

        # ...then composite each remaining tile over the running result:
        for read in reads[2:]:
            command += "%s " % read
            command += '--over '

        # Final touch: output pattern and frame range for oiiotool.
        command += '-o %s ' % details[0]
        command += '--frames %s-%s ' % (start, end)

        # Additional path for proxy images (to be created from joined tiles)
        if self.parms['make_proxy']:
            path, file = os.path.split(details[0])
            path = os.path.join(path, const.PROXY_POSTFIX)

            # FIXME: It shouldn't be here at all.
            if not os.path.isdir(path):
                try:
                    os.mkdir(path)
                except OSError, why:
                    # Surface the OS error to the caller instead of raising.
                    return why

            # Second output: sRGB RGB-only jpg proxy alongside the frames.
            proxy = os.path.join(path, os.path.splitext(file)[0] + '.jpg')
            command += '--tocolorspace "sRGB" -ch "R,G,B" -o %s ' % proxy
Example #8
0
    def debug_image(self, filename, start=None, end=None):
        '''By using iinfo utility inspect filename (usually renders).

           filename   -- image (sequence) path to inspect.
           start, end -- optional frame range (not used in this excerpt).

           NOTE(review): job_name and details are computed but never used in
           the visible part of this method -- the method presumably continues
           past this excerpt; confirm against the full source.
        '''
        # TODO: Need to rethink that
        job_name = self.parms['job_name'].replace("_debug", "")
        details = utils.padding(filename)

        # Make place for debug scripts, as usual this shouldn't be here.
        # I don't like random scripts making folders in where ever they wish:
        # Maybe this is a reason for making some-sort-of file handler?
        path, file = os.path.split(filename)
        path = os.path.join(path, const.DEBUG_POSTFIX)
        if not os.path.isdir(path):
            try:
                os.mkdir(path)
            except OSError, why:
                # Surface the OS error to the caller instead of raising.
                return why
def find_last_jobScript(db, pattern='*_mantra.json'):
    '''Iterate over json parms files, find which ones were rendering into same
       image pattern, then choose the youngest one.
       This is something we would like to get rid of, once we
       will have database in place.
    '''
    import hafarm
    from operator import itemgetter

    #
    script_path = const.hafarm_defaults['script_path']
    script_path = os.path.expandvars(script_path)
    parms_files = os.path.join(script_path, pattern)
    # List of real files on disk:
    parms_files = glob.glob(parms_files)
    parms_dict = {}

    # pattern like *.json
    pattern_padded = db['pattern']
    print "Pattern to match: %s " % pattern_padded
    # Go through files on disk:
    for file_name in parms_files:
        with open(file_name, 'r') as file:
            print 'opening %s' % file_name
            jfile = json.load(file)
            if 'parms' in jfile.keys():
                if 'output_picture' in jfile['parms'].keys():
                    output_pattern = utils.padding(
                        jfile['parms']['output_picture'], 'shell')[0]
                    if pattern_padded == output_pattern:
                        print 'Adding file %s to a list of candidates' % file_name
                        jfile['json_file_name'] = file_name
                        parms_dict[jfile['parms']['submission_time']] = jfile

    #
    if not parms_dict:
        return None

    # Params sorted with submission time. The last one will be the youngest one.
    submissions = sorted(parms_dict.keys())
    candidate = parms_dict[submissions[-1]]
    print 'Assuming parms from %s ' % str(
        time.ctime(candidate['parms']['submission_time']))
    print candidate
    return candidate['json_file_name']
Example #10
0
def build_dictionary(fields):
    """Gather the mantra properties named in 'fields' ('%'-separated), plus a
    fixed set of bookkeeping values, into one per-frame log dict.
    """
    # User specified fields:
    store = dict((name, mantra.property(name)[0]) for name in fields.split("%"))
    # Some stuff we always want to log:
    store.update({
        'filename': mantra.property('image:filename')[0],
        'hostname': os.popen('hostname').readlines()[0].strip(),
        'start_time': time(),
        'type': 'frame',
        'asset_type': getenv("JOB_ASSET_TYPE", ""),
        'asset_name': getenv("JOB_ASSET_NAME", ""),
        'user': getenv("USER", ""),
        'insider': "HaFilterIFD",
    })
    # Sequence id helps to find it in db:
    # hash of path + name - padding + extension.
    seq = utils.padding(store['filename'])
    store['seq_id'] = md5(seq[0][:-1] + seq[-1]).hexdigest()
    return store
Example #11
0
 def merge_reports(self,
                   filename,
                   ifd_path=None,
                   send_email=True,
                   mad_threshold=5.0,
                   resend_frames=False):
     '''Configure this job to merge previously generated per-frame debug
        reports and present them (e-mail, json/html on disk).
     '''
     # Build the command-line flags. E-mailing is forced on regardless of
     # the 'send_email' argument (deliberate, original behaviour kept):
     send_email = '--send_email'
     ifd_path = '--ifd_path %s' % ifd_path if ifd_path else ""
     resend_frames = '--resend_frames' if resend_frames else ""

     # The merged report lives in the DEBUG_POSTFIX folder next to the frames:
     path, filename = os.path.split(filename)
     details = utils.padding(filename, 'shell')
     log_path = os.path.join(path, const.DEBUG_POSTFIX)
     self.parms['scene_file'] = os.path.join(log_path, details[0]) + '.json'
     command = '$HAFARM_HOME/scripts/generate_render_report.py %s %s %s --mad_threshold %s --save_html ' % (
         send_email, ifd_path, resend_frames, mad_threshold)
     self.parms['command'] = command
     # Merging runs once, not per frame:
     self.parms['start_frame'] = 1
     self.parms['end_frame'] = 1
def main():
    options, args = parseOptions()
    html = ""
    render_stats = None
    ifd_stats = None

    # Early quit:
    if not args:
        print help()
        sys.exit()

    # Our main container:
    # TODO: Make it custom class Sequance(dict)
    db = {
        'first_frame': 0,
        'last_frame': 0,
        'pattern': utils.padding(args[0], 'shell'),
        'job_name': '',
        'frames': {},
        'time_stamp': time.time(),
        'resent_frames': []
    }

    # Merge json files previsouly generated:
    reports = [report for report in args if report.endswith('.json')]
    db = merge_reports(db, reports)
    # Get render statistics:
    if options.render_stats:
        render_stats = get_render_stats(db['job_name'])
    # Find suspicion small files:
    #db = check_small_frames(db, options.mad_threshold)

    # TODO: This works atm but I'm disableing it for a sake of health care
    # Get IFD (Mantra specific) statistics:
    #if options.ifd_path:
    #    ifd_stats = get_ifd_stats(db['job_name'], options.ifd_path)

    if options.resend_frames:
        resend_frames_on_farm(db)

    # # Present report:
    if options.save_html or options.send_email:
        html = generate_html(db, render_stats, ifd_stats)

    # Send report by email:
    if options.send_email:
        send_debug(db['job_name'],
                   [utils.get_email_address()] + const.RENDER_WRANGERS, html)

    # Saving to files:
    if options.save_html or options.save_json:
        path, report_file = os.path.split(reports[0])
        report_file = os.path.join(path, db['job_name']) + ".%s"

        # Write it down:
        if options.save_html:
            with open(report_file % 'html', 'w') as file:
                file.write(html)

        if options.save_json:
            with open(report_file % 'json', 'w') as file:
                json.dump(db, file, indent=2)

        if options.save_html and options.display_report:
            os.popen("gnome-open %s " % report_file)
def generate_html(db, render_stats=None, ifd_stats=None, errors_only=False):
    """Interate over rows in db and generate html document from that.

       db           -- merged report database (frames, pattern, job_name...).
       render_stats -- optional per-frame render statistics dict.
       ifd_stats    -- optional per-frame ifd statistics dict.
       errors_only  -- currently unused; kept for interface compatibility.

       Returns the whole report as a single html string.
    """
    from time import ctime

    def bytes_to_megabytes(bytes, rounded=3):
        # Display helper: raw bytes -> megabytes, rounded.
        return round(int(bytes) / (1024.0 * 1024.0), rounded)

    html = ""
    html += HEAD
    html += "<body>"
    table = ""
    table += TABLE_HEADER

    #globals
    render_times = []

    # Fallbacks used whenever a frame has no matching stats entry:
    r_stat = {
        'hostname': '',
        'qsub_time': 'Sat Sep 12 17:24:32 2015',
        'cpu': 0.0,
        'mem': 0.0,
        'owner': "",
        'start_time': 'Sat Sep 12 17:24:32 2015',
        'end_time': 'Sat Sep 12 17:24:32 2015'
    }
    i_stat = {'ifd_size': 0.0}

    # *_TAGS alter rows colors...
    for frame_num in sorted(db['frames']):
        frame = db['frames'][frame_num]
        # Get handle to per frame render stats
        if render_stats:
            if frame_num in render_stats['frames']:
                r_stat = render_stats['frames'][frame_num]
        # Get handle to per ifd render stats:
        if ifd_stats:
            if frame_num in ifd_stats['frames']:
                i_stat = ifd_stats['frames'][frame_num]

        # Set color for problematic fields:
        if not frame['exists']:
            table += MISSING_FILE_TAG
        elif not frame['integrity']:
            table += BAD_FILE_TAG
        elif frame['small_frame']:
            table += SMALL_FILE_TAG
        else:
            table += NORMAL_FILE_TAG

        # TODO: This is SGE specific.
        # Convert details returend by qaact into seconds and then compute render time
        # represented as pretty string.
        start_time = utils.convert_asctime_to_seconds(r_stat['start_time'])
        end_time = utils.convert_asctime_to_seconds(r_stat['end_time'])
        render_time = utils.compute_time_lapse(start_time, end_time)
        render_times += [start_time, end_time]

        # Generate row:
        table += ROW % (frame_num, frame['exists'], frame['integrity'],
                        frame['nans'], frame['infs'],
                        str(bytes_to_megabytes(frame['size'])) + ' MB',
                        frame['small_frame'], r_stat['hostname'], render_time,
                        str(round(float(r_stat['mem']) / 1024, 2)) + " GB",
                        str(bytes_to_megabytes(i_stat['ifd_size'], 5)) + ' MB')

    # More info for an user:
    # NOTE(review): render_times is empty when db['frames'] is empty, which
    # makes render_times[0]/[-1] below raise -- confirm callers guarantee frames.
    render_times.sort()
    info = ""
    thumbs = ""

    # Retrive mp4 if any:
    sequence = glob.glob(db['pattern'])
    sequence.sort()
    proxies = []

    for image in sequence:
        path, image = os.path.split(image)
        image, ext = os.path.splitext(image)
        proxy = 'proxy/' + image + '.jpg'
        proxy = os.path.join(path, proxy)
        proxies.append(proxy)
    # NOTE(review): sequence[0] raises IndexError when no frame matches the
    # pattern on disk -- pre-existing behaviour, confirm acceptable.
    mp4 = utils.padding(sequence[0])[0] + "mp4"

    info += INFO_TABLE_HEADER
    info += INFO_ROW % ('Job', db['job_name'])
    info += INFO_ROW % ('User', r_stat['owner'])
    info += INFO_ROW % ('Submitted', r_stat['qsub_time'])
    info += INFO_ROW % ('Started', ctime(render_times[0]))
    info += INFO_ROW % ('Ended: ', ctime(render_times[-1]))
    info += INFO_ROW % ('Missing', ", ".join(
        [str(f) for f in db['missing_frames']]))
    info += INFO_ROW % ('Resent', ", ".join(
        [str(f) for f in db['resent_frames']]))
    info += INFO_ROW % ('Path', LINK_FILE %
                        ('file://' + db['pattern'], db['pattern']))

    # Links to additional fiels on disk
    # Limit proxies to nthumbs ('//' keeps integer division under Python 3 too):
    nthumbs = max(len(proxies) // 10, 1)
    # BUGFIX: 'thumb' was undefined (NameError) in the MP4 row below when no
    # proxy file existed on disk; keep a safe default.
    thumb = ''
    for thumb in proxies[::nthumbs]:
        if os.path.isfile(thumb):
            thumbs += LINK_IMAGE % ('file://' + thumb, 'file://' + thumb,
                                    17 * 3, 10 * 3, thumb)
    info += INFO_ROW % ('PROXY', thumbs)
    if os.path.isfile(mp4):
        info += INFO_ROW % (
            'MP4', LINK_IMAGE %
            ('file://' + mp4, 'file://' + thumb, 17 * 3, 10 * 3, mp4))

    # Finally add main table and return
    html += info
    html += table
    html += FOOT
    return html
Example #14
0
def main():
    """Run over files that match pattern to scan their qualities with
    command line image tools chain. Stores result in html and sand optionally
    via email.
    """
    options, args = parseOptions()
    single_frame  = False
    render_stats  = None
    ifd_stats     = None
    db = {'first_frame': 0,
      'last_frame' : 0,
      'pattern'    : options.image_pattern,
      'job_name'   : options.job_name,
      'frames'     : {} }

    if not options.job_name:
        options.job_name = os.getenv("JOB_NAME", "")

    # Image is required:
    if not options.image_pattern and not options.merge_reports:
        print help()
        sys.exit()

    # Find images matching pattern, 
    # the real sequence on disk:
    # TODO Add argument to overwrite framge range on disk.
    pattern       = os.path.abspath(options.image_pattern)
    pattern       = os.path.expandvars(pattern)
    images        = glob.glob(pattern)
    images.sort()

    # In case image pattern was blank:
    if not images:
        tmp = utils.padding(pattern)
        print "No image found: %s" % options.image_pattern
        db['frames'][tmp[1]] = {'exists': False,
                     'integrity': False,
                     'file': 0,
                     'nans': 0,
                     'infs': 0,
                     'resolution': (0,0),
                     'size': 0,
                     'small_frame': False}
        db['file_sizes'] = [0]
        db['missing_frames'] = [tmp[1]]

        if options.save_json:
            save_json(options, db, [pattern])
        else:
            print db
        return

    # Get info
    tmp         = utils.padding(images[0])
    sequence    = utils.padding(images[-1])
    first_frame = tmp[1]
    last_frame  = sequence[1]

    # If pattern returned single frame, we assume user 
    # wants to examine single file from a siquence. 
    if len(images) == 1:
        single_frame = True

    # Some feedback to user. 
    print sys.argv[0] + " proceeds %s files: %s" % (len(images), images[0])


    # Our main container:
    # TODO: Make it custom class Sequance(dict)
    db['first_frame'] = first_frame
    db['last_frame']  = last_frame

    
    # Run over all frames to gather per-frame information:
    db = proceed_sequence(sequence, db, first_frame, last_frame)


    # Present report:
    if options.save_json:
        save_json(options, db, images)
    else:
        print db