def loop_interchange(outer_stmts, inner_stmts):
    """Interchange each outer Do loop with each inner Do loop.

    For every (outer, inner) pair the loop headers and the matching
    end-do statements are swapped while the rest of the statements are
    re-emitted unchanged; the resulting source is reparsed and spliced
    back into the tree in place of the outer loop.
    """
    for outer_stmt in outer_stmts:
        if not isinstance(outer_stmt, Do):
            Logger.warn('Outer statment is not Do type: %s' % outer_stmt.__class__)
            continue
        for inner_stmt in inner_stmts:
            if not isinstance(inner_stmt, Do):
                Logger.warn('Inner statment is not Do type: %s' % inner_stmt.__class__)
                continue
            src_lines = []
            for node, _depth in walk(outer_stmt, -1):
                # Pick the statement to emit: the four loop-boundary
                # statements map onto their swapped counterparts, every
                # other statement is emitted as-is.
                if node is outer_stmt:
                    emit = inner_stmt
                elif node is inner_stmt:
                    emit = outer_stmt
                elif node is inner_stmt.content[-1]:
                    emit = outer_stmt.content[-1]
                elif node is outer_stmt.content[-1]:
                    emit = inner_stmt.content[-1]
                else:
                    emit = node
                src_lines.append(emit.tooc())
            if not src_lines:
                continue
            parsed = parse('\n'.join(src_lines), analyze=False)
            if len(parsed.content) > 0:
                parsed.content[0].parent = outer_stmt.parent
                for node, _depth in walk(parsed, -1):
                    node.parse_f2003()
                insert_content(outer_stmt, parsed.content)
def loop_interchange(outer_stmts, inner_stmts):
    """Swap the header and end statements of every outer/inner Do pair,
    reparse the rewritten loop nest, and insert it over the outer loop."""
    for ostmt in outer_stmts:
        if not isinstance(ostmt, Do):
            Logger.warn('Outer statment is not Do type: %s'%ostmt.__class__)
            continue
        for istmt in inner_stmts:
            if not isinstance(istmt, Do):
                Logger.warn('Inner statment is not Do type: %s'%istmt.__class__)
                continue
            # end-do statements of both loops
            outer_end = ostmt.content[-1]
            inner_end = istmt.content[-1]
            src = []
            for node, _ in walk(ostmt, -1):
                if node is ostmt:
                    src.append(istmt.tooc())
                elif node is istmt:
                    src.append(ostmt.tooc())
                elif node is inner_end:
                    src.append(outer_end.tooc())
                elif node is outer_end:
                    src.append(inner_end.tooc())
                else:
                    src.append(node.tooc())
            if src:
                parsed = parse('\n'.join(src), analyze=False)
                if len(parsed.content)>0:
                    parsed.content[0].parent = ostmt.parent
                    for node, _ in walk(parsed, -1):
                        node.parse_f2003()
                    insert_content(ostmt, parsed.content)
def execute_refcase(): mgr = State.cases['mgr'] refcase = mgr.get_refcase() refcase.execute() if refcase.result != Case.VERIFIED: print 'WARNING: Reference case is not correctly executed' else: refperfvals = [float(val) for val in refcase.measured[mgr.rank_var]] refperfval = sum(refperfvals) / len(refperfvals) Logger.info('\nReference performance: %e' % refperfval, stdout=True)
def execute_refcase(): mgr = State.cases['mgr'] refcase = mgr.get_refcase() refcase.execute() if refcase.result!=Case.VERIFIED: print 'WARNING: Reference case is not correctly executed' else: refperfvals = [ float(val) for val in refcase.measured[mgr.rank_var] ] refperfval = sum(refperfvals)/len(refperfvals) Logger.info('\nReference performance: %e'%refperfval, stdout=True)
def loop_split(stmts, add_stmt, before=True):
    """Split each statement's enclosing Do loop into two consecutive loops.

    stmts    -- statements marking the split point; each must sit directly
                inside a Do loop
    add_stmt -- optional statement line(s); add_stmt[0] is emitted ahead of
                the second loop's header
    before   -- True: the split statement becomes the first statement of the
                second loop; False: it stays as the last statement of the
                first loop
    """
    for stmt in stmts:
        parent = stmt.parent
        if not isinstance(parent, Do):
            Logger.warn('Parent of statment is not Do type: %s' % parent.__class__)
            continue
        # doblk1/doblk2 accumulate the source lines of the first/second loop
        doblk1 = []
        doblk2 = []
        #if add_stmt: doblk1.append(add_stmt[0])
        doblk1.append(parent.tooc())
        if add_stmt: doblk2.append(add_stmt[0])
        # the second copy of the header drops its label to avoid duplicates
        doblk2.append(parent.tooc(remove_label=True))
        enddo_stmt = parent.content[-1]
        doblk = doblk1          # destination currently being filled
        remove_label = False    # flips to True once the split point is crossed
        for childstmt, depth in walk(parent, -1):
            if childstmt not in [parent, enddo_stmt]:
                # before=False: emit first, then maybe switch -> split stmt
                # ends up in the first loop
                if not before:
                    doblk.append(childstmt.tooc(remove_label=remove_label))
                if childstmt == stmt:
                    doblk = doblk2
                    remove_label = True
                # before=True: switch first, then emit -> split stmt starts
                # the second loop
                if before:
                    doblk.append(childstmt.tooc(remove_label=remove_label))
        doblk1.append(enddo_stmt.tooc())
        doblk2.append(enddo_stmt.tooc(remove_label=True))
        if doblk1:
            parsed = parse('\n'.join(doblk1), analyze=False, ignore_comments=False)
            if len(parsed.content) > 0:
                parsed.content[0].parent = parent.parent
                for stmt, depth in walk(parsed, -1):
                    stmt.parse_f2003()
                # keep the original loop for now; the second insert replaces it
                insert_content(parent, parsed.content, remove_olditem=False)
        if doblk2:
            parsed = parse('\n'.join(doblk2), analyze=False, ignore_comments=False)
            if len(parsed.content) > 0:
                parsed.content[0].parent = parent.parent
                for stmt, depth in walk(parsed, -1):
                    stmt.parse_f2003()
                insert_content(parent, parsed.content, remove_olditem=True)
def loop_split(stmts, add_stmt, before=True):
    """Split the Do loop containing each statement in `stmts` into two loops,
    cutting at that statement.

    `add_stmt[0]` (if any) is inserted just before the second loop header.
    With before=True the split statement opens the second loop; with
    before=False it closes the first loop.
    """
    for stmt in stmts:
        parent = stmt.parent
        if not isinstance(parent, Do):
            Logger.warn('Parent of statment is not Do type: %s'%parent.__class__)
            continue
        # source-line buffers for the first and second resulting loops
        doblk1 = []
        doblk2 = []
        #if add_stmt: doblk1.append(add_stmt[0])
        doblk1.append(parent.tooc())
        if add_stmt: doblk2.append(add_stmt[0])
        # second header is emitted without its statement label
        doblk2.append(parent.tooc(remove_label=True))
        enddo_stmt = parent.content[-1]
        doblk = doblk1          # buffer currently receiving statements
        remove_label = False    # set once we pass the split statement
        for childstmt, depth in walk(parent, -1):
            if childstmt not in [ parent, enddo_stmt]:
                # emit-then-switch keeps the split stmt in the first loop
                if not before:
                    doblk.append(childstmt.tooc(remove_label=remove_label))
                if childstmt==stmt:
                    doblk = doblk2
                    remove_label = True
                # switch-then-emit moves the split stmt into the second loop
                if before:
                    doblk.append(childstmt.tooc(remove_label=remove_label))
        doblk1.append(enddo_stmt.tooc())
        doblk2.append(enddo_stmt.tooc(remove_label=True))
        if doblk1:
            parsed = parse('\n'.join(doblk1), analyze=False, ignore_comments=False)
            if len(parsed.content)>0:
                parsed.content[0].parent = parent.parent
                for stmt, depth in walk(parsed, -1):
                    stmt.parse_f2003()
                # first loop is added alongside the original loop
                insert_content(parent, parsed.content, remove_olditem=False)
        if doblk2:
            parsed = parse('\n'.join(doblk2), analyze=False, ignore_comments=False)
            if len(parsed.content)>0:
                parsed.content[0].parent = parent.parent
                for stmt, depth in walk(parsed, -1):
                    stmt.parse_f2003()
                # second loop replaces the original loop
                insert_content(parent, parsed.content, remove_olditem=True)
def resolve(self, request):
    """Try to resolve a name-resolution `request` against this interface.

    If the requested name matches this interface's name, the request is
    marked RESOLVED and the interface body is recursively searched so any
    unknowns it introduces get resolved too. Otherwise resolution is
    deferred to the superclass.
    """
    from oc_state import ResState
    from oc_namesearch import f2003_search_unknowns
    if request is None: return
    Logger.info('%s is being resolved'%request.uname.firstpartname(), name=request.uname, stmt=self)
    # if resolved, return
    if request.state == ResState.RESOLVED:
        Logger.info('%s is already resolved'%request.uname.firstpartname(), name=request.uname, stmt=self)
        return
    if request.uname.firstpartname()==self.name:
        Logger.info('The request is being resolved by an interface', name=request.uname, stmt=self)
        request.res_stmt = self
        request.state = ResState.RESOLVED
        request.res_stmt.add_geninfo(request.uname)
        #self.check_spec_stmts(request.uname, request.res_stmt)
        # resolving this interface may surface new unknowns inside its body;
        # collect and resolve them recursively
        for _stmt, _depth in walk(request.res_stmt, -1):
            if not hasattr(_stmt, 'unknowns'):
                # statement not yet searched for unknown names
                f2003_search_unknowns(_stmt, _stmt.f2003)
            for unk, req in _stmt.unknowns.iteritems():
                if req.state != ResState.RESOLVED:
                    _stmt.resolve(req)
    # defer to super
    if request.state != ResState.RESOLVED:
        super(Interface, self).resolve(request)
def loop_merge(from_stmts, to_stmts):
    """Merge the body of each `from` Do loop into each matching `to` Do loop.

    Two loops are merged only when their loop controls provably agree:
    either both index ranges evaluate statically to the same sequence, or
    neither evaluates but loop variable, start, end and step expressions
    all match. On a merge, the from-loop body (without its end-do) is
    inserted before the to-loop's end-do and the from-loop is removed.

    Raises ProgramException for Do statements that are not Nonlabel_Do_Stmt.
    """
    for from_stmt in from_stmts:
        if not isinstance(from_stmt, Do):
            Logger.warn('From statment is not Do type: %s'%from_stmt.__class__)
            continue
        from_f2003 = from_stmt.f2003
        if isinstance(from_f2003, Nonlabel_Do_Stmt):
            from_loop_control = from_f2003.items[1]
            from_loop_var = from_loop_control.items[0].string.lower()
            from_start_idx = from_loop_control.items[1][0]
            from_end_idx = from_loop_control.items[1][1]
            # NOTE(review): defaulting the step when len(...)==3 looks inverted
            # relative to a [start, end, step] triplet, but the same pattern
            # appears in loop_unroll — confirm fparser's item layout before
            # changing it.
            if len(from_loop_control.items[1])==3:
                from_step = Int_Literal_Constant(str(1))
            else:
                # BUGFIX: was `loop_control.items[1][2]` — `loop_control` is
                # undefined in this function (NameError). Use the local
                # `from_loop_control`, mirroring the `to_` branch below.
                from_step = from_loop_control.items[1][2]
        else:
            raise ProgramException('Not supported type: %s'%from_f2003.__class__)
        # collect loop controls through static analysis
        from_start_num = from_stmt.get_param(from_start_idx)
        from_end_num = from_stmt.get_param(from_end_idx)
        from_step_num = from_stmt.get_param(from_step)
        try:
            from_loop_indices = range(from_start_num, from_end_num+1, from_step_num)
        except Exception:
            # bounds are not statically known; fall back to textual comparison
            from_loop_indices = None
        for to_stmt in to_stmts:
            if not isinstance(to_stmt, Do):
                Logger.warn('To statment is not Do type: %s'%to_stmt.__class__)
                continue
            to_f2003 = to_stmt.f2003
            if isinstance(to_f2003, Nonlabel_Do_Stmt):
                to_loop_control = to_f2003.items[1]
                to_loop_var = to_loop_control.items[0].string.lower()
                to_start_idx = to_loop_control.items[1][0]
                to_end_idx = to_loop_control.items[1][1]
                if len(to_loop_control.items[1])==3:
                    to_step = Int_Literal_Constant(str(1))
                else:
                    to_step = to_loop_control.items[1][2]
            else:
                raise ProgramException('Not supported type: %s'%to_f2003.__class__)
            # collect loop controls through static analysis
            to_start_num = to_stmt.get_param(to_start_idx)
            to_end_num = to_stmt.get_param(to_end_idx)
            to_step_num = to_stmt.get_param(to_step)
            try:
                to_loop_indices = range(to_start_num, to_end_num+1, to_step_num)
            except Exception:
                to_loop_indices = None
            if ( from_loop_indices and to_loop_indices and from_loop_indices==to_loop_indices ) or \
               ( from_loop_indices is None and to_loop_indices is None and from_loop_var==to_loop_var and \
                 from_start_idx==to_start_idx and from_end_idx==to_end_idx and from_step==to_step ):
                # move the from-loop body (minus its end-do) ahead of the
                # to-loop's end-do, then drop the now-empty from-loop
                insert_content(to_stmt.content[-1], from_stmt.content[:-1], remove_olditem=False)
                remove_content(from_stmt)
            else:
                Logger.warn('Can not merge due to different loop control')
def get_name_or_defer(stmt, node, resolvers, defer=True):
    """Record `node` as an unknown name on `stmt`, or recurse into it.

    A plain Fortran2003.Name is registered in stmt.unknowns keyed by its
    path name (unless it is an intrinsic that should be skipped). Any other
    node type is searched recursively when `defer` is True.
    """
    from oc_utils import OCName, pathname
    from oc_state import ResState
    if node is None: return
    if isinstance(node, Fortran2003.Name):
        # skip if intrinsic
        if node.string.lower() in Intrinsic_Procedures:
            # skip_intrinsic sets the default; is_except() inverts it per name
            if Config.search['skip_intrinsic'] and not is_except(node, stmt): return
            elif not Config.search['skip_intrinsic'] and is_except(node, stmt): return
        ukey = OCName(pathname(stmt, node.string.lower()), node=node, stmt=stmt)
        if resolvers is None:
            # NOTE(review): `res_default` is not defined in this function —
            # presumably a module-level default resolver list; confirm.
            stmt.unknowns[ukey] = ResState(ukey, stmt, res_default)
        else:
            stmt.unknowns[ukey] = ResState(ukey, stmt, resolvers)
        Logger.info('%s is saved as unknown' % node.string.lower(), name=ukey, stmt=stmt)
    elif defer:
        # not a bare name: search the node's children for unknowns
        f2003_search_unknowns(stmt, node, resolvers)
def main():
    """Top-level driver: parse sources, configure the search, run the
    reference case, then iterate over generated cases until exhausted."""
    parse_srcfiles()
    Logger.info('Source files are parsed.', stdout=True)
    configure_searching()
    Logger.info('Searching is configured.', stdout=True)
    copytree(Config.path['refdir'], Config.path['workdir'])
    execute_refcase()
    Logger.info('Reference case is executed.', stdout=True)
    chdir(Config.path['workdir'])
    # keep generating/executing cases until execute_nextcase() reports done
    while True:
        if not execute_nextcase():
            break
    Logger.info('Completed.', stdout=True)
def __init__(self, srcpath):
    """Read, preprocess and parse one Fortran source file.

    srcpath -- path to the source file, relative or absolute.

    Runs fpp/cpp with the configured include paths and macros, shields
    preprocessor line markers from fparser by turning them into special
    comments, parses the result, restores original line numbers, and
    collects $opencase directives (inline or via 'include' case files)
    into self.searchtree / self.direct / State.direct.

    Raises UserException for an unsupported preprocessor or a missing
    case file.
    """
    import re
    import os.path
    from oc_utils import exec_cmd
    # set default values
    self.prep = None
    self.tree = None
    self.srcpath = srcpath
    self.filename = os.path.basename(self.srcpath)
    self.abspath = os.path.abspath(self.srcpath)
    self.relpath = os.path.relpath(self.abspath, Config.path['refdir'])
    self.searchtree = []
    self.direct = {}
    # prepare include paths and macro definitions (global + per-file)
    path_src = []
    macros_src = ''
    if Config.include['file'].has_key(self.abspath):
        path_src = Config.include['file'][self.abspath]['path']
        macros_src = ' '.join(['-D%s=%s' % (k, v) for k, v in Config.include['file'][self.abspath]['macro'].iteritems()])
    includes = '-I' + ' -I'.join(Config.include['path'] + path_src + ['.'])
    macros = ' '.join(['-D%s=%s' % (k, v) for k, v in Config.include['macro'].iteritems()]) + ' ' + macros_src
    # execute preprocessing
    Logger.info('Reading %s' % self.srcpath, stdout=True)
    prep = Config.bin['pp']
    if prep.endswith('fpp'):
        flags = Config.bin['fpp_flags']
    elif prep.endswith('cpp'):
        flags = Config.bin['cpp_flags']
    else:
        raise UserException('Preprocessor is not either fpp or cpp')
    output = exec_cmd('%s %s %s %s %s' % (prep, flags, includes, macros, self.abspath))
    # convert the preprocessed output for fparser: hide '#' line markers
    # as comments so they survive parsing
    self.prep = map(lambda l: '!__OPENCASE_COMMENT' + l if l.startswith('#') else l, output.split('\n'))
    # fparse
    self.tree = parse('\n'.join(self.prep), ignore_comments=False, analyze=True, isfree=True, isstrict=False, \
        include_dirs=None, source_only=None)
    # walk the tree: restore pre-preprocessing line numbers and collect
    # $opencase directives
    lineno = 0
    linediff = 0
    for stmt, depth in walk(self.tree, -1):
        if isinstance(stmt, Comment) and stmt.item.comment.startswith('!__OPENCASE_COMMENT#'):
            # hidden preprocessor line marker: remember the original lineno
            comment_split = stmt.item.comment.split(' ')
            lineno = int(comment_split[1])
            stmt.item.span = (0, 0)
        else:
            if lineno > 0:
                # first statement after a marker fixes the offset
                linediff = stmt.item.span[0] - lineno
                lineno = 0
            stmt.item.span = (stmt.item.span[0] - linediff, stmt.item.span[1] - linediff)
        if isinstance(stmt, Comment):
            match = re.match(r'\$opencase\s*(\w+)\s*([\(\{\[\<])(.+)([\)\}\]\>]\s*\*?\+?\d?)', stmt.content, re.I)
            if match:
                name = match.group(1).lower()
                value = match.group(3)
                if name == 'include':
                    # directive payload is a case file holding more directives
                    if value:
                        casefile = value.strip()
                        if casefile[0] == '/':
                            inc_path = os.path.abspath(casefile)
                        else:
                            inc_path = os.path.join(os.path.dirname(self.abspath), value)
                        if os.path.exists(inc_path):
                            finc = open(inc_path, 'r')
                            inc_directs = re.findall(r'(\!?)\s*(\w+)\s*([\(\{\[\<])(.+)([\)\}\]\>]\s*\*?\+?\d?)\s*\n', finc.read(), re.I)
                            finc.close()
                            for direct in inc_directs:
                                # a leading '!' comments out the directive
                                if direct[0]: continue
                                direct_line = ''.join(direct)
                                direct_name = direct[1].lower()
                                direct_tree = generate_searchtree(self._strmap(direct_line))
                                assert len(direct_tree) == 1, 'Only one element is allowed in direct_tree'
                                self.searchtree.extend(direct_tree)
                                if direct_name in global_directs:
                                    if not State.direct.has_key(direct_name):
                                        State.direct[direct_name] = []
                                    State.direct[direct_name].append((direct_tree[0], stmt, stmt.item.span))
                                elif direct_name in local_directs:
                                    if not self.direct.has_key(direct_name):
                                        self.direct[direct_name] = []
                                    self.direct[direct_name].append((direct_tree[0], stmt, stmt.item.span))
                        else:
                            # BUGFIX: message typo 'caes' -> 'case'
                            raise UserException('Can not find case file: %s' % inc_path)
                else:
                    # inline directive; strip the '$opencase ' prefix
                    direct_line = match.group(0)
                    direct_tree = generate_searchtree(self._strmap(direct_line[10:]))
                    self.searchtree.extend(direct_tree)
                    if name in global_directs:
                        if not State.direct.has_key(name):
                            State.direct[name] = []
                        State.direct[name].append((direct_tree[0], stmt, stmt.item.span))
                    elif name in local_directs:
                        if not self.direct.has_key(name):
                            self.direct[name] = []
                        self.direct[name].append((direct_tree[0], stmt, stmt.item.span))
                #if match.group(1).lower() in ['refcase']:
                #    State.direct[match.group(1).lower()] = direct_tree
        stmt.parse_f2003()
    # rename reader.id
    self.tree.reader.id = self.abspath
Logger.info('Completed.', stdout=True) # starts HERE if __name__ == "__main__": try: print '' State.operation['begin'] = time() if Config.misc['timeout']: with Timeout(seconds=Config.misc['timeout']): main() else: main() except UserException as e: print 'ERROR: %s'%str(e) Logger.info(e) except ProgramException as e: Logger.critical(e) except TimeoutException as e: Logger.critical(e) except Exception as e: Logger.critical(e) finally: State.operation['end'] = time() generate_output()
def generate_output():
    """Print the run summary (inputs, case count, top-10 ranking, elapsed
    time) and write the full ranking to <outdir>/perf.log."""
    Logger.info("\n******** SUMMARY **********", stdout=True)
    Logger.info("\nINPUT FILES:", stdout=True)
    # list source files
    for srcfile in State.inputfile:
        Logger.info(TAB + srcfile.tree.reader.id, stdout=True)
        if Config.debug["enabled"]:
            # for directline in srcfile.directlines:
            # Logger.info(TAB*2+directline, stdout=True)
            pass
    Logger.info("TOTAL %d cases" % State.cases["size"], stdout=True)
    # ranking
    topN = min(10, State.cases["size"])
    Logger.info("\nRANKING - top %d:" % topN, stdout=True)
    mgr = State.cases["mgr"]
    if mgr:
        if mgr.refcase.result == Case.VERIFIED:
            # mean of the reference case's measured ranking variable
            refperfvals = [float(val) for val in mgr.refcase.measured[mgr.rank_var]]
            refperfval = sum(refperfvals) / len(refperfvals)
            Logger.info("\nReference performance: %e" % refperfval, stdout=True)
        else:
            Logger.info("\nReference performance: not available", stdout=True)
        Logger.info("\nranking\tcase-number\tcase-order\tperformance", stdout=True)
        for i, rank in enumerate(mgr.ranking[:topN]):
            Logger.info("%d\t\t%d\t\t%d\t\t%e" % ((i + 1,) + rank), stdout=True)
    # summarize operation
    begin = datetime.datetime.fromtimestamp(State.operation["begin"]).strftime("%Y-%m-%d %H:%M:%S")
    diffsecs = State.operation["end"] - State.operation["begin"]
    Logger.info("\nELAPSED TIME:", stdout=True)
    Logger.info(TAB + "%s from %s" % (str(datetime.timedelta(seconds=diffsecs)), begin), stdout=True)
    # how much quality has improved(compared to reference???)
    # what algorithm is used(with parameters)
    # what are the common features of the quality cases
    # write ranking into a file
    with open(Config.path["outdir"] + "/perf.log", "wb") as f:
        mgr = State.cases["mgr"]
        if mgr:
            if mgr.refcase.result == Case.VERIFIED:
                refperfvals = [float(val) for val in mgr.refcase.measured[mgr.rank_var]]
                refperfval = sum(refperfvals) / len(refperfvals)
                f.write("Reference performance: %e\n" % refperfval)
            else:
                f.write("Reference performance: not available\n")
            f.write("\nranking\tcase-number\tcase-order\tperformance\n")
            # full ranking (not just top-10) goes to the log file
            for i, rank in enumerate(mgr.ranking):
                f.write("%d\t\t%d\t\t%d\t\t%e\n" % ((i + 1,) + rank))
            # failed cases are listed with ranking -1
            for i, failed in enumerate(mgr.failed):
                f.write("%d\t\t%d\t\t%d\t\t%e\n" % ((-1,) + failed))
    Logger.info("", stdout=True)
def loop_unroll(targets, factor, method):
    """Unroll each target Do loop by `factor` using the given `method`.

    targets -- Do statements to unroll
    factor  -- 'full' to unroll the whole (statically-known) range, or a
               digit string for a partial unroll; a partial unroll emits a
               main loop with the scaled step plus a remainder loop
    method  -- unrolling strategy forwarded to _unroll()

    Raises ProgramException for non-Nonlabel_Do_Stmt loops and
    UserException for an unrecognized factor.
    """
    for target_stmt in targets:
        if not isinstance(target_stmt, Do):
            Logger.warn('Target statment is not Do type: %s' % target_stmt.__class__)
            continue
        # collect loop control
        target_f2003 = target_stmt.f2003
        if isinstance(target_f2003, Nonlabel_Do_Stmt):
            loop_control = target_f2003.items[1]
            loop_var = loop_control.items[0].string.lower()
            start_idx = loop_control.items[1][0]
            end_idx = loop_control.items[1][1]
            # NOTE(review): len==3 -> default step of 1 looks inverted for a
            # [start, end, step] triplet, but loop_merge uses the same
            # pattern — confirm fparser's item layout before changing.
            if len(loop_control.items[1]) == 3:
                step = Int_Literal_Constant(str(1))
            else:
                step = loop_control.items[1][2]
        else:
            # BUGFIX: was `f2003obj.__class__` — `f2003obj` is undefined here
            # (NameError); report the actual unsupported node instead.
            raise ProgramException('Not supported type: %s' % target_f2003.__class__)
        # collect loop controls through static analysis
        start_num = target_stmt.get_param(start_idx)
        end_num = target_stmt.get_param(end_idx)
        step_num = target_stmt.get_param(step)
        try:
            loop_indices = range(start_num, end_num + 1, step_num)
        except Exception:
            # bounds not statically known
            loop_indices = None
        # TODO: modify analysis if required
        lines = []
        if factor == 'full':
            if loop_indices is not None:
                lines = _unroll(target_stmt.content, loop_var, len(loop_indices), method, start_index=start_num)
            else:
                Logger.warn('Loopcontrol is not collected')
        # save in tree
        elif factor.isdigit():
            factor_num = int(factor)
            if loop_indices is not None and len(loop_indices) == factor_num:
                # trip count equals the factor: unroll completely
                lines = _unroll(target_stmt.content, loop_var, factor_num, method, start_index=start_num)
            else:
                # main loop: replace end and step so each iteration covers
                # `factor` original iterations
                newstep = '%s*%s' % (step.tofortran(), factor)
                newend = '%s-%s' % (end_idx.tofortran(), newstep)
                lines.append(target_stmt.tooc(do_end=newend, do_step=newstep))
                lines.extend(_unroll(target_stmt.content, loop_var, factor_num, method))
                lines.append(target_stmt.content[-1].tooc())
                # remainder loop: starts where the main loop left off
                newstart = loop_var
                lines.append(target_stmt.tooc(do_start=newstart, remove_label=True))
                lines.extend(_unroll(target_stmt.content, loop_var, 1, method))
                lines.append(target_stmt.content[-1].tooc(remove_label=True))
        else:
            raise UserException('Unknown unroll factor: %s' % factor)
        if lines:
            parsed = parse('\n'.join(lines), analyze=False)
            if len(parsed.content) > 0:
                for stmt, depth in walk(parsed, -1):
                    stmt.parse_f2003()
                insert_content(target_stmt, parsed.content)
def loop_unroll(targets, factor, method):
    """Unroll each target Do loop by `factor` ('full' or a digit string)
    using `method`; partial unrolls emit a strided main loop followed by a
    remainder loop. Raises ProgramException for unsupported Do statements
    and UserException for an unknown factor."""
    for target_stmt in targets:
        if not isinstance(target_stmt, Do):
            Logger.warn("Target statment is not Do type: %s" % target_stmt.__class__)
            continue
        # collect loop control
        target_f2003 = target_stmt.f2003
        if isinstance(target_f2003, Nonlabel_Do_Stmt):
            loop_control = target_f2003.items[1]
            loop_var = loop_control.items[0].string.lower()
            start_idx = loop_control.items[1][0]
            end_idx = loop_control.items[1][1]
            # NOTE(review): the len==3 -> step=1 default looks inverted for a
            # [start, end, step] triplet; same pattern exists in loop_merge —
            # verify fparser's item layout before touching it.
            if len(loop_control.items[1]) == 3:
                step = Int_Literal_Constant(str(1))
            else:
                step = loop_control.items[1][2]
        else:
            # BUGFIX: was `f2003obj.__class__`, an undefined name (NameError);
            # report the actual unsupported f2003 node.
            raise ProgramException("Not supported type: %s" % target_f2003.__class__)
        # collect loop controls through static analysis
        start_num = target_stmt.get_param(start_idx)
        end_num = target_stmt.get_param(end_idx)
        step_num = target_stmt.get_param(step)
        try:
            loop_indices = range(start_num, end_num + 1, step_num)
        except Exception:
            # loop bounds could not be evaluated statically
            loop_indices = None
        # TODO: modify analysis if required
        lines = []
        if factor == "full":
            if loop_indices is not None:
                lines = _unroll(target_stmt.content, loop_var, len(loop_indices), method, start_index=start_num)
            else:
                Logger.warn("Loopcontrol is not collected")
        # save in tree
        elif factor.isdigit():
            factor_num = int(factor)
            if loop_indices is not None and len(loop_indices) == factor_num:
                # trip count equals the factor: fully unroll
                lines = _unroll(target_stmt.content, loop_var, factor_num, method, start_index=start_num)
            else:
                # main loop covers `factor` iterations per pass
                newstep = "%s*%s" % (step.tofortran(), factor)
                newend = "%s-%s" % (end_idx.tofortran(), newstep)
                lines.append(target_stmt.tooc(do_end=newend, do_step=newstep))
                lines.extend(_unroll(target_stmt.content, loop_var, factor_num, method))
                lines.append(target_stmt.content[-1].tooc())
                # remainder loop picks up where the main loop stopped
                newstart = loop_var
                lines.append(target_stmt.tooc(do_start=newstart, remove_label=True))
                lines.extend(_unroll(target_stmt.content, loop_var, 1, method))
                lines.append(target_stmt.content[-1].tooc(remove_label=True))
        else:
            raise UserException("Unknown unroll factor: %s" % factor)
        if lines:
            parsed = parse("\n".join(lines), analyze=False)
            if len(parsed.content) > 0:
                for stmt, depth in walk(parsed, -1):
                    stmt.parse_f2003()
                insert_content(target_stmt, parsed.content)
Logger.info('Completed.', stdout=True) # starts HERE if __name__ == "__main__": try: print '' State.operation['begin'] = time() if Config.misc['timeout']: with Timeout(seconds=Config.misc['timeout']): main() else: main() except UserException as e: print 'ERROR: %s' % str(e) Logger.info(e) except ProgramException as e: Logger.critical(e) except TimeoutException as e: Logger.critical(e) except Exception as e: Logger.critical(e) finally: State.operation['end'] = time() generate_output()
def generate_output():
    """Emit the end-of-run summary: input files, case count, top-10 ranking
    and elapsed time to the log/stdout, plus the complete ranking (including
    failed cases) to <outdir>/perf.log."""
    Logger.info('\n******** SUMMARY **********', stdout=True)
    Logger.info('\nINPUT FILES:', stdout=True)
    # list source files
    for srcfile in State.inputfile:
        Logger.info(TAB + srcfile.tree.reader.id, stdout=True)
        if Config.debug['enabled']:
            #for directline in srcfile.directlines:
            # Logger.info(TAB*2+directline, stdout=True)
            pass
    Logger.info('TOTAL %d cases' % State.cases['size'], stdout=True)
    # ranking
    topN = min(10, State.cases['size'])
    Logger.info('\nRANKING - top %d:' % topN, stdout=True)
    mgr = State.cases['mgr']
    if mgr:
        if mgr.refcase.result == Case.VERIFIED:
            # average of the reference case's measured ranking variable
            refperfvals = [ float(val) for val in mgr.refcase.measured[mgr.rank_var] ]
            refperfval = sum(refperfvals) / len(refperfvals)
            Logger.info('\nReference performance: %e' % refperfval, stdout=True)
        else:
            Logger.info('\nReference performance: not available', stdout=True)
        Logger.info('\nranking\tcase-number\tcase-order\tperformance', stdout=True)
        for i, rank in enumerate(mgr.ranking[:topN]):
            Logger.info('%d\t\t%d\t\t%d\t\t%e' % ((i + 1, ) + rank), stdout=True)
    # summarize operation
    begin = datetime.datetime.fromtimestamp(
        State.operation['begin']).strftime('%Y-%m-%d %H:%M:%S')
    diffsecs = State.operation['end'] - State.operation['begin']
    Logger.info('\nELAPSED TIME:', stdout=True)
    Logger.info(TAB + '%s from %s' % (str(datetime.timedelta(seconds=diffsecs)), begin), stdout=True)
    # how much quality has improved(compared to reference???)
    # what algorithm is used(with parameters)
    # what are the common features of the quality cases
    # write ranking into a file
    with open(Config.path['outdir'] + '/perf.log', 'wb') as f:
        mgr = State.cases['mgr']
        if mgr:
            if mgr.refcase.result == Case.VERIFIED:
                refperfvals = [ float(val) for val in mgr.refcase.measured[mgr.rank_var] ]
                refperfval = sum(refperfvals) / len(refperfvals)
                f.write('Reference performance: %e\n' % refperfval)
            else:
                f.write('Reference performance: not available\n')
            f.write('\nranking\tcase-number\tcase-order\tperformance\n')
            # the file gets the full ranking, not just the top-10
            for i, rank in enumerate(mgr.ranking):
                f.write('%d\t\t%d\t\t%d\t\t%e\n' % ((i + 1, ) + rank))
            # failed cases are recorded with ranking -1
            for i, failed in enumerate(mgr.failed):
                f.write('%d\t\t%d\t\t%d\t\t%e\n' % ((-1, ) + failed))
    Logger.info('', stdout=True)