def execute_refcase(): mgr = State.cases['mgr'] refcase = mgr.get_refcase() refcase.execute() if refcase.result != Case.VERIFIED: print 'WARNING: Reference case is not correctly executed' else: refperfvals = [float(val) for val in refcase.measured[mgr.rank_var]] refperfval = sum(refperfvals) / len(refperfvals) Logger.info('\nReference performance: %e' % refperfval, stdout=True)
def execute_refcase(): mgr = State.cases['mgr'] refcase = mgr.get_refcase() refcase.execute() if refcase.result!=Case.VERIFIED: print 'WARNING: Reference case is not correctly executed' else: refperfvals = [ float(val) for val in refcase.measured[mgr.rank_var] ] refperfval = sum(refperfvals)/len(refperfvals) Logger.info('\nReference performance: %e'%refperfval, stdout=True)
def resolve(self, request):
    """Try to resolve a name-resolution *request* against this interface.

    If the requested name matches this interface's name, the request is
    marked RESOLVED and every statement nested under the resolver is
    searched for its own unknowns, which are resolved recursively.
    Anything still unresolved afterwards is deferred to the superclass.
    """
    from oc_state import ResState
    from oc_namesearch import f2003_search_unknowns

    if request is None: return

    Logger.info('%s is being resolved'%request.uname.firstpartname(), name=request.uname, stmt=self)

    # if resolved, return
    if request.state == ResState.RESOLVED:
        Logger.info('%s is already resolved'%request.uname.firstpartname(), name=request.uname, stmt=self)
        return

    if request.uname.firstpartname()==self.name:
        Logger.info('The request is being resolved by an interface', name=request.uname, stmt=self)
        request.res_stmt = self
        request.state = ResState.RESOLVED
        # record that this statement resolved the unknown name
        request.res_stmt.add_geninfo(request.uname)
        #self.check_spec_stmts(request.uname, request.res_stmt)

        # walk every statement inside the resolver; collect its unknowns
        # lazily (only when not yet searched) and resolve each in turn
        for _stmt, _depth in walk(request.res_stmt, -1):
            if not hasattr(_stmt, 'unknowns'):
                f2003_search_unknowns(_stmt, _stmt.f2003)
            for unk, req in _stmt.unknowns.iteritems():
                if req.state != ResState.RESOLVED:
                    _stmt.resolve(req)

    # defer to super
    if request.state != ResState.RESOLVED:
        super(Interface, self).resolve(request)
def get_name_or_defer(stmt, node, resolvers, defer=True):
    """Register *node* as an unknown name on *stmt*, or defer the search.

    A Fortran2003.Name node is recorded in ``stmt.unknowns`` (unless it is
    an intrinsic procedure that should be skipped); any other node type is
    handed back to ``f2003_search_unknowns`` when *defer* is true.
    """
    from oc_utils import OCName, pathname
    from oc_state import ResState

    if node is None:
        return

    if not isinstance(node, Fortran2003.Name):
        if defer:
            f2003_search_unknowns(stmt, node, resolvers)
        return

    lowered = node.string.lower()

    # skip intrinsic procedures, honoring the exception list in both modes
    if lowered in Intrinsic_Procedures:
        skipping = Config.search['skip_intrinsic']
        excepted = is_except(node, stmt)
        if skipping and not excepted:
            return
        if not skipping and excepted:
            return

    ukey = OCName(pathname(stmt, lowered), node=node, stmt=stmt)
    chosen_resolvers = res_default if resolvers is None else resolvers
    stmt.unknowns[ukey] = ResState(ukey, stmt, chosen_resolvers)
    Logger.info('%s is saved as unknown' % lowered, name=ukey, stmt=stmt)
def main():
    """Top-level driver: parse sources, run the reference case, then
    execute generated cases until none remain."""
    parse_srcfiles()
    Logger.info('Source files are parsed.', stdout=True)

    configure_searching()
    Logger.info('Searching is configured.', stdout=True)

    copytree(Config.path['refdir'], Config.path['workdir'])
    execute_refcase()
    Logger.info('Reference case is executed.', stdout=True)

    chdir(Config.path['workdir'])

    # keep producing and running cases until the generator reports done
    while execute_nextcase():
        pass

    Logger.info('Completed.', stdout=True)
def __init__(self, srcpath):
    """Preprocess, parse and scan a Fortran source file.

    The file is run through fpp/cpp (with the include paths and macros
    configured for it), parsed with fparser, and every comment is scanned
    for $opencase directives, which are collected into ``self.searchtree``
    and registered as global (``State.direct``) or local (``self.direct``)
    directives.

    srcpath -- path to the Fortran source file to read
    Raises UserException when the preprocessor is unknown or an included
    case file cannot be found.
    """
    import re
    import os.path
    from oc_utils import exec_cmd

    # set default values
    self.prep = None
    self.tree = None
    self.srcpath = srcpath
    self.filename = os.path.basename(self.srcpath)
    self.abspath = os.path.abspath(self.srcpath)
    self.relpath = os.path.relpath(self.abspath, Config.path['refdir'])
    self.searchtree = []
    self.direct = {}

    # prepare include paths and macro definitions (global + per-file)
    path_src = []
    macros_src = ''
    if Config.include['file'].has_key(self.abspath):
        path_src = Config.include['file'][self.abspath]['path']
        macros_src = ' '.join(['-D%s=%s' % (k, v) for k, v in
            Config.include['file'][self.abspath]['macro'].iteritems()])
    includes = '-I' + ' -I'.join(Config.include['path'] + path_src + ['.'])
    macros = ' '.join(['-D%s=%s' % (k, v) for k, v in
        Config.include['macro'].iteritems()]) + ' ' + macros_src

    # execute preprocessing
    Logger.info('Reading %s' % self.srcpath, stdout=True)
    prep = Config.bin['pp']
    if prep.endswith('fpp'):
        flags = Config.bin['fpp_flags']
    elif prep.endswith('cpp'):
        flags = Config.bin['cpp_flags']
    else:
        raise UserException('Preprocessor is not either fpp or cpp')
    output = exec_cmd('%s %s %s %s %s' % (prep, flags, includes, macros, self.abspath))

    # hide preprocessor line markers ('#...') as Fortran comments so fparser
    # keeps them; they are recognized again below to restore line numbers
    self.prep = ['!__OPENCASE_COMMENT' + l if l.startswith('#') else l
                 for l in output.split('\n')]

    # fparse
    self.tree = parse('\n'.join(self.prep), ignore_comments=False, analyze=True,
                      isfree=True, isstrict=False,
                      include_dirs=None, source_only=None)

    def _register_direct(dname, dtree, dstmt):
        # global directives accumulate in State.direct, local ones in self.direct;
        # unknown directive names are silently ignored (as in the original)
        if dname in global_directs:
            if not State.direct.has_key(dname):
                State.direct[dname] = []
            State.direct[dname].append((dtree, dstmt, dstmt.item.span))
        elif dname in local_directs:
            if not self.direct.has_key(dname):
                self.direct[dname] = []
            self.direct[dname].append((dtree, dstmt, dstmt.item.span))

    # parse f2003: restore original line numbers and collect directives
    lineno = 0
    linediff = 0
    for stmt, depth in walk(self.tree, -1):
        if isinstance(stmt, Comment) and stmt.item.comment.startswith('!__OPENCASE_COMMENT#'):
            # preprocessor line marker: remember the original line number
            comment_split = stmt.item.comment.split(' ')
            lineno = int(comment_split[1])
            stmt.item.span = (0, 0)
        else:
            if lineno > 0:
                # first real statement after a marker: compute the offset
                linediff = stmt.item.span[0] - lineno
                lineno = 0
            stmt.item.span = (stmt.item.span[0] - linediff, stmt.item.span[1] - linediff)

        if isinstance(stmt, Comment):
            match = re.match(
                r'\$opencase\s*(\w+)\s*([\(\{\[\<])(.+)([\)\}\]\>]\s*\*?\+?\d?)',
                stmt.content, re.I)
            if match:
                name = match.group(1).lower()
                value = match.group(3)
                if name == 'include':
                    # the directive body names a case file holding more directives
                    if value:
                        casefile = value.strip()
                        if casefile[0] == '/':
                            inc_path = os.path.abspath(casefile)
                        else:
                            # BUGFIX: join with the stripped filename, not the
                            # raw (possibly whitespace-padded) match text
                            inc_path = os.path.join(
                                os.path.dirname(self.abspath), casefile)
                        if os.path.exists(inc_path):
                            # BUGFIX: close the include file deterministically
                            with open(inc_path, 'r') as finc:
                                inc_directs = re.findall(
                                    r'(\!?)\s*(\w+)\s*([\(\{\[\<])(.+)([\)\}\]\>]\s*\*?\+?\d?)\s*\n',
                                    finc.read(), re.I)
                            for direct in inc_directs:
                                if direct[0]:
                                    # a leading '!' comments the directive out
                                    continue
                                direct_line = ''.join(direct)
                                direct_name = direct[1].lower()
                                direct_tree = generate_searchtree(
                                    self._strmap(direct_line))
                                assert len(direct_tree) == 1, \
                                    'Only one element is allowed in direct_tree'
                                self.searchtree.extend(direct_tree)
                                _register_direct(direct_name, direct_tree[0], stmt)
                        else:
                            # BUGFIX: typo 'caes' -> 'case' in the error message
                            raise UserException(
                                'Can not find case file: %s' % inc_path)
                else:
                    direct_line = match.group(0)
                    # strip the leading '$opencase ' marker (10 characters)
                    direct_tree = generate_searchtree(
                        self._strmap(direct_line[10:]))
                    self.searchtree.extend(direct_tree)
                    _register_direct(name, direct_tree[0], stmt)

        stmt.parse_f2003()

    # make the reader id point at this file's absolute path
    self.tree.reader.id = self.abspath
def generate_output():
    """Print the run summary (inputs, ranking, timing) and write perf.log."""
    Logger.info("\n******** SUMMARY **********", stdout=True)
    Logger.info("\nINPUT FILES:", stdout=True)

    # list source files
    for src in State.inputfile:
        Logger.info(TAB + src.tree.reader.id, stdout=True)
        if Config.debug["enabled"]:
            # per-directive debug listing is currently disabled
            pass

    Logger.info("TOTAL %d cases" % State.cases["size"], stdout=True)

    # show at most the ten best-performing cases
    top_n = min(10, State.cases["size"])
    Logger.info("\nRANKING - top %d:" % top_n, stdout=True)
    case_mgr = State.cases["mgr"]
    if case_mgr:
        if case_mgr.refcase.result == Case.VERIFIED:
            vals = [float(v) for v in case_mgr.refcase.measured[case_mgr.rank_var]]
            Logger.info("\nReference performance: %e" % (sum(vals) / len(vals)), stdout=True)
        else:
            Logger.info("\nReference performance: not available", stdout=True)
        Logger.info("\nranking\tcase-number\tcase-order\tperformance", stdout=True)
        for pos, entry in enumerate(case_mgr.ranking[:top_n]):
            Logger.info("%d\t\t%d\t\t%d\t\t%e" % ((pos + 1,) + entry), stdout=True)

    # summarize operation timing
    started = datetime.datetime.fromtimestamp(State.operation["begin"]).strftime("%Y-%m-%d %H:%M:%S")
    elapsed = State.operation["end"] - State.operation["begin"]
    Logger.info("\nELAPSED TIME:", stdout=True)
    Logger.info(TAB + "%s from %s" % (str(datetime.timedelta(seconds=elapsed)), started), stdout=True)

    # write ranking (and failed cases, marked -1) into a file
    with open(Config.path["outdir"] + "/perf.log", "wb") as f:
        case_mgr = State.cases["mgr"]
        if case_mgr:
            if case_mgr.refcase.result == Case.VERIFIED:
                vals = [float(v) for v in case_mgr.refcase.measured[case_mgr.rank_var]]
                f.write("Reference performance: %e\n" % (sum(vals) / len(vals)))
            else:
                f.write("Reference performance: not available\n")
            f.write("\nranking\tcase-number\tcase-order\tperformance\n")
            for pos, entry in enumerate(case_mgr.ranking):
                f.write("%d\t\t%d\t\t%d\t\t%e\n" % ((pos + 1,) + entry))
            for pos, entry in enumerate(case_mgr.failed):
                f.write("%d\t\t%d\t\t%d\t\t%e\n" % ((-1,) + entry))

    Logger.info("", stdout=True)
Logger.info('Completed.', stdout=True) # starts HERE if __name__ == "__main__": try: print '' State.operation['begin'] = time() if Config.misc['timeout']: with Timeout(seconds=Config.misc['timeout']): main() else: main() except UserException as e: print 'ERROR: %s' % str(e) Logger.info(e) except ProgramException as e: Logger.critical(e) except TimeoutException as e: Logger.critical(e) except Exception as e: Logger.critical(e) finally: State.operation['end'] = time() generate_output()
Logger.info('Completed.', stdout=True) # starts HERE if __name__ == "__main__": try: print '' State.operation['begin'] = time() if Config.misc['timeout']: with Timeout(seconds=Config.misc['timeout']): main() else: main() except UserException as e: print 'ERROR: %s'%str(e) Logger.info(e) except ProgramException as e: Logger.critical(e) except TimeoutException as e: Logger.critical(e) except Exception as e: Logger.critical(e) finally: State.operation['end'] = time() generate_output()
def generate_output():
    """Emit the end-of-run summary to the log and dump the ranking to perf.log."""
    Logger.info('\n******** SUMMARY **********', stdout=True)
    Logger.info('\nINPUT FILES:', stdout=True)

    # list source files
    for entry in State.inputfile:
        Logger.info(TAB + entry.tree.reader.id, stdout=True)
        if Config.debug['enabled']:
            # per-directive debug listing is disabled for now
            pass

    Logger.info('TOTAL %d cases' % State.cases['size'], stdout=True)

    # ranking: report at most the ten best cases
    limit = min(10, State.cases['size'])
    Logger.info('\nRANKING - top %d:' % limit, stdout=True)
    manager = State.cases['mgr']
    if manager:
        if manager.refcase.result == Case.VERIFIED:
            measured = [float(x) for x in manager.refcase.measured[manager.rank_var]]
            mean = sum(measured) / len(measured)
            Logger.info('\nReference performance: %e' % mean, stdout=True)
        else:
            Logger.info('\nReference performance: not available', stdout=True)
        Logger.info('\nranking\tcase-number\tcase-order\tperformance', stdout=True)
        for idx, row in enumerate(manager.ranking[:limit]):
            Logger.info('%d\t\t%d\t\t%d\t\t%e' % ((idx + 1, ) + row), stdout=True)

    # summarize operation timing
    stamp = datetime.datetime.fromtimestamp(
        State.operation['begin']).strftime('%Y-%m-%d %H:%M:%S')
    seconds = State.operation['end'] - State.operation['begin']
    Logger.info('\nELAPSED TIME:', stdout=True)
    Logger.info(TAB + '%s from %s' % (str(datetime.timedelta(seconds=seconds)), stamp),
                stdout=True)

    # write the full ranking (failed cases marked -1) into perf.log
    with open(Config.path['outdir'] + '/perf.log', 'wb') as out:
        manager = State.cases['mgr']
        if manager:
            if manager.refcase.result == Case.VERIFIED:
                measured = [float(x) for x in manager.refcase.measured[manager.rank_var]]
                out.write('Reference performance: %e\n' % (sum(measured) / len(measured)))
            else:
                out.write('Reference performance: not available\n')
            out.write('\nranking\tcase-number\tcase-order\tperformance\n')
            for idx, row in enumerate(manager.ranking):
                out.write('%d\t\t%d\t\t%d\t\t%e\n' % ((idx + 1, ) + row))
            for idx, row in enumerate(manager.failed):
                out.write('%d\t\t%d\t\t%d\t\t%e\n' % ((-1, ) + row))

    Logger.info('', stdout=True)