Example #1
def loop_split(stmts, add_stmt, before=True):

    for stmt in stmts:
        parent = stmt.parent

        if not isinstance(parent, Do):
            Logger.warn('Parent of statement is not Do type: %s' %
                        parent.__class__)
            continue

        doblk1 = []
        doblk2 = []

        #if add_stmt: doblk1.append(add_stmt[0])
        doblk1.append(parent.tooc())

        if add_stmt: doblk2.append(add_stmt[0])
        doblk2.append(parent.tooc(remove_label=True))

        enddo_stmt = parent.content[-1]

        doblk = doblk1
        remove_label = False
        for childstmt, depth in walk(parent, -1):
            if childstmt not in [parent, enddo_stmt]:
                if not before:
                    doblk.append(childstmt.tooc(remove_label=remove_label))
                if childstmt == stmt:
                    doblk = doblk2
                    remove_label = True
                if before:
                    doblk.append(childstmt.tooc(remove_label=remove_label))

        doblk1.append(enddo_stmt.tooc())
        doblk2.append(enddo_stmt.tooc(remove_label=True))

        if doblk1:
            parsed = parse('\n'.join(doblk1),
                           analyze=False,
                           ignore_comments=False)
            if len(parsed.content) > 0:
                parsed.content[0].parent = parent.parent
                for stmt, depth in walk(parsed, -1):
                    stmt.parse_f2003()
                insert_content(parent, parsed.content, remove_olditem=False)

        if doblk2:
            parsed = parse('\n'.join(doblk2),
                           analyze=False,
                           ignore_comments=False)
            if len(parsed.content) > 0:
                parsed.content[0].parent = parent.parent
                for stmt, depth in walk(parsed, -1):
                    stmt.parse_f2003()
                insert_content(parent, parsed.content, remove_olditem=True)
Example #2
def loop_split(stmts, add_stmt, before=True):

    for stmt in stmts:
        parent = stmt.parent

        if not isinstance(parent, Do):
            Logger.warn('Parent of statement is not Do type: %s'%parent.__class__)
            continue

        doblk1 = []
        doblk2 = []

        #if add_stmt: doblk1.append(add_stmt[0])
        doblk1.append(parent.tooc())

        if add_stmt: doblk2.append(add_stmt[0])
        doblk2.append(parent.tooc(remove_label=True))

        enddo_stmt = parent.content[-1]

        doblk = doblk1
        remove_label = False
        for childstmt, depth in walk(parent, -1):
            if childstmt not in [ parent, enddo_stmt]:
                if not before:
                    doblk.append(childstmt.tooc(remove_label=remove_label))
                if childstmt==stmt:
                    doblk = doblk2
                    remove_label = True
                if before:
                    doblk.append(childstmt.tooc(remove_label=remove_label))
            
        doblk1.append(enddo_stmt.tooc())
        doblk2.append(enddo_stmt.tooc(remove_label=True))

        if doblk1:
            parsed = parse('\n'.join(doblk1), analyze=False, ignore_comments=False)
            if len(parsed.content)>0:
                parsed.content[0].parent = parent.parent
                for stmt, depth in walk(parsed, -1): stmt.parse_f2003()
                insert_content(parent, parsed.content, remove_olditem=False)

        if doblk2:
            parsed = parse('\n'.join(doblk2), analyze=False, ignore_comments=False)
            if len(parsed.content)>0:
                parsed.content[0].parent = parent.parent
                for stmt, depth in walk(parsed, -1): stmt.parse_f2003()
                insert_content(parent, parsed.content, remove_olditem=True)
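Both versions of loop_split above follow the same round trip: render the selected Do block back to source text with tooc(), re-parse that text with parse(..., analyze=False, ignore_comments=False), call parse_f2003() on each new node, and splice the result back in with insert_content(). The sketch below isolates just the re-parse step; the Fortran fragment and the printed output are illustrative assumptions, while the parse/walk calls mirror the ones used in these examples.

# Minimal sketch of the re-parse step, assuming the KGen-bundled fparser is
# importable as `from api import parse, walk` as in the examples above.
# The Fortran fragment below is an illustrative assumption.
from api import parse, walk

doblk = [
    'do i = 1, n',
    '  a(i) = a(i) + 1',
    'end do',
]

parsed = parse('\n'.join(doblk), analyze=False, ignore_comments=False)
for stmt, depth in walk(parsed, -1):
    # parse_f2003() is the KGen extension that the examples call after
    # re-parsing; guard it so this sketch also runs on a stock fparser.
    if hasattr(stmt, 'parse_f2003'):
        stmt.parse_f2003()
    print('%d %s' % (depth, stmt.__class__.__name__))
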
Example #3
def promote(inputfile, target_stmt, names, dimensions, targets, allocate, span):

    for target in targets:
        for promote_pair in target.split(','):
            label, idxname = promote_pair.split(':')
            promote_stmt = inputfile.get_stmt(label)
            if promote_stmt:
                lines = []
                in_exepart = False
                for stmt, depth in walk(promote_stmt[0], -1):
                    if isinstance(stmt, TypeDeclarationStatement):
                        org_attrspec = stmt.attrspec
                        if any( [ name in stmt.entity_decls for name in names ] ):
                            entity_decls = []
                            name_decls = [] 
                            attrspec = stmt.attrspec
                            for entity in stmt.entity_decls:
                                if entity in names:
                                    name_decls.append(entity)
                                else:
                                    entity_decls.append(entity)
                            if len(stmt.entity_decls)>0:
                                stmt.entity_decls = entity_decls 
                                lines.append(stmt.tooc())
                            if allocate:
                                if 'allocatable' not in stmt.attrspec:
                                    stmt.attrspec.append('allocatable')
                                stmt.entity_decls = [ name_decl+dimensions[0] for name_decl in name_decls ]
                            else:
                                stmt.entity_decls = [ name_decl+allocate[0] for name_decl in name_decls ]
                            if len(stmt.entity_decls)>0:
                                lines.append(stmt.tooc())
                                stmt.entity_decls = entity_decls
                        else:
                            if len(stmt.entity_decls)>0:
                                lines.append(stmt.tooc())
                    elif not in_exepart and stmt.__class__ in execution_part:
                        renames = []
                        for name in names:
                            for dim in dimensions:
                                if allocate:
                                    lines.append('allocate(%s)'%(name+allocate[0]))
                                renames.append([name, name+idxname])
                        lines.append(stmt.tooc(name_rename=renames))
                        in_exepart = True
                    elif in_exepart:
                        renames = []
                        for name in names:
                            for dim in dimensions:
                                renames.append([name, name+idxname])
                        lines.append(stmt.tooc(name_rename=renames))
                    else:
                        lines.append(stmt.tooc())

                try:
                    parsed = parse('\n'.join(lines), analyze=False, ignore_comments=False)
                    if len(parsed.content)>0:
                        for stmt, depth in walk(parsed, -1): stmt.parse_f2003()
                        insert_content(promote_stmt[0], parsed.content, remove_olditem=True)                       
                except: pass
Example #4
def loop_interchange(outer_stmts, inner_stmts):

    for outer_stmt in outer_stmts:

        if not isinstance(outer_stmt, Do):
            Logger.warn('Outer statement is not Do type: %s'%outer_stmt.__class__)
            continue

        for inner_stmt in inner_stmts:
            if not isinstance(inner_stmt, Do):
                Logger.warn('Inner statement is not Do type: %s'%inner_stmt.__class__)
                continue

            lines = []
            for stmt, depth in walk(outer_stmt, -1):
                if stmt is outer_stmt:
                    lines.append(inner_stmt.tooc())
                elif stmt is inner_stmt:
                    lines.append(outer_stmt.tooc())
                elif stmt is inner_stmt.content[-1]:
                    lines.append(outer_stmt.content[-1].tooc())
                elif stmt is outer_stmt.content[-1]:
                    lines.append(inner_stmt.content[-1].tooc())
                else:
                    lines.append(stmt.tooc())

            if lines:
                parsed = parse('\n'.join(lines), analyze=False)
                if len(parsed.content)>0:
                    parsed.content[0].parent = outer_stmt.parent
                    for stmt, depth in walk(parsed, -1): stmt.parse_f2003() 
                    insert_content(outer_stmt, parsed.content)
Example #5
def loop_interchange(outer_stmts, inner_stmts):

    for outer_stmt in outer_stmts:

        if not isinstance(outer_stmt, Do):
            Logger.warn('Outer statement is not Do type: %s' %
                        outer_stmt.__class__)
            continue

        for inner_stmt in inner_stmts:
            if not isinstance(inner_stmt, Do):
                Logger.warn('Inner statement is not Do type: %s' %
                            inner_stmt.__class__)
                continue

            lines = []
            for stmt, depth in walk(outer_stmt, -1):
                if stmt is outer_stmt:
                    lines.append(inner_stmt.tooc())
                elif stmt is inner_stmt:
                    lines.append(outer_stmt.tooc())
                elif stmt is inner_stmt.content[-1]:
                    lines.append(outer_stmt.content[-1].tooc())
                elif stmt is outer_stmt.content[-1]:
                    lines.append(inner_stmt.content[-1].tooc())
                else:
                    lines.append(stmt.tooc())

            if lines:
                parsed = parse('\n'.join(lines), analyze=False)
                if len(parsed.content) > 0:
                    parsed.content[0].parent = outer_stmt.parent
                    for stmt, depth in walk(parsed, -1):
                        stmt.parse_f2003()
                    insert_content(outer_stmt, parsed.content)
Example #6
    def subroutine_wrapper(self):
        code = self.subroutine_wrapper_code()
        from api import parse
        block = parse(code) # XXX: set include_dirs
        while len(block.content)==1:
            block = block.content[0]
        return block
Example #7
    def subroutine_wrapper(self):
        code = self.subroutine_wrapper_code()
        from api import parse
        block = parse(code)  # XXX: set include_dirs
        while len(block.content) == 1:
            block = block.content[0]
        return block
Example #8
def remove_stmt(inputfile, target_stmt, targets, span):
    for target in targets:
        if target:
            parsed = parse('!'+str(target), analyze=False, ignore_comments=False)
            if len(parsed.content)>0:
                for stmt, depth in walk(parsed, 1):
                    stmt.parse_f2003()
                insert_content(target, parsed.content, remove_olditem=True)
Example #9
def _add_numbers():

    a = request.args.get('a', 0, type=str)
    #b = request.args.get('b', 0, type=int)
    quote, image = api.parse(a)
    app.logger.info('================')
    app.logger.info(a)
    a = "https://cdn0.vox-cdn.com/thumbor/xgSQuztumB9vVqm_YKP12gPNHyU=/800x0/filters:no_upscale()/cdn0.vox-cdn.com/uploads/chorus_asset/file/2939696/countries_that_are_south_sudan.0.png"

    return jsonify(result=quote, linke=image)
Example #10
def name_change(targets, switch, rename):

    for target_stmt in targets:
        list_switch = [ (pair.split(':')[0].strip(),  pair.split(':')[1].strip()) for pair in switch  if pair]
        list_rename = [ (pair.split(':')[0].strip(),  pair.split(':')[1].strip()) for pair in rename  if pair]
        lines = target_stmt.tooc(name_switch=list_switch, name_rename=list_rename)

        if lines:
            parsed = parse(lines, analyze=False)
            if len(parsed.content)>0:
                parsed.content[0].parent = target_stmt.parent
                for stmt, depth in walk(parsed, -1): stmt.parse_f2003()
                insert_content(target_stmt, parsed.content, remove_olditem=True)
Example #11
def insert_stmt(inputfile, target_stmt, label, stmt_line, span):
    new_target_stmt = None
    for stmt, depth in walk(inputfile.tree, -1):
        if stmt.item.span==span:
            new_target_stmt = stmt
            break

    if stmt_line:
        parsed = parse(stmt_line[0], analyze=False, ignore_comments=False)
        if len(parsed.content)>0:
            for stmt, depth in walk(parsed, 1):
                if isinstance(stmt, Comment):
                    stmt.label = int(label[0])
                else:
                    stmt.item.label = int(label[0])
                stmt.parse_f2003()
            insert_content(new_target_stmt, parsed.content, remove_olditem=True)
Example #12
def name_change(targets, switch, rename):

    for target_stmt in targets:
        list_switch = [(pair.split(':')[0].strip(), pair.split(':')[1].strip())
                       for pair in switch if pair]
        list_rename = [(pair.split(':')[0].strip(), pair.split(':')[1].strip())
                       for pair in rename if pair]
        lines = target_stmt.tooc(name_switch=list_switch,
                                 name_rename=list_rename)

        if lines:
            parsed = parse(lines, analyze=False)
            if len(parsed.content) > 0:
                parsed.content[0].parent = target_stmt.parent
                for stmt, depth in walk(parsed, -1):
                    stmt.parse_f2003()
                insert_content(target_stmt,
                               parsed.content,
                               remove_olditem=True)
Example #13
def directive(inputfile, target_stmt, label, sentinel, directive, span):

    line = ''
    new_target_stmt = None
    for stmt, depth in walk(inputfile.tree, -1):
        if stmt.item.span==span:
            new_target_stmt = stmt
            line = '!%s$ %s'%(SrcFile.applymap(sentinel[0]), SrcFile.applymap(directive[0]))
            break

    if line:
        parsed = parse(line, analyze=False, ignore_comments=False)
        if len(parsed.content)>0:
            for stmt, depth in walk(parsed, 1):
                if isinstance(stmt, Comment):
                    stmt.label = int(label[0])
                else:
                    stmt.item.label = int(label[0])
                stmt.parse_f2003()

            insert_content(new_target_stmt, parsed.content, remove_olditem=True)
Example #14
    def reset_parsing(self):
        # fparse
        readerid = self.tree.reader.id
        self.tree = parse('\n'.join(self.prep), ignore_comments=False, analyze=True, isfree=True, isstrict=False, \
            include_dirs=None, source_only=None )
        self.tree.reader.id = readerid

        # f2003 parse
        lineno = 0
        linediff = 0
        for stmt, depth in walk(self.tree, -1):
            if isinstance(stmt, Comment) and stmt.item.comment.startswith(
                    '!__OPENCASE_COMMENT#'):
                comment_split = stmt.item.comment.split(' ')
                lineno = int(comment_split[1])
                stmt.item.span = (0, 0)
            else:
                if lineno > 0:
                    linediff = stmt.item.span[0] - lineno
                    lineno = 0
                stmt.item.span = (stmt.item.span[0] - linediff,
                                  stmt.item.span[1] - linediff)
            stmt.parse_f2003()
Example #15
def openmp(inputfile, target_stmt, sentinel, directive, clauses, span):

    line = ''
    new_target_stmt = None
    for stmt, depth in walk(inputfile.tree, -1):
        if stmt.item.span==span:
            new_target_stmt = stmt
            if clauses:
                mapped_clauses = SrcFile.applymap(clauses[0])                
            else:
                mapped_clauses = ''
            line = '%s %s %s'%(SrcFile.applymap(sentinel[0]), SrcFile.applymap(directive[0]), mapped_clauses)
            break

    if line:
        parsed = parse(line, analyze=False, ignore_comments=False)
        if len(parsed.content)>0:
            #parsed.content[0].parent = target_stmt.parent
            #import pdb; pdb.set_trace()
            for stmt, depth in walk(parsed, -1): stmt.parse_f2003()
            insert_content(new_target_stmt, parsed.content, remove_olditem=True)
Example #16
def directive(inputfile, target_stmt, label, sentinel, directive, span):

    line = ''
    new_target_stmt = None
    for stmt, depth in walk(inputfile.tree, -1):
        if stmt.item.span == span:
            new_target_stmt = stmt
            line = '!%s$ %s' % (SrcFile.applymap(
                sentinel[0]), SrcFile.applymap(directive[0]))
            break

    if line:
        parsed = parse(line, analyze=False, ignore_comments=False)
        if len(parsed.content) > 0:
            for stmt, depth in walk(parsed, 1):
                if isinstance(stmt, Comment):
                    stmt.label = int(label[0])
                else:
                    stmt.item.label = int(label[0])
                stmt.parse_f2003()

            insert_content(new_target_stmt,
                           parsed.content,
                           remove_olditem=True)
Example #17
def main():
    print('aa')
    logger.info('Start application')
    parser = create_argument_parser()
    args = parser.parse_args()
    if args:
        logger.info('command_line')
        if args.urls is None:
            urls = config.SITE_URLS
        else:
            urls = args.urls
            print(type(args.urls))
            print(args.urls)
        
        if args.not_older is None:
            not_older = config.DAYS_TO_COMPARE
        else:
            not_older = args.not_older
    logger.debug('Using urls to parse - {0}'.format(' '.join(urls)))
    logger.debug('Application will collect articles not older than {0} days'.format(not_older))
    logger.info('Start parsing of articles')
    print(api.parse(urls, not_older))

    
Example #18
    def setUp(self):
        self.xmlDoc = api.parse("isodomain.xml")
        self.domainAry = self.xmlDoc.get_storage_domain()
        pass
Example #19
def check_mode():
    from kgen_utils import Config, exec_cmd
    from utils import module_file_extensions
    from api import parse, walk
    from statements import Comment
    from kgen_search import f2003_search_unknowns, SearchException
    import logging

    logger = logging.getLogger('kgen')  # KGEN addition
    logger.setLevel(logging.WARNING)

    files = []

    # collect source files
    for path in Config.check_mode:
        if os.path.basename(path).startswith('.'): continue

        if os.path.isdir(path):
            for root, dirnames, filenames in os.walk(os.path.abspath(path)):
                for filename in filenames:
                    if os.path.basename(filename).startswith('.'): continue
                    fname, fext = os.path.splitext(filename)
                    if len(fext) > 1 and fext.lower(
                    ) in module_file_extensions:
                        files.append(os.path.join(root, filename))
        elif os.path.isfile(path):
            if os.path.isfile(path):
                files.append(os.path.abspath(path))
        else:
            raise UserException('%s is not a directory nor a file' % path)

    # TODO: support #include cpp directive
    # parse source files
    for n, file in enumerate(files):
        print 'Reading(%d/%d): ' % (n + 1, len(files)), file

        #        fsrc  = open(file, 'rb')

        # prepare include paths and macro definitions
        abspath = os.path.abspath(file)
        path_src = []
        macros_src = []
        if Config.include['file'].has_key(abspath):
            path_src = Config.include['file'][abspath]['path'] + [
                os.path.dirname(abspath)
            ]
            for k, v in Config.include['file'][abspath]['macro'].iteritems():
                if v:
                    macros_src.append('-D%s=%s' % (k, v))
                else:
                    macros_src.append('-D%s' % k)
        includes = '-I' + ' -I'.join(Config.include['path'] + path_src)
        macros_common = []
        for k, v in Config.include['macro'].iteritems():
            if v:
                macros_common.append('-D%s=%s' % (k, v))
            else:
                macros_common.append('-D%s' % k)
        macros = ' '.join(macros_common + macros_src)

        # execute preprocessing
        prep = Config.bin['pp']
        if prep.endswith('fpp'): flags = Config.bin['fpp_flags']
        elif prep.endswith('cpp'): flags = Config.bin['cpp_flags']
        else: raise UserException('Preprocessor is not either fpp or cpp')

        output = exec_cmd('%s %s %s %s %s' %
                          (prep, flags, includes, macros, file))

        # convert the preprocessed for fparser
        prep = map(lambda l: '!KGEN' + l if l.startswith('#') else l,
                   output.split('\n'))

        # fparse
        tree = parse('\n'.join(prep), ignore_comments=False, analyze=False, isfree=True, isstrict=False, \
            include_dirs=None, source_only=None )

        # parse f2003
        Config.search['promote_exception'] = True

        lineno = 0
        linediff = 0
        for stmt, depth in walk(tree, -1):
            try:
                if isinstance(
                        stmt,
                        Comment) and stmt.item.comment.startswith('!KGEN#'):
                    comment_split = stmt.item.comment.split(' ')
                    lineno = int(comment_split[1])
                    stmt.item.span = (0, 0)
                else:
                    if lineno > 0:
                        linediff = stmt.item.span[0] - lineno
                        lineno = 0
                    stmt.item.span = (stmt.item.span[0] - linediff,
                                      stmt.item.span[1] - linediff)

                stmt.parse_f2003()
                if stmt.f2003.__class__ not in exclude_list:
                    f2003_search_unknowns(stmt,
                                          stmt.f2003,
                                          gentype=KGGenType.KERNEL)
            except (NoMatchError, AttributeError) as e:
                if file not in not_parsed:
                    not_parsed[file] = []
                not_parsed[file].append(stmt)
            except NameError as e:
                errmsg = str(e)
                pos = errmsg.find('search_')
                if len(errmsg) > 7 and pos > 0:
                    clsname = errmsg[pos + 7:-16]
                    #print "NOT SUPPORTED: '%s' Fortran statement is not supported yet"%clsname
                    if file not in not_supported:
                        not_supported[file] = []
                    not_supported[file].append((clsname, stmt.item.span[0]))
            except Exception as e:
                print 'WARNING: Following statement is not correctly parsed'
                print stmt
                print ''

    print ''
    print '********************'
    print '*** CHECK RESULT ***'
    print '********************'
    print ''
    print 'NOTE: KGEN may be able to extract kernel even though not all source code lines are parsed or supported.'
    print ''

    print '*** KGEN Parsing Error(s) ***'
    print ''
    for file, stmts in not_parsed.iteritems():
        print file
        lines = []
        for stmt in stmts:
            if hasattr(stmt, 'item'):
                lines.append('Near line # %d:' % stmt.item.span[0])
                lines.append(stmt.tokgen() + '\n')
            else:
                lines.append(str(stmt) + '\n')
        print '\n'.join(lines), '\n'

    print '*** Not Supported Fortran Statement(s) ***'
    print ''
    for file, clsnames in not_supported.iteritems():
        print file
        lines = []
        for clsname, lineno in clsnames:
            lines.append("'%s' Fortran statment near line # %d" %
                         (clsname, lineno))
        print '\n'.join(lines), '\n'

    if len(not_parsed) == 0 and len(not_supported) == 0:
        print 'Current KGEN version can support all source code lines.'
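The map(lambda l: '!KGEN' + l if l.startswith('#') else l, ...) step above, which also appears in Examples #20, #27, #28, #31 and #34, hides leftover cpp directives from fparser by turning them into '!KGEN'-prefixed comments. A small standalone sketch of that step follows; the sample input lines are assumptions.

# Hedged sketch of the preprocessor-output cleanup used in these examples:
# any residual '#...' cpp line is commented out with a '!KGEN' prefix so
# fparser treats it as an ordinary Fortran comment. Sample lines are made up.
def hide_cpp_lines(lines):
    return ['!KGEN' + l if l.startswith('#') else l for l in lines]

sample = ['# 1 "demo.F90"', 'program demo', 'end program demo']
print('\n'.join(hide_cpp_lines(sample)))
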
Example #20
    def __init__(self, srcpath, preprocess=True):
        import os.path
        from kgen_utils import run_shcmd
        from statements import Comment
        from block_statements import Module, Program

        # set default values
        self.tree = None
        self.srcpath = srcpath
        self.abspath = os.path.abspath(self.srcpath)

        # set source file format
        isfree = True
        isstrict = False
        if self.abspath in Config.source['file'].keys():
            if Config.source['file'][self.abspath].has_key('isfree'):
                isfree = Config.source['file'][self.abspath]['isfree']
            if Config.source['file'][self.abspath].has_key('isstrict'):
                isstrict = Config.source['file'][self.abspath]['isstrict']
        else:
            if Config.source['isstrict']: isstrict = Config.source['isstrict']
            if Config.source['isfree']: isfree = Config.source['isfree']

        # prepare include paths and macro definitions
        path_src = []
        macros_src = []
        if Config.include['file'].has_key(self.abspath):
            path_src = Config.include['file'][self.abspath]['path'] + [
                os.path.dirname(self.abspath)
            ]
            for k, v in Config.include['file'][
                    self.abspath]['macro'].iteritems():
                if v:
                    macros_src.append('-D%s=%s' % (k, v))
                else:
                    macros_src.append('-D%s' % k)
        includes = '-I' + ' -I'.join(Config.include['path'] + path_src)
        macros_common = []
        for k, v in Config.include['macro'].iteritems():
            if v:
                macros_common.append('-D%s=%s' % (k, v))
            else:
                macros_common.append('-D%s' % k)
        macros = ' '.join(macros_common + macros_src)

        # execute preprocessing
        Logger.info('Reading %s' % self.srcpath, stdout=True)

        new_lines = []
        with open(self.abspath, 'r') as f:
            if preprocess:
                pp = Config.bin['pp']
                if pp.endswith('fpp'):
                    if isfree: srcfmt = ' -free'
                    else: srcfmt = ' -fixed'
                    flags = Config.bin['fpp_flags'] + srcfmt
                elif pp.endswith('cpp'):
                    flags = Config.bin['cpp_flags']
                else:
                    raise UserException(
                        'Preprocessor is not either fpp or cpp')

                output, err, retcode = run_shcmd('%s %s %s %s' %
                                                 (pp, flags, includes, macros),
                                                 input=f.read())
                prep = map(lambda l: '!KGEN' + l if l.startswith('#') else l,
                           output.split('\n'))
                new_lines = self.handle_include(prep)
            else:
                new_lines = f.read().split('\n')

        # add include paths
        if Config.include['file'].has_key(
                self.abspath) and Config.include['file'][self.abspath].has_key(
                    'path'):
            include_dirs = Config.include['file'][self.abspath]['path'] + [
                os.path.dirname(self.abspath)
            ]
        else:
            include_dirs = None

        #if self.abspath=='/glade/scratch/youngsun/kgen_system_test/branches/initial/MPAS-Release/src/framework/mpas_derived_types.F':
        #    print '\n'.join(new_lines)
        #    sys.exit()
        #    import pdb ; pdb.set_trace()

        # fparse
        self.tree = parse('\n'.join(new_lines), ignore_comments=False, analyze=True, isfree=isfree, \
            isstrict=isstrict, include_dirs=include_dirs, source_only=None )
        self.tree.prep = new_lines
        self.tree.used4genstate = False

        #if self.abspath=='/glade/scratch/youngsun/kgen_system_test/branches/initial/MPAS-Release/src/framework/mpas_derived_types.F':
        #    print self.tree
        #    sys.exit()

        # parse f2003
        lineno = 0
        linediff = 0
        for stmt, depth in walk(self.tree, -1):
            stmt.parse_f2003()

        # rename reader.id
        self.tree.reader.id = self.abspath

        # collect module information
        for mod_name, mod_stmt in self.tree.a.module.iteritems():
            if not State.modules.has_key(mod_name):
                State.modules[mod_name] = OrderedDict()
                #State.modules[mod_name]['num'] = State.mod_num
                #State.mod_num += 1
                State.modules[mod_name]['stmt'] = mod_stmt
                State.modules[mod_name]['file'] = self
                State.modules[mod_name]['path'] = self.abspath
                #State.modules[mod_name]['extern'] = OrderedDict()
                #State.modules[mod_name]['extern']['names'] = []
                #State.modules[mod_name]['extern']['typedecl_stmt'] = OrderedDict()
                #State.modules[mod_name]['extern']['tkdpat'] = []
                #State.modules[mod_name]['mod_rw_var_depends'] = []
                #State.modules[mod_name]['dtype'] = []

        # collect program unit information
        for item in self.tree.content:
            if item.__class__ not in [Module, Comment, Program]:
                if item.reader.id not in State.program_units.keys():
                    State.program_units[item.reader.id] = []
                State.program_units[item.reader.id].append(item)

        # create a tuple for file dependency
        State.srcfiles[self.abspath] = (self, [], [])
Example #21
def loop_unroll(targets, factor, method):
    for target_stmt in targets:
        if not isinstance(target_stmt, Do):
            Logger.warn("Target statment is not Do type: %s" % target_stmt.__class__)
            continue

        # collect loop control
        target_f2003 = target_stmt.f2003
        if isinstance(target_f2003, Nonlabel_Do_Stmt):
            loop_control = target_f2003.items[1]
            loop_var = loop_control.items[0].string.lower()
            start_idx = loop_control.items[1][0]
            end_idx = loop_control.items[1][1]
            if len(loop_control.items[1]) == 3:
                step = Int_Literal_Constant(str(1))
            else:
                step = loop_control.items[1][2]
        else:
            raise ProgramException("Not supported type: %s" % f2003obj.__class__)

        # collect loop controls through static analysis
        start_num = target_stmt.get_param(start_idx)
        end_num = target_stmt.get_param(end_idx)
        step_num = target_stmt.get_param(step)
        try:
            loop_indices = range(start_num, end_num + 1, step_num)
        except:
            loop_indices = None

        # TODO: modify analysis if required
        lines = []
        if factor == "full":
            if loop_indices is not None:
                lines = _unroll(target_stmt.content, loop_var, len(loop_indices), method, start_index=start_num)
            else:
                Logger.warn("Loopcontrol is not collected")

            # save in tree
        elif factor.isdigit():
            factor_num = int(factor)
            if loop_indices is not None and len(loop_indices) == factor_num:
                lines = _unroll(target_stmt.content, loop_var, factor_num, method, start_index=start_num)
            else:
                # replace end and step
                newstep = "%s*%s" % (step.tofortran(), factor)
                newend = "%s-%s" % (end_idx.tofortran(), newstep)
                lines.append(target_stmt.tooc(do_end=newend, do_step=newstep))
                lines.extend(_unroll(target_stmt.content, loop_var, factor_num, method))
                lines.append(target_stmt.content[-1].tooc())

                # replace start
                newstart = loop_var
                lines.append(target_stmt.tooc(do_start=newstart, remove_label=True))
                lines.extend(_unroll(target_stmt.content, loop_var, 1, method))
                lines.append(target_stmt.content[-1].tooc(remove_label=True))
        else:
            raise UserException("Unknown unroll factor: %s" % factor)

        if lines:
            parsed = parse("\n".join(lines), analyze=False)
            if len(parsed.content) > 0:
                for stmt, depth in walk(parsed, -1):
                    stmt.parse_f2003()
                insert_content(target_stmt, parsed.content)
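When the numeric factor does not match the trip count, the code above splits the loop in two: the main loop keeps the original start but runs with step step*factor up to end - step*factor, and a remainder loop, re-rendered with do_start set to the loop variable, picks up the leftover iterations. The Python sketch below works through the resulting index arithmetic; the concrete start/end/step/factor values are assumptions.

# Hedged sketch of the index split produced by the partial unroll above.
# The concrete bounds are assumptions chosen for illustration.
start, end, step, factor = 1, 10, 1, 4

main_starts = range(start, end - step * factor + 1, step * factor)
covered = [i + k * step for i in main_starts for k in range(factor)]
remainder_start = covered[-1] + step if covered else start
remainder = range(remainder_start, end + 1, step)

print('unrolled loop covers:  %s' % list(covered))    # [1, 2, ..., 8]
print('remainder loop covers: %s' % list(remainder))  # [9, 10]
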
Example #22
def loop_unroll(targets, factor, method):
    for target_stmt in targets:
        if not isinstance(target_stmt, Do):
            Logger.warn('Target statement is not Do type: %s' %
                        target_stmt.__class__)
            continue

        # collect loop control
        target_f2003 = target_stmt.f2003
        if isinstance(target_f2003, Nonlabel_Do_Stmt):
            loop_control = target_f2003.items[1]
            loop_var = loop_control.items[0].string.lower()
            start_idx = loop_control.items[1][0]
            end_idx = loop_control.items[1][1]
            if len(loop_control.items[1]) == 3:
                step = Int_Literal_Constant(str(1))
            else:
                step = loop_control.items[1][2]
        else:
            raise ProgramException('Not supported type: %s' %
                                   target_f2003.__class__)

        # collect loop controls through static analysis
        start_num = target_stmt.get_param(start_idx)
        end_num = target_stmt.get_param(end_idx)
        step_num = target_stmt.get_param(step)
        try:
            loop_indices = range(start_num, end_num + 1, step_num)
        except:
            loop_indices = None

        # TODO: modify analysis if required
        lines = []
        if factor == 'full':
            if loop_indices is not None:
                lines = _unroll(target_stmt.content,
                                loop_var,
                                len(loop_indices),
                                method,
                                start_index=start_num)
            else:
                Logger.warn('Loopcontrol is not collected')

            # save in tree
        elif factor.isdigit():
            factor_num = int(factor)
            if loop_indices is not None and len(loop_indices) == factor_num:
                lines = _unroll(target_stmt.content,
                                loop_var,
                                factor_num,
                                method,
                                start_index=start_num)
            else:
                # replace end and step
                newstep = '%s*%s' % (step.tofortran(), factor)
                newend = '%s-%s' % (end_idx.tofortran(), newstep)
                lines.append(target_stmt.tooc(do_end=newend, do_step=newstep))
                lines.extend(
                    _unroll(target_stmt.content, loop_var, factor_num, method))
                lines.append(target_stmt.content[-1].tooc())

                # replace start
                newstart = loop_var
                lines.append(
                    target_stmt.tooc(do_start=newstart, remove_label=True))
                lines.extend(_unroll(target_stmt.content, loop_var, 1, method))
                lines.append(target_stmt.content[-1].tooc(remove_label=True))
        else:
            raise UserException('Unknown unroll factor: %s' % factor)

        if lines:
            parsed = parse('\n'.join(lines), analyze=False)
            if len(parsed.content) > 0:
                for stmt, depth in walk(parsed, -1):
                    stmt.parse_f2003()
                insert_content(target_stmt, parsed.content)
Example #23
    def setUp(self):
        self.xmlDoc = api.parse("hosts.xml")
        self.hostAry = self.xmlDoc.get_host()
        pass
Example #24
    def _collect_mpi_params(self):
        from api import parse, walk

        if Config.mpi['enabled']:
            # get path of mpif.h
            mpifpath = ''
            if os.path.isabs(Config.mpi['header']):
                if os.path.exists(Config.mpi['header']):
                    mpifpath = Config.mpi['header']
                else:
                    raise UserException('Can not find %s' %
                                        Config.mpi['header'])
            else:
                for p in Config.include['path']:
                    fp = os.path.join(p, Config.mpi['header'])
                    if os.path.exists(fp):
                        mpifpath = fp
                        break
                if not mpifpath:
                    for incpath, incdict in Config.include['file'].items():
                        for p in incdict['path']:
                            fp = os.path.join(p, Config.mpi['header'])
                            if os.path.exists(fp):
                                mpifpath = fp
                                break
                        if mpifpath: break

            # collect required information
            if mpifpath:
                try:
                    with open(mpifpath, 'r') as f:
                        filelines = f.read().split('\n')
                        lines = '\n'.join(
                            handle_include(os.path.dirname(mpifpath),
                                           filelines))
                        #reader = FortranStringReader(lines)
                    tree = parse(lines,
                                 ignore_comments=True,
                                 analyze=False,
                                 isfree=True,
                                 isstrict=False,
                                 include_dirs=None,
                                 source_only=None)
                    for stmt, depth in walk(tree, -1):
                        stmt.parse_f2003()

                    #import pdb; pdb.set_trace()
                    #spec = Specification_Part(reader)
                    bag = {}
                    config_name_mapping = [
                        ('comm', 'MPI_COMM_WORLD'),
                        ('logical', 'MPI_LOGICAL'),
                        ('status_size', 'MPI_STATUS_SIZE'),
                        ('any_source', 'MPI_ANY_SOURCE'),
                        ('source', 'MPI_SOURCE'),
                    ]
                    for config_key, name in config_name_mapping:
                        if not Config.mpi.has_key(
                                config_key) or Config.mpi[config_key] is None:
                            for stmt, depth in walk(tree, -1):
                                bag['key'] = name
                                bag[name] = []
                                if hasattr(stmt, 'f2003'):
                                    traverse(stmt.f2003,
                                             get_MPI_PARAM,
                                             bag,
                                             subnode='content')
                                    if len(bag[name]) > 0:
                                        Config.mpi[config_key] = bag[name][-1]
                                        break

                    for config_key, name in config_name_mapping:
                        if not Config.mpi.has_key(
                                config_key) or Config.mpi[config_key] is None:
                            raise UserException(
                                'Can not find {name} in mpif.h'.format(
                                    name=name))

                except UserException:
                    raise  # Reraise this exception rather than catching it below
                except Exception as e:
                    raise UserException('Error occurred during reading %s.' %
                                        mpifpath)
            else:
                raise UserException(
                    'Can not find mpif.h. Please provide a path to the file')
Example #25
from api import get_access_token, anaphoric, parse_sentences, parse
from parsing import Parser
from lex import lexical
import sys


def compiler(parsed):
    results = []
    for p in parsed:
        results.append(p.conv())

    return "\n".join(results)


if __name__ == '__main__':
    filepath = sys.argv[1]
    with open(filepath, 'r') as f:
        text = f.read()

    access_token = get_access_token()
    data, users = anaphoric(access_token, text)
    string_parsed, expressions = parse_sentences(data)
    r = lexical(parse(access_token, string_parsed))
    parser = Parser(r, expressions, users)
    parsed = parser.parse()
    compiled = compiler(parsed)

    exec(compiled)
Example #26
import sys
from api import parse

argset = sys.argv[1:]
print argset

#fff='./xxx.F'
#fff='./bndfp.F'
#fff='./x.F'
#/home/takao/ecal//lm-7.0betaK001/fp/bndfp.F'
#print fff
#for ffile in argset:
#    print ffile
#
#sys.exit()

for ffile in argset:
    print '@@@@@ ' + ffile + ' @@@@@'
    tree = parse(ffile,
                 isfree=False,
                 isstrict=False,
                 ignore_comments=False,
                 analyze=True)
    #print dir(tree)
    #print tree.content
    print tree.torepr()
    #print tree.torepr(3)
    #tree
    #print tree.item

sys.exit()

#print tree
Example #27
#            else:
#                include_dirs = Config.include['path']
#            filename = match.group(1)[1:-1].strip()
#            path = filename
#            for incl_dir in include_dirs+[os.path.dirname(self.abspath)]:
#                path = os.path.join(incl_dir, filename)
#                if os.path.exists(path):
#                    break
#            if os.path.isfile(path):
#                with open(path, 'r') as f:
#                    included_lines = f.read()
#                    insert_lines.extend(handle_include(included_lines.split('\n')))
#            else:
#                raise UserException('Can not find %s in include paths of %s.'%(filename, self.abspath))
#        else:
#            insert_lines.append(line)
#
#    return insert_lines

new_lines = []
with open(sys.argv[1], 'r') as f:
    output = exec_cmd('cpp %s %s %s' % (flags, includes, macros),
                      input=f.read())
    prep = map(lambda l: '!KGEN' + l if l.startswith('#') else l,
               output.split('\n'))
    #new_lines = handle_include(prep)

tree = parse('\n'.join(prep), ignore_comments=False, analyze=True, isfree=True, \
            isstrict=False, source_only=None )
print tree
Example #28
File: kgen_state.py Project: NCAR/KGen
    def __init__(self, srcpath, preprocess=True):
        import os.path
        from kgen_utils import run_shcmd
        from statements import Comment
        from block_statements import Module, Program

        # set default values
        self.tree = None
        self.srcpath = srcpath
        self.abspath = os.path.abspath(self.srcpath)

        # set source file format
        isfree = True
        isstrict = False
        if self.abspath in Config.source["file"].keys():
            if Config.source["file"][self.abspath].has_key("isfree"):
                isfree = Config.source["file"][self.abspath]["isfree"]
            if Config.source["file"][self.abspath].has_key("isstrict"):
                isstrict = Config.source["file"][self.abspath]["isstrict"]
        else:
            if Config.source["isstrict"]:
                isstrict = Config.source["isstrict"]
            if Config.source["isfree"]:
                isfree = Config.source["isfree"]

        # prepare include paths and macro definitions
        path_src = []
        macros_src = []
        if Config.include["file"].has_key(self.abspath):
            path_src = Config.include["file"][self.abspath]["path"] + [os.path.dirname(self.abspath)]
            for k, v in Config.include["file"][self.abspath]["macro"].iteritems():
                if v:
                    macros_src.append("-D%s=%s" % (k, v))
                else:
                    macros_src.append("-D%s" % k)
        includes = "-I" + " -I".join(Config.include["path"] + path_src)
        macros_common = []
        for k, v in Config.include["macro"].iteritems():
            if v:
                macros_common.append("-D%s=%s" % (k, v))
            else:
                macros_common.append("-D%s" % k)
        macros = " ".join(macros_common + macros_src)

        # execute preprocessing
        Logger.info("Reading %s" % self.srcpath, stdout=True)

        new_lines = []
        with open(self.abspath, "r") as f:
            if preprocess:
                pp = Config.bin["pp"]
                if pp.endswith("fpp"):
                    if isfree:
                        srcfmt = " -free"
                    else:
                        srcfmt = " -fixed"
                    flags = Config.bin["fpp_flags"] + srcfmt
                elif pp.endswith("cpp"):
                    flags = Config.bin["cpp_flags"]
                else:
                    raise UserException("Preprocessor is not either fpp or cpp")

                output, err, retcode = run_shcmd("%s %s %s %s" % (pp, flags, includes, macros), input=f.read())
                prep = map(lambda l: "!KGEN" + l if l.startswith("#") else l, output.split("\n"))
                new_lines = self.handle_include(prep)
            else:
                new_lines = f.read().split("\n")

        # add include paths
        if Config.include["file"].has_key(self.abspath) and Config.include["file"][self.abspath].has_key("path"):
            include_dirs = Config.include["file"][self.abspath]["path"] + [os.path.dirname(self.abspath)]
        else:
            include_dirs = None

        # if self.abspath=='/glade/scratch/youngsun/kgen_system_test/branches/initial/MPAS-Release/src/framework/mpas_derived_types.F':
        #    print '\n'.join(new_lines)
        #    sys.exit()
        #    import pdb ; pdb.set_trace()

        # fparse
        self.tree = parse(
            "\n".join(new_lines),
            ignore_comments=False,
            analyze=True,
            isfree=isfree,
            isstrict=isstrict,
            include_dirs=include_dirs,
            source_only=None,
        )
        self.tree.prep = new_lines
        self.tree.used4genstate = False

        # if self.abspath=='/glade/scratch/youngsun/kgen_system_test/branches/initial/MPAS-Release/src/framework/mpas_derived_types.F':
        #    print self.tree
        #    sys.exit()

        # parse f2003
        lineno = 0
        linediff = 0
        for stmt, depth in walk(self.tree, -1):
            stmt.parse_f2003()

        # rename reader.id
        self.tree.reader.id = self.abspath

        # collect module information
        for mod_name, mod_stmt in self.tree.a.module.iteritems():
            if not State.modules.has_key(mod_name):
                State.modules[mod_name] = OrderedDict()
                # State.modules[mod_name]['num'] = State.mod_num
                # State.mod_num += 1
                State.modules[mod_name]["stmt"] = mod_stmt
                State.modules[mod_name]["file"] = self
                State.modules[mod_name]["path"] = self.abspath
                # State.modules[mod_name]['extern'] = OrderedDict()
                # State.modules[mod_name]['extern']['names'] = []
                # State.modules[mod_name]['extern']['typedecl_stmt'] = OrderedDict()
                # State.modules[mod_name]['extern']['tkdpat'] = []
                # State.modules[mod_name]['mod_rw_var_depends'] = []
                # State.modules[mod_name]['dtype'] = []

        # collect program unit information
        for item in self.tree.content:
            if item.__class__ not in [Module, Comment, Program]:
                if item.reader.id not in State.program_units.keys():
                    State.program_units[item.reader.id] = []
                State.program_units[item.reader.id].append(item)

        # create a tuple for file dependency
        State.srcfiles[self.abspath] = (self, [], [])
Example #29
  output = open(os.path.join(output_dir, name + '.json'), 'w')
  output.write(api_def.serialize_json())
  output.close()

def compare_predicates(p1, p2):
  checker = ASTSimilarityChecker()
  return checker.get_similarity(p1,p2)

if __name__ == '__main__':
#  for i in range(0, 100):
#    api_def = api.parse('/Users/hiranya/Projects/api-desc/sandbox/jaxrs-test/starbucks/starbucks3.json')
#    randomize_api(api_def, 'random' + str(i), '/Users/hiranya/Projects/api-desc/sandbox/jaxrs-test/random')
#  print 'DONE'

  k = 1
  api1 = api.parse('/Users/hiranya/Projects/api-desc/sandbox/jaxrs-test/starbucks/starbucks3.json')
  for i in [90]:
    api2 = api.parse('/Users/hiranya/Projects/api-desc/sandbox/jaxrs-test/random/random' + str(i) + '.json')
    for resource1 in api1.resources:
      for op1 in resource1.operations:
        for resource2 in api2.resources:
          for op2 in resource2.operations:
            print
            for c in op1.get_pre_conditions(api1):
              print c

            print
            for c in op2.get_pre_conditions(api2):
              print c

            print
Example #30
#!/usr/bin/env python
import sys, os
sys.path.insert(0, '../TOOLS/f2py/fparser')
from api import parse
#for dirpath,dirnames,filenames in os.walk('.'):
#    print dirpath,dirnames,filenames
#sys.exit()

srcdir = './FPLOTdir/'
files = os.listdir(srcdir)
print files
src = []
for file in files:
    if (os.path.splitext(file)[1] == '.F'): src.append(srcdir + file)
print src

#./FPLOTdir/fplot.F  ./FPLOTdir/fpsub.F ./FPLOTdir/plbnds.F ./FPLOTdir/pldos.F  ./FPLOTdir/plsub.F'
#fff='./bndfp.F'
#fff='./x.F'
#/home/takao/ecal//lm-7.0betaK001/fp/bndfp.F'
#print fff
#src=[srcdir+'fpsub.F']
for file in src:
    print '--------------', file, '------------------'
    tree = parse(file, isfree=False, isstrict=False, ignore_comments=False)
    print tree.content
#print tree
Example #31
File: kgen_prepost.py Project: NCAR/KGen
def check_mode():
    from kgen_utils import Config, run_shcmd
    from utils import module_file_extensions
    from api import parse, walk
    from statements import Comment
    from kgen_search import f2003_search_unknowns, SearchException
    import logging

    logger = logging.getLogger('kgen') # KGEN addition
    logger.setLevel(logging.WARNING)

    files = []

    # collect source files
    for path in Config.check_mode:
        if os.path.basename(path).startswith('.'): continue

        if os.path.isdir(path):
            for root, dirnames, filenames in os.walk(os.path.abspath(path)):
                for filename in filenames:
                    if os.path.basename(filename).startswith('.'): continue
                    fname, fext = os.path.splitext(filename)
                    if len(fext)>1 and fext.lower() in module_file_extensions:
                        files.append(os.path.join(root, filename))
        elif os.path.isfile(path):
            if os.path.isfile(path):
                files.append(os.path.abspath(path))
        else:
            raise UserException('%s is not a directory nor a file'%path)

    # TODO: support #include cpp directive
    # parse source files
    for n, file in enumerate(files):
        print 'Reading(%d/%d): '%(n+1, len(files)), file

#        fsrc  = open(file, 'rb')

        # prepare include paths and macro definitions
        abspath = os.path.abspath(file)
        path_src = []
        macros_src = []
        if Config.include['file'].has_key(abspath):
            path_src = Config.include['file'][abspath]['path']+[os.path.dirname(abspath)]
            for k, v in Config.include['file'][abspath]['macro'].iteritems():
                if v:
                    macros_src.append('-D%s=%s'%(k,v))
                else:
                    macros_src.append('-D%s'%k)
        includes = '-I'+' -I'.join(Config.include['path']+path_src)
        macros_common = []
        for k, v in Config.include['macro'].iteritems():
            if v:       
                macros_common.append('-D%s=%s'%(k,v))
            else:
                macros_common.append('-D%s'%k)
        macros = ' '.join(macros_common + macros_src)

        # execute preprocessing
        prep = Config.bin['pp']
        if prep.endswith('fpp'): flags = Config.bin['fpp_flags']
        elif prep.endswith('cpp'): flags = Config.bin['cpp_flags']
        else: raise UserException('Preprocessor is not either fpp or cpp')

        output, err, retcode = run_shcmd('%s %s %s %s %s' % (prep, flags, includes, macros, file))

        # convert the preprocessed for fparser
        prep = map(lambda l: '!KGEN'+l if l.startswith('#') else l, output.split('\n'))

        # fparse
        tree = parse('\n'.join(prep), ignore_comments=False, analyze=False, isfree=True, isstrict=False, \
            include_dirs=None, source_only=None )

        # parse f2003
        Config.search['promote_exception'] = True

        lineno = 0
        linediff = 0
        for stmt, depth in walk(tree, -1):
            try:
                if isinstance(stmt, Comment) and stmt.item.comment.startswith('!KGEN#'):
                    comment_split = stmt.item.comment.split(' ')
                    lineno = int(comment_split[1])
                    stmt.item.span = ( 0, 0 )
                else:
                    if lineno>0:
                        linediff = stmt.item.span[0] - lineno
                        lineno = 0
                    stmt.item.span = ( stmt.item.span[0]-linediff, stmt.item.span[1]-linediff )

                stmt.parse_f2003()
                if stmt.f2003.__class__ not in exclude_list:
                    f2003_search_unknowns(stmt, stmt.f2003, gentype=KGGenType.KERNEL) 
            except (NoMatchError, AttributeError) as e:
                if file not in not_parsed:
                    not_parsed[file] = []
                not_parsed[file].append(stmt)
            except NameError as e:
                errmsg = str(e)
                pos = errmsg.find('search_')
                if len(errmsg)>7 and pos>0:
                    clsname = errmsg[pos+7:-16]
                    #print "NOT SUPPORTED: '%s' Fortran statement is not supported yet"%clsname
                    if file not in not_supported:
                        not_supported[file] = []
                    not_supported[file].append((clsname, stmt.item.span[0]))
            except Exception as e:
                print 'WARNING: Following statement is not correctly parsed'
                print stmt
                print ''

    print ''
    print '********************'
    print '*** CHECK RESULT ***'
    print '********************'
    print ''
    print 'NOTE: KGEN may be able to extract kernel even though not all source code lines are parsed or supported.'
    print ''

    print '*** KGEN Parsing Error(s) ***'
    print ''
    for file, stmts in not_parsed.iteritems():
        print file
        lines = []
        for stmt in stmts:
            if hasattr(stmt, 'item'):
                lines.append('Near line # %d:'%stmt.item.span[0])
                lines.append(stmt.tokgen()+'\n')
            else:
                lines.append(str(stmt)+'\n')
        print '\n'.join(lines), '\n'

    print '*** Not Supported Fortran Statement(s) ***'
    print ''
    for file, clsnames in not_supported.iteritems():
        print file
        lines = []
        for clsname, lineno in clsnames:
            lines.append("'%s' Fortran statment near line # %d"%(clsname, lineno))
        print '\n'.join(lines), '\n'

    if len(not_parsed)==0 and len(not_supported)==0:
        print 'Current KGEN version can support all source code lines.'
Example #32
import sys

from api import parse

nargv = len(sys.argv) -1
argset= sys.argv[1:]
print argset

#fff='./xxx.F'
#fff='./bndfp.F'
#fff='./x.F'
#/home/takao/ecal//lm-7.0betaK001/fp/bndfp.F'
#print fff
#for ffile in argset:
#    print ffile
#
#sys.exit()

for ffile in argset:
    print '@@@@@ '+ffile+' @@@@@'
    tree = parse(ffile,isfree=False,isstrict=False,ignore_comments=False,analyze=True)
    #print dir(tree)
    #print tree.content
    print tree.torepr()
    #print tree.torepr(3)
    #tree
    #print tree.item


sys.exit()

#print tree
Example #33
    def load_api_description(self, name):
        path = os.path.join('../samples', name)
        return parse(path)
Example #34
File: kgparse.py Project: kimjs29/KGen
    def __init__(self, srcpath, preprocess=True):

        # set default values
        self.tree = None
        self.srcpath = srcpath
        self.realpath = os.path.realpath(self.srcpath)

        # set source file format
        isfree = None
        isstrict = None
        if self.realpath in Config.source['file'].keys():
            if Config.source['file'][self.realpath].has_key('isfree'):
                isfree = Config.source['file'][self.realpath]['isfree']
            if Config.source['file'][self.realpath].has_key('isstrict'):
                isstrict = Config.source['file'][self.realpath]['isstrict']
        else:
            isstrict = Config.source['isstrict']
            isfree = Config.source['isfree']
        # prepare include paths and macro definitions
        path_src = []
        macros_src = []
        if Config.include['file'].has_key(self.realpath):
            path_src = Config.include['file'][self.realpath]['path'] + [
                os.path.dirname(self.realpath)
            ]
            path_src = [path for path in path_src if len(path) > 0]
            for k, v in Config.include['file'][
                    self.realpath]['macro'].iteritems():
                if v is not None:
                    macros_src.append('-D%s=%s' % (k, v))
                else:
                    macros_src.append('-D%s' % k)

        if os.path.isfile(Config.mpi['header']):
            includes = [
                '-I %s' % incpath
                for incpath in [os.path.dirname(Config.mpi['header'])] +
                Config.include['path'] + path_src
            ]
        else:
            includes = [
                '-I %s' % incpath
                for incpath in Config.include['path'] + path_src
            ]

        macros_common = []
        for k, v in Config.include['macro'].iteritems():
            if v:
                macros_common.append('-D%s=%s' % (k, v))
            else:
                macros_common.append('-D%s' % k)
        macros = ' '.join(macros_common + macros_src)

        # execute preprocessing
        logger.info('Reading %s' % self.srcpath)

        new_lines = []
        with open(self.realpath, 'r') as f:
            if preprocess:
                pp = Config.bin['pp']
                if pp.endswith('fpp'):
                    if isfree is None or isfree: srcfmt = ' -free'
                    else: srcfmt = ' -fixed'
                    flags = Config.bin['fpp_flags'] + srcfmt
                elif pp.endswith('cpp'):
                    flags = Config.bin['cpp_flags']
                else:
                    raise UserException(
                        'Preprocessor is neither fpp nor cpp')

                output, err, retcode = kgutils.run_shcmd(
                    '%s %s %s %s' % (pp, flags, ' '.join(includes), macros),
                    input=f.read())
                prep = map(lambda l: '!KGEN' + l if l.startswith('#') else l,
                           output.split('\n'))
                new_lines = self.handle_include(prep)
            else:
                new_lines = f.read().split('\n')

        # add include paths
        include_dirs = Config.include['path'][:]
        if Config.include['file'].has_key(
                self.realpath) and Config.include['file'][
                    self.realpath].has_key('path'):
            include_dirs.extend(Config.include['file'][self.realpath]['path'])
            include_dirs.append(os.path.dirname(self.realpath))

        # fparse
        self.tree = api.parse('\n'.join(new_lines), ignore_comments=False, analyze=True, isfree=isfree, \
            isstrict=isstrict, include_dirs=include_dirs, source_only=None )
        self.tree.prep = new_lines

        # parse f2003
        lineno = 0
        linediff = 0
        for stmt, depth in api.walk(self.tree, -1):
            stmt.parse_f2003()

        # rename reader.id
        self.tree.reader.id = self.realpath

        # collect module information
        for mod_name, mod_stmt in self.tree.a.module.iteritems():
            if not Config.modules.has_key(mod_name):
                Config.modules[mod_name] = collections.OrderedDict()
                Config.modules[mod_name]['stmt'] = mod_stmt
                Config.modules[mod_name]['file'] = self
                Config.modules[mod_name]['path'] = self.realpath

        # collect program unit information
        for item in self.tree.content:
            if item.__class__ not in [Module, Comment, Program]:
                if item.reader.id not in Config.program_units.keys():
                    Config.program_units[item.reader.id] = []
                Config.program_units[item.reader.id].append(item)

        # create a tuple for file dependency
        Config.srcfiles[self.realpath] = (self, [], [])

        self.process_directive()
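Stripped of the KGen configuration and preprocessing plumbing, the core pattern in the constructor above is: read the source text, hand it to fparser's parse(), then walk the resulting tree and call parse_f2003() on every statement. A minimal sketch of just that pattern, assuming the file has already been preprocessed (the function name and default flags are illustrative, not from KGen):

from api import parse, walk  # fparser's high-level API, as used throughout these examples


def parse_fortran_source(path, isfree=True, isstrict=False):
    # Read the (already preprocessed) Fortran source and build the statement tree.
    with open(path, 'r') as f:
        source = f.read()
    tree = parse(source, ignore_comments=False, analyze=True,
                 isfree=isfree, isstrict=isstrict)
    # Attach Fortran 2003 parse information to every statement in the tree.
    for stmt, depth in walk(tree, -1):
        stmt.parse_f2003()
    return tree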
Example #35
0
    def setUp(self):
        self.xmlDoc = api.parse("hosts.xml")
        self.hostAry = self.xmlDoc.get_host()
Example #36
0
    def __init__(self, srcpath):
        import re
        import os.path
        from oc_utils import exec_cmd

        # set default values
        self.prep = None
        self.tree = None
        self.srcpath = srcpath
        self.filename = os.path.basename(self.srcpath)
        self.abspath = os.path.abspath(self.srcpath)
        self.relpath = os.path.relpath(self.abspath, Config.path['refdir'])
        self.searchtree = []
        self.direct = {}

        # prepare include paths and macro definitions
        path_src = []
        macros_src = ''
        if Config.include['file'].has_key(self.abspath):
            path_src = Config.include['file'][self.abspath]['path']
            macros_src = ' '.join([
                '-D%s=%s' % (k, v) for k, v in Config.include['file'][
                    self.abspath]['macro'].iteritems()
            ])
        includes = '-I' + ' -I'.join(Config.include['path'] + path_src + ['.'])
        macros = ' '.join([
            '-D%s=%s' % (k, v) for k, v in Config.include['macro'].iteritems()
        ]) + ' ' + macros_src

        # execute preprocessing
        Logger.info('Reading %s' % self.srcpath, stdout=True)
        prep = Config.bin['pp']
        if prep.endswith('fpp'): flags = Config.bin['fpp_flags']
        elif prep.endswith('cpp'): flags = Config.bin['cpp_flags']
        else: raise UserException('Preprocessor is neither fpp nor cpp')
        output = exec_cmd('%s %s %s %s %s' %
                          (prep, flags, includes, macros, self.abspath))

        # convert the preprocessed for fparser
        self.prep = map(
            lambda l: '!__OPENCASE_COMMENT' + l if l.startswith('#') else l,
            output.split('\n'))

        # fparse
        self.tree = parse('\n'.join(self.prep), ignore_comments=False, analyze=True, isfree=True, isstrict=False, \
            include_dirs=None, source_only=None )

        # parse f2003
        lineno = 0
        linediff = 0
        pending_directs = []
        for stmt, depth in walk(self.tree, -1):
            if isinstance(stmt, Comment) and stmt.item.comment.startswith(
                    '!__OPENCASE_COMMENT#'):
                comment_split = stmt.item.comment.split(' ')
                lineno = int(comment_split[1])
                stmt.item.span = (0, 0)
            else:
                if lineno > 0:
                    linediff = stmt.item.span[0] - lineno
                    lineno = 0
                stmt.item.span = (stmt.item.span[0] - linediff,
                                  stmt.item.span[1] - linediff)

                if isinstance(stmt, Comment):
                    match = re.match(
                        r'\$opencase\s*(\w+)\s*([\(\{\[\<])(.+)([\)\}\]\>]\s*\*?\+?\d?)',
                        stmt.content, re.I)
                    if match:
                        name = match.group(1).lower()
                        value = match.group(3)
                        if name == 'include':
                            if value:
                                casefile = value.strip()
                                if casefile[0] == '/':
                                    inc_path = os.path.abspath(casefile)
                                else:
                                    inc_path = os.path.join(
                                        os.path.dirname(self.abspath), value)
                                if os.path.exists(inc_path):
                                    finc = open(inc_path, 'r')
                                    inc_directs = re.findall(
                                        r'(\!?)\s*(\w+)\s*([\(\{\[\<])(.+)([\)\}\]\>]\s*\*?\+?\d?)\s*\n',
                                        finc.read(), re.I)
                                    finc.close()
                                    for direct in inc_directs:
                                        if direct[0]: continue
                                        direct_line = ''.join(direct)
                                        direct_name = direct[1].lower()

                                        direct_tree = generate_searchtree(
                                            self._strmap(direct_line))
                                        assert len(
                                            direct_tree
                                        ) == 1, 'Only one element is allowed in direct_tree'
                                        self.searchtree.extend(direct_tree)

                                        if direct_name in global_directs:
                                            if not State.direct.has_key(
                                                    direct_name):
                                                State.direct[direct_name] = []
                                            State.direct[direct_name].append(
                                                (direct_tree[0], stmt,
                                                 stmt.item.span))
                                        elif direct_name in local_directs:
                                            if not self.direct.has_key(
                                                    direct_name):
                                                self.direct[direct_name] = []
                                            self.direct[direct_name].append(
                                                (direct_tree[0], stmt,
                                                 stmt.item.span))
                                else:
                                    raise UserException(
                                        'Cannot find case file: %s' % inc_path)
                        else:
                            direct_line = match.group(0)
                            direct_tree = generate_searchtree(
                                self._strmap(direct_line[10:]))
                            self.searchtree.extend(direct_tree)

                            if name in global_directs:
                                if not State.direct.has_key(name):
                                    State.direct[name] = []
                                State.direct[name].append(
                                    (direct_tree[0], stmt, stmt.item.span))
                            elif name in local_directs:
                                if not self.direct.has_key(name):
                                    self.direct[name] = []
                                self.direct[name].append(
                                    (direct_tree[0], stmt, stmt.item.span))

                            #if match.group(1).lower() in ['refcase']:
                            #    State.direct[match.group(1).lower()] = direct_tree
            stmt.parse_f2003()

        # rename reader.id
        self.tree.reader.id = self.abspath
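The line-number bookkeeping above relies on one trick: the '#'-marker lines emitted by the preprocessor are turned into Fortran comments so that fparser keeps them, and the original line numbers are later read back out of those comments to correct stmt.item.span. A minimal sketch of just that conversion step (the prefix matches the snippet above; wiring it into exec_cmd and parse is shown only as a hypothetical usage comment):

def hide_cpp_markers(preprocessed_text, prefix='!__OPENCASE_COMMENT'):
    # Turn the preprocessor's '# <lineno> "<file>"' marker lines into Fortran
    # comments so that fparser preserves them instead of rejecting them.
    return [prefix + line if line.startswith('#') else line
            for line in preprocessed_text.split('\n')]

# Hypothetical usage with the preprocessor output from the snippet above:
# prep = hide_cpp_markers(output)
# tree = parse('\n'.join(prep), ignore_comments=False, analyze=True, isfree=True, isstrict=False)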
Example #37
0

def compare_predicates(p1, p2):
    checker = ASTSimilarityChecker()
    return checker.get_similarity(p1, p2)


if __name__ == '__main__':
    #  for i in range(0, 100):
    #    api_def = api.parse('/Users/hiranya/Projects/api-desc/sandbox/jaxrs-test/starbucks/starbucks3.json')
    #    randomize_api(api_def, 'random' + str(i), '/Users/hiranya/Projects/api-desc/sandbox/jaxrs-test/random')
    #  print 'DONE'

    k = 1
    api1 = api.parse(
        '/Users/hiranya/Projects/api-desc/sandbox/jaxrs-test/starbucks/starbucks3.json'
    )
    for i in [90]:
        api2 = api.parse(
            '/Users/hiranya/Projects/api-desc/sandbox/jaxrs-test/random/random'
            + str(i) + '.json')
        for resource1 in api1.resources:
            for op1 in resource1.operations:
                for resource2 in api2.resources:
                    for op2 in resource2.operations:
                        print
                        for c in op1.get_pre_conditions(api1):
                            print c

                        print
                        for c in op2.get_pre_conditions(api2):
                            print c
Example #38
0
import sys, os
sys.path.insert(0, '../TOOLS/f2py/fparser')
from api import parse
#for dirpath,dirnames,filenames in os.walk('.'):
#    print dirpath,dirnames,filenames
#sys.exit()

srcdir = './FPLOTdir/'
files = os.listdir(srcdir)
print files
src = []
for file in files:
    if os.path.splitext(file)[1] == '.F': src.append(srcdir + file)
print src

#./FPLOTdir/fplot.F  ./FPLOTdir/fpsub.F ./FPLOTdir/plbnds.F ./FPLOTdir/pldos.F  ./FPLOTdir/plsub.F'
#fff='./bndfp.F'
#fff='./x.F'
#/home/takao/ecal//lm-7.0betaK001/fp/bndfp.F'
#print fff
#src=[srcdir+'fpsub.F']
for file in src:
    print '--------------', file, '------------------'
    tree = parse(file, isfree=False, isstrict=False, ignore_comments=False)
    print tree.content
#print tree




Example #39
0
def test_parse(candidate, expected):
    assert parse(candidate[0], candidate[1]) == expected
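The test above expects (candidate, expected) pairs, presumably supplied through a parametrized fixture. A self-contained sketch of how such a table-driven test can be wired up with pytest; the toy parse() stand-in and the sample cases are purely illustrative and not taken from the project under test:

import pytest


def parse(text, mode):
    # Toy stand-in for the real parser under test, purely for illustration.
    if mode == 'int':
        return int(text)
    return text.strip()


@pytest.mark.parametrize('candidate, expected', [
    (('42', 'int'), 42),
    (('  hello ', 'text'), 'hello'),
])
def test_parse(candidate, expected):
    assert parse(candidate[0], candidate[1]) == expected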