コード例 #1
1
    def parse_string(self, string):
        '''Populate a new object from a string.

        Parsing is hard, so we're going to call out to the pyparsing
        library here.  I hope you installed it!
        FTR: this is hideous.

        Returns a short summary string ('Director: <name>'); the real
        result is the set of directives stored on self via parse actions.
        '''
        # Imported lazily so the module loads without pyparsing installed.
        # (Names unused by this method were dropped from the import list.)
        from pyparsing import Keyword, Literal, OneOrMore, Word, nestedExpr, nums, printables, quotedString, removeQuotes, replaceWith, restOfLine
        gr_eq = Literal('=')
        # A quoted string with quotes stripped, else everything to end-of-line.
        gr_stripped_string = quotedString.copy().setParseAction( removeQuotes )
        gr_opt_quoted_string = gr_stripped_string | restOfLine
        gr_number = Word(nums)
        # yes/no (any case) normalised to '1'/'0'.
        gr_yn = Keyword('yes', caseless=True).setParseAction(replaceWith('1')) | Keyword('no', caseless=True).setParseAction(replaceWith('0'))

        def _handle_ip(*x):
            # x[2] is (keyword, '=', [addr]); rebuild "  kw = { addr }".
            a,b,c =  x[2]
            return '  %s = { %s }' % (a,c[0])

        def _handle_diraddr(*x):
            # Store the joined address blocks under DIRADDRESSES.
            a,b,c =  x[2]
            self._set(DIRADDRESSES, '  %s' % '\n  '.join(c))
            return

        def np(words, fn = gr_opt_quoted_string, action=None):
            # Build a parser for "<any keyword in words> = <fn>" (caseless).
            p = Keyword(words[0], caseless=True)
            for w in words[1:]:
                p = p | Keyword(w, caseless=True)
            p = p + gr_eq + fn
            p.setParseAction(action)
            return p

        gr_name = np((NAME,), action=lambda x: self._set_name(x[2]))
        gr_address = np((ADDRESS,), action=self._parse_setter(ADDRESS))
        gr_fd_conn = np(PList('fd connect timeout'), gr_number, self._parse_setter(FD_CONNECT_TIMEOUT, True))
        gr_heart = np(PList('heartbeat interval'), gr_number, self._parse_setter(HEARTBEATINTERVAL, True))
        gr_max_con = np(PList('maximum console connections'),
                        gr_number, self._parse_setter(MAXIMUMCONSOLECONNECTIONS, True))
        gr_max_jobs = np(PList('maximum concurrent jobs'), gr_number, action=self._parse_setter(MAXIMUMCONCURRENTJOBS, True))
        gr_pass = np((PASSWORD,), action=self._parse_setter(PASSWORD))
        gr_pid = np(PList('pid directory'), action=self._parse_setter(PIDDIRECTORY))
        gr_query = np(PList('query file'), action=self._parse_setter(QUERYFILE))
        gr_scripts = np(PList('scripts directory'), action=self._parse_setter(SCRIPTS_DIRECTORY))
        gr_sd_conn = np(PList('sd connect timeout'), gr_number, self._parse_setter(SD_CONNECT_TIMEOUT, True))
        gr_source = np(PList('source address'), action=self._parse_setter(SOURCEADDRESS))
        gr_stats = np(PList('statistics retention'), action=self._parse_setter(STATISTICS_RETENTION))
        gr_verid = np((VERID,), action=self._parse_setter(VERID))
        # BUG FIX: the action used to be "lambda x: self._parse_setter(...)",
        # which built a setter on every match but never invoked it, so
        # Messages directives were silently dropped.  Pass the setter itself
        # as the action, matching every other directive above.
        gr_messages = np((MESSAGES,), action=self._parse_setter(MESSAGE_ID, dereference=True))
        gr_work_dir = np(PList('working directory'), action=self._parse_setter(WORKINGDIRECTORY))
        gr_port = np(PList('dir port'), gr_number, self._parse_setter(PORT, True))
        gr_monitor = np((MONITOR,), gr_yn, action=self._parse_setter(MONITOR))

        # This is a complicated one: DirAddresses = { IP = { Addr = ... } }
        da_addr = np(('Addr','Port'), Word(printables), lambda x,y,z: ' '.join(z))
        da_ip = np(('IPv4','IPv6','IP'), nestedExpr('{','}', OneOrMore(da_addr).setParseAction(lambda x,y,z: ' ; '.join(z)))).setParseAction(_handle_ip)
        da_addresses = np(PList('dir addresses'), nestedExpr('{','}', OneOrMore(da_ip)), _handle_diraddr)

        gr_res = OneOrMore(gr_name | gr_address | gr_fd_conn | gr_heart | gr_max_con | gr_max_jobs | gr_pass | gr_pid | gr_query | gr_scripts | gr_sd_conn | gr_source | gr_stats | gr_verid | gr_messages | gr_work_dir | gr_port | gr_monitor | da_addresses)

        # Parsing populates self through the parse actions; the ParseResults
        # value itself is not needed, so it is not kept.
        gr_res.parseString(string, parseAll=True)
        return 'Director: ' + self[NAME]
コード例 #2
0
    def parse_string(self, string):
        '''Populate a new object from a string.

        Parsing is hard, so we're going to call out to the pyparsing
        library here.  I hope you installed it!

        Returns a short summary string ('Client: <name>'); the real result
        is the set of directives stored on self via parse actions.
        '''
        from pyparsing import quotedString, restOfLine, Keyword, nestedExpr, OneOrMore, Word, Literal, removeQuotes, nums, replaceWith, printables
        gr_eq = Literal('=')
        # A quoted string with quotes stripped, else everything to end-of-line.
        gr_stripped_string = quotedString.copy().setParseAction( removeQuotes )
        gr_opt_quoted_string = gr_stripped_string | restOfLine
        gr_number = Word(nums)
        # yes/no (any case) normalised to '1'/'0'.
        gr_yn = Keyword('yes', caseless=True).setParseAction(replaceWith('1')) | Keyword('no', caseless=True).setParseAction(replaceWith('0'))

        def _handle_ip(*x):
            # x[2] is (keyword, '=', [addr]); rebuild "  kw = { addr }".
            a,b,c =  x[2]
            return '  %s = { %s }' % (a,c[0])

        def _handle_fdaddr(*x):
            # Store the joined address blocks under FDADDRESSES.
            a,b,c =  x[2]
            self._set(FDADDRESSES, '  %s' % '\n  '.join(c))
            return

        def np(words, fn = gr_opt_quoted_string, action=None):
            # Build a parser for "<any keyword in words> = <fn>" (caseless).
            p = Keyword(words[0], caseless=True)
            for w in words[1:]:
                p = p | Keyword(w, caseless=True)
            p = p + gr_eq + fn
            p.setParseAction(action)
            return p

        gr_line = np((NAME,), action=lambda x: self._set_name(x[2]))
        gr_line = gr_line | np((ADDRESS,), action=self._parse_setter(ADDRESS))
        gr_line = gr_line | np((CATALOG,), action=self._parse_setter(CATALOG_ID, dereference=True))
        gr_line = gr_line | np((PASSWORD,), action=self._parse_setter(PASSWORD))
        gr_line = gr_line | np(PList('file retention'), action=self._parse_setter(FILERETENTION))
        gr_line = gr_line | np(PList('job retention'), action=self._parse_setter(JOBRETENTION))
        gr_line = gr_line | np((PRIORITY,), gr_number, action=self._parse_setter(PRIORITY))
        gr_line = gr_line | np(PList('working directory'), action=self._parse_setter(WORKINGDIRECTORY))
        gr_line = gr_line | np(PList('pid directory'), action=self._parse_setter(PIDDIRECTORY))
        gr_line = gr_line | np(PList('heart beat interval'), action=self._parse_setter(HEARTBEATINTERVAL))
        gr_line = gr_line | np(PList('fd address'), action=self._parse_setter(FDADDRESS))
        gr_line = gr_line | np(PList('fd source address'), action=self._parse_setter(FDSOURCEADDRESS))
        gr_line = gr_line | np(PList('pki key pair'), action=self._parse_setter(PKIKEYPAIR))
        gr_line = gr_line | np(PList('pki master key'), action=self._parse_setter(PKIMASTERKEY))
        gr_line = gr_line | np(PList('fd port'), gr_number, action=self._parse_setter(FDPORT))
        gr_line = gr_line | np(PList('auto prune'), gr_yn, action=self._parse_setter(AUTOPRUNE))
        # BUG FIX: this directive previously stored into FDPORT (copy-paste
        # from the 'fd port' line above), so any "Maximum Concurrent Jobs"
        # setting clobbered the port.  Store it under its own key.
        # NOTE(review): assumes MAXIMUMCONCURRENTJOBS is declared alongside
        # FDPORT etc. in this module's constants -- confirm.
        gr_line = gr_line | np(PList('maximum concurrent jobs'), gr_number, action=self._parse_setter(MAXIMUMCONCURRENTJOBS))
        gr_line = gr_line | np(PList('pki encryption'), gr_yn, action=self._parse_setter(PKIENCRYPTION))
        gr_line = gr_line | np(PList('pki signatures'), gr_yn, action=self._parse_setter(PKISIGNATURES))

        # This is a complicated one: FD Addresses = { IP = { Addr = ... } }
        da_addr = np(('Addr','Port'), Word(printables), lambda x,y,z: ' '.join(z))
        da_ip = np(('IPv4','IPv6','IP'), nestedExpr('{','}', OneOrMore(da_addr).setParseAction(lambda x,y,z: ' ; '.join(z)))).setParseAction(_handle_ip)
        da_addresses = np(('fd addresses', FDADDRESSES), nestedExpr('{','}', OneOrMore(da_ip)), _handle_fdaddr)

        gr_res = OneOrMore(gr_line|da_addresses)
        # Parsing populates self through the parse actions; result unused.
        gr_res.parseString(string, parseAll=True)
        return 'Client: ' + self[NAME]
コード例 #3
0
 def __init__(self):
     """Build the pyparsing grammar for the filter/query language.

     The compiled parser is stored on ``self.parser``.  Relies on the
     parse actions ``self.validateOperator`` and ``self.validateIdentifier``
     defined elsewhere on this class.
     """
     # supported operators
     operator = pp.Regex(r"<=|>=|<>|\!=|==|<|>|not|in|regex_partial|regex_exact|geo_box|geo_radius|geo_polygon|contains_any|substr|contains_near|any|contains_substr|near|contains").setName("operator").addParseAction(self.validateOperator)
 
     # literals
     # NOTE(review): "(:?" below looks like a typo for the non-capturing
     # group "(?:" -- it still matches plain numbers, but also admits forms
     # like "1:5"; confirm intent before changing.
     number = pp.Regex(r"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?").setName("number")
     numberList = pp.Group(pp.Literal('[') + number + pp.ZeroOrMore("," + number) + pp.Literal(']')).setName("numberList")
     string = pp.dblQuotedString
     literals = number | numberList | string
 
     # symbols: dotted lowercase identifiers like "interaction.content"
     identifier = pp.Regex(r"[a-z][a-z_]+(?:\.[a-z][a-z_]+)+").addParseAction(self.validateIdentifier).setName("identifier")
 
     # we'll get there...
     subExpr = pp.Forward()
 
     # predicates
     stream = pp.Group(pp.Literal("stream") + string).setName("stream")
     exists = pp.Group(identifier + pp.Literal("exists")).setName("exists")
 
     # boolean predicates (identifier on either side of the operator)
     comparison = pp.Group(
         identifier + operator + literals
         | literals + operator + identifier
     ).setName("comparison")
 
     condition = comparison | stream | exists | subExpr
     # parenthesised sub-expressions may contain any condition (recursive)
     subExpr << pp.nestedExpr(content=condition)
 
     # standard boolean operator precedence: not > and > or
     expr = pp.operatorPrecedence(condition,[
         (pp.CaselessLiteral("not"), 1, pp.opAssoc.RIGHT, ),
         (pp.CaselessLiteral("AND"), 2, pp.opAssoc.LEFT, ),
         (pp.CaselessLiteral("OR"), 2, pp.opAssoc.LEFT, ),
         ])
 
     # tag "thing" { expr }
     tag = pp.Group(pp.Literal("tag") + pp.quotedString + pp.nestedExpr("{", "}", expr)).setName("tag")
 
     # return { expr }
     a_return = pp.Group(pp.Literal("return") + pp.nestedExpr("{", "}", expr)).setName("return")
 
     # a single expression or tag [, tag, ...] return { expression }
     parser = expr | (pp.OneOrMore(tag) + a_return)
 
     # handle multilines
     parser.setDefaultWhitespaceChars(" \t\n\r")
 
     # handle // comments
     parser.ignore("//" + pp.restOfLine)
     self.parser = parser
コード例 #4
0
ファイル: markers.py プロジェクト: rogerjlogan/scripts
def removeComments(string):
    """Remove all comments from string.

    Handles Pascal/Modelica-style delimiters ``(* ... *)`` and ``{ ... }``.
    With the module-level flag ``__strip_nested__`` set, pyparsing's
    nestedExpr removes nested comments correctly (slower); otherwise a
    non-greedy regex pass is used, which cannot handle nesting.
    Python 2 code (print statements).
    """
    if __strip_nested__:
        print "Removing nested comments...\nIf you are sure that you don't have nested comments, set \"__strip_nested__ = False\"\nIt will be faster."
        print "Warning: You can still have problems if you have unmatched comment delimiters inside literal strings.\n\n"
        comment1 = pyparsing.nestedExpr("(*", "*)").suppress()
        comment2 = pyparsing.nestedExpr("{", "}").suppress()
        # transformString drops the suppressed comment matches in place
        string = comment1.transformString(string)
        string = comment2.transformString(string)
    else:
        print "Warning! Removing simple comments...\nIf you have nested comments set \"__strip_nested__ = True\" and re-run."
        # Non-greedy + DOTALL so each delimiter pair (possibly spanning
        # lines) is removed separately.
        string = re.sub(re.compile("\(\*.*?\*\)",re.DOTALL ) ,"" ,string)
        string = re.sub(re.compile("\{.*?\}",re.DOTALL ) ,"" ,string)
    return string
コード例 #5
0
ファイル: head2pairs.py プロジェクト: pld/dep2pcfg
def output(line):
    """Convert one parenthesised parse-tree line to dependency indices.

    Parses the s-expression, derives a dependency structure via the
    module-level ``depgen`` helper, and prints the resulting word-level
    dependency indices to stdout.  Python 2 code (print statement,
    integer division).  ``pp`` is presumably a pprint alias -- TODO confirm.
    """
    txt = line.strip()

    # Parse the s-expression into nested Python lists.
    parse = nestedExpr('(',')').parseString(txt).asList()[0]
    
    #print "\n\n"; pprint.pprint(parse)

    depstruct = depgen(parse, None, object())

    #pprint.pprint(depstruct)

    # depstruct rows appear to carry ids in column 1 and parent ids in
    # column 2 -- TODO confirm depgen's row layout.
    parents = [x[2] for x in depstruct]
    ids = [None] + [x[1] for x in depstruct]

    try:
        # Map each parent id back to its position in the id list.
        deps = [ids.index(p) for p in parents]
    except:
        pp.pprint(p)
        pp.pprint(ids)
        raise


    #assert deps[1::2]==deps[::2], deps
    #deps = [(d+1)/2 for d in deps[::2]]

    # Collapse token positions to word positions (two tokens per word).
    for i in range(0, len(deps)):
        if deps[i] > 0:
            deps[i] = (deps[i]+1)/2
    print ' '.join(map(str, deps[::2]))
    """
コード例 #6
0
ファイル: parseklee.py プロジェクト: ziqiaozhou/klee
 def parseOb(self,obpath):
     """Parse a .observable file into per-key expression results.

     Reads `obpath`, joins continuation lines until parentheses balance,
     evaluates each "key: (expr)" entry with self.printNested, and writes
     "key<occurrence>:<result>" lines to a companion .ob file.
     Python 2 code (uses dict.has_key).
     """
     obfile=open(obpath)
     outpath=obpath.replace('.observable','.ob')
     of=open(outpath,'w+')
     allline=[]
     for line in obfile:
         allline.append(line)
     count={}
     assignment={}
     # Start at line 1: line 0 is presumably a header -- TODO confirm.
     index=1
     while index <len(allline):
         line=allline[index]
         # Join continuation lines until parentheses balance.
         while line.count('(')>line.count(')'):
             index=index+1
             line=line+" "+allline[index]
         pair=line.split(': ')
         index=index+1
         # Suffix repeated keys with an occurrence counter.
         if count.has_key(pair[0]):
             count[pair[0]]=count[pair[0]]+1
         else:
             count[pair[0]]=0
         key=pair[0]+str(count[pair[0]])
         #print pair[1]
         path_str=pair[1].replace('\n',' ')
         exprs=nestedExpr(opener='(',closer=')').parseString(path_str).asList()
         result=self.printNested(of,exprs,False,True)
         assignment[key]=result
         of.write(key+":"+str(result)+"\n")
     obfile.close()
     of.close()
コード例 #7
0
ファイル: findfunc.py プロジェクト: frnogueira/puddletag
def func_tokens(dictionary, parse_action):
    """Build pyparsing elements for $function(...) calls and $N tokens.

    Returns a ``(func_tok, arglist, rx_tok)`` triple: ``func_tok`` matches
    a ``$name(...)`` call (handled by `parse_action`), ``rx_tok`` matches a
    ``$<digits>`` placeholder replaced from `dictionary`, and ``arglist``
    matches a comma-separated argument list.
    """
    func_name = Word(alphas + '_', alphanums + '_')
    func_ident = Combine('$' + func_name.copy()('funcname'))

    # A call is "$name" immediately followed by its raw "(...)" text.
    func_tok = func_ident + originalTextFor(nestedExpr())('args')
    func_tok.leaveWhitespace()
    func_tok.setParseAction(parse_action)
    func_tok.enablePackrat()

    # "$<digits>" placeholders are looked up in the supplied dictionary.
    rx_tok = Combine(Literal('$').suppress() + Word(nums)('num'))

    def _substitute(tokens):
        return dictionary.get(int(tokens.num), u'')

    rx_tok.setParseAction(_substitute)

    def _strip_token(s, l, tok):
        return tok[0].strip()

    text_tok = CharsNotIn(u',').setParseAction(_strip_token)
    quote_tok = QuotedString('"')

    # Placeholders only participate when there is something to substitute.
    if dictionary:
        arglist = Optional(delimitedList(quote_tok | rx_tok | text_tok))
    else:
        arglist = Optional(delimitedList(quote_tok | text_tok))

    return func_tok, arglist, rx_tok
コード例 #8
0
	def _stripExtraParenthesis(s):
		"""Strip redundant outer parentheses from condition string `s`.

		Counts how many levels of the expression are a lone parenthesised
		group and peels that many off with a regex.  Best-effort: any
		failure (unbalanced parens, multiple top-level groups, no match)
		leaves `s` unchanged.
		"""
		try:
			if s.count("(") != s.count(")"):
				raise Exception("Cannot parse condition")

			toStrip=0
			el = nestedExpr('(', ')').searchString(s).asList()
			
			# More than one top-level group means the outer parens are load-bearing.
			if len(el) >1:
				raise Exception("non-strippable")

			def _countExtraParenthesis(el):
				# Each level that is a single nested list is one extra pair.
				if len(el) == 1 and isinstance(el[0],list):
					return 1+_countExtraParenthesis(el[0])
				return 0	
			toStrip+=_countExtraParenthesis(el[0])	

			# Peel off one pair of outer parentheses per extra level.
			for it in range(0,toStrip):
				match = re.match(r'[\s]*\((?P<inner>.+)\)[\s]*',s)
				s = match.group("inner")
			
		except Exception as e:
			# Deliberate best-effort: on any problem return s unmodified.
			pass
		return s 
コード例 #9
0
ファイル: omnibase.py プロジェクト: alvaromorales/wikiscout
def _parse_get_symbols_response(response):
    """Parse an Omnibase get-symbols S-expression response.

    Returns None for the empty response '()'; otherwise a list of dicts
    with keys 'class', 'match', 'span' and 'symbol'.  Raises
    OmnibaseParseException when the response cannot be parsed and
    OmnibaseException when the server returned an error form.
    """
    if response[:2] == '()':
        return None

    # some responses are HUGE ... we truncate them at the last complete
    # ':priority 0)' entry so the S-expression stays balanced.
    if len(response) > 0 and response[-2:] != "))":
        last_occurence = response.rfind(':priority 0)')
        response = response[:last_occurence + len(':priority 0)')] + ")"

    parsed_response = []

    try:
        data = OneOrMore(nestedExpr()).parseString(response)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only real errors should become parse exceptions.
        raise OmnibaseParseException("Could not parse %s" % response)

    if data[0][0] == "error":
        raise OmnibaseException(response)

    for d in data[0]:
        r = {}
        # Positional fields: d[3] is the (start end) span and d[5] the
        # symbol -- assumed from the original code; confirm against the
        # Omnibase protocol.
        r['class'] = d[0]
        r['match'] = d[1].replace('"', '')
        r['span'] = (int(d[3][0]), int(d[3][1]))
        r['symbol'] = d[5].replace('"', '')
        parsed_response.append(r)
    return parsed_response
コード例 #10
0
ファイル: parse_feed.py プロジェクト: danintheory/urXiv
def parse_title(title):
    '''
    Parses the title returned from the arxiv RSS

    Returns a triple (parse_title, updated, subject)
    with the parsed title, a boolean indicating if the preprint is new or
    updated, and the parsed subject (i.e. [hep-th]). This lets us identify
    cross-lists if we want.
    '''

    # The metadata lives in the final parenthesised group of the title.
    meta_start = title.rfind('(')

    # Doubly nested list; only the last group matters here.
    meta = nestedExpr('(',')').parseString(title[meta_start:])

    # The final token flags whether the preprint was updated.
    updated = meta[-1][-1] == u'UPDATED'

    # The subject precedes the UPDATED marker when present.
    subject = meta[-1][-2] if updated else meta[-1][-1]

    # Trim the surrounding bracket characters, e.g. "[hep-th]" -> "hep-th".
    subject = subject[1:-1]

    return title[:meta_start], updated, subject
コード例 #11
0
ファイル: xen.py プロジェクト: khorben/cuckoo
    def _is_running(self, host):
        """Checks if a virtual machine is running.
        @param host: name of the virtual machine.
        @return: running status.
        """
        #FIXME use domstate instead
        try:
            proc = subprocess.Popen([self.options.xen.path, 'list', '-l',
                host],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            output, err = proc.communicate()

            if proc.returncode != 0:
                log.debug("Xen returns error checking status for machine %s: %s"
                        % (host, err))
                return False
            # 'xl list -l' emits an S-expression; a (status 2) row means the
            # domain is running -- assumed from the original code, confirm
            # against Xen's documentation.
            data = OneOrMore(nestedExpr()).parseString(output)
            for row in data.asList()[0]:
                if row[0] == 'status' and row[1] == '2':
                    return True
            return False
        except OSError as e:
            # BUG FIX: this log line referenced the undefined name `label`,
            # raising a NameError inside the handler; use `host`.
            log.warning("Xen failed to check status for machine %s: %s"
                    % (host, e))
            return False
コード例 #12
0
ファイル: submitviews.py プロジェクト: Arzar/aligulac
def parse_match(s):
    """Split a match string like "(a b) 2-1 (c d)" into per-score groups.

    Returns a list of token lists; each '-' score separator closes the
    current group and opens the next.  NOTE(review): this looks like
    Python 2 code -- under Python 3, ``s.encode()`` yields bytes and
    ``'('+s.encode()+')'`` would raise TypeError.
    """
    res = nestedExpr('(',')').parseString('('+s.encode()+')').asList()[0]
    elements = []
    collect = []

    # `first` is True until the first score separator has been seen.
    first = True
    for r in res:
        if type(r) == str:
            splits = r.split('-')
            if len(splits) > 1 and first:
                # First score: text before '-' closes the opening group;
                # the remainder starts the next one.
                elements.append(splits[0].strip())
                collect.append(elements)
                elements = []
                elements += [r.strip() for r in splits[1:] if r.strip() != '']
                first = False
            elif len(splits) > 1 and not first:
                # Subsequent scores just delimit groups.
                collect.append(elements)
                elements = []
                elements += [r.strip() for r in splits if r.strip() != '']
            else:
                elements.append(splits[0].strip())

        else:
            # Nested (...) group: splice its tokens into the current group.
            elements += r
 
    collect.append(elements)
    # Drop empty tokens from every group.
    collect = [[f for f in col if f != ''] for col in collect]
    return collect
コード例 #13
0
ファイル: sumo.py プロジェクト: Karun-Jayaprathap/opencog
def parse_kif_string(inputdata):
    '''Returns a list containing the ()-expressions in the file.
    Each list expression is converted into a Python list of strings. Nested expressions become nested lists''' 
    # Imported lazily so the module loads without pyparsing installed.
    from pyparsing import OneOrMore, nestedExpr
    return OneOrMore(nestedExpr()).parseString(inputdata)
コード例 #14
0
ファイル: ttws.py プロジェクト: FND/trimtrailingwhitespaces
def cleanAnnotation(filepath):
    """Clean out the obsolete or superflous annotations.

    Reads the Modelica file at `filepath`, strips obsolete
    Window()/Coordsys() annotations, empty experiment-style annotations,
    and Icon/Diagram annotations without graphics (decided by the
    module-level `skipNonEmptyGraphics` parse action), then rewrites the
    file in place.
    """
    with open(filepath, 'r') as mo_file:
        string = mo_file.read()
        # remove 'Window(),' and 'Coordsys()' annotations:
        WindowRef = ZeroOrMore(White(' \t')) + (Keyword('Window')|Keyword('Coordsys')) + nestedExpr() + ',' + ZeroOrMore(White(' \t') + lineEnd)
        out = Suppress(WindowRef).transformString(string)
        # special care needs to be taken if the annotation is the last one
        WindowLastRef = Optional(',') + ZeroOrMore(White(' \t')) + (Keyword('Window')|Keyword('Coordsys')) + nestedExpr() + ZeroOrMore(White(' \t') + lineEnd)
        out = Suppress(WindowLastRef).transformString(out)

        # remove empty '[__Dymola_]experimentSetupOutput(),' annotation:
        # (~nestedExpr() requires the keyword NOT be followed by arguments)
        expRef = Optional(',') +  ZeroOrMore(White(' \t')) +  Optional('__Dymola_') + (Keyword('experimentSetupOutput')|Keyword('experiment')|Keyword('DymolaStoredErrors')|Keyword('Diagram')|Keyword('Icon')) + ~nestedExpr() +  ~CharsNotIn(',)')
        out = Suppress(expRef).transformString(out)

        # Remove Icon and Diagram annotations that do not contain any graphics
        emptyRef =  ZeroOrMore(White(' \t')) + (Keyword('Icon')|Keyword('Diagram')) + nestedExpr()('args') + ',' + ZeroOrMore(White(' \t') + lineEnd)
        emptyRef.setParseAction(skipNonEmptyGraphics)
        out = Suppress(emptyRef).transformString(out)
        # special care for the last annotation again
        emptyRef =   Optional(',') + ZeroOrMore(White(' \t')) + (Keyword('Icon')|Keyword('Diagram')) + nestedExpr()('args') + ZeroOrMore(White(' \t') + lineEnd)
        emptyRef.setParseAction(skipNonEmptyGraphics)
        out = Suppress(emptyRef).transformString(out)

        # in case we end up with empty annotations remove them too
        AnnotationRef = ZeroOrMore(White(' \t')) + Keyword('annotation') + nestedExpr('(',');',content=' ') + ZeroOrMore(White(' \t') + lineEnd)
        out = Suppress(AnnotationRef).transformString(out)
    with open(filepath,'w') as mo_file:
        mo_file.write(out)
コード例 #15
0
ファイル: smart_parsing.py プロジェクト: fredsod/NIPAP
    def parse(self, input_string):
        """Parse a smart search string, rejecting unbalanced input early.

        Returns ``self._ast_to_dictsql(...)`` on success, or
        ``(False, error_dictsql)`` when the input contains an unclosed
        quote or parenthesis.
        """
        # Strip every balanced (...) group and quoted string; any quote or
        # parenthesis character that survives must be unmatched.
        balanced = nestedExpr('(', ')') | quotedString
        residue = balanced.suppress().transformString(input_string)

        def _error(message):
            return False, {
                'operator': None,
                'val1': None,
                'val2': None,
                'interpretation': {
                    'interpretation': None,
                    'string': input_string,
                    'attribute': 'text',
                    'operator': None,
                    'error': True,
                    'error_message': message
                }
            }

        if '"' in residue or "'" in residue:
            return _error('unclosed quote')
        if '(' in residue or ')' in residue:
            return _error('unclosed parentheses')

        return self._ast_to_dictsql(self._string_to_ast(input_string))
コード例 #16
0
ファイル: TTree.py プロジェクト: tuur/STPS
   def __init__( self,string = "", id = -1):
      """Build a TTree node from a string or nested-list representation.

      If `string` contains '(', it is first parsed with pyparsing's
      nestedExpr into nested lists.  A bare string becomes a leaf label;
      a list becomes a label plus recursively built children, sorted so
      equal trees get a canonical form.  Python 2 code (print statement).
      """
      self._children = []   # child TTree nodes
      self._label = ""      # node label
      self.string = string  # original constructor argument
      self._proj = False
      # Default id: the object's own string representation.
      if id == -1:
      	id = str(self)
      self.id = id
      
      # Parenthesised input: strip newlines and parse into nested lists.
      if type(string)==str and ("(" in string):
        string = string.replace('\n','')
        string = nestedExpr().parseString(string).asList()[0]
       
      if type(string)==str:
        self._label=string

      elif type(string)==list:
        self._label=string[0]
        children_string = string[1:]

        # order the children according to their labels, to achieve the canonical form
        children_string.sort()
             
        for e in children_string:
                self._children+= [TTree(e)]
            
      else:
        print "TTREE ERROR: Syntax error in:",string," of type ",type(string)
        exit()
コード例 #17
0
ファイル: train.py プロジェクト: shicks/icfp13
def evaluateBV(s):
    """Parse BV program source `s` into nested lists and convert via nlToS."""
    tree = nestedExpr('(', ')').parseString(s).asList()
    return nlToS(tree)
コード例 #18
0
def read_sets_java(string):
    """Parse a Java-style set-of-sets literal (e.g. "[[a, b], [c]]").

    Elements are converted by the module-level `parse_elem_java` action;
    each bracketed group becomes a frozenset.
    """
    from pyparsing import nestedExpr, alphas, Word, nums, ParserElement, delimitedList
    # Treat commas like whitespace so "a, b" splits into separate elements.
    ParserElement.setDefaultWhitespaceChars(" ,")
    atom = Word(alphas + nums).setParseAction(parse_elem_java)
    members = delimitedList(atom)
    grammar = nestedExpr("[", "]", content=members).setParseAction(lambda t: frozenset(t[0]))
    return grammar.parseString(string).asList()[0]
コード例 #19
0
ファイル: CodeParser.py プロジェクト: GrahamDennis/xpdeint
def nonlocalDimensionAccessForComponents(components, codeBlock):
    """
    Find all places in the `codeBlock` where any of `components` are accessed with
    non-locally (usually integer-valued dimensions) and return a ``(componentName, resultDict, codeSlice)``
    tuple for each such occurrence. The companion of `nonlocalDimensionAccessForVectors` and
    to be used when `components` are components of vectors.
    """
    
    # Optimise for the common case: if the code doesn't contain the string "=>", then we know it doesn't have any nonlocal access
    if "=>" not in codeBlock.codeString:
        return []
    
    # One "name => expr" entry.  `identifier`, `sliceFor`, `baseExpr` and
    # `ignoreExpr` are module-level pyparsing elements defined elsewhere.
    dictionaryElement = identifier + Suppress('=>') + sliceFor(Group(baseExpr))
    # Comma-separated entries gathered into a pyparsing Dict for keyed access.
    nonlocalAccessDictParser = Dict(
        ZeroOrMore(Group(dictionaryElement + Suppress(','))) + Group(dictionaryElement)
    )
    # A component name immediately followed by its parenthesised access dict.
    parser = identifier.setResultsName('name') \
                + nestedExpr('(', ')', nonlocalAccessDictParser, ignoreExpr).setResultsName('access')
    # Don't match inside comments or string literals.
    parser.ignore(cppStyleComment.copy())
    parser.ignore(quotedString.copy())
    results = []
    
    for tokens, start, end in parser.scanString(codeBlock.codeString):
        if tokens.name not in components: continue
        accessDict = {}
        tokenDict = tokens.access[0].asDict()
        for key, value in tokenDict.items():
            # Map each dimension name to (expression text, slice start).
            accessDict[key] = (' '.join(value[0].asList()), value.slice.start)
        results.append((tokens.name, accessDict, slice(start, end)))
    return results
コード例 #20
0
    def handle(self):
        """Serve one '(get-annotation (object) (sentence))' request.

        Reads a single line from the client, parses it as an S-expression,
        annotates the sentence, and writes the annotation (or an 'ERROR: ...'
        line) back to the client.
        """
        self.data = self.rfile.readline().strip()

        logger.info('-----------------------------------')
        logger.info('%s wrote: %s' % (self.client_address[0], self.data))
        
        data = OneOrMore(nestedExpr()).parseString(self.data)[0]

        if data[0] != 'get-annotation':
            self.wfile.write('ERROR: method "%s" not supported' % data[0])
        elif len(data) != 3:
            self.wfile.write('ERROR: badly formatted request')
        else:
            # IDIOM FIX: renamed the local `object` -> `obj`; the original
            # shadowed the builtin `object`.  [1:-1] strips the argument's
            # surrounding quote characters.
            obj = data[1][1:-1]
            sentence = data[2][1:-1]
            try:
                # NOTE(review): these calls use the root `logging` module
                # while the rest of the method uses `logger`; kept as-is to
                # preserve behaviour, but this is probably unintended.
                logging.info("Sentence: %s" % sentence)
                a = annotation.annotate(sentence, obj)
                logging.info("Annotation: %s" % a.join_tokens())
                self.wfile.write(a.join_tokens() + "\n")
            except annotation.ObjectNotFoundException as e:
                logging.exception(e)
                self.wfile.write('ERROR: %s' % e)
            except annotation.ObjectSymbolNotFoundException as e:
                logging.exception(e)
                self.wfile.write('ERROR: %s' % e)

        logger.info('-----------------------------------')
コード例 #21
0
ファイル: named.py プロジェクト: udoprog/bsa
def build_parser(root_directory, path, fake_root=os.getcwd(), file_reader=None):
    """Build a pyparsing parser for BIND-style named.conf files.

    `include` statements are resolved through IncludeHandler (defined
    elsewhere in this module).  The returned root element also gets a
    convenience ``parse_file`` attribute bound to
    ``parseFile(..., parseAll=True)``.
    NOTE(review): the default ``fake_root=os.getcwd()`` is evaluated once
    at import time, not per call -- confirm that is intended.
    """
    from pyparsing import nestedExpr
    from pyparsing import QuotedString
    from pyparsing import Group
    from pyparsing import restOfLine
    from pyparsing import Word
    from pyparsing import alphanums
    from pyparsing import cStyleComment
    from pyparsing import OneOrMore
    from pyparsing import ZeroOrMore
    from pyparsing import Optional
    from pyparsing import Forward
    from pyparsing import Literal
    from pyparsing import Keyword

    root = Forward()

    # Resolves `include "file"` statements by recursively parsing the target.
    include_handler = IncludeHandler(
        root_directory,
        path,
        root,
        fake_root=fake_root,
        file_reader=file_reader)

    # relaxed grammar
    identifier = Word(alphanums + "-_.:/")

    # //, # and /* ... */ comments are all ignored
    comment = ("//" + restOfLine).suppress() \
        | ("#" + restOfLine).suppress() \
        | cStyleComment

    endstmt = Literal(";").suppress()

    argument = QuotedString('"') \
        | identifier

    arguments = ZeroOrMore(argument)

    statements = Forward()

    # `{ ... }` blocks may nest arbitrarily.
    section = nestedExpr("{", "}", statements)

    include = Keyword("include").suppress() + QuotedString('"')

    # "name arg... { section };" with an optional section body
    regular = identifier + Group(arguments) + Optional(section, default=[])

    statement = include.setParseAction(include_handler.pyparsing_call) \
        | regular.setParseAction(include_handler.pyparsing_mark)

    statements << OneOrMore(statement + endstmt)

    root << Optional(statements)

    root.ignore(comment)

    # Convenience: root.parse_file(f) parses a whole file strictly.
    setattr(
        root, 'parse_file',
        lambda f, root=root: root.parseFile(f, parseAll=True))

    return root
コード例 #22
0
def load_config(filename):
    """Load a JSON config file that may contain C/C++ style comments.

    Strips /* ... */ and // comments (while preserving a commented-out
    "sync_trash_ttl" entry -- see the Fixme below), parses the remainder
    as JSON, and returns the resulting dict.  Exits the process with
    status 1 if the file cannot be opened.
    """
    assert(isinstance(filename, str))

    try:
        # `with` guarantees the handle is closed even if read() fails.
        with open(filename) as f:
            text = f.read()
    except IOError:
        # BUG FIX: was `print(std.strerr, ...)`, which raised a NameError
        # (there is no `std` module) instead of reporting the failure.
        print("Cannot open {}".format(filename), file=sys.stderr)
        sys.exit(1)

    # Remove c comments /* ... */
    ccomments = pp.nestedExpr("/*", "*/").suppress()
    text = ccomments.transformString(text)

    # Fixme: The regex substitution wrongly uncomments global occurences of
    # 'sync_trash_ttl'. This may lead to problems reading the json file in case
    # multiple global occurences of 'sync_trash_ttl' exists. It may also
    # trigger an incorrect warning in the function test_config()!

    # Uncomment //"sync_trash_ttl" : x"
    text = re.sub(r'/{2,}\s*("sync_trash_ttl"\s+:\s+[0-9]+)','\g<1>',text)

    # Remove c++ comments // ...
    cppcomments = pp.cppStyleComment.suppress()
    text = cppcomments.transformString(text)

    # Return config as dict
    return json.loads(text)
コード例 #23
0
ファイル: idl-compiler.py プロジェクト: TsaiJin/scylla
def parse_file(file_name):
    """Parse an IDL file into a pyparsing parse tree.

    The grammar covers namespaces, (optionally templated) class/struct
    definitions with `[[attribute]]` annotations and default values, and
    `enum class` declarations.  C and C++ style comments are ignored.
    """

    number = pp.Word(pp.nums)
    identifier = pp.Word(pp.alphas + "_", pp.alphanums + "_")

    lbrace = pp.Literal('{').suppress()
    rbrace = pp.Literal('}').suppress()
    cls = pp.Keyword('class')
    colon = pp.Literal(":")
    semi = pp.Literal(";").suppress()
    langle = pp.Literal("<")
    rangle = pp.Literal(">")
    equals = pp.Literal("=")
    comma = pp.Literal(",")
    lparen = pp.Literal("(")
    rparen = pp.Literal(")")
    lbrack = pp.Literal("[")
    rbrack = pp.Literal("]")
    mins = pp.Literal("-")
    struct = pp.Keyword('struct')
    template = pp.Keyword('template')
    final = pp.Keyword('final')("final")
    stub = pp.Keyword('stub')("stub")
    # qualified names like ns::type
    with_colon = pp.Word(pp.alphanums + "_" + ":")
    btype = with_colon
    type = pp.Forward()
    nestedParens = pp.nestedExpr('<', '>')

    # templated types: name<T1, T2, ...> (recursive via `type`)
    tmpl = pp.Group(btype("template_name") + langle.suppress() + pp.Group(pp.delimitedList(type)) + rangle.suppress())
    type << (tmpl | btype)
    enum_lit = pp.Keyword('enum')
    enum_class = pp.Group(enum_lit + cls)
    ns = pp.Keyword("namespace")

    # enum values may carry "= [-]number" initialisers
    enum_init = equals.suppress() + pp.Optional(mins) + number
    enum_value = pp.Group(identifier + pp.Optional(enum_init))
    enum_values = pp.Group(lbrace + pp.delimitedList(enum_value) + pp.Optional(comma) + rbrace)
    content = pp.Forward()

    # member name, optionally a no-arg accessor "name()"
    member_name = pp.Combine(pp.Group(identifier + pp.Optional(lparen + rparen)))
    # [[...]] attribute blocks
    attrib = pp.Group(lbrack.suppress() + lbrack.suppress() + pp.SkipTo(']') + rbrack.suppress() + rbrack.suppress())
    opt_attribute = pp.Optional(attrib)("attribute")
    namespace = pp.Group(ns("type") + identifier("name") + lbrace + pp.Group(pp.OneOrMore(content))("content") + rbrace)
    enum = pp.Group(enum_class("type") + identifier("name") + colon.suppress() + identifier("underline_type") + enum_values("enum_values") + pp.Optional(semi).suppress())
    default_value = equals.suppress() + pp.SkipTo(';')
    class_member = pp.Group(type("type") + member_name("name") + opt_attribute + pp.Optional(default_value)("default") + semi.suppress())("member")
    template_param = pp.Group(identifier("type") + identifier("name"))
    template_def = pp.Group(template + langle + pp.Group(pp.delimitedList(template_param))("params") + rangle)
    class_content = pp.Forward()
    class_def = pp.Group(pp.Optional(template_def)("template") + (cls | struct)("type") + with_colon("name") + pp.Optional(final) + pp.Optional(stub) + opt_attribute + lbrace + pp.Group(pp.ZeroOrMore(class_content))("members") + rbrace + pp.Optional(semi))
    content << (enum | class_def | namespace)
    class_content << (enum | class_def | class_member)
    # Give sub-parsers readable names for pyparsing error messages.
    for varname in "enum class_def class_member content namespace template_def".split():
        locals()[varname].setName(varname)
    rt = pp.OneOrMore(content)
    singleLineComment = "//" + pp.restOfLine
    rt.ignore(singleLineComment)
    rt.ignore(pp.cStyleComment)
    return rt.parseFile(file_name, parseAll=True)
コード例 #24
0
ファイル: executable.py プロジェクト: feifzhou/fortpy
    def __init__(self, vparser, docparser):
        """Store collaborating parsers and precompile shared helpers."""
        self.setup_regex()
        self.vparser = vparser
        self.docparser = docparser
        # Matches balanced parentheses (used to grab argument lists).
        self.nester = pyparsing.nestedExpr('(', ')')

        # Lower-cased names of all Fortran 2003 intrinsic functions.
        self._intrinsic = [name.strip().lower() for name in self._intrinsic_functions()]
コード例 #25
0
def getExpressionsInParentheses(theString):
    """Return the parenthesised sub-expressions of *theString*.

    The string is parsed with pyparsing's ``nestedExpr``; each nested
    (parenthesised) group found at the top level is converted via
    ``lisp()``.  Plain string tokens at the top level are skipped.
    """
    parsed = OneOrMore(nestedExpr()).parseString(theString)
    # parsed[0] is the first top-level group; its non-string members are
    # the nested parenthesised expressions.  isinstance() is the idiomatic
    # (and subclass-safe) replacement for ``type(x) != str``.
    return [lisp(item) for item in parsed[0] if not isinstance(item, str)]
コード例 #26
0
ファイル: ontoprog.py プロジェクト: andreasBihlmaier/ontoprog
 def parse_item(self, string):
   """Parse one s-expression item and dispatch it to the matching handler."""
   tree = pyparsing.nestedExpr().parseString(string)
   head = tree[0][0]
   tail = tree[0][1:]
   if head == 'subclass':
     self.add_subclass(tail[0], tail[1])
コード例 #27
0
ファイル: smart_parsing.py プロジェクト: fredsod/NIPAP
    def _string_to_ast(self, input_string):
        """ Parse a smart search string and return it in an AST like form.

            Builds a pyparsing grammar for words, numbers, IPv4/IPv6
            addresses, VRF RTs, tags, quoted strings and comparison
            expressions, then parses *input_string* with it.  Returns the
            pyparsing ParseResults (named sub-results act as the AST).
        """

        # simple words
        # we need to use a regex to match on words because the regular
        # Word(alphanums) will only match on American ASCII alphanums and since
        # we try to be Unicode / internationally friendly we need to match much
        # much more. Trying to expand a word class to catch it all seems futile
        # so we match on everything *except* a few things, like our operators
        comp_word = Regex("[^*\s=><~!]+")
        word = Regex("[^*\s=><~!]+").setResultsName('word')
        # numbers
        comp_number = Word(nums)
        number = Word(nums).setResultsName('number')

        # IPv4 address
        ipv4_oct = Regex("((2(5[0-5]|[0-4][0-9])|[01]?[0-9][0-9]?))")
        comp_ipv4_address = Combine(ipv4_oct + ('.' + ipv4_oct*3))
        ipv4_address = Combine(ipv4_oct + ('.' + ipv4_oct*3)).setResultsName('ipv4_address')

        # IPv6 address (full RFC-style alternation, including embedded IPv4
        # and zone index suffix)
        ipv6_address = Regex("((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?").setResultsName('ipv6_address')
        ipv6_prefix = Combine(ipv6_address + Regex("/(12[0-8]|1[01][0-9]|[0-9][0-9]?)")).setResultsName('ipv6_prefix')

        # VRF RTs of the form number:number
        vrf_rt = Combine((comp_ipv4_address | comp_number) + Literal(':') + comp_number).setResultsName('vrf_rt')

        # tags
        tags = Combine( Literal('#') + comp_word).setResultsName('tag')

        # operators for matching
        match_op = oneOf(' '.join(self.match_operators)).setResultsName('operator')
        boolean_op = oneOf(' '.join(self.boolean_operators)).setResultsName('boolean')
        # quoted string
        d_quoted_string = QuotedString('"', unquoteResults=True, escChar='\\')
        s_quoted_string = QuotedString('\'', unquoteResults=True, escChar='\\')
        quoted_string = (s_quoted_string | d_quoted_string).setResultsName('quoted_string')
        # expression to match a certain value for an attribute
        expression = Group(word + match_op + (quoted_string | vrf_rt | word | number)).setResultsName('expression')
        # we work on atoms, which are single quoted strings, match expressions,
        # tags, VRF RT or simple words.
        # NOTE: Place them in order of most exact match first!
        atom = Group(ipv6_prefix | ipv6_address | quoted_string | expression | tags | vrf_rt | boolean_op | word)

        # parenthesised groups may nest arbitrarily deep
        enclosed = Forward()
        parens = nestedExpr('(', ')', content=enclosed)
        enclosed << (
                parens | atom
                ).setResultsName('nested')

        content = Forward()
        content << (
                ZeroOrMore(enclosed)
                )

        res = content.parseString(input_string)
        return res
コード例 #28
0
ファイル: vyatta.py プロジェクト: AmineYaiche/pyatta
def get_interfaces_infos(type='all'):
    """Return interface information parsed from the Vyatta config wrapper.

    :param type: 'eth' for ethernet interfaces only, 'ovpn' for OpenVPN
        only, or 'all' (default) to merge both.  (Parameter name kept for
        backward compatibility even though it shadows the builtin ``type``.)
    """
    out = _run('vyatta-cfg-wrapper show', output=True)
    # Wrap the output in braces so the whole config forms one nested block.
    data = nestedExpr(opener='{', closer='}').parseString('{' + out + '}').asList()
    body = data[0][1]
    if type == 'eth':
        return get_eth_ifaces(body)
    if type == 'ovpn':
        return get_ovpn_ifaces(body)
    # Bug fix: ``dict_items`` objects cannot be concatenated with '+' on
    # Python 3; merge the two dicts instead.  OpenVPN entries still win on
    # key collisions, matching the original concatenation order.
    merged = dict(get_eth_ifaces(body))
    merged.update(get_ovpn_ifaces(body))
    return merged
コード例 #29
0
ファイル: fasp2smt.py プロジェクト: alviano/python
def parseModel(lines):
    """Read an SMT '(model ...)' s-expression and attach each value to the
    Atom whose name has the form x<index>."""
    sexpr = OneOrMore(nestedExpr()).parseString("\n".join(lines))[0]
    assert sexpr[0] == 'model'
    atoms = Atom.getInstances()
    for entry in sexpr[1:]:
        assert entry[0] == 'define-fun'
        name = entry[1]
        if name[0] == 'x':
            # entry[4] holds the defined value for this function.
            atoms[int(name[1:])].setModel(entry[4])
コード例 #30
0
 def search(self, query_str):
     """Parse a lisp-like query string and return ranked search results.

     Any failure (bad syntax, search error) yields an error string
     instead of raising, preserving the original best-effort contract.
     """
     try:
         parsed_query = nestedExpr().parseString(query_str)[0]
         ranked_docs = sorted(self.__search(parsed_query),
                              key=itemgetter(1), reverse=True)
         return QueryResponse(ranked_docs)
     except Exception:
         return "Error: invalid query"
コード例 #31
0
ファイル: cache.py プロジェクト: rosenbrockc/fortpy
from fortpy.code import CodeParser
from . import builtin
import time
import re
import pyparsing
from . import rtupdate

# Time caches hold items with an expiration that get used only
# temporarily during code completion.
_time_caches = []
# Our instance of the CodeParser. It handles the parsing of all the
# Fortran modules and has its own caching built in.
_parsers = { "default": CodeParser() }
# Matches balanced parentheses, e.g. call argument lists.
nester = pyparsing.nestedExpr("(",")")
# Get a generic module updater for doing real-time updates on
# source code sent from the emacs buffer.

def parser(key = "default"):
    """Return the CodeParser registered under *key* (e.g. 'ssh').

    Unknown keys fall back to the default parser; the 'ssh' parser is
    created lazily on first request.
    """
    if key in _parsers:
        return _parsers[key]
    if key == "ssh":
        # Lazily build the SSH-capable parser the first time it is needed.
        _parsers["ssh"] = CodeParser(True, False)
        return _parsers["ssh"]
    # No parser for this key and no known setup: hand back the default.
    return _parsers["default"]

def clear_caches(delete_all=False):
コード例 #32
0
ファイル: utils.py プロジェクト: secretnonempty/ccrawl
# an object type is either a raw (builtin) type or a struct declaration
objecttype = pp.Or([rawtypes,strucdecl])
#define arrays:
# positive integer (no leading zero), converted to int on parse
intp       = pp.Regex(r'[1-9][0-9]*')
intp.setParseAction(lambda r: int(r[0]))
# bitfield declaration: "<rawtype> # <width>"
bitfield   = rawtypes + pp.Suppress('#') + intp
arraydecl  = pp.Suppress('[')+intp+pp.Suppress(']')
# array whose size may also be a symbolic constant
arrazdecl  = pp.Suppress('[')+pp.Or((intp,symbol))+pp.Suppress(']')
pointer    = pp.Optional(pstars,default='')+pp.Optional(arraydecl,default=0)
pointerxx  = pp.Optional(ampers,default='')+pp.Optional(arrazdecl,default=0)
cvref      = pp.Or((cvqual,ampers))
#
# definitions for nested_c ----------------------------------------------------
# nested_c captures "pointer to function/array" part of the declaration.
# this is the tricky part due to the nesting mix of pointer grouping vs.
# function prototyping using both parenthesis as delimiters!
nested_par = pp.nestedExpr(content=pp.Regex(r'[^()]+'),ignoreExpr=None)
nested_c   = pp.OneOrMore(nested_par)

class c_type(object):
    """The c_type object parses a C type string and decomposes it into
       several parts 
    """
    def __init__(self,decl):
        # get final element type:
        bf = decl.rfind('#')
        if bf>0:
            x = bitfield.parseString(decl)
            self.lbfw = x.pop()
            r = ''
        else:
            x,r = (pp.Group(objecttype)+pp.restOfLine).parseString(decl)
コード例 #33
0
ファイル: qmake_parser.py プロジェクト: zhangyu151152/qtbase
    def _generate_grammar(self):
        """Build and return the pyparsing grammar for qmake project files.

        Every sub-expression is routed through ``add_element`` so that, in
        debug mode, each element gets a name and debug tracing.  The
        ordering of alternatives below is significant — do not reorder.
        """
        # Define grammar:
        pp.ParserElement.setDefaultWhitespaceChars(" \t")

        def add_element(name: str, value: pp.ParserElement):
            """Attach a name and debug hooks to *value* (debug mode only)."""
            nonlocal self
            if self.debug:
                value.setName(name)
                value.setDebug()
            return value

        EOL = add_element("EOL", pp.Suppress(pp.LineEnd()))
        Else = add_element("Else", pp.Keyword("else"))
        Identifier = add_element(
            "Identifier", pp.Word(f"{pp.alphas}_", bodyChars=pp.alphanums + "_-./")
        )
        BracedValue = add_element(
            "BracedValue",
            pp.nestedExpr(
                ignoreExpr=pp.quotedString
                | pp.QuotedString(
                    quoteChar="$(", endQuoteChar=")", escQuote="\\", unquoteResults=False
                )
            ).setParseAction(lambda s, l, t: ["(", *t[0], ")"]),
        )

        Substitution = add_element(
            "Substitution",
            pp.Combine(
                pp.Literal("$")
                + (
                    (
                        (pp.Literal("$") + Identifier + pp.Optional(pp.nestedExpr()))
                        | (pp.Literal("(") + Identifier + pp.Literal(")"))
                        | (pp.Literal("{") + Identifier + pp.Literal("}"))
                        | (
                            pp.Literal("$")
                            + pp.Literal("{")
                            + Identifier
                            + pp.Optional(pp.nestedExpr())
                            + pp.Literal("}")
                        )
                        | (pp.Literal("$") + pp.Literal("[") + Identifier + pp.Literal("]"))
                    )
                )
            ),
        )
        LiteralValuePart = add_element(
            "LiteralValuePart", pp.Word(pp.printables, excludeChars="$#{}()")
        )
        SubstitutionValue = add_element(
            "SubstitutionValue",
            pp.Combine(pp.OneOrMore(Substitution | LiteralValuePart | pp.Literal("$"))),
        )
        FunctionValue = add_element(
            "FunctionValue",
            pp.Group(
                pp.Suppress(pp.Literal("$") + pp.Literal("$"))
                + Identifier
                + pp.nestedExpr()  # .setParseAction(lambda s, l, t: ['(', *t[0], ')'])
            ).setParseAction(lambda s, l, t: handle_function_value(*t)),
        )
        Value = add_element(
            "Value",
            pp.NotAny(Else | pp.Literal("}") | EOL)
            + (
                pp.QuotedString(quoteChar='"', escChar="\\")
                | FunctionValue
                | SubstitutionValue
                | BracedValue
            ),
        )

        Values = add_element("Values", pp.ZeroOrMore(Value)("value"))

        Op = add_element(
            "OP",
            pp.Literal("=")
            | pp.Literal("-=")
            | pp.Literal("+=")
            | pp.Literal("*=")
            | pp.Literal("~="),
        )

        Key = add_element("Key", Identifier)

        Operation = add_element(
            "Operation", Key("key") + pp.locatedExpr(Op)("operation") + Values("value")
        )
        CallArgs = add_element("CallArgs", pp.nestedExpr())

        def parse_call_args(results):
            """Flatten nested call arguments back into a single string,
            re-inserting the parentheses that nestedExpr stripped."""
            out = ""
            for item in chain(*results):
                if isinstance(item, str):
                    out += item
                else:
                    out += "(" + parse_call_args(item) + ")"
            return out

        CallArgs.setParseAction(parse_call_args)

        Load = add_element("Load", pp.Keyword("load") + CallArgs("loaded"))
        Include = add_element(
            "Include", pp.Keyword("include") + pp.locatedExpr(CallArgs)("included")
        )
        Option = add_element("Option", pp.Keyword("option") + CallArgs("option"))
        RequiresCondition = add_element("RequiresCondition", pp.originalTextFor(pp.nestedExpr()))

        def parse_requires_condition(s, l, t):
            """Unwrap a requires(...) condition and normalize ':' to '&&'."""
            # The following expression unwraps the condition via the additional info
            # set by originalTextFor.
            condition_without_parentheses = s[t._original_start + 1 : t._original_end - 1]

            # And this replaces the colons with '&&' similar how it's done for 'Condition'.
            condition_without_parentheses = (
                condition_without_parentheses.strip().replace(":", " && ").strip(" && ")
            )
            return condition_without_parentheses

        RequiresCondition.setParseAction(parse_requires_condition)
        Requires = add_element(
            "Requires", pp.Keyword("requires") + RequiresCondition("project_required_condition")
        )

        # ignore the whole thing...
        DefineTestDefinition = add_element(
            "DefineTestDefinition",
            pp.Suppress(
                pp.Keyword("defineTest")
                + CallArgs
                + pp.nestedExpr(opener="{", closer="}", ignoreExpr=pp.LineEnd())
            ),
        )

        # ignore the whole thing...
        ForLoop = add_element(
            "ForLoop",
            pp.Suppress(
                pp.Keyword("for")
                + CallArgs
                + pp.nestedExpr(opener="{", closer="}", ignoreExpr=pp.LineEnd())
            ),
        )

        # ignore the whole thing...
        ForLoopSingleLine = add_element(
            "ForLoopSingleLine",
            pp.Suppress(pp.Keyword("for") + CallArgs + pp.Literal(":") + pp.SkipTo(EOL)),
        )

        # ignore the whole thing...
        FunctionCall = add_element("FunctionCall", pp.Suppress(Identifier + pp.nestedExpr()))

        Scope = add_element("Scope", pp.Forward())

        Statement = add_element(
            "Statement",
            pp.Group(
                Load
                | Include
                | Option
                | Requires
                | ForLoop
                | ForLoopSingleLine
                | DefineTestDefinition
                | FunctionCall
                | Operation
            ),
        )
        StatementLine = add_element("StatementLine", Statement + (EOL | pp.FollowedBy("}")))
        StatementGroup = add_element(
            "StatementGroup", pp.ZeroOrMore(StatementLine | Scope | pp.Suppress(EOL))
        )

        Block = add_element(
            "Block",
            pp.Suppress("{")
            + pp.Optional(EOL)
            + StatementGroup
            + pp.Optional(EOL)
            + pp.Suppress("}")
            + pp.Optional(EOL),
        )

        ConditionEnd = add_element(
            "ConditionEnd",
            pp.FollowedBy(
                (pp.Optional(pp.White()) + (pp.Literal(":") | pp.Literal("{") | pp.Literal("|")))
            ),
        )

        ConditionPart1 = add_element(
            "ConditionPart1", (pp.Optional("!") + Identifier + pp.Optional(BracedValue))
        )
        ConditionPart2 = add_element("ConditionPart2", pp.CharsNotIn("#{}|:=\\\n"))
        ConditionPart = add_element(
            "ConditionPart", (ConditionPart1 ^ ConditionPart2) + ConditionEnd
        )

        ConditionOp = add_element("ConditionOp", pp.Literal("|") ^ pp.Literal(":"))
        ConditionWhiteSpace = add_element(
            "ConditionWhiteSpace", pp.Suppress(pp.Optional(pp.White(" ")))
        )

        ConditionRepeated = add_element(
            "ConditionRepeated", pp.ZeroOrMore(ConditionOp + ConditionWhiteSpace + ConditionPart)
        )

        Condition = add_element("Condition", pp.Combine(ConditionPart + ConditionRepeated))
        Condition.setParseAction(lambda x: " ".join(x).strip().replace(":", " && ").strip(" && "))

        # Weird thing like write_file(a)|error() where error() is the alternative condition
        # which happens to be a function call. In this case there is no scope, but our code expects
        # a scope with a list of statements, so create a fake empty statement.
        ConditionEndingInFunctionCall = add_element(
            "ConditionEndingInFunctionCall",
            pp.Suppress(ConditionOp)
            + FunctionCall
            + pp.Empty().setParseAction(lambda x: [[]]).setResultsName("statements"),
        )

        SingleLineScope = add_element(
            "SingleLineScope",
            pp.Suppress(pp.Literal(":")) + pp.Group(Block | (Statement + EOL))("statements"),
        )
        MultiLineScope = add_element("MultiLineScope", Block("statements"))

        SingleLineElse = add_element(
            "SingleLineElse",
            pp.Suppress(pp.Literal(":")) + (Scope | Block | (Statement + pp.Optional(EOL))),
        )
        MultiLineElse = add_element("MultiLineElse", Block)
        ElseBranch = add_element("ElseBranch", pp.Suppress(Else) + (SingleLineElse | MultiLineElse))

        # Scope is already add_element'ed in the forward declaration above.
        Scope <<= pp.Group(
            Condition("condition")
            + (SingleLineScope | MultiLineScope | ConditionEndingInFunctionCall)
            + pp.Optional(ElseBranch)("else_statements")
        )

        Grammar = StatementGroup("statements")
        Grammar.ignore(pp.pythonStyleComment())

        return Grammar
コード例 #34
0
ファイル: parseIDSL.py プロジェクト: vkrm1612/robocomp
    def fromString(inputText):
        """Parse IDSL *inputText* and return the pyparsing result tree.

        C-style block comments are stripped up front; the grammar covers
        imports, struct/dictionary/sequence/enum/exception definitions and
        interface declarations inside a single 'module' block.
        """
        # strip /* ... */ comments before parsing
        text = nestedExpr("/*", "*/").suppress().transformString(inputText)

        semicolon = Suppress(Word(";"))
        quote = Suppress(Word("\""))
        op = Suppress(Word("{"))
        cl = Suppress(Word("}"))
        opp = Suppress(Word("("))
        clp = Suppress(Word(")"))
        lt = Suppress(Word("<"))
        gt = Suppress(Word(">"))
        eq = Suppress(Word("="))
        identifier = Word(alphas + "_", alphanums + "_")
        typeIdentifier = Word(alphas + "_", alphanums + "_:")
        # one struct field: type name [= default];
        structIdentifer = Group(
            typeIdentifier.setResultsName('type') +
            identifier.setResultsName('identifier') + Optional(eq) +
            Optional(CharsNotIn(";").setResultsName('defaultValue')) +
            semicolon)
        structIdentifers = Group(OneOrMore(structIdentifer))

        ## Imports
        idslImport = Suppress(Word("import")) + quote + CharsNotIn(
            "\";").setResultsName('path') + quote + semicolon
        idslImports = ZeroOrMore(idslImport)

        structDef = Word("struct").setResultsName(
            'type') + identifier.setResultsName(
                'name') + op + structIdentifers.setResultsName(
                    "structIdentifiers") + cl + semicolon
        dictionaryDef = Word("dictionary").setResultsName(
            'type') + lt + CharsNotIn("<>").setResultsName(
                'content') + gt + identifier.setResultsName('name') + semicolon
        sequenceDef = Word("sequence").setResultsName(
            'type') + lt + typeIdentifier.setResultsName(
                'typeSequence') + gt + identifier.setResultsName(
                    'name') + semicolon
        enumDef = Word("enum").setResultsName(
            'type') + identifier.setResultsName('name') + op + CharsNotIn(
                "{}").setResultsName('content') + cl + semicolon
        exceptionDef = Word("exception").setResultsName(
            'type') + identifier.setResultsName('name') + op + CharsNotIn(
                "{}").setResultsName('content') + cl + semicolon

        # 'throws A, B' clause on a remote method
        raiseDef = Suppress(Word("throws")) + typeIdentifier + ZeroOrMore(
            Literal(',') + typeIdentifier)
        decoratorDef = Literal('idempotent') | Literal('out')
        retValDef = typeIdentifier.setResultsName('ret')

        firstParam = Group(
            Optional(decoratorDef.setResultsName('decorator')) +
            typeIdentifier.setResultsName('type') +
            identifier.setResultsName('name'))
        nextParam = Suppress(Word(',')) + firstParam
        params = firstParam + ZeroOrMore(nextParam)

        remoteMethodDef = Group(
            Optional(decoratorDef.setResultsName('decorator')) +
            retValDef.setResultsName('ret') +
            typeIdentifier.setResultsName('name') + opp +
            Optional(params).setResultsName('params') + clp +
            Optional(raiseDef.setResultsName('raise')) + semicolon)
        interfaceDef = Word('interface').setResultsName(
            'type') + typeIdentifier.setResultsName('name') + op + Group(
                ZeroOrMore(remoteMethodDef)).setResultsName(
                    'methods') + cl + semicolon

        moduleContent = Group(structDef | enumDef | exceptionDef
                              | dictionaryDef | sequenceDef | interfaceDef)
        module = Suppress(Word("module")) + identifier.setResultsName(
            "name") + op + ZeroOrMore(moduleContent).setResultsName(
                "contents") + cl + semicolon

        IDSL = idslImports.setResultsName("imports") + module.setResultsName(
            "module")
        IDSL.ignore(cppStyleComment)
        tree = IDSL.parseString(text)
        return tree
コード例 #35
0
# Exercise the primitive operator parsers, then assemble the full
# (recursive) boolean expression grammar.
test('m_and', 'and')
test('m_or', 'or')

m_logical_operator = m_and ^ m_or

test('m_logical_operator', '''
and
or
''')

# Forward declaration: expressions nest inside parenthesised subexpressions.
m_expression = Forward()
m_expression.setName('EXPR')
m_infix_operator = m_logical_operator
m_prefix_operator = m_not
m_subexpression = nestedExpr(content=m_expression)

m_term = m_literal ^ m_identifier ^ m_subexpression

m_infix_expression = ((m_term + m_infix_operator + m_expression)
                      #^
                      #(m_expression + m_infix_operator + m_term)
                      ^ (m_term + m_infix_operator + m_term))

m_prefix_expression = m_prefix_operator + m_expression

# Close the recursion; StringEnd() forces the whole input to be consumed.
m_expression << (m_term ^ m_prefix_expression
                 ^ m_infix_expression) + StringEnd()

test('m_subexpression', '(True)')
test('m_term', '''
コード例 #36
0
def _create_config_parser():
    """
    Creates a parser using pyparsing that works with bibfield rule definitions

    BNF like grammar:

    rule ::= ([persitent_identifier] json_id ["[0]" | "[n]"] "," aliases":" INDENT body UNDENT) | include | python_comment
    include ::= "include(" PATH ")"
    body ::=  [inherit_from] (creator | derived | calculated) [checker] [documentation] [producer]
    aliases ::= json_id ["[0]" | "[n]"] ["," aliases]

    creator ::= "creator:" INDENT creator_body+ UNDENT
    creator_body ::= [decorators] source_format "," source_tag "," python_allowed_expr
    source_format ::= MASTER_FORMATS
    source_tag ::= QUOTED_STRING

    derived ::= "derived" INDENT derived_calculated_body UNDENT
    calculated ::= "calculated:" INDENT derived_calculated_body UNDENT
    derived_calculated_body ::= [decorators] "," python_allowed_exp

    decorators ::= (peristent_identfier | legacy | do_not_cache | parse_first | depends_on | only_if | only_if_master_value)*
    peristent_identfier ::= @persitent_identifier( level )
    legacy ::= "@legacy(" correspondences+ ")"
    correspondences ::= "(" source_tag [ "," tag_name ] "," json_id ")"
    parse_first ::= "@parse_first(" jsonid+ ")"
    depends_on ::= "@depends_on(" json_id+ ")"
    only_if ::= "@only_if(" python_condition+ ")"
    only_if_master_value ::= "@only_if_master_value(" python_condition+  ")"

    inherit_from ::= "@inherit_from()"
    do_not_cache ::= "@do_not_cache"

    python_allowed_exp ::= ident | list_def | dict_def | list_access | dict_access | function_call

    checker ::= "checker:" INDENT checker_function+ UNDENT

    documentation ::= INDENT doc_string subfield* UNDENT
    doc_string ::= QUOTED_STRING
    subfield ::= "@subfield" json_id["."json_id*] ":" docstring

    producer ::= "producer:" INDENT producer_body UNDENT
    producer_body ::= producer_code "," python_dictionary
    producer_code ::= ident
    """

    # Stack of active indentation columns used to emulate INDENT/UNDENT
    # tokens (the grammar is indentation-sensitive, like Python itself).
    indent_stack = [1]

    def check_sub_indent(str, location, tokens):
        """Parse action: succeed only when the current column is deeper
        than the enclosing indent; pushes the new level."""
        cur_col = col(location, str)
        if cur_col > indent_stack[-1]:
            indent_stack.append(cur_col)
        else:
            raise ParseException(str, location, "not a subentry")

    def check_unindent(str, location, tokens):
        """Parse action: succeed only when the current column closes the
        innermost indentation level."""
        if location >= len(str):
            return
        cur_col = col(location, str)
        if not(cur_col < indent_stack[-1] and cur_col <= indent_stack[-2]):
            raise ParseException(str, location, "not an unindent")

    def do_unindent():
        """Pop the indentation level closed by a successful UNDENT."""
        indent_stack.pop()

    INDENT = lineEnd.suppress() + empty + empty.copy().setParseAction(check_sub_indent)
    UNDENT = FollowedBy(empty).setParseAction(check_unindent)
    UNDENT.setParseAction(do_unindent)

    json_id = (Word(alphas + "_", alphanums + "_") + Optional(oneOf("[0] [n]")))\
              .setResultsName("json_id", listAllMatches=True)\
              .setParseAction(lambda tokens: "".join(tokens))
    aliases = delimitedList((Word(alphanums + "_") + Optional(oneOf("[0] [n]")))
                            .setParseAction(lambda tokens: "".join(tokens)))\
              .setResultsName("aliases")
    python_allowed_expr = Forward()
    ident = Word(alphas + "_", alphanums + "_")
    dict_def = originalTextFor(nestedExpr('{', '}'))
    list_def = originalTextFor(nestedExpr('[', ']'))
    dict_access = list_access = originalTextFor(ident + nestedExpr('[', ']'))
    function_call = originalTextFor(ZeroOrMore(ident + ".") + ident + nestedExpr('(', ')'))

    python_allowed_expr << (ident ^ dict_def ^ list_def ^ dict_access ^ list_access ^ function_call ^ restOfLine)\
                          .setResultsName("value", listAllMatches=True)

    persistent_identifier = (Suppress("@persistent_identifier") +  nestedExpr("(", ")"))\
                            .setResultsName("persistent_identifier")
    legacy = (Suppress("@legacy") + originalTextFor(nestedExpr("(", ")")))\
             .setResultsName("legacy", listAllMatches=True)
    only_if = (Suppress("@only_if") + originalTextFor(nestedExpr("(", ")")))\
              .setResultsName("only_if")
    only_if_master_value = (Suppress("@only_if_value") + originalTextFor(nestedExpr("(", ")")))\
                    .setResultsName("only_if_master_value")
    depends_on = (Suppress("@depends_on") + originalTextFor(nestedExpr("(", ")")))\
                 .setResultsName("depends_on")
    parse_first = (Suppress("@parse_first") + originalTextFor(nestedExpr("(", ")")))\
                  .setResultsName("parse_first")
    do_not_cache = (Suppress("@") + "do_not_cache")\
                   .setResultsName("do_not_cache")
    field_decorator = parse_first ^ depends_on ^ only_if ^ only_if_master_value ^ do_not_cache ^ legacy

    #Independent decorators
    inherit_from = (Suppress("@inherit_from") + originalTextFor(nestedExpr("(", ")")))\
                    .setResultsName("inherit_from")

    master_format = (Suppress("@master_format") + originalTextFor(nestedExpr("(", ")")))\
                    .setResultsName("master_format")

    derived_calculated_body = ZeroOrMore(field_decorator) + python_allowed_expr

    derived = "derived" + Suppress(":") + INDENT + derived_calculated_body + UNDENT
    calculated = "calculated" + Suppress(":") + INDENT + derived_calculated_body + UNDENT

    source_tag = quotedString\
                 .setParseAction(removeQuotes)\
                 .setResultsName("source_tag", listAllMatches=True)
    source_format = oneOf(CFG_BIBFIELD_MASTER_FORMATS)\
                    .setResultsName("source_format", listAllMatches=True)
    creator_body = (ZeroOrMore(field_decorator) + source_format + Suppress(",") + source_tag + Suppress(",") + python_allowed_expr)\
                   .setResultsName("creator_def", listAllMatches=True)
    creator = "creator" + Suppress(":") + INDENT + OneOrMore(creator_body) + UNDENT

    checker_function = (Optional(master_format) + ZeroOrMore(ident + ".") + ident + originalTextFor(nestedExpr('(', ')')))\
                       .setResultsName("checker_function", listAllMatches=True)
    checker = ("checker" + Suppress(":") + INDENT + OneOrMore(checker_function) + UNDENT)

    doc_string = QuotedString(quoteChar='"""', multiline=True) | quotedString.setParseAction(removeQuotes)
    subfield = (Suppress("@subfield") + Word(alphanums + "_" + '.') + Suppress(":") + Optional(doc_string))\
                 .setResultsName("subfields", listAllMatches=True)
    documentation = ("documentation" + Suppress(":") + INDENT + Optional(doc_string).setResultsName("main_doc") + ZeroOrMore(subfield) + UNDENT)\
                     .setResultsName("documentation")

    producer_code = Word(alphas + "_", alphanums + "_")\
                    .setResultsName("producer_code", listAllMatches=True)
    producer_body = (producer_code + Suppress(",") + python_allowed_expr)\
                    .setResultsName("producer_def", listAllMatches=True)
    producer = "producer"  + Suppress(":") + INDENT + OneOrMore(producer_body) + UNDENT

    field_def = (creator | derived | calculated)\
                .setResultsName("type_field", listAllMatches=True)

    body = Optional(inherit_from) + Optional(field_def) + Optional(checker) + Optional(documentation) + Optional(producer)
    comment = Literal("#") + restOfLine + LineEnd()
    include = (Suppress("include") + quotedString)\
              .setResultsName("includes", listAllMatches=True)
    rule = (Optional(persistent_identifier) + json_id + Optional(Suppress(",") + aliases) + Suppress(":") + INDENT + body + UNDENT)\
           .setResultsName("rules", listAllMatches=True)

    return OneOrMore(rule | include | comment.suppress())
コード例 #37
0
class NginxConfigParser(object):
    """
    Nginx config parser originally based on https://github.com/fatiherikli/nginxparser

    Heavily customized and extended by Amplify team.

    Optimized by Paul McGuire author of the pyparsing library (https://www.linkedin.com/in/ptmcg).  Paul's
    optimizations (with minor compatibility tweaks during incorporation by Amplify team) resulted in over a 50%
    performance improvement (~59%).

    Parses single file into json structure

    NOTE: this class uses Python 2 syntax (the octal literal 0777 and the
    "except Exception, e" clause below), so it cannot run on Python 3 as-is.
    """
    # Class-level (shared across instances) cache of tokenized files;
    # reset at the start of every parse() run.
    tokens_cache = {}

    # Size ceiling for config files; presumably enforced by the file-reading
    # code elsewhere in the class -- TODO confirm against the full class body.
    max_size = 20 * 1024 * 1024  # 20 mb

    # constants -- punctuation tokens, suppressed from parse results
    left_brace = Literal("{").suppress()
    right_brace = Literal("}").suppress()
    semicolon = Literal(";").suppress()

    # keywords -- every keyword gets set_line_number (defined elsewhere in
    # this module) attached so results can be traced back to config lines
    IF, SET, REWRITE, PERL_SET, LOG_FORMAT, ALIAS, RETURN, ERROR_PAGE, MAP, \
        SERVER_NAME, SUB_FILTER, ADD_HEADER, LOCATION = (
            map(
                lambda x: x.setParseAction(set_line_number),
                map(
                    Keyword,
                    "if set rewrite perl_set log_format alias return "
                    "error_page map server_name sub_filter add_header "
                    "location".split()
                )
            )
        )

    # string helpers
    # single- or double-quoted string; surrounding quotes are removed
    string = (QuotedString("'", escChar='\\') | QuotedString(
        '"', escChar='\\')).setParseAction(set_line_number)

    # quoted string that may span multiple lines (quotes removed)
    multiline_string = (QuotedString("'", escChar='\\', multiline=True)
                        | QuotedString('"', escChar='\\', multiline=True)
                        ).setParseAction(set_line_number)

    # same as multiline_string, but the surrounding quotes are preserved
    multiline_string_keep_quotes = (
        QuotedString("'", escChar='\\', multiline=True, unquoteResults=False)
        | QuotedString('"', escChar='\\', multiline=True,
                       unquoteResults=False)).setParseAction(set_line_number)

    # lua keys -- directives such as lua_shared_dict / content_by_lua_block
    start_with_lua_key = Regex(r'lua_\S+').setParseAction(set_line_number)
    contains_by_lua_key = Regex(r'\S+_by_lua\S*').setParseAction(
        set_line_number)

    # a generic directive name: any word that is NOT one of the special
    # keywords handled by a dedicated rule below
    key = (~MAP & ~ALIAS & ~PERL_SET & ~IF & ~SET & ~REWRITE & ~SERVER_NAME
           & ~SUB_FILTER & ~ADD_HEADER
           & ~LOCATION) + Word(alphanums + '$_:%?"~<>\/-+.,*()[]"' +
                               "'").setParseAction(set_line_number)

    # values -- four regex variants cover quoted, single-quoted, variable
    # (${...}) and plain argument forms; tried in order inside `value`
    value_one = Regex(r'[^{};]*"[^\";]+"[^{};]*')
    value_two = Regex(r'[^{};]*\'[^\';]+\'')
    value_three = Regex(r'[^{};]+((\${[\d|\w]+(?=})})|[^{};])+')
    value_four = Regex(r'[^{};]+(?!${.+})')
    value = (string | value_one | value_two | value_three
             | value_four).setParseAction(set_line_number)
    rewrite_value = CharsNotIn(";").setParseAction(set_line_number)
    any_value = CharsNotIn(";").setParseAction(set_line_number)
    non_space_value = Regex(r'[^\'\";\s]+').setParseAction(set_line_number)
    if_value = nestedExpr().setParseAction(set_line_number)  # Regex(r'\(.*\)')
    strict_value = CharsNotIn("{};").setParseAction(set_line_number)
    sub_filter_value = (
        non_space_value
        | multiline_string_keep_quotes).setParseAction(set_line_number)
    log_format_value = (non_space_value
                        | multiline_string).setParseAction(set_line_number)
    add_header_value = Regex(r'[^{};]*"[^"]+"').setParseAction(set_line_number)
    map_value = (string |
                 Regex(r'((\\\s|[^{};\s])*)')).setParseAction(set_line_number)
    raw_value = Word(alphanums + '$_:%?"~<>\/-+.,*()[];|^@"=' +
                     "'").setParseAction(set_line_number)

    # modifier for location uri [ = | ~ | ~* | ^~ ]
    modifier = oneOf("= ~* ~ ^~")

    # a generic "name value ... ;" directive
    assignment = (key + Optional(OneOrMore(value + Optional(value))) +
                  semicolon).setParseAction(set_line_number)

    # NOTE: this attribute shadows the builtin set() inside the class body;
    # kept as-is because the grammar below refers to it by this name.
    set = (SET + any_value + semicolon).setParseAction(set_line_number)

    rewrite = (REWRITE + rewrite_value +
               semicolon).setParseAction(set_line_number)

    perl_set = (PERL_SET + key + multiline_string +
                semicolon).setParseAction(set_line_number)

    # any lua_* / *_by_lua* directive followed by an inline script string
    lua_content = ((start_with_lua_key | contains_by_lua_key) +
                   multiline_string +
                   semicolon).setParseAction(set_line_number)

    alias = (ALIAS + any_value + semicolon).setParseAction(set_line_number)

    # `return` and `error_page` share the same value shape
    return_ = ((RETURN | ERROR_PAGE) + value + Optional(any_value) +
               semicolon).setParseAction(set_line_number)

    log_format = (LOG_FORMAT + log_format_value + OneOrMore(log_format_value) +
                  semicolon).setParseAction(set_line_number)

    server_name = (SERVER_NAME + any_value +
                   semicolon).setParseAction(set_line_number)

    sub_filter = (SUB_FILTER + sub_filter_value + sub_filter_value +
                  semicolon).setParseAction(set_line_number)

    add_header = (ADD_HEADER + (non_space_value | string) +
                  Optional(multiline_string_keep_quotes | add_header_value
                           | non_space_value) + Optional(value) +
                  semicolon).setParseAction(set_line_number)

    # script
    # `map key1 key2 { value pairs... }` block
    map_block = Forward()
    map_block << Group(
        Group(MAP + map_value + map_value).setParseAction(set_line_number) +
        left_brace + Group(
            ZeroOrMore(Group(map_value + Optional(map_value) +
                             semicolon)).setParseAction(set_line_number)) +
        right_brace)

    # any `name [...] { ... }` block (server/location/if/...), recursive
    block = Forward()
    block << Group(
        (Group(key + Optional(modifier) + Optional(value + Optional(value)))
         | Group(IF + if_value) | Group(LOCATION + Optional(modifier) +
                                        Optional(multiline_string | raw_value))
         ).setParseAction(set_line_number) + left_brace
        -  # <----- use '-' operator instead of '+' to get better error messages
        Group(
            ZeroOrMore(
                Group(add_header) | Group(log_format) | Group(lua_content)
                | Group(perl_set) | Group(set) | Group(rewrite) | Group(alias)
                | Group(return_) | Group(assignment) | Group(server_name)
                | Group(sub_filter) | map_block | block).setParseAction(
                    set_line_number)).setParseAction(set_line_number) +
        right_brace)

    # top-level grammar: one or more directives/blocks, '#' comments ignored
    script = OneOrMore(
        Group(add_header) | Group(server_name) | Group(log_format)
        | Group(perl_set) | Group(lua_content) | Group(alias) | Group(return_)
        | Group(assignment) | Group(set) | Group(rewrite) | Group(sub_filter)
        | map_block | block).ignore(pythonStyleComment)

    # quick regex pre-scans for include/ssl_certificate directives
    # ([^#]* guards against commented-out directives on the same line)
    INCLUDE_RE = re.compile(r'[^#]*include\s+(?P<include_file>.*);')
    SSL_CERTIFICATE_RE = re.compile(
        r'[^#]*ssl_certificate\s+(?P<cert_file>.*);')

    def __init__(self, filename='/etc/nginx/nginx.conf'):
        """
        :param filename: str path to the main nginx config file
        """
        self.filename = filename
        self.folder = os.path.dirname(
            self.filename)  # stores path to folder with main config
        self.files = {}  # to prevent cycle files and line indexing
        self.directories = {}
        self.parsed_cache = {}  # to cache multiple includes
        self.broken_files = set()  # to prevent reloading broken files
        self.broken_directories = set(
        )  # to prevent reloading broken directories
        self.index = [
        ]  # stores index for all sections (points to file number and line number)
        self.ssl_certificates = []
        self.errors = []
        self.tree = {}
        self.directory_map = {}

        self.file_errors = []  # For broken files
        self.directory_errors = []  # for broken directories

    def parse(self):
        """
        Parse the main config file (plus whatever __pyparse/__logic_parse pull
        in -- presumably includes; they are defined elsewhere in the class)
        into self.tree, then build self.directory_map.
        """
        NginxConfigParser.tokens_cache = {}
        self.directories, self.files, self.parsed_cache = {}, {}, {
        }  # drop results from the previous run
        self.tree = self.__logic_parse(self.__pyparse(self.filename))  # parse
        self.construct_directory_map()  # construct a tree of structure
        self.parsed_cache = {}  # drop cached, as it is no longer needed

    @staticmethod
    def get_filesystem_info(path):
        """
        Returns file/folder size, mtime and permissions

        :param path: str path to file/folder
        :return: int, int, str - size, mtime, permissions
        """
        # defaults returned when the stat fails (missing file, no access, ...)
        size, mtime, permissions = 0, 0, '0000'

        try:
            size = os.path.getsize(path)
            mtime = int(os.path.getmtime(path))
            # 0777 is a Python 2 octal literal; masks out the file-type bits
            permissions = oct(os.stat(path).st_mode & 0777)
        except Exception, e:  # Python 2 except syntax; broad on purpose: stat is best-effort
            exception_name = e.__class__.__name__
            message = 'failed to stat %s due to: %s' % (path, exception_name)
            context.log.debug(message, exc_info=True)

        return size, mtime, permissions
コード例 #38
0
    def parse_domain(self):
        """Parse the domain file and create a problem instance. In case of an
           error, raise a ParsingException with the appropriate code from above.

           Reads self.domain_file_name, strips ';' comments, parses the PDDL
           s-expressions with pyparsing's nestedExpr, and dispatches each
           top-level section (:requirements, :types, ...) to the matching
           parse_* method. Sets self.problem as a side effect.

           (Parser) -> Problem
        """
        #Parse the domain file
        try:
            with open(self.domain_file_name) as domain_file:
                domain_lines = domain_file.readlines()
        except IOError:
            # fixed typo in the error message: "doman" -> "domain"
            raise ParsingException("Error: could not open the domain file: " +\
                self.domain_file_name, parsing_error_code)

        #Strip comments and turn into a single string
        domain_str = ''
        for line in domain_lines:
            line = line.strip().lower()
            # drop everything from the first ';' (PDDL comment) onwards;
            # partition() is a no-op when there is no ';'
            line = line.partition(";")[0]
            if not line: continue
            domain_str += line + " "

        domain_list = OneOrMore(nestedExpr()).parseString(domain_str).asList()
        if not domain_list:
            raise ParsingException("Error: empty domain file: " +\
                 self.domain_file_name, parsing_error_code)

        # the file is a single (define ...) form; everything else is an error
        domain_list = domain_list[0]
        if domain_list[0] != "define":
            raise ParsingException(
                "Error: define expected at beginning of file",
                parsing_error_code)
        for line in domain_list[1:]:
            if line[0] == "domain":
                try:
                    self.problem = Problem(line[1])
                except IndexError:
                    raise ParsingException("Error: badly formed domain name",
                                           parsing_error_code)
            elif line[0] == ":requirements":
                self.parse_requirements(line[1:])

            elif line[0] == ":types":
                self.parse_types(line[1:])

            elif line[0] == ":constants":
                self.parse_constants(line[1:])

            elif line[0] == ":predicates":
                self.parse_predicates(line[1:])

            elif line[0] == ":functions":
                self.parse_functions(line[1:])

            elif line[0] == ":action":
                self.parse_action(line[1:])

            elif line[0] == ":derived":
                self.parse_derived(line[1:])
            # unknown sections are silently ignored (parse_problem prints
            # them instead) -- kept as-is to preserve existing behavior
コード例 #39
0
    def parse_problem(self):
        """Parse the problem file and add the details to self.problem.
           In case of an error, raise a ParsingException with the appropriate
           code from above.

           Requires parse_domain() to have run first (self.problem must exist).

           (Parser) -> None
        """
        # Guard: the domain must already have been parsed.
        if not self.problem:
            raise ParsingException("Error: must parse the domain file first.",
                                   parsing_error_code)

        # Read the raw problem file.
        try:
            with open(self.problem_file_name) as problem_file:
                raw_lines = problem_file.readlines()
        except IOError:
            raise ParsingException("Error: could not open the problem file: " +\
                self.problem_file_name, parsing_error_code)

        # Lowercase, drop ';' comments, and fold everything into one
        # whitespace-separated string for the s-expression parser.
        pieces = []
        for raw in raw_lines:
            cleaned = raw.strip().lower().partition(";")[0]
            if cleaned:
                pieces.append(cleaned + " ")
        problem_str = ''.join(pieces)

        parsed = OneOrMore(
            nestedExpr()).parseString(problem_str).asList()
        if not parsed:
            raise ParsingException("Error: empty problem file: " +\
                 self.problem_file_name, parsing_error_code)

        # The file must be a single (define ...) form.
        tokens = parsed[0]
        if tokens[0] != "define":
            raise ParsingException(
                "Error: define expected at beginning of file",
                parsing_error_code)

        # Dispatch each top-level section to its handler.
        for line in tokens[1:]:
            head = line[0]

            if head == "problem":
                try:
                    self.problem.problem_name = line[1]
                except IndexError:
                    raise ParsingException("Error: badly formed problem name",
                                           parsing_error_code)

            elif head == ":domain":
                try:
                    if self.problem.name != line[1]:
                        raise ParsingException("Error: problem not for domain",
                                               parsing_error_code)
                except IndexError:
                    raise ParsingException(
                        "Error: badly formed problem domain line",
                        parsing_error_code)

            elif head == ":objects":
                self.parse_objects(line[1:])

            elif head == ":init":
                self.parse_init(line[1:])

            elif head == ":goal":
                self.parse_goal(line[1:])

            elif head == ":metric":
                self.parse_metric(line[1:])

            else:
                print(("Unknown line:", line))
コード例 #40
0
def bnglFunction(rule,
                 functionTitle,
                 reactants,
                 compartments=None,
                 parameterDict=None,
                 reactionDict=None):
    """
    Translate a rate/function expression into BNGL syntax.

    Rewrites comparison keywords (gt/lt/...), pow/root/ceil/floor/piecewise/
    lambda constructs, compartment and parameter references found in *rule*
    and returns a string of the form "<functionTitle> = <expr>" (or a lambda
    definition when the rule itself is a lambda).

    :param rule: str expression to translate (presumably SBML-style math)
    :param functionTitle: str name used on the left-hand side of the result
    :param reactants: reactant list (only referenced by commented-out code)
    :param compartments: optional list of (name, dimensions, ...) tuples whose
        references are replaced by their dimension value
    :param parameterDict: optional {name: value} local-parameter substitutions
    :param reactionDict: optional {reactionId: netflux-name} substitutions
    :return: str BNGL function definition

    NOTE: relies on module-level helpers logMess, rindex and the pyparsing
    module import defined elsewhere in this file.
    """
    # Fix for the mutable-default-argument pitfall: the previous signature
    # used compartments=[], parameterDict={}, reactionDict={}, which are
    # shared across calls. Behavior is unchanged for all callers.
    if compartments is None:
        compartments = []
    if parameterDict is None:
        parameterDict = {}
    if reactionDict is None:
        reactionDict = {}

    # regex-replacement callback: pow(a,b) -> (a)^(b), root(n,a) -> (a)^(1/n)
    def powParse(match):
        if match.group(1) == 'root':
            exponent = '(1/%s)' % match.group(3)
        else:
            exponent = match.group(3)
        # NOTE(review): `operator` is only bound when group(1) is root/pow;
        # the regex driving this callback only matches those, so this is safe.
        if match.group(1) in ['root', 'pow']:
            operator = '^'
        return '({0}){1}({2})'.format(match.group(2), operator, exponent)

    # regex-replacement callback: gt(a,b) -> a > b, etc.
    def compParse(match):

        translator = {
            'gt': '>',
            'lt': '<',
            'and': '&&',
            'or': '||',
            'geq': '>=',
            'leq': '<=',
            'eq': '=='
        }
        exponent = match.group(3)
        operator = translator[match.group(1)]
        return '{0} {1} {2}'.format(match.group(2), operator, exponent)

    # regex-replacement callback: emulate ceil/floor via rint(), which is
    # what BNGL provides
    def ceilfloorParse(math):
        flag = False
        if math.group(1) == 'ceil':
            flag = True
        if flag:
            return 'min(rint({0}+0.5),rint({0} + 1))'.format(math.group(2))
        else:
            return 'min(rint({0}-0.5),rint({0}+0.5))'.format(math.group(2))

    # regex-replacement callback: prefix an identifier with 'param_'
    def parameterRewrite(match):
        return match.group(1) + 'param_' + match.group(2) + match.group(3)

    # Recursively rebuild an expression string from the nested token list
    # produced by pyparsing, expanding every operator named in optionList.
    def constructFromList(argList, optionList):
        parsedString = ''
        idx = 0
        translator = {
            'gt': '>',
            'lt': '<',
            'and': '&&',
            'or': '||',
            'geq': '>=',
            'leq': '<=',
            'eq': '=='
        }
        while idx < len(argList):
            if type(argList[idx]) is list:
                parsedString += '(' + constructFromList(
                    argList[idx], optionList) + ')'
            elif argList[idx] in optionList:
                if argList[idx] == 'ceil':
                    parsedString += 'min(rint(({0}) + 0.5),rint(({0}) + 1))'.format(
                        constructFromList(argList[idx + 1], optionList))
                    idx += 1
                elif argList[idx] == 'floor':
                    parsedString += 'min(rint(({0}) -0.5),rint(({0}) + 0.5))'.format(
                        constructFromList(argList[idx + 1], optionList))
                    idx += 1
                elif argList[idx] in ['pow']:
                    index = rindex(argList[idx + 1], ',')
                    parsedString += '((' + constructFromList(
                        argList[idx + 1][0:index], optionList) + ')'
                    parsedString += ' ^ ' + '(' + constructFromList(
                        argList[idx + 1][index + 1:], optionList) + '))'
                    idx += 1
                elif argList[idx] in ['sqr', 'sqrt']:
                    tag = '1/' if argList[idx] == 'sqrt' else ''
                    parsedString += '((' + constructFromList(
                        argList[idx + 1],
                        optionList) + ') ^ ({0}2))'.format(tag)
                    idx += 1
                elif argList[idx] == 'root':
                    index = rindex(argList[idx + 1], ',')
                    tmp = '1/(' + constructFromList(argList[idx + 1][0:index],
                                                    optionList) + '))'
                    parsedString += '((' + constructFromList(
                        argList[idx + 1][index + 1:],
                        optionList) + ') ^ ' + tmp
                    idx += 1
                elif argList[idx] == 'piecewise':
                    # piecewise(v1, cond, v2[, ...]) -> if(cond, v1, v2),
                    # recursing for additional clauses
                    index1 = argList[idx + 1].index(',')
                    index2 = argList[idx + 1][index1 +
                                              1:].index(',') + index1 + 1

                    try:
                        index3 = argList[idx + 1][index2 +
                                                  1:].index(',') + index2 + 1
                    except ValueError:
                        index3 = -1
                    condition = constructFromList(
                        [argList[idx + 1][index1 + 1:index2]], optionList)
                    result = constructFromList([argList[idx + 1][:index1]],
                                               optionList)
                    if index3 == -1:
                        result2 = constructFromList(
                            [argList[idx + 1][index2 + 1:]], optionList)
                    else:
                        result2 = constructFromList(
                            ['piecewise', argList[idx + 1][index2 + 1:]],
                            optionList)
                    parsedString += 'if({0},{1},{2})'.format(
                        condition, result, result2)
                    idx += 1
                elif argList[idx] in ['and', 'or']:
                    # n-ary and/or: split the argument list on ',' and join
                    # the pieces with &&/||
                    symbolDict = {'and': ' && ', 'or': ' || '}
                    indexArray = [-1]
                    elementArray = []
                    for idx2, element in enumerate(argList[idx + 1]):
                        if element == ',':
                            indexArray.append(idx2)
                    indexArray.append(len(argList[idx + 1]))
                    tmpStr = argList[idx + 1]
                    for idx2, _ in enumerate(indexArray[0:-1]):
                        elementArray.append(
                            constructFromList(
                                tmpStr[indexArray[idx2] +
                                       1:indexArray[idx2 + 1]], optionList))
                    parsedString += symbolDict[argList[idx]].join(elementArray)
                    idx += 1
                elif argList[idx] == 'lambda':
                    # lambda(p1,...,pn, body) -> (param_p1, ...) = body with
                    # every parameter reference in the body prefixed
                    tmp = '('
                    upperLimit = rindex(argList[idx + 1], ',')
                    parsedParams = []
                    for x in argList[idx + 1][0:upperLimit]:
                        if x == ',':
                            tmp += ', '
                        else:
                            tmp += 'param_' + x
                            parsedParams.append(x)

                    #tmp = ''.join([x for x in constructFromList(argList[idx+1][0:upperLimit])])
                    tmp2 = ') = ' + constructFromList(
                        argList[idx + 1][rindex(argList[idx + 1], ',') + 1:],
                        optionList)
                    for x in parsedParams:
                        while re.search(r'(\W|^)({0})(\W|$)'.format(x),
                                        tmp2) != None:
                            tmp2 = re.sub(r'(\W|^)({0})(\W|$)'.format(x),
                                          r'\1param_\2 \3', tmp2)
                    idx += 1
                    parsedString += tmp + tmp2
            else:
                parsedString += argList[idx]
            idx += 1
        return parsedString

    # Repeatedly apply `function` as a regex substitution for each name in
    # functionList until the rule stops changing.
    def changeToBNGL(functionList, rule, function):
        oldrule = ''
        #if the rule contains any mathematical function we need to reformat
        while any([
                re.search(r'(\W|^)({0})(\W|$)'.format(x), rule) != None
                for x in functionList
        ]) and (oldrule != rule):
            oldrule = rule
            for x in functionList:
                rule = re.sub('({0})\(([^,]+),([^)]+)\)'.format(x), function,
                              rule)
            if rule == oldrule:
                logMess('ERROR', 'Malformed pow or root function %s' % rule)
                # parenthesized form works identically on Python 2 and 3
                # (the original used the py2-only statement `print 'meep'`)
                print('meep')
        return rule

    #rule = changeToBNGL(['pow','root'],rule,powParse)
    rule = changeToBNGL(['gt', 'lt', 'leq', 'geq', 'eq'], rule, compParse)
    #rule = changeToBNGL(['and','or'],rule,compParse)
    flag = True
    contentRule = pyparsing.Word(
        pyparsing.alphanums + '_'
    ) | ',' | '.' | '+' | '-' | '*' | '/' | '^' | '&' | '>' | '<' | '=' | '|'
    parens = pyparsing.nestedExpr('(', ')', content=contentRule)
    finalString = ''
    #remove ceil,floor

    if any([
            re.search(r'(\W|^)({0})(\W|$)'.format(x), rule) != None for x in
        ['ceil', 'floor', 'pow', 'sqrt', 'sqr', 'root', 'and', 'or']
    ]):
        argList = parens.parseString('(' + rule + ')').asList()
        rule = constructFromList(
            argList[0],
            ['floor', 'ceil', 'pow', 'sqrt', 'sqr', 'root', 'and', 'or'])

    while 'piecewise' in rule:
        argList = parens.parseString('(' + rule + ')').asList()
        rule = constructFromList(argList[0], ['piecewise'])
    #remove references to lambda functions
    if 'lambda(' in rule:
        lambdaList = parens.parseString('(' + rule + ')')
        functionBody = constructFromList(lambdaList[0].asList(), ['lambda'])
        flag = False
        rule = '{0}{1}'.format(functionTitle, functionBody)

    tmp = rule
    #delete the compartment from the rate function since cBNGL already does it
    for compartment in compartments:
        tmp = re.sub('^{0}\s*[*]'.format(compartment[0]), '', tmp)
        tmp = re.sub('([*]\s*{0})$'.format(compartment[0]), '', tmp)
        if compartment[0] in tmp:
            tmp = re.sub(r'(\W|^)({0})(\W|$)'.format(compartment[0]),
                         r'\1 {0} \3'.format(str(compartment[1])), tmp)
            #tmp = re.sub(r'(\W)({0})(\W)'.format(compartment[0]),r'\1%s\3' % str(compartment[1]),tmp)
            logMess(
                'INFO',
                'Exchanging reference to compartment %s for its dimensions' %
                compartment[0])

    #change references to time for time()
    #tmp =re.sub(r'(\W|^)(time)(\W|$)',r'\1time()\3',tmp)
    #tmp =re.sub(r'(\W|^)(Time)(\W|$)',r'\1time()\3',tmp)
    #BNGL has ^ for power.
    if flag:
        finalString = '%s = %s' % (functionTitle, tmp)
    else:
        finalString = tmp
    #change references to local parameters
    for parameter in parameterDict:
        finalString = re.sub(r'(\W|^)({0})(\W|$)'.format(parameter),
                             r'\1 {0} \3'.format(parameterDict[parameter]),
                             finalString)
    #change references to reaction Id's to their netflux equivalent
    for reaction in reactionDict:
        if reaction in finalString:
            finalString = re.sub(r'(\W|^)({0})(\W|$)'.format(reaction),
                                 r'\1 {0} \3'.format(reactionDict[reaction]),
                                 finalString)

    #combinations '+ -' break ibonetgen
    finalString = re.sub(r'(\W|^)([-])(\s)+', r'\1-', finalString)
    #changing reference of 't' to time()
    #finalString = re.sub(r'(\W|^)(t)(\W|$)',r'\1time()\3',finalString)
    #pi
    finalString = re.sub(r'(\W|^)(pi)(\W|$)', r'\1 3.1415926535 \3',
                         finalString)
    #log for log 10
    finalString = re.sub(r'(\W|^)log\(', r'\1 ln(', finalString)
    #reserved keyword: e
    finalString = re.sub(r'(\W|^)(e)(\W|$)', r'\1 are \3', finalString)
    #changing ceil
    #avoiding variables whose name starts with a number

    #removing mass-action elements

    tmp = finalString

    #print finalString,reactants
    #for reactant in reactants:
    #    finalString = re.sub(r'(\W|^)({0}\s+\*)'.format(reactant[0]),r'\1',finalString)
    #    finalString = re.sub(r'(\W|^)(\*\s+{0}(\s|$))'.format(reactant[0]),r'\1',finalString)
    #print finalString

    #if finalString != tmp:
    #    logMess('WARNING','Removed mass action elements from )
    return finalString
コード例 #41
0
 def _ParseDataField(DataField:str,Verbose=False)->(str,list):
     '''
     Parse and return a field with dataname and indexes.

     Example: 
         >>> ParseDataField('ni[1,4:6]') 
         ('ni',[1,[4,5])
         
         >>> ParseDataField('ni[:]')
         ('ni', [[0, -1]])

     Args:
         DataField (str): Name of the field to be retrieved (e.g. ni, te,ti) with indexes of third dimension between brackets []
         Verbose (bool): when True, print intermediate parsing steps

     Returns:
         (DataFieldName,DataFieldIndex) (str,list(int)): Name of the field (e.g. te), indexes of third dimensions if any; otherwise None
         On any parse error, ('', []) is returned instead of raising.
     '''
     # Count both bracket styles; a field may use [] or () but not both.
     Nb=DataField.count('[')+DataField.count(']')
     Np=DataField.count('(')+DataField.count(')')
     if Nb>0 and Np>0:
         print('Cannot combine () and [] in field name: {}'.format(DataField))
         return ('',[])
     from pyparsing import nestedExpr
     if Nb>0:
         Bracket=['[',']']
     elif Np>0:
         Bracket=['(',')'] 
     # No indexing at all: the whole string is the field name.
     if Np ==0 and Nb==0:
         return (DataField,[])
     
     # NOTE(review): only the '['/']' counts are balance-checked here; an
     # unbalanced '(...)' field slips through -- confirm whether intended.
     if DataField.count('[')!=DataField.count(']'):
         print('# of [ != # of ] in {}'.format(DataField))
         return ('',[])
     
     # Split off the name before the first opening bracket.
     S=DataField.split(Bracket[0],1)
     # Offset of -1 presumably converts 1-based '(...)' indexing to the
     # 0-based '[...]' convention -- TODO confirm against callers.
     if Np>0:
         Offset=-1
     else:
         Offset=0
     
     Name=S[0]
     if Name=='':
         print('No field provided in {} ...'.format(DataField))
         return ('',[])
     if Verbose: print('Bracket:',Bracket,'Name:',Name)
     if len(S)<2:
         Index=[]
     else:
         # Re-wrap the index part with '[' so nestedExpr('[',']') can parse.
         # NOTE(review): for '('-style fields the closing ')' is NOT converted
         # to ']', so this parse looks like it would fail -- confirm.
         S='['+S[1]
         Index=[]
         if Verbose: print('Parsing S:',S)
         Sb=nestedExpr('[',']').parseString(S).asList()[0]
         # Each token is either a plain comma-separated run (str) or a
         # nested bracket group (list); both accept 'a', 'a:b', ':' slices.
         for sb in Sb:
             if type(sb)==str:
                 S=sb.split(',')
                 for s in S:
                     if s=='':
                         continue
                     if s.count(':')>0:
                         s1=s.split(':')[0]
                         if s1=='':
                             s1=0
                         s2=s.split(':')[1]
                         if s2=='' or s2=='-1':
                             # open-ended slice: keep [start, -1] marker
                             s2='-1'
                             Index.append([int(s1)+Offset,int(s2)])
                         else:    
                             Index.append(np.array(range(int(s1)+Offset,int(s2)+Offset)).tolist())
                     else:
                         Index.append(int(s)+Offset)
                     
             elif type(sb)==list:
                 # Nested [a,b:c,...] group: collect into one sub-list.
                 S=sb[0].split(',')
                 ILocal=[]
                 for s in S:
                     if s=='':
                         continue
                     if s.count(':')>0:
                         s1=s.split(':')[0]
                         if s1=='':
                             s1=0
                         s2=s.split(':')[1]
                         if s2=='' or s2=='-1':
                             s2='-1'
                             ILocal.extend([int(s1)+Offset,int(s2)])
                         else:    
                             ILocal.extend(np.array(range(int(s1)+Offset,int(s2)+Offset)).tolist())
                     else:
                         ILocal.append(int(s)+Offset)
                 Index.append(ILocal)
             
             
         
     
     if Verbose:
         print('ParsingDataField: Name:{}; IndexSet:{}'.format(Name,Index))
     return (Name,Index)
コード例 #42
0
class CompositorSpec(Parser):
    """
    The syntax for defining a set of compositor is as follows:

    [ mode op(spec) [settings] value ]+

    The components are:

    mode      : Operation mode, either 'data' or 'display'.
    group     : Value identifier with capitalized initial letter.
    op        : The name of the operation to apply.
    spec      : Overlay specification of form (A * B) where A and B are
                 dotted path specifications.
    settings  : Optional list of keyword arguments to be used as
                parameters to the operation (in square brackets).
    """

    mode = pp.Word(pp.alphas+pp.nums+'_').setResultsName("mode")

    op = pp.Word(pp.alphas+pp.nums+'_').setResultsName("op")

    # parenthesized overlay spec, e.g. (A * B)
    overlay_spec = pp.nestedExpr(opener='(',
                                 closer=')',
                                 ignoreExpr=None
                             ).setResultsName("spec")

    value = pp.Word(pp.alphas+pp.nums+'_').setResultsName("value")

    # optional [key=val ...] settings block
    op_settings = pp.nestedExpr(opener='[',
                                closer=']',
                                ignoreExpr=None
                            ).setResultsName("op_settings")

    compositor_spec = pp.OneOrMore(pp.Group(mode + op + overlay_spec + value
                                            + pp.Optional(op_settings)))


    @classmethod
    def parse(cls, line, ns=None):
        """
        Parse compositor specifications, returning a list Compositors

        :param line: str specification in the grammar documented above
        :param ns: optional namespace dict forwarded to todict() for
            evaluating op settings
        :raises SyntaxError: on malformed input or an unknown operation
        """
        # Avoid the shared mutable-default pitfall of the old `ns={}`.
        if ns is None:
            ns = {}
        definitions = []
        # Ensure the grammar matched the line exactly once and consumed it
        # entirely; scanString yields (tokens, start, end) triples.
        parses  = [p for p in cls.compositor_spec.scanString(line)]
        if len(parses) != 1:
            raise SyntaxError("Invalid specification syntax.")
        else:
            e = parses[0][2]
            processed = line[:e]
            if (processed.strip() != line.strip()):
                raise SyntaxError("Failed to parse remainder of string: %r" % line[e:])

        opmap = {op.__name__:op for op in Compositor.operations}
        for group in cls.compositor_spec.parseString(line):

            if ('mode' not in group) or group['mode'] not in ['data', 'display']:
                raise SyntaxError("Either data or display mode must be specified.")
            mode = group['mode']

            # Validate the operation name BEFORE the opmap lookup; the old
            # code indexed opmap first, so an unknown op raised a bare
            # KeyError instead of this intended SyntaxError.
            if group['op'] not in opmap:
                raise SyntaxError("Operation %s not available for use with compositors."
                                  % group['op'])
            operation = opmap[group['op']]
            spec = ' '.join(group['spec'].asList()[0])

            kwargs = {}
            if 'op_settings' in group:
                kwargs = cls.todict(group['op_settings'][0], 'brackets', ns=ns)

            definition = Compositor(str(spec), operation, str(group['value']), mode, **kwargs)
            definitions.append(definition)
        return definitions
コード例 #43
0
ファイル: test_simple_unit.py プロジェクト: zendesk/pyparsing
class TestCommonHelperExpressions(PyparsingExpressionTestCase):
    """Table-driven tests for pyparsing's common helper expressions.

    Each PpTestSpec pairs an expression with sample text and the expected
    parse results (``expected_list`` for token order, ``expected_dict``
    where results names are attached).
    """
    tests = [
        PpTestSpec(
            desc="A comma-delimited list of words",
            expr=pp.delimitedList(pp.Word(pp.alphas)),
            text="this, that, blah,foo,   bar",
            expected_list=["this", "that", "blah", "foo", "bar"],
        ),
        PpTestSpec(
            desc="A counted array of words",
            # leading integer gives the number of following elements per group
            expr=pp.countedArray(pp.Word("ab"))[...],
            text="2 aaa bbb 0 3 abab bbaa abbab",
            expected_list=[["aaa", "bbb"], [], ["abab", "bbaa", "abbab"]],
        ),
        PpTestSpec(
            desc="skipping comments with ignore",
            expr=(pp.pyparsing_common.identifier("lhs") + "=" +
                  pp.pyparsing_common.fnumber("rhs")).ignore(
                      pp.cppStyleComment),
            text="abc_100 = /* value to be tested */ 3.1416",
            expected_list=["abc_100", "=", 3.1416],
            expected_dict={
                "lhs": "abc_100",
                "rhs": 3.1416
            },
        ),
        PpTestSpec(
            desc=
            "some pre-defined expressions in pyparsing_common, and building a dotted identifier with delimted_list",
            expr=(pp.pyparsing_common.number("id_num") + pp.delimitedList(
                pp.pyparsing_common.identifier, ".", combine=True)("name") +
                  pp.pyparsing_common.ipv4_address("ip_address")),
            text="1001 www.google.com 192.168.10.199",
            expected_list=[1001, "www.google.com", "192.168.10.199"],
            expected_dict={
                "id_num": 1001,
                "name": "www.google.com",
                "ip_address": "192.168.10.199",
            },
        ),
        PpTestSpec(
            desc=
            "using oneOf (shortcut for Literal('a') | Literal('b') | Literal('c'))",
            expr=pp.oneOf("a b c")[...],
            text="a b a b b a c c a b b",
            expected_list=[
                "a", "b", "a", "b", "b", "a", "c", "c", "a", "b", "b"
            ],
        ),
        PpTestSpec(
            desc="parsing nested parentheses",
            expr=pp.nestedExpr(),
            text="(a b (c) d (e f g ()))",
            expected_list=[["a", "b", ["c"], "d", ["e", "f", "g", []]]],
        ),
        PpTestSpec(
            desc="parsing nested braces",
            # the brace-delimited body is tokenized, not kept verbatim
            expr=(pp.Keyword("if") + pp.nestedExpr()("condition") +
                  pp.nestedExpr("{", "}")("body")),
            text='if ((x == y) || !z) {printf("{}");}',
            expected_list=[
                "if",
                [["x", "==", "y"], "||", "!z"],
                ["printf(", '"{}"', ");"],
            ],
            expected_dict={
                "condition": [[["x", "==", "y"], "||", "!z"]],
                "body": [["printf(", '"{}"', ");"]],
            },
        ),
    ]
コード例 #44
0
def extendFunction(function, subfunctionName, subfunction):
    """Inline every call to *subfunctionName* inside *function*.

    ``subfunction`` is a definition string of the form
    ``name(p1,p2,...) = body``.  Each call site in ``function`` is replaced
    by the body with its formal parameters substituted by the (parenthesised)
    actual arguments, repeating until no call remains.

    Raises
    ------
    TranslationException
        If ``subfunction`` has no `` = `` body part.
    """

    def constructFromList(argList, optionList, subfunctionParam,
                          subfunctionBody):
        # Walk the nested token list produced by pyparsing.nestedExpr and
        # rebuild the expression as a string, expanding any token found in
        # optionList (i.e. the subfunction name) into its substituted body.
        parsedString = ""
        idx = 0
        while idx < len(argList):
            if type(argList[idx]) is list:
                # nested parenthesised group: recurse and re-wrap in parens
                parsedString += (
                    "(" +
                    constructFromList(argList[idx], optionList,
                                      subfunctionParam, subfunctionBody) + ")")
            elif argList[idx] in optionList:
                tmp = subfunctionBody
                # split the following argument group on its comma tokens
                commaIndexes = [0]
                commaIndexes.extend(
                    [i for i, x in enumerate(argList[idx + 1]) if x == ","])
                commaIndexes.append(len(argList[idx + 1]))
                instancedParameters = [
                    argList[idx + 1][commaIndexes[i]:commaIndexes[i + 1]]
                    for i in range(len(commaIndexes) - 1)
                ]
                # substitute each formal parameter with its parenthesised
                # actual argument inside the body
                for parameter, instance in zip(subfunctionParam,
                                               instancedParameters):
                    if "," in instance:
                        instance.remove(",")
                    parsedParameter = (" ( " + constructFromList(
                        instance, optionList, subfunctionParam,
                        subfunctionBody) + " ) ")
                    tmp = re.sub(
                        r"(\W|^)({0})(\W|$)".format(parameter.strip()),
                        r"\1{0} \3".format(parsedParameter),
                        tmp,
                    )
                parsedString += " " + tmp + " "
                idx += 1  # skip the argument group we just consumed
            else:
                if argList[idx] == "=":
                    parsedString += " " + argList[idx] + " "
                else:
                    parsedString += argList[idx]
            idx += 1
        return parsedString

    # 'name(p1,p2)' -> 'p1,p2'
    param = subfunction.split(" = ")[0][len(subfunctionName) + 1:-1]
    # ASS2019: There are cases where the function doesn't have a definition and
    # the following line errors out with IndexError, let's handle it.
    try:
        body = subfunction.split(" = ")[1]
    except IndexError:
        logMess(
            "ERROR:TRS002",
            "This function doesn't have a definition, note that atomizer doesn't allow for function linking: {}"
            .format(subfunction),
        )
        raise TranslationException(
            f"ERROR:TRS002: This function doesn't have a definition, note that atomizer doesn't allow for function linking: {subfunction}"
        )
    # The grammar and the call-site pattern are loop-invariant: build them
    # once instead of on every pass through the rewrite loop.
    contentRule = (pyparsing.Word(pyparsing.alphanums + "_.")
                   | ","
                   | "+"
                   | "-"
                   | "*"
                   | "/"
                   | "^"
                   | "&"
                   | ">"
                   | "<"
                   | "="
                   | "|")
    parens = pyparsing.nestedExpr("(", ")", content=contentRule)
    callPattern = re.compile(
        r"(\W|^){0}\([^)]*\)(\W|$)".format(subfunctionName))
    while callPattern.search(function) is not None:
        subfunctionList = parens.parseString("(" + function + ")").asList()
        function = constructFromList(subfunctionList[0], [subfunctionName],
                                     param.split(","), body)
    return function
コード例 #45
0
def bnglFunction(rule,
                 functionTitle,
                 reactants,
                 compartments=[],
                 parameterDict={},
                 reactionDict={}):
    """Rewrite a mathematical rate expression into BNGL-compatible form.

    The input ``rule`` is progressively transformed: comparison helpers
    (``gt``/``lt``/...) become infix operators, ``ceil``/``floor``/``pow``/
    ``sqrt``/``root``/``and``/``or``/``piecewise``/``lambda`` calls are
    expanded via a pyparsing-based token walk, compartment names are replaced
    by their sizes, and several reserved names (``inf``, ``pi``, ``log``,
    ``e``) are rewritten.  Returns either ``"<functionTitle> = <expr>"`` or,
    for lambda definitions, the rewritten definition itself.

    NOTE(review): the mutable default arguments (``compartments=[]`` etc.)
    are shared across calls — safe only as long as they are never mutated
    here; confirm before refactoring.
    """
    def powParse(match):
        # Rewrite 'pow(a,b)' / 'root(a,b)' matches as '(a)^(b)' / '(a)^((1/b))'.
        # NOTE(review): 'operator' is only bound when group(1) is 'root' or
        # 'pow' — any other match would raise UnboundLocalError; the caller
        # (commented out below) only ever used those two names.
        if match.group(1) == "root":
            exponent = "(1/%s)" % match.group(3)
        else:
            exponent = match.group(3)
        if match.group(1) in ["root", "pow"]:
            operator = "^"
        return "({0}){1}({2})".format(match.group(2), operator, exponent)

    def compParse(match):
        # Rewrite comparison helpers, e.g. 'gt(a,b)' -> 'a > b'.
        translator = {
            "gt": ">",
            "lt": "<",
            "and": "&&",
            "or": "||",
            "geq": ">=",
            "leq": "<=",
            "eq": "==",
            "neq": "!=",
        }
        exponent = match.group(3)
        operator = translator[match.group(1)]
        return "{0} {1} {2}".format(match.group(2), operator, exponent)

    def ceilfloorParse(math):
        # Express ceil/floor through min/rint, which BNGL understands.
        flag = False
        if math.group(1) == "ceil":
            flag = True
        if flag:
            return "min(rint({0}+0.5),rint({0} + 1))".format(math.group(2))
        else:
            return "min(rint({0}-0.5),rint({0}+0.5))".format(math.group(2))

    def parameterRewrite(match):
        # Prefix a matched parameter name with 'param_'.
        return match.group(1) + "param_" + match.group(2) + match.group(3)

    def constructFromList(argList, optionList):
        # Walk the nested token list produced by pyparsing.nestedExpr and
        # rebuild the expression as a string, expanding any function name
        # listed in optionList into its BNGL equivalent.
        parsedString = ""
        idx = 0
        translator = {
            "gt": ">",
            "lt": "<",
            "and": "&&",
            "or": "||",
            "geq": ">=",
            "leq": "<=",
            "eq": "==",
        }
        while idx < len(argList):
            if type(argList[idx]) is list:
                # nested parenthesised group: recurse and re-wrap
                parsedString += "(" + constructFromList(
                    argList[idx], optionList) + ")"
            elif argList[idx] in optionList:
                if argList[idx] == "ceil":
                    parsedString += "min(rint(({0}) + 0.5),rint(({0}) + 1))".format(
                        constructFromList(argList[idx + 1], optionList))
                    idx += 1
                elif argList[idx] == "floor":
                    parsedString += "min(rint(({0}) -0.5),rint(({0}) + 0.5))".format(
                        constructFromList(argList[idx + 1], optionList))
                    idx += 1
                elif argList[idx] in ["pow"]:
                    # split base/exponent at the last comma of the group
                    index = rindex(argList[idx + 1], ",")
                    parsedString += ("((" + constructFromList(
                        argList[idx + 1][0:index], optionList) + ")")
                    parsedString += (" ^ " + "(" + constructFromList(
                        argList[idx + 1][index + 1:], optionList) + "))")
                    idx += 1
                elif argList[idx] in ["sqr", "sqrt"]:
                    # sqrt is '^(1/2)', sqr is '^(2)'
                    tag = "1/" if argList[idx] == "sqrt" else ""
                    parsedString += (
                        "((" +
                        constructFromList(argList[idx + 1], optionList) +
                        ") ^ ({0}2))".format(tag))
                    idx += 1
                elif argList[idx] == "root":
                    index = rindex(argList[idx + 1], ",")
                    tmp = ("1/(" + constructFromList(argList[idx + 1][0:index],
                                                     optionList) + "))")
                    parsedString += ("((" + constructFromList(
                        argList[idx + 1][index + 1:], optionList) + ") ^ " +
                                     tmp)
                    idx += 1
                elif argList[idx] == "piecewise":
                    # piecewise(result, condition, else...) -> nested if(...)
                    index1 = argList[idx + 1].index(",")
                    try:
                        index2 = argList[idx + 1][index1 +
                                                  1:].index(",") + index1 + 1
                        try:
                            index3 = (
                                argList[idx + 1][index2 + 1:].index(",") +
                                index2 + 1)
                        except ValueError:
                            index3 = -1
                    except ValueError:
                        # no condition part: emit the lone expression
                        parsedString += constructFromList(
                            [argList[idx + 1][index1 + 1:]], optionList)
                        index2 = -1
                    if index2 != -1:
                        condition = constructFromList(
                            [argList[idx + 1][index1 + 1:index2]], optionList)
                        result = constructFromList([argList[idx + 1][:index1]],
                                                   optionList)
                        if index3 == -1:
                            result2 = constructFromList(
                                [argList[idx + 1][index2 + 1:]], optionList)
                        else:
                            # more than one alternative: recurse on the rest
                            result2 = constructFromList(
                                ["piecewise", argList[idx + 1][index2 + 1:]],
                                optionList,
                            )
                        parsedString += "if({0},{1},{2})".format(
                            condition, result, result2)
                    idx += 1
                elif argList[idx] in ["and", "or"]:
                    # join the comma-separated operands with '&&' / '||'
                    symbolDict = {"and": " && ", "or": " || "}
                    indexArray = [-1]
                    elementArray = []
                    for idx2, element in enumerate(argList[idx + 1]):
                        if element == ",":
                            indexArray.append(idx2)
                    indexArray.append(len(argList[idx + 1]))
                    tmpStr = argList[idx + 1]
                    for idx2, _ in enumerate(indexArray[0:-1]):
                        elementArray.append(
                            constructFromList(
                                tmpStr[indexArray[idx2] + 1:indexArray[idx2 +
                                                                       1]],
                                optionList,
                            ))
                    parsedString += symbolDict[argList[idx]].join(elementArray)
                    idx += 1
                elif argList[idx] == "lambda":

                    # rebuild 'lambda(p1,...,body)' as '(param_p1, ...) = body'
                    tmp = "("
                    # ASS2019 - I'm not sure if this is an actual solution or
                    # this should just never happen. argList[idx+1] sometimes
                    # returns _only_ ['0'] and thus the following call fails with
                    # ValueError. Not sure if the list is built wrong or this
                    # result is not handled correctly. Either way, this, for now,
                    # skirts the issue.
                    try:
                        upperLimit = rindex(argList[idx + 1], ",")
                    except ValueError:
                        idx += 1
                        continue
                    parsedParams = []
                    for x in argList[idx + 1][0:upperLimit]:
                        if x == ",":
                            tmp += ", "
                        else:
                            tmp += "param_" + x
                            parsedParams.append(x)

                    # tmp = ''.join([x for x in constructFromList(argList[idx+1][0:upperLimit])])
                    tmp2 = ") = " + constructFromList(
                        argList[idx + 1][rindex(argList[idx + 1], ",") + 1:],
                        optionList,
                    )
                    # rename every parameter reference in the body as well
                    for x in parsedParams:
                        while re.search(r"(\W|^)({0})(\W|$)".format(x),
                                        tmp2) != None:
                            tmp2 = re.sub(r"(\W|^)({0})(\W|$)".format(x),
                                          r"\1param_\2 \3", tmp2)
                    idx += 1
                    parsedString += tmp + tmp2
            else:
                parsedString += argList[idx]
            idx += 1
        return parsedString

    def changeToBNGL(functionList, rule, function):
        # Repeatedly apply 'function' as a re.sub replacement for every
        # name in functionList until the rule stops changing.
        oldrule = ""
        # if the rule contains any mathematical function we need to reformat
        while any([
                re.search(r"(\W|^)({0})(\W|$)".format(x), rule) != None
                for x in functionList
        ]) and (oldrule != rule):
            oldrule = rule
            for x in functionList:
                rule = re.sub("({0})\(([^,]+),([^)]+)\)".format(x), function,
                              rule)
            if rule == oldrule:
                logMess("ERROR:TRS001",
                        "Malformed pow or root function %s" % rule)
                print("meep")
        return rule

    # --- main body: apply the rewrite passes in order ---
    # rule = changeToBNGL(['pow','root'],rule,powParse)
    rule = changeToBNGL(["gt", "lt", "leq", "geq", "eq"], rule, compParse)
    # rule = changeToBNGL(['and','or'],rule,compParse)
    flag = True
    # token grammar for splitting the rule into a nested list
    contentRule = (pyparsing.Word(pyparsing.alphanums + "_")
                   | ","
                   | "."
                   | "+"
                   | "-"
                   | "*"
                   | "/"
                   | "^"
                   | "&"
                   | ">"
                   | "<"
                   | "="
                   | "|")
    parens = pyparsing.nestedExpr("(", ")", content=contentRule)
    finalString = ""
    # remove ceil,floor

    if any([
            re.search(r"(\W|^)({0})(\W|$)".format(x), rule) != None for x in
        ["ceil", "floor", "pow", "sqrt", "sqr", "root", "and", "or"]
    ]):
        argList = parens.parseString("(" + rule + ")").asList()
        rule = constructFromList(
            argList[0],
            ["floor", "ceil", "pow", "sqrt", "sqr", "root", "and", "or"])

    while "piecewise" in rule:
        argList = parens.parseString("(" + rule + ")").asList()
        rule = constructFromList(argList[0], ["piecewise"])
    # remove references to lambda functions
    if "lambda(" in rule:
        lambdaList = parens.parseString("(" + rule + ")")
        functionBody = constructFromList(lambdaList[0].asList(), ["lambda"])
        flag = False
        rule = "{0}{1}".format(functionTitle, functionBody)

    tmp = rule
    # delete the compartment from the rate function since cBNGL already does it
    # this is not true as seen by the test
    for compartment in compartments:
        # if len(reactants) < 2:
        #    tmp = re.sub('^{0}\s*[*]'.format(compartment[0]),'',tmp)
        #    tmp = re.sub('([*]\s*{0})$'.format(compartment[0]),'',tmp)

        if compartment[0] in tmp:
            # replace the compartment name with its size
            tmp = re.sub(
                r"(\W|^)({0})(\W|$)".format(compartment[0]),
                r"\1 {0} \3".format(str(compartment[1])),
                tmp,
            )
            # tmp = re.sub(r'(\W)({0})(\W)'.format(compartment[0]),r'\1%s\3' % str(compartment[1]),tmp)
            # logMess('INFO:MSC005','Exchanging reference to compartment %s for its dimensions' % compartment[0])

    # change references to time for time()
    # tmp =re.sub(r'(\W|^)(time)(\W|$)',r'\1time()\3',tmp)
    # tmp =re.sub(r'(\W|^)(Time)(\W|$)',r'\1time()\3',tmp)
    while re.search(r"(\W|^)inf(\W|$)", tmp) != None:
        tmp = re.sub(r"(\W|^)(inf)(\W|$)", r"\1 1e20 \3", tmp)
    # BNGL has ^ for power.
    if flag:
        finalString = "%s = %s" % (functionTitle, tmp)
    else:
        finalString = tmp
    # change references to local parameters
    for parameter in parameterDict:
        finalString = re.sub(
            r"(\W|^)({0})(\W|$)".format(parameter),
            r"\g<1>{0}\g<3>".format(parameterDict[parameter]),
            finalString,
        )
    # change references to reaction Id's to their netflux equivalent
    for reaction in reactionDict:
        if reaction in finalString:
            finalString = re.sub(
                r"(\W|^)({0})(\W|$)".format(reaction),
                r"\g<1>{0}\g<3>".format(reactionDict[reaction]),
                finalString,
            )

    # combinations '+ -' break ibonetgen
    finalString = re.sub(r"(\W|^)([-])(\s)+", r"\1-", finalString)
    # changing reference of 't' to time()
    # finalString = re.sub(r'(\W|^)(t)(\W|$)',r'\1time()\3',finalString)
    # pi
    finalString = re.sub(r"(\W|^)(pi)(\W|$)", r"\g<1>3.1415926535\g<3>",
                         finalString)
    # print(reactants,finalString)
    # log for log 10
    finalString = re.sub(r"(\W|^)log\(", r"\1 ln(", finalString)
    # reserved keyword: e
    finalString = re.sub(r"(\W|^)(e)(\W|$)", r"\g<1>__e__\g<3>", finalString)
    # changing ceil
    # avoiding variables whose name starts with a number

    # removing mass-action elements

    tmp = finalString

    # print(finalString,reactants)
    # for reactant in reactants:
    #    finalString = re.sub(r'(\W|^)({0}\s+\*)'.format(reactant[0]),r'\1',finalString)
    #    finalString = re.sub(r'(\W|^)(\*\s+{0}(\s|$))'.format(reactant[0]),r'\1',finalString)
    # print(finalString)

    # if finalString != tmp:
    #    logMess('WARNING','Removed mass action elements from )
    return finalString
コード例 #46
0
ファイル: noderange.py プロジェクト: jedrecord/confluent
# Python 2 compatibility: prefer the lazy xrange; on Python 3 the name does
# not exist and the builtin range is already lazy.
try:
    range = xrange
except NameError:
    pass

# construct custom grammar with pyparsing
# a bare node token: letters/digits plus the punctuation the range syntax allows
_nodeword = pp.Word(pp.alphanums + '~^$/=-_:.*+!')
# a [...] expression; unquoteResults=False keeps the brackets in the result
_nodebracket = pp.QuotedString(quoteChar='[',
                               endQuoteChar=']',
                               unquoteResults=False)
# one atom is a run of words and/or bracket expressions
_nodeatom = pp.Group(pp.OneOrMore(_nodeword | _nodebracket))
# pagination markers: '<' or '>' followed by digits
_paginationstart = pp.Group(pp.Word('<', pp.nums))
_paginationend = pp.Group(pp.Word('>', pp.nums))
_grammar = _nodeatom | ',-' | ',' | '@' | _paginationstart | _paginationend
_parser = pp.nestedExpr(content=_grammar)

# splits a name into alphabetic and numeric runs (presumably for natural
# sorting — see humanify_nodename below; confirm against callers)
_numextractor = pp.OneOrMore(pp.Word(pp.alphas + '-') | pp.Word(pp.nums))

numregex = re.compile('([0-9]+)')

# module-level holder for the last noderange; None until set elsewhere
lastnoderange = None

def humanify_nodename(nodename):
    """Analyzes nodename in a human way to enable natural sort

    :param nodename: The node name to analyze
    :returns: A structure that can be consumed by 'sorted'
    """
    return [
コード例 #47
0
ファイル: test_simple_unit.py プロジェクト: rcoup/pyparsing
class TestCommonHelperExpressions(PyparsingExpressionTestCase):
    """Table-driven tests for pyparsing's common helper expressions.

    Each PpTestSpec pairs an expression with sample input text and the
    expected parse results (``expected_list``, plus ``expected_dict`` where
    results names are attached).
    """
    tests = [
        PpTestSpec(
            desc="A comma-delimited list of words",
            expr=pp.delimitedList(pp.Word(pp.alphas)),
            text="this, that, blah,foo,   bar",
            expected_list=['this', 'that', 'blah', 'foo', 'bar'],
        ),
        PpTestSpec(
            desc="A counted array of words",
            # leading integer gives the number of following elements per group
            expr=pp.countedArray(pp.Word('ab'))[...],
            text="2 aaa bbb 0 3 abab bbaa abbab",
            expected_list=[['aaa', 'bbb'], [], ['abab', 'bbaa', 'abbab']],
        ),
        PpTestSpec(
            desc="skipping comments with ignore",
            expr=(pp.pyparsing_common.identifier('lhs') + '=' +
                  pp.pyparsing_common.fnumber('rhs')).ignore(
                      pp.cppStyleComment),
            text="abc_100 = /* value to be tested */ 3.1416",
            expected_list=['abc_100', '=', 3.1416],
            expected_dict={
                'lhs': 'abc_100',
                'rhs': 3.1416
            },
        ),
        PpTestSpec(
            desc=
            "some pre-defined expressions in pyparsing_common, and building a dotted identifier with delimted_list",
            expr=(pp.pyparsing_common.number("id_num") + pp.delimitedList(
                pp.pyparsing_common.identifier, '.', combine=True)("name") +
                  pp.pyparsing_common.ipv4_address("ip_address")),
            text="1001 www.google.com 192.168.10.199",
            expected_list=[1001, 'www.google.com', '192.168.10.199'],
            expected_dict={
                'id_num': 1001,
                'name': 'www.google.com',
                'ip_address': '192.168.10.199'
            },
        ),
        PpTestSpec(
            desc=
            "using oneOf (shortcut for Literal('a') | Literal('b') | Literal('c'))",
            expr=pp.oneOf("a b c")[...],
            text="a b a b b a c c a b b",
            expected_list=[
                'a', 'b', 'a', 'b', 'b', 'a', 'c', 'c', 'a', 'b', 'b'
            ],
        ),
        PpTestSpec(
            desc="parsing nested parentheses",
            expr=pp.nestedExpr(),
            text="(a b (c) d (e f g ()))",
            expected_list=[['a', 'b', ['c'], 'd', ['e', 'f', 'g', []]]],
        ),
        PpTestSpec(
            desc="parsing nested braces",
            # the brace-delimited body is tokenized, not kept verbatim
            expr=(pp.Keyword('if') + pp.nestedExpr()('condition') +
                  pp.nestedExpr('{', '}')('body')),
            text='if ((x == y) || !z) {printf("{}");}',
            expected_list=[
                'if', [['x', '==', 'y'], '||', '!z'],
                ['printf(', '"{}"', ');']
            ],
            expected_dict={
                'condition': [[['x', '==', 'y'], '||', '!z']],
                'body': [['printf(', '"{}"', ');']]
            },
        ),
    ]
コード例 #48
0
def graph_definition():
    """Build (once) and return the pyparsing grammar for DOT graph files.

    The parser is cached in the module-level ``graphparser`` global, so the
    grammar is only constructed on the first call.  Parse actions (the
    ``push_*`` callables, defined elsewhere in this module) assemble the
    graph structure as the input is parsed.
    """

    global graphparser

    if not graphparser:

        # punctuation
        colon  = Literal(":")
        lbrace = Literal("{")
        rbrace = Literal("}")
        lbrack = Literal("[")
        rbrack = Literal("]")
        lparen = Literal("(")
        rparen = Literal(")")
        equals = Literal("=")
        comma  = Literal(",")
        dot    = Literal(".")
        slash  = Literal("/")
        bslash = Literal("\\")
        star   = Literal("*")
        semi   = Literal(";")
        at     = Literal("@")
        minus  = Literal("-")

        # keywords
        strict_    = CaselessLiteral("strict")
        graph_     = CaselessLiteral("graph")
        digraph_   = CaselessLiteral("digraph")
        subgraph_  = CaselessLiteral("subgraph")
        node_      = CaselessLiteral("node")
        edge_      = CaselessLiteral("edge")


        # token definitions

        identifier = Word(alphanums + "_." ).setName("identifier")

        # quotes are kept in the result (unquoteResults=False)
        double_quoted_string = QuotedString(
            '"', multiline=True, unquoteResults=False)  # dblQuotedString

        noncomma = "".join([c for c in printables if c != ","])
        alphastring_ = OneOrMore(CharsNotIn(noncomma + ' '))

        def parse_html(s, loc, toks):
            # re-wrap the nested <...> content as a single '<...>' string
            return '<%s>' % ''.join(toks[0])


        opener = '<'
        closer = '>'
        html_text = nestedExpr( opener, closer,
            ( CharsNotIn( opener + closer )  )
                ).setParseAction(parse_html).leaveWhitespace()

        ID = ( identifier | html_text |
            double_quoted_string | #.setParseAction(strip_quotes) |
            alphastring_ ).setName("ID")


        float_number = Combine(Optional(minus) +
            OneOrMore(Word(nums + "."))).setName("float_number")

        righthand_id =  (float_number | ID ).setName("righthand_id")

        # node ports: ':port', ':(x,y)' and/or '@angle'
        port_angle = (at + ID).setName("port_angle")

        port_location = (OneOrMore(Group(colon + ID)) |
            Group(colon + lparen +
                  ID + comma + ID + rparen)).setName("port_location")

        port = (Group(port_location + Optional(port_angle)) |
            Group(port_angle + Optional(port_location))).setName("port")

        node_id = (ID + Optional(port))
        # attribute assignments inside '[...]', commas optional
        a_list = OneOrMore(ID + Optional(equals + righthand_id) +
            Optional(comma.suppress())).setName("a_list")

        attr_list = OneOrMore(lbrack.suppress() + Optional(a_list) +
            rbrack.suppress()).setName("attr_list")

        attr_stmt = (Group(graph_ | node_ | edge_) +
                     attr_list).setName("attr_stmt")

        # '--' for undirected, '->' for directed edges
        edgeop = (Literal("--") | Literal("->")).setName("edgeop")

        # Forward declarations: stmt_list and edge_point are mutually
        # recursive with the productions below.
        stmt_list = Forward()
        graph_stmt = Group(lbrace.suppress() + Optional(stmt_list) +
            rbrace.suppress() +
            Optional(semi.suppress())).setName("graph_stmt")


        edge_point = Forward()

        edgeRHS = OneOrMore(edgeop + edge_point)
        edge_stmt = edge_point + edgeRHS + Optional(attr_list)

        subgraph = Group(
            subgraph_ + Optional(ID) + graph_stmt).setName("subgraph")

        edge_point << Group(
            subgraph | graph_stmt | node_id).setName('edge_point')

        node_stmt = (
            node_id + Optional(attr_list) +
            Optional(semi.suppress())).setName("node_stmt")

        assignment = (ID + equals + righthand_id).setName("assignment")
        stmt = (assignment | edge_stmt | attr_stmt |
                subgraph | graph_stmt | node_stmt).setName("stmt")
        stmt_list << OneOrMore(stmt + Optional(semi.suppress()))

        graphparser = OneOrMore(
            (Optional(strict_) + Group((graph_ | digraph_)) +
             Optional(ID) + graph_stmt).setResultsName("graph"))

        singleLineComment = Group(
            "//" + restOfLine) | Group("#" + restOfLine)


        # actions

        graphparser.ignore(singleLineComment)
        graphparser.ignore(cStyleComment)

        assignment.setParseAction(push_attr_list)
        a_list.setParseAction(push_attr_list)
        edge_stmt.setParseAction(push_edge_stmt)
        node_stmt.setParseAction(push_node_stmt)
        attr_stmt.setParseAction(push_default_stmt)

        subgraph.setParseAction(push_subgraph_stmt)
        graph_stmt.setParseAction(push_graph_stmt)
        graphparser.setParseAction(push_top_graph_stmt)


    return graphparser
コード例 #49
0
	return ParseResults(normalise_templates(toks[0].asList()))

def normalise_templates(toks, isinstance=isinstance, basestring=str):
    """Flatten a nested token list into a canonical ``< ... >`` template string.

    Each string token is emitted with a leading space; each nested list is
    recursively normalised and appended as its own ``< ... >`` group.
    ``isinstance`` and the string type are bound as defaults once for faster
    lookup.  The string-type default is now ``str`` instead of the Python-2
    ``basestring``, which no longer exists on Python 3 and made this ``def``
    raise NameError there; on Python 2 callers can still pass
    ``basestring=basestring`` explicitly.

    >>> normalise_templates(['int'])
    '< int >'
    """
    s_list = ['<']
    s_list_append = s_list.append  # look up the bound method once, not per token
    for tok in toks:
        if isinstance(tok, basestring):  # plain string token
            s_list_append(' ' + tok)
        else:
            # nested template: recurse, appending its full '< ... >' form
            s_list_append(normalise_templates(tok))
    s_list_append(' >')
    return ''.join(s_list)

#Skip pairs of brackets.
#Angle brackets are matched with full nesting support via nestedExpr.
angle_bracket_pair = nestedExpr(opener='<',closer='>').setParseAction(turn_parseresults_to_list)
#TODO Fix for nesting brackets
#(SkipTo stops at the *first* closer, so nested parens/brackets are not handled)
parentheses_pair = LPAR + SkipTo(RPAR) + RPAR
square_bracket_pair = LBRACK + SkipTo(RBRACK) + RBRACK

#The raw type of the input, i.e. 'int' in (unsigned const int * foo)
#TODO I guess this should be a delimited list (by '::') of name and angle brackets
input_type = Combine(Word(alphanums + ':_') + Optional(angle_bracket_pair + Optional(Word(alphanums + ':_'))))

#A number. e.g. -1, 3.6 or 5
number = Word('-.' + nums)

#The name of the argument. We will ignore this but it must be matched anyway.
input_name = OneOrMore(Word(alphanums + '_') | angle_bracket_pair | parentheses_pair | square_bracket_pair)

#Grab the '&', '*' or '**' type bit in (const QString & foo, int ** bar)
コード例 #50
0
ファイル: parser.py プロジェクト: glaucouri/XLtoy
    def __init__(self, collector=None):
        """Build the Excel-formula grammar and store it in ``self.bnf``.

        The grammar recognises cell references (with optional sheet and
        '$' anchors), cell ranges, IF and statistical functions, and
        arithmetic/text expressions.  Parse actions are bound to the
        ``cell_action`` / ``*_action`` methods of this instance.

        :param collector: object that receives parse events from the
            bound parse actions — presumably; its use is not visible
            here, TODO confirm against the action methods.
        """
        self.current_sheet = None
        self.collector = collector
        # Punctuation literals used throughout the grammar.
        EQ, LPAR, RPAR, COLON, COMMA, EXCL, DOLLAR = map(Literal, "=():,!$")
        COMPARISON_OP = oneOf("= < > >= <= != <>")('op')
        multOp = oneOf("* /")
        addOp = oneOf("+ -")
        words = Word(alphas, alphanums + '_')
        # Sheet names: bare identifier or quoted string ('' escapes ').
        sheetRef = words | QuotedString("'", escQuote="''")
        # Column (max 2 letters) / row references, optionally absolute ($).
        colRef = Optional(DOLLAR) + Word(alphas, max=2)
        rowRef = Optional(DOLLAR) + Word(nums)

        # Atomic element for cell
        cellAtom = Group(
            Optional(sheetRef("sheet") + EXCL) + \
            Combine(colRef + rowRef)("pos")
        )

        #Range
        cellRange = Group(
            cellAtom("start") + COLON + cellAtom("end")
        )("range")

        # Single cell with action
        cellRef = cellAtom("cell")\
                  .setParseAction(self.cell_action)

        # Forward-declared: defined at the bottom, referenced by the
        # function grammars below.
        expr = Forward()

        # Conditions
        condExpr = expr + Optional(COMPARISON_OP.setParseAction(self.equal_as_comparison_action) + expr)

        # If function
        ifFunc = (
                CaselessKeyword("if")
                + LPAR
                + Combine(condExpr)("condition")
                + COMMA
                + Combine(expr)("if_true")
                + COMMA
                + Combine(expr)("if_false")
                + RPAR).setParseAction(self.ternary_if_action)

        # Functions
        def stat_function(name, obj=expr, empty=False):
            # Grammar for name(arg, ...); empty=True gives name().
            if empty:
                x = Group(LPAR + RPAR)
            else:
                x = Group(LPAR + delimitedList(obj, combine=True)('elem_list') + RPAR)
            return CaselessKeyword(name)('function_name') + x

        sumFunc = stat_function("sum")
        minFunc = stat_function("min")
        maxFunc = stat_function("max")
        aveFunc = stat_function("ave")
        sqrFunc = stat_function("sqrt")
        lenFunc = stat_function("len")
        andFunc = stat_function("and", obj=condExpr).setParseAction(self.logic_operation)
        orFunc  = stat_function("or", obj=condExpr).setParseAction(self.logic_operation)
        randFunc= stat_function("rand", empty=True)
        logFunc = stat_function("log")
        # Fallback for any other name(...) call.
        unknowFunc = words + Group(LPAR + (delimitedList(expr|cellRange) | cellRange | expr) + RPAR)

        functions = ifFunc | sumFunc | minFunc | maxFunc | aveFunc | sqrFunc | lenFunc | randFunc | \
                    andFunc| orFunc | logFunc | unknowFunc

        numericLiteral = ppc.number

        # Numeric Expressions
        operand = numericLiteral | functions | cellRange | cellRef | words
        arithExpr = infixNotation(
            operand, [
                (addOp, 1, opAssoc.RIGHT),
                (multOp, 2, opAssoc.LEFT),
                (addOp, 2, opAssoc.LEFT),
                ('^', 2, opAssoc.LEFT)],
            lpar=LPAR, rpar=RPAR
        )

        # Text Expressions
        textOperand = dblQuotedString | cellRef
        textExpr = infixNotation(textOperand, [("&", 2, opAssoc.LEFT), ])

        # Final syntax
        atom = arithExpr | textExpr | nestedExpr('(', ')')
        expr << atom
        # A formula: optional leading '=' (or sign), expression, end of line.
        bnf = Optional(EQ|addOp) + expr + LineEnd()
        self.bnf = bnf
コード例 #51
0
    def _string_to_ast(self, input_string):
        """Parse a smart search string and return it in an AST-like form.

        The grammar recognises words, numbers, IPv4/IPv6 addresses and
        prefixes, VRF RTs, #tags, quoted strings, attribute match
        expressions and (possibly nested) parenthesised groups.  Match
        and boolean operators come from ``self.match_operators`` and
        ``self.boolean_operators``.

        :param input_string: the raw search string to parse
        :return: the pyparsing ParseResults for the whole string
        """

        # simple words
        comp_word = Word(alphanums + "-./_")
        word = Word(alphanums + "-./_").setResultsName('word')
        # numbers
        comp_number = Word(nums)
        number = Word(nums).setResultsName('number')

        # IPv4 address: an octet followed by exactly three ".octet"
        # groups.  The repetition must apply to ('.' + ipv4_oct), not to
        # the octet alone — `ipv4_oct * 3` (the previous form) demanded
        # three consecutive octets with no separating dots, so dotted
        # quads like 1.2.3.4 could never match.
        ipv4_oct = Regex("((2(5[0-5]|[0-4][0-9])|[01]?[0-9][0-9]?))")
        comp_ipv4_address = Combine(ipv4_oct + ('.' + ipv4_oct) * 3)
        ipv4_address = Combine(ipv4_oct +
                               ('.' +
                                ipv4_oct) * 3).setResultsName('ipv4_address')

        # IPv6 address
        ipv6_address = Regex(
            "((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?"
        ).setResultsName('ipv6_address')
        ipv6_prefix = Combine(ipv6_address +
                              Regex("/(12[0-8]|1[01][0-9]|[0-9][0-9]?)")
                              ).setResultsName('ipv6_prefix')

        # VRF RTs of the form number:number
        vrf_rt = Combine((comp_ipv4_address | comp_number) + Literal(':') +
                         comp_number).setResultsName('vrf_rt')

        # tags
        tags = Combine(Literal('#') + comp_word).setResultsName('tag')

        # operators for matching
        match_op = oneOf(' '.join(
            self.match_operators)).setResultsName('operator')
        boolean_op = oneOf(' '.join(
            self.boolean_operators)).setResultsName('boolean')
        # quoted string
        d_quoted_string = QuotedString('"', unquoteResults=True, escChar='\\')
        s_quoted_string = QuotedString('\'', unquoteResults=True, escChar='\\')
        quoted_string = (s_quoted_string
                         | d_quoted_string).setResultsName('quoted_string')
        # expression to match a certain value for an attribute
        expression = Group(word + match_op +
                           (quoted_string | vrf_rt | word
                            | number)).setResultsName('expression')
        # we work on atoms, which are single quoted strings, match expressions,
        # tags, VRF RT or simple words.
        # NOTE: Place them in order of most exact match first!
        atom = Group(ipv6_prefix | ipv6_address | quoted_string | expression
                     | tags | vrf_rt | boolean_op | word)

        # Parenthesised groups may nest arbitrarily.
        enclosed = Forward()
        parens = nestedExpr('(', ')', content=enclosed)
        enclosed << (parens | atom).setResultsName('nested')

        content = Forward()
        content << (ZeroOrMore(enclosed))

        res = content.parseString(input_string)
        return res
    def define_dot_parser(self):
        """Define dot grammar

        Based on the grammar http://www.graphviz.org/doc/info/lang.html
        Returns the top-level ``graphparser`` element with this
        instance's ``_proc_*`` parse actions attached.
        """
        # punctuation
        colon = Literal(":")
        lbrace = Suppress("{")
        rbrace = Suppress("}")
        lbrack = Suppress("[")
        rbrack = Suppress("]")
        lparen = Literal("(")
        rparen = Literal(")")
        equals = Suppress("=")
        comma = Literal(",")
        dot = Literal(".")
        slash = Literal("/")
        bslash = Literal("\\")
        star = Literal("*")
        semi = Suppress(";")
        at = Literal("@")
        minus = Literal("-")
        pluss = Suppress("+")

        # keywords
        strict_ = CaselessLiteral("strict")
        graph_ = CaselessLiteral("graph")
        digraph_ = CaselessLiteral("digraph")
        subgraph_ = CaselessLiteral("subgraph")
        node_ = CaselessLiteral("node")
        edge_ = CaselessLiteral("edge")

        # Characters that terminate an unquoted string: all punctuation
        # except '_' plus whitespace.
        punctuation_ = "".join([c for c in string.punctuation if c not in '_'
                                ]) + string.whitespace
        # token definitions

        identifier = Word(alphanums + "_").setName("identifier")

        # double_quoted_string = QuotedString('"', multiline=True,escChar='\\',
        #    unquoteResults=True) # dblQuotedString
        double_quoted_string = Regex(r'\"(?:\\\"|\\\\|[^"])*\"', re.MULTILINE)
        double_quoted_string.setParseAction(removeQuotes)
        # dot allows "a" + "b" style concatenation of quoted strings.
        quoted_string = Combine(
            double_quoted_string +
            Optional(OneOrMore(pluss + double_quoted_string)),
            adjacent=False)
        alphastring_ = OneOrMore(CharsNotIn(punctuation_))

        def parse_html(s, loc, toks):
            # Re-wrap HTML-like label content in << >> markers.
            return '<<%s>>' % ''.join(toks[0])

        opener = '<'
        closer = '>'
        try:
            html_text = pyparsing.nestedExpr(
                opener, closer,
                ((CharsNotIn(opener + closer).setParseAction(lambda t: t[0]))
                 )).setParseAction(parse_html)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are not swallowed while probing for
            # nestedExpr support in old pyparsing versions.
            log.debug('nestedExpr not available.')
            log.warning('Old version of pyparsing detected. Version 1.4.8 or '
                        'later is recommended. Parsing of html labels may not '
                        'work properly.')
            html_text = Combine(Literal("<<") + OneOrMore(CharsNotIn(",]")))

        float_number = Combine(Optional(minus) +
                               OneOrMore(Word(nums +
                                              "."))).setName("float_number")

        ID = (
            alphastring_ | html_text | float_number | quoted_string
            |  # .setParseAction(strip_quotes) |
            identifier).setName("ID")

        righthand_id = (float_number | ID).setName("righthand_id")

        port_angle = (at + ID).setName("port_angle")

        port_location = ((OneOrMore(Group(colon + ID))
                          | Group(colon + lparen + ID + comma + ID +
                                  rparen))).setName("port_location")

        port = Combine(
            (Group(port_location + Optional(port_angle))
             | Group(port_angle + Optional(port_location)))).setName("port")

        node_id = (ID + Optional(port))
        a_list = OneOrMore(ID + Optional(equals + righthand_id) +
                           Optional(comma.suppress())).setName("a_list")

        attr_list = OneOrMore(lbrack + Optional(a_list) + rbrack).setName(
            "attr_list").setResultsName('attrlist')

        attr_stmt = ((graph_ | node_ | edge_) + attr_list).setName("attr_stmt")

        edgeop = (Literal("--") | Literal("->")).setName("edgeop")

        # stmt_list and edge_point are mutually recursive with the
        # productions below, hence the Forward declarations.
        stmt_list = Forward()
        graph_stmt = (lbrace + Optional(stmt_list) + rbrace +
                      Optional(semi)).setName("graph_stmt")

        edge_point = Forward()

        edgeRHS = OneOrMore(edgeop + edge_point)
        edge_stmt = edge_point + edgeRHS + Optional(attr_list)

        subgraph = (
            Optional(subgraph_, '') + Optional(ID, '') +
            Group(graph_stmt)).setName("subgraph").setResultsName('ssubgraph')

        edge_point <<= (subgraph | graph_stmt | node_id)

        node_stmt = (node_id + Optional(attr_list) +
                     Optional(semi)).setName("node_stmt")

        assignment = (ID + equals + righthand_id).setName("assignment")
        stmt = (assignment | edge_stmt | attr_stmt | subgraph | graph_stmt
                | node_stmt).setName("stmt")
        stmt_list <<= OneOrMore(stmt + Optional(semi))

        graphparser = ((Optional(strict_, 'notstrict') +
                        ((graph_ | digraph_)) + Optional(ID, '') + lbrace +
                        Group(Optional(stmt_list)) +
                        rbrace).setResultsName("graph"))

        singleLineComment = Group("//" + restOfLine) | Group("#" + restOfLine)

        # actions
        graphparser.ignore(singleLineComment)
        graphparser.ignore(cStyleComment)
        node_id.setParseAction(self._proc_node_id)
        assignment.setParseAction(self._proc_attr_assignment)
        a_list.setParseAction(self._proc_attr_list)
        edge_stmt.setParseAction(self._proc_edge_stmt)
        node_stmt.setParseAction(self._proc_node_stmt)
        attr_stmt.setParseAction(self._proc_default_attr_stmt)
        attr_list.setParseAction(self._proc_attr_list_combine)
        subgraph.setParseAction(self._proc_subgraph_stmt)
        # graph_stmt.setParseAction(self._proc_graph_stmt)
        graphparser.setParseAction(self._main_graph_stmt)
        return graphparser
コード例 #53
0
def parse_file(file_name):
    """Parse an IDL-like file of namespaces, enums and (template) classes.

    :param file_name: path of the file to parse
    :return: the pyparsing ParseResults for the whole file
    """

    number = pp.Word(pp.nums)
    identifier = pp.Word(pp.alphas + "_", pp.alphanums + "_")

    lbrace = pp.Literal('{').suppress()
    rbrace = pp.Literal('}').suppress()
    cls = pp.Keyword('class')
    colon = pp.Literal(":")
    semi = pp.Literal(";").suppress()
    langle = pp.Literal("<")
    rangle = pp.Literal(">")
    equals = pp.Literal("=")
    comma = pp.Literal(",")
    lparen = pp.Literal("(")
    rparen = pp.Literal(")")
    lbrack = pp.Literal("[")
    rbrack = pp.Literal("]")
    mins = pp.Literal("-")
    struct = pp.Keyword('struct')
    template = pp.Keyword('template')
    final = pp.Keyword('final')("final")
    stub = pp.Keyword('stub')("stub")
    with_colon = pp.Word(pp.alphanums + "_" + ":")
    btype = with_colon
    # Renamed from `type` to avoid shadowing the builtin; Forward because
    # template arguments may themselves be template types.
    type_ = pp.Forward()

    tmpl = pp.Group(
        btype("template_name") + langle.suppress() +
        pp.Group(pp.delimitedList(type_)) + rangle.suppress())
    type_ << (tmpl | btype)
    enum_lit = pp.Keyword('enum')
    enum_class = pp.Group(enum_lit + cls)
    ns = pp.Keyword("namespace")

    # Enumerator with optional explicit (possibly negative) value.
    enum_init = equals.suppress() + pp.Optional(mins) + number
    enum_value = pp.Group(identifier + pp.Optional(enum_init))
    enum_values = pp.Group(lbrace + pp.delimitedList(enum_value) +
                           pp.Optional(comma) + rbrace)
    content = pp.Forward()

    # Member name, optionally a niladic accessor `name()`.
    member_name = pp.Combine(
        pp.Group(identifier + pp.Optional(lparen + rparen)))
    # C++-style [[attribute]].
    attrib = pp.Group(lbrack.suppress() + lbrack.suppress() + pp.SkipTo(']') +
                      rbrack.suppress() + rbrack.suppress())
    opt_attribute = pp.Optional(attrib)("attribute")
    namespace = pp.Group(
        ns("type") + identifier("name") + lbrace +
        pp.Group(pp.OneOrMore(content))("content") + rbrace)
    enum = pp.Group(
        enum_class("type") + identifier("name") + colon.suppress() +
        identifier("underline_type") + enum_values("enum_values") +
        pp.Optional(semi).suppress())
    default_value = equals.suppress() + pp.SkipTo(';')
    class_member = pp.Group(
        type_("type") + member_name("name") + opt_attribute +
        pp.Optional(default_value)("default") + semi.suppress())("member")
    template_param = pp.Group(identifier("type") + identifier("name"))
    template_def = pp.Group(template + langle +
                            pp.Group(pp.delimitedList(template_param))
                            ("params") + rangle)
    class_content = pp.Forward()
    class_def = pp.Group(
        pp.Optional(template_def)("template") + (cls | struct)("type") +
        with_colon("name") + pp.Optional(final) + pp.Optional(stub) +
        opt_attribute + lbrace +
        pp.Group(pp.ZeroOrMore(class_content))("members") + rbrace +
        pp.Optional(semi))
    content << (enum | class_def | namespace)
    class_content << (enum | class_def | class_member)
    # Give the main productions readable names for error messages.
    for varname in "enum class_def class_member content namespace template_def".split(
    ):
        locals()[varname].setName(varname)
    rt = pp.OneOrMore(content)
    singleLineComment = "//" + pp.restOfLine
    rt.ignore(singleLineComment)
    rt.ignore(pp.cStyleComment)
    return rt.parseFile(file_name, parseAll=True)
コード例 #54
0
class LauncherMenuModel(object):
    """Representation - model of the launcher menu configuration.

    This class contains all logic and data needed to parse and output a
    single launcher menu configuration file. The configuration is read
    from a tickle script parsed, transmuted and output to a JSON
    configuration file. During parsing a list of files is compiled of
    all the menu configuration files that this file depends on. This
    list is used for recursive parsing.
    """

    # Parser to split the TCL configuration
    # lines into an list of parameters
    expr_split = pyparsing.nestedExpr('{', '}')

    # Translation table for character replacement (strips '\' and newlines)
    translate_table = dict((ord(char), u'') for char in u'\\\n')

    def __init__(self, dir_path, file_path, force):
        # dir_path: directory of the configuration files
        # file_path: file name relative to dir_path
        # force: when True, missing referenced files are skipped instead
        #        of aborting
        self.dir_path = dir_path
        self.file_path = file_path
        self.path = os.path.join(self.dir_path, self.file_path)

        self.force = force

        self.title = None
        self.file_choice = None

        self.menu_items = list()
        self.json_config = dict()
        self.file_list = list()

        self.line_number = 0
        self.parse()

    def parse(self):
        """Entry method to parse the tickle configuration file.

        Opens the tickle file and reads it line by line. Each line is
        parsed separately by the parse_line method.
        """
        with codecs.open(self.path, encoding='ISO-8859-1') as tickle_file:
            parse_line = ''

            for line in tickle_file:
                line = line.lstrip()
                line = line.rstrip('\n')

                # Track the current line number for logging output
                self.line_number += 1

                # Skip over empty lines
                if not line:
                    continue

                # Tickle has the option of multi-line-split
                # configuration lines using \ character
                if '\\' in line:
                    # Remove '\' character and newlines
                    line = line.translate(LauncherMenuModel.translate_table)
                    parse_line += line
                else:
                    parse_line += line
                    # Skip over comment lines
                    if line[0] != '#':
                        try:
                            self.parse_line(parse_line)
                        except Exception:
                            # Narrowed from a bare `except:` so that
                            # KeyboardInterrupt/SystemExit are not
                            # swallowed here.
                            print("ERR: Following line can not be parsed:")
                            print(parse_line)
                            sys.exit(-1)
                    parse_line = ''

    def parse_line(self, line):
        """Parses each line and converts it into objects.

        The data is stored into a structure of lists and dictionaries
        that can be directly output to JSON and have the same structure.
        Each line is transformed based on the first parameter in the
        line - the command.
        """
        # In order for the parser to behave properly we need
        # to add the curly brackets at the front and back of
        # the line.
        line = '{' + line + '}'

        items = LauncherMenuModel.expr_split.parseString(line).asList()[0]

        # Split parsed list into 2 lists depending on
        # function of the parameters inside
        command = items[0]
        items.pop(0)

        # Join internal lists into a string
        params = list()
        for item in items:
            if isinstance(item, list):
                params.append(self.concatenate(item))

        element = dict()

        # Configure the title of the main menu
        if command[0] == '@main-title':
            self.json_config['menu-title'] = dict()
            self.json_config['menu-title']['text'] = params[0]

            # params[0] is consumed as the title text, so only extras
            # beyond it are "additional" (was `> 0`, which warned even
            # when exactly one parameter - the title - was given).
            if len(params) > 1:
                print(('Inf: Skipping additional parameters in '
                       'file "%s", line %d') \
                    % (self.file_path, self.line_number))

        # Add the file choice element to the configuration list
        elif command[0] == '@FileChoice':
            file_choice = list()
            file_choice.append(
                dict([('text', params[0]), ('file', command[1] + '.json')]))
            self.json_config['file-choice'] = file_choice

            if len(params) > 1:
                print(('Inf: Skipping additional parameters in '
                       'file "%s", line %d') \
                    % (self.file_path, self.line_number))

        # The command dictates that a separator is added
        elif command[0] == '@separator':
            element['type'] = 'separator'

            if len(params) > 0:
                print(('Inf: Skipping additional parameters in '
                       'file "%s", line %d') \
                    % (self.file_path, self.line_number))

        # The commands translates into the title element
        elif command[0] == '@title':
            element['type'] = 'title'
            element['text'] = params[0]

            if len(params) > 1:
                print(('Inf: Skipping additional parameters in '
                       'file "%s", line %d') \
                    % (self.file_path, self.line_number))

        # The command loads a new menu from another file
        elif command[0] == '>launcher':
            filepath = os.path.join(self.dir_path, command[1] + '.config')

            # Don't add the menu if the file does not exist
            if not os.path.isfile(filepath):
                if self.force:
                    print('Wrn: File "%s" does not exist. Skipping...' % \
                        filepath)
                    return
                else:
                    print('Err: File "%s" does not exist.' % filepath)
                    sys.exit(-1)

            element['type'] = 'menu'
            element['text'] = params[0]
            element['file'] = command[1] + '.json'

            if len(params) > 1:
                print(('Inf: Skipping additional parameters in '
                       'file "%s", line %d') \
                    % (self.file_path, self.line_number))

            # Track all additional files that need to be parsed
            self.file_list.append(command[1] + '.config')

        # Skip over lines where the command starts with a hash (comment)
        elif command[0].startswith('#'):
            print('Inf: Skipping line %d in file "%s" - comment' \
                % (self.line_number, self.file_path))

        # If nothing else this is a command
        else:
            cmd_text = self.concatenate(command)
            # Replace tabulators with spaces
            cmd_text = cmd_text.replace('\t', ' ')

            element['type'] = 'cmd'
            element['text'] = params[0].replace('"', r'\"')
            element['command'] = cmd_text

        # Add the element dictionary to the list of menu items
        if len(element) > 0:
            # Check if one of the parameters is a help link
            html_help = self.get_html_help(params[1:])
            if html_help:
                element['help-link'] = html_help

            self.menu_items.append(element)

    def get_html_help(self, parameters):
        """Checks the parameter if it is a link to a web page.

        The function checks if one of the parameters is a link to a page
        or html file that will be used as the help link text. If none of
        the paramters match the criteria, None is returned.
        """
        for param in parameters:
            part = param.split()[0]
            if (part == 'obj:' or part == 'fltr:' or part == 'lvl:'):
                continue
            else:
                part = os.path.splitext(param)
                if (len(part) > 1 and (part[1] == '.html' or part[1] == '.php3'
                                       or part[1] == '.php')):
                    return param

        return None

    def concatenate(self, item_list, level=0):
        """Concatenates a list of string and list items into a string.

        If the list contains sub-lists they are recursively merged until
        we are left with a single string. The item_list parameter should
        be a list that contains string or list elements only. Each
        embedded list is marked in the final string with curly braces.
        """
        new_item_list = list()
        for item in item_list:
            if isinstance(item, list):
                new_item_list.append(self.concatenate(item, level + 1))
            else:
                new_item_list.append(item)

        if level > 0:
            return '{%s}' % ' '.join(new_item_list)
        else:
            return ' '.join(new_item_list)

    def to_json(self, out_path, overwrite=False):
        """Mehod to output internal data into the JSON file.

        This method outputs the parsed configuration data into the JSON
        file. If the file already exists the user is asked if it should
        be overwritten or not. The overwrite flag parameter specifies
        if the files should be overwritten without asking the user.
        """
        split = os.path.splitext(self.file_path)
        if not split[1]:
            print('Err: Unable to parse extension from file name: %s' \
                % self.file_path)
            return

        out_file = os.path.join(out_path, split[0] + '.json')
        print('Inf: Writing file: %s' % out_file)

        if os.path.isdir(out_file):
            print('Err: Output file "%s" is a directory!' % out_file)
            return

        if os.path.isfile(out_file):
            if not overwrite:
                print('Wrn: Output file "%s" already exists!' \
                    % out_file)

                user_input = ''
                while True:
                    # NOTE(review): raw_input is Python 2 only — this
                    # module targets Python 2; use input() on Python 3.
                    user_input = raw_input('Overwrite? [y/N]:')
                    if user_input == 'y' or user_input == 'Y':
                        break
                    elif (user_input == 'n' or user_input == 'N'
                          or not user_input):
                        return

        # Set the item list to the menu key in the top dictionary
        self.json_config['menu'] = self.menu_items

        with codecs.open(out_file, mode='w', encoding='utf-8') \
                as output_file:
            # The context manager closes the file; the previous explicit
            # close() inside the with-block was redundant.
            json.dump(self.json_config, output_file, indent=4)

    def get_file_list(self):
        """Method to get the list of menu files that this menu
        depends on."""
        return self.file_list
コード例 #55
0
ファイル: legacy_parser.py プロジェクト: chokribr/invenio-1
 def parse_element(cls, indent_stack):
     """Return the grammar for an ``@legacy(...)`` annotation.

     The ``@legacy`` keyword itself is suppressed; the parenthesised
     body is captured verbatim.  All matches accumulate under the
     ``legacy`` results name.
     """
     marker = Keyword("@legacy").suppress()
     body = originalTextFor(nestedExpr("(", ")"))
     return (marker + body).setResultsName("legacy", listAllMatches=True)
コード例 #56
0
#misc
# Basic numeric tokens.  NOTE(review): `plusorminus`, `point`, `e`,
# `lbracket`, `rbracket`, `equal` and `dbquotes` are defined elsewhere
# in this module (not visible here).
number = Word(nums)
integer = Combine(Optional(plusorminus) + number)
# Real number: integer part, optional fraction, optional exponent.
real = Combine(integer + Optional(point + Optional(number)) +
               Optional(e + integer))
numarg = (real | integer)
identifier = Word(alphas + "_", alphanums + "_")
dotidentifier = Word(alphas, alphanums + "_" + ".")
# e.g. name[abc]
bracketidentifier = identifier + lbracket + Word(alphas) + rbracket
# key = value, where value is a quoted string or the rest of the line.
statement = Group(identifier + equal + (quotedString | restOfLine))

#math
# Tokens allowed inside a math expression.
mathElements = (numarg | ',' | '+' | '-' | '*' | '/' | '^' | '&' | '>' | '<'
                | '=' | '|' | identifier)
nestedMathDefinition = nestedExpr('(', ')', content=mathElements)
mathDefinition = OneOrMore(mathElements)

# Any balanced { ... } block, contents unrestricted.
section_enclosure2_ = nestedExpr('{', '}')

# Recursive section body: statements, nested sections, @-directives,
# identifiers, commas or numbers, inside [ ] or { } pairs.
section_enclosure_ = Forward()
nestedBrackets = nestedExpr('[', ']', content=section_enclosure_)
nestedCurlies = nestedExpr('{', '}', content=section_enclosure_)
section_enclosure_ << (
    statement | Group(identifier + ZeroOrMore(identifier) + nestedCurlies)
    | Group(identifier + '@' + restOfLine) | Word(alphas, alphanums + "_[]")
    | identifier | Suppress(',') | '@' | real)
function_entry_ = Suppress(dbquotes) + Group(
    identifier.setResultsName('functionName') + Suppress(lparen) +
    delimitedList(Group(
コード例 #57
0
# Filter-grammar tokens.  NOTE(review): NOT, BINOPS and UNOPS are
# module-level constants defined elsewhere in this file.
fg_not = pp.Literal(NOT).setResultsName('not')
fg_word = pp.Word(pp.alphanums + '*/{}+-\'').setResultsName('word')
fg_binop = pp.oneOf(BINOPS).setResultsName('binop')
fg_unop = pp.oneOf(list(UNOPS.keys())).setResultsName('unop')
# Regex literal: r'...' or r"..." with backslash escapes.
fg_regex = pp.Or([
    pp.QuotedString("r'", endQuoteChar="'", escChar='\\'),
    pp.QuotedString('r"', endQuoteChar='"', escChar='\\')
]).setResultsName('regex')
# Plain quoted string literal (single or double quotes).
fg_literal = pp.Or(
    [pp.QuotedString("'", escChar='\\'),
     pp.QuotedString('"', escChar='\\')]).setResultsName('literal')
# Atom: optional unary operator, then regex/literal/word.
fg_atom = pp.Optional(fg_unop) + (fg_regex ^ fg_literal ^ fg_word)
# Expression part: [NOT...] atom [binop]; a full expression is one or more.
fg_expr_part = pp.ZeroOrMore(
    pp.Group(fg_not)) + pp.Group(fg_atom) + pp.Optional(pp.Group(fg_binop))
fg_expr = pp.OneOrMore(fg_expr_part)
# Full syntax additionally allows parenthesised (nested) sub-expressions.
fg_nested = pp.nestedExpr(content=fg_expr)
fg_syntax_part = pp.ZeroOrMore(pp.Group(fg_not)) + (
    pp.Group(fg_atom) ^ fg_nested) + pp.Optional(pp.Group(fg_binop))
fg_syntax = pp.OneOrMore(fg_syntax_part)


def _tokenise_filter_string(fstr):
    """Tokenise *fstr* with the filter grammar.

    Returns a ``(tokens, error)`` pair: on success the parsed tokens and
    ``None``; on a parse failure ``None`` and the string ``'has-error'``.
    """
    try:
        return fg_syntax.parseString(fstr), None
    except (pp.ParseException, pp.ParseFatalException):
        return None, 'has-error'
コード例 #58
0
ファイル: block.py プロジェクト: pingleewu/ShareRoot
"""
    CSS blocks
"""

from pyparsing import nestedExpr

block = nestedExpr(opener="{", closer="}")

コード例 #59
0
class OptsSpec(Parser):
    """
    An OptsSpec is a string specification that describes an
    OptionTree. It is a list of tree path specifications (using dotted
    syntax) separated by keyword lists for any of the style, plotting
    or normalization options. These keyword lists are denoted
    'plot(..)', 'style(...)' and 'norm(...)'  respectively.  These
    three groups may be specified even more concisely using keyword
    lists delimited by square brackets, parentheses and braces
    respectively.  All these sets are optional and may be supplied in
    any order.

    For instance, the following string:

    Image (interpolation=None) plot(show_title=False) Curve style(color='r')

    Would specify an OptionTree where Image has "interpolation=None"
    for style and 'show_title=False' for plot options. The Curve has a
    style set such that color='r'.

    The parser is fairly forgiving; commas between keywords are
    optional and additional spaces are often allowed. The only
    restriction is that keywords *must* be immediately followed by the
    '=' sign (no space).
    """

    # Plot options: short form '[...]' or long form 'plot[...]'.
    plot_options_short = pp.nestedExpr('[',
                                       ']',
                                       content=pp.OneOrMore(pp.Word(allowed) ^ pp.quotedString)
                                   ).setResultsName('plot_options')

    plot_options_long = pp.nestedExpr(opener='plot[',
                                      closer=']',
                                      content=pp.OneOrMore(pp.Word(allowed) ^ pp.quotedString)
                                  ).setResultsName('plot_options')

    plot_options = (plot_options_short | plot_options_long)

    # Style options: short form '(...)' or long form 'style(...)'.
    # ignoreExpr=None disables quoted-string skipping inside the braces.
    style_options_short = pp.nestedExpr(opener='(',
                                        closer=')',
                                        ignoreExpr=None
                                    ).setResultsName("style_options")

    style_options_long = pp.nestedExpr(opener='style(',
                                       closer=')',
                                       ignoreExpr=None
                                   ).setResultsName("style_options")

    style_options = (style_options_short | style_options_long)

    # Normalization options: short form '{...}' or long form 'norm{...}'.
    norm_options_short = pp.nestedExpr(opener='{',
                                       closer='}',
                                       ignoreExpr=None
                                   ).setResultsName("norm_options")

    norm_options_long = pp.nestedExpr(opener='norm{',
                                      closer='}',
                                      ignoreExpr=None
                                  ).setResultsName("norm_options")

    norm_options = (norm_options_short | norm_options_long)

    # Literal compositor group names registered on Compositor definitions.
    compositor_ops = pp.MatchFirst(
        [pp.Literal(el.group) for el in Compositor.definitions if el.group])

    # A dotted element path: leading uppercase letter, then word chars,
    # dots and underscores (e.g. 'Image.Channel').
    dotted_path = pp.Combine( pp.Word(ascii_uppercase, exact=1)
                              + pp.Word(pp.alphanums+'._'))


    pathspec = (dotted_path | compositor_ops).setResultsName("pathspec")


    # One group: a path followed by any subset of the three option kinds
    # in any order ('&' is pyparsing's Each).
    spec_group = pp.Group(pathspec +
                          (pp.Optional(norm_options)
                           & pp.Optional(plot_options)
                           & pp.Optional(style_options)))

    opts_spec = pp.OneOrMore(spec_group)

    # Aliases that map to the current option name for backward compatibility.
    # FIX: 'figure_alpha' previously mapped to '    fig_alpha' (leading
    # whitespace embedded inside the string literal), producing a corrupted
    # option key; it now maps to 'fig_alpha' like its siblings.
    aliases = {'horizontal_spacing': 'hspace',
               'vertical_spacing':   'vspace',
               'figure_alpha':       'fig_alpha',
               'figure_bounds':      'fig_bounds',
               'figure_inches':      'fig_inches',
               'figure_latex':       'fig_latex',
               'figure_rcparams':    'fig_rcparams',
               'figure_size':        'fig_size',
               'show_xaxis':         'xaxis',
               'show_yaxis':         'yaxis'}

    # (old element name, replacement) pairs handled by apply_deprecations.
    deprecations = [('GridImage', 'Image')]

    @classmethod
    def process_normalization(cls, parse_group):
        """
        Given a normalization parse group (i.e. the contents of the
        braces), validate the option list and compute the appropriate
        boolean values for the normalization plotting options.

        Returns ``None`` when no (or an empty) norm group is present,
        otherwise ``dict(axiswise=..., framewise=...)``.

        Raises SyntaxError for repeated, unknown or contradictory
        (+opt together with -opt) specifications.
        """
        if ('norm_options' not in parse_group): return None
        opts = parse_group['norm_options'][0].asList()
        if opts == []: return None

        options = ['+framewise', '-framewise', '+axiswise', '-axiswise']

        for normopt in options:
            if opts.count(normopt) > 1:
                raise SyntaxError("Normalization specification must not"
                                  " contain repeated %r" % normopt)

        if not all(opt in options for opt in opts):
            raise SyntaxError("Normalization option not one of %s"
                              % ", ".join(options))
        excluded = [('+framewise', '-framewise'), ('+axiswise', '-axiswise')]
        for pair in excluded:
            if all(exclude in opts for exclude in pair):
                raise SyntaxError("Normalization specification cannot"
                                  " contain both %s and %s" % (pair[0], pair[1]))

        # If unspecified, default is -axiswise and -framewise
        if len(opts) == 1 and opts[0].endswith('framewise'):
            axiswise = False
            framewise = '+framewise' in opts
        elif len(opts) == 1 and opts[0].endswith('axiswise'):
            framewise = False
            axiswise = '+axiswise' in opts
        else:
            axiswise = '+axiswise' in opts
            framewise = '+framewise' in opts

        return dict(axiswise=axiswise,
                    framewise=framewise)


    @classmethod
    def _group_paths_without_options(cls, line_parse_result):
        """
        Given a parsed options specification as a list of groups, combine
        groups without options with the first subsequent group which has
        options.
        A line of the form
            'A B C [opts] D E [opts_2]'
        results in
            [({A, B, C}, [opts]), ({D, E}, [opts_2])]
        """
        active_pathspecs = set()
        for group in line_parse_result:
            active_pathspecs.add(group['pathspec'])

            has_options = (
                'norm_options' in group or
                'plot_options' in group or
                'style_options' in group
            )
            if has_options:
                yield active_pathspecs, group
                active_pathspecs = set()

        # Trailing paths with no option group attach to an empty group.
        if active_pathspecs:
            yield active_pathspecs, {}


    @classmethod
    def apply_deprecations(cls, path):
        "Convert any potentially deprecated paths and issue appropriate warnings"
        split = path.split('.')
        msg = 'Element {old} deprecated. Use {new} instead.'
        for old, new in cls.deprecations:
            if split[0] == old:
                parsewarning.warning(msg.format(old=old, new=new))
                return '.'.join([new] + split[1:])
        return path


    @classmethod
    def parse(cls, line, ns=None):
        """
        Parse an options specification, returning a dictionary with
        path keys and {'plot':<options>, 'style':<options>} values.

        *ns* is an optional namespace dict used when evaluating option
        values; it defaults to empty (``None`` avoids a shared mutable
        default argument).

        Raises SyntaxError when the line does not parse as exactly one
        specification or leaves unparsed trailing text.
        """
        ns = {} if ns is None else ns
        parses = list(cls.opts_spec.scanString(line))
        if len(parses) != 1:
            raise SyntaxError("Invalid specification syntax.")
        else:
            # scanString yields (tokens, start, end); verify the single
            # match consumed the whole (stripped) line.
            e = parses[0][2]
            processed = line[:e]
            if (processed.strip() != line.strip()):
                raise SyntaxError("Failed to parse remainder of string: %r" % line[e:])

        grouped_paths = cls._group_paths_without_options(cls.opts_spec.parseString(line))
        parse = {}
        for pathspecs, group in grouped_paths:
            options = {}

            normalization = cls.process_normalization(group)
            if normalization is not None:
                options['norm'] = normalization

            if 'plot_options' in group:
                plotopts =  group['plot_options'][0]
                opts = cls.todict(plotopts, 'brackets', ns=ns)
                options['plot'] = {cls.aliases.get(k,k):v for k,v in opts.items()}

            if 'style_options' in group:
                styleopts = group['style_options'][0]
                opts = cls.todict(styleopts, 'parens', ns=ns)
                options['style'] = {cls.aliases.get(k,k):v for k,v in opts.items()}

            for pathspec in pathspecs:
                parse[pathspec] = merge_option_dicts(parse.get(pathspec, {}), options)

        return {
            cls.apply_deprecations(path): {
                option_type: Options(**option_pairs)
                for option_type, option_pairs in options.items()
            }
            for path, options in parse.items()
        }

    @classmethod
    def parse_options(cls, line, ns=None):
        """
        Similar to parse but returns a list of Options objects instead
        of the dictionary format.
        """
        parsed = cls.parse(line, ns=ns)
        options_list = []
        for spec in sorted(parsed.keys()):
            options = parsed[spec]
            merged = {}
            for group in options.values():
                merged = dict(group.kwargs, **merged)
            options_list.append(Options(spec, **merged))
        return options_list
コード例 #60
0
    (5, "6j", "certain", "purple     ", "sat",
     (180.0, 141.633, 244.1, 66.056, 71.667, 122.167, 182.2, 83.622, 180.0)),
)


def buildBin(data):
    """Build a :class:`Bin` from a data tuple.

    *data* is a sequence whose first element is the bin name and whose
    remaining elements are argument tuples, one per cluster.

    Fixes: replaces the manual append loop with a comprehension and no
    longer shadows the builtin ``bin`` with a local variable.
    """
    name = data[0]
    clusters = [suitenamedefs.Cluster(*item) for item in data[1:]]
    return Bin(name, clusters)


# Parse the raw brace-delimited text into nested lists, then push the
# result through the commentize/convert pipeline.
brace_grammar = nestedExpr('{', '}')
out = brace_grammar.parseString(txt).asList()
out2 = commentize(out)
print(out2)
convert(out2)


# bin8 data is a sample of the output of convert
def formatIntegers(list):
    """Render the given integers as comma-separated, width-2 right-aligned text."""
    return ', '.join(f'{n:2}' for n in list)


def satelliteConvert(data):
    for k in range(0, 180, 20):
        name = data[k]