예제 #1
0
파일: Slot.py 프로젝트: aleSuglia/YAIEP
    def __init__(self, params):
        """Build a slot definition.

        params: either a bare slot name (string), or a list of the form
        [name, (attr, value, ...), ...] where attr is one of 'default',
        'type' ('string' or 'integer') or 'range' (low, high).

        Raises ValueError when the type is unknown, the range is empty or
        inverted, or the default value violates the declared type/range.
        """
        self.name = ""
        self.type = None            # 'string', 'integer' or None (unconstrained)
        self.default_value = None
        self.range = None           # [low, high] bounds (as given, e.g. strings)

        if isinstance(params, list):
            self.name = params[0]
            for attr in params[1:]:
                if attr[0] == 'default':
                    self.default_value = attr[1]
                elif attr[0] == 'type':
                    # raise instead of assert: asserts vanish under -O
                    if attr[1] not in ('string', 'integer'):
                        raise ValueError('Invalid slot type: {0}'.format(attr[1]))
                    self.type = attr[1]
                elif attr[0] == 'range':
                    self.range = [attr[1], attr[2]]
                    if int(self.range[0]) >= int(self.range[1]):
                        raise ValueError('Invalid range')

            if self.default_value is not None:
                try:
                    # type-consistency check on the default value
                    parser = Word(alphas) if self.type == 'string' else Word(nums)
                    parser.parseString(self.default_value)
                    # if the slot declares a range, the default must fall inside it
                    # (direct integer comparison replaces the old eval() of an
                    # interpolated expression, which was unsafe)
                    if self.range is not None:
                        if not (int(self.range[0]) <= int(self.default_value) <= int(self.range[1])):
                            raise ValueError('Default value doesn\'t satisfy range constraints')
                except (ParseException, SyntaxError):
                    raise ValueError('Incorrect default value for slot')
        else:
            self.name = params
예제 #2
0
파일: Slot.py 프로젝트: aleSuglia/YAIEP
 def check_slot_value(self, slot_value):
     """Return True if slot_value satisfies this slot's type and range
     constraints, False otherwise."""
     try:
         if self.type is not None:
             # type check: alphabetic word for strings, digits for integers
             parser = Word(alphas) if self.type == 'string' else Word(nums)
             parser.parseString(slot_value)
         if self.range is not None and self.type != 'string':
             # numeric range check done by direct comparison; the previous
             # eval() on an interpolated expression was unsafe
             if not (int(self.range[0]) <= int(slot_value) <= int(self.range[1])):
                 return False
         return True
     except (ParseException, SyntaxError, ValueError):
         # ValueError covers int() failures where eval() raised SyntaxError
         return False
예제 #3
0
 def check_slot_value(self, slot_value):
     """Return True if slot_value satisfies this slot's type and range
     constraints, False otherwise."""
     try:
         if self.type is not None:
             # type check: alphabetic word for strings, digits for integers
             parser = Word(alphas) if self.type == 'string' else Word(nums)
             parser.parseString(slot_value)
         if self.range is not None and self.type != 'string':
             # numeric range check done by direct comparison; the previous
             # eval() on an interpolated expression was unsafe
             if not (int(self.range[0]) <= int(slot_value) <= int(self.range[1])):
                 return False
         return True
     except (ParseException, SyntaxError, ValueError):
         # ValueError covers int() failures where eval() raised SyntaxError
         return False
예제 #4
0
파일: pyC14.py 프로젝트: AlephThot/pyC14
 def set_param(self,line):
     """Parse one 'name[nb].key=value' line and set the matching attribute.

     'ref' is stored as a string, 'start'/'resolution' as floats, and
     'bp'/'sigma' as numpy arrays of floats.
     NOTE(review): objectPath and divers are presumed to be module-level
     pyparsing character sets — confirm in the enclosing module.
     """
     # a ']' immediately followed by '=' means an empty key: nothing to do
     a = line.find("]")
     b = line.find("=")
     if a > 0 and b > 0 and (b - a) == 1:
         return
     # grammar: name [ number ] .key = value
     modele = Word(alphas) + "[" + Word(nums) + "]" + Word(objectPath) + "=" + Word(divers)
     try:
         pd = modele.parseString(line)
     except ParseException:
         return
     key = pd[4]
     value = pd[6][:len(pd[6]) - 1]   # drop the trailing character (e.g. ';')
     if key[0] == ".":
         key = key[1:]                # expect ".keyword"
         if key.find(".") < 0:        # a single keyword
             # fixed: ("ref") was a plain string, so substring keys like
             # "r" or "re" wrongly matched; ("ref",) is a real 1-tuple
             if key in ("ref",):
                 setattr(self, key, set_str(value))
             elif key in ("start", "resolution"):
                 setattr(self, key, float(value))
             elif key in ("bp", "sigma"):
                 setattr(self, key, np.array([float(x) for x in commaSeparatedList.parseString(set_str(value))]))
예제 #5
0
파일: pyC14.py 프로젝트: AlephThot/pyC14
 def set_param(self,line):
     """Parse one 'name[nb].key=value' line and set the matching attribute.

     Plain keys are set directly on self (string or int conversion chosen by
     the ocd_str_param / ocd_int_param tables); dotted keys are forwarded to
     the likelihood / posterior sub-objects.
     NOTE(review): objectPath and divers are presumed to be module-level
     pyparsing character sets — confirm in the enclosing module.
     """
     # define grammar
     a = line.find("]")
     b = line.find("=")
     # a ']' immediately followed by '=' means an empty key: nothing to do
     if(a>0 and b>0 and (b-a)==1):
         return
     else:
         # grammar: name [ number ] .key = value
         modele = Word( alphas ) + "[" + Word(nums) + "]" + Word(objectPath) + "=" + Word( divers )
         try:
             pd = modele.parseString( line )
         except ParseException as pe:
             pass
         else:
             obj = pd[0]
             key = pd[4]
             value = pd[6][:len(pd[6])-1]        # drop the trailing character (e.g. ';')
             nb = int(pd[2])
             if(key[0]=="."):
                 key = key[1:]                           #expect ".keyword"
                 if(key.find(".")<0):                    #a single keyword
                     if(key in ocd_str_param):
                         setattr(self,key,set_str(value))
                         #print("->  ocd[{id}].{key}={value}".format(id=self.id,key=key,value=value))
                     elif(key in ocd_int_param):
                         setattr(self,key,int(value))
                         #print("->  ocd[{id}].{key}={value}".format(id=self.id,key=key,value=value))
                 else:
                     # dotted key: delegate to the named sub-object
                     keywords = key.split(".")
                     if(keywords[0]=="likelihood"):
                         self.likelihood.set_param(keywords,value)
                     elif(keywords[0]=="posterior"):
                         self.posterior.set_param(keywords,value)
예제 #6
0
def read_stats(result_dir, stats_file_name):
    """Parse a gem5 statistics file into a list of OrderedDicts, one per
    simulation section ('End Simulation Statistics' delimits sections).
    Returns None when the file cannot be read."""
    # a stat line is: <key> <value> [rest...]
    stat_rule = Word(printables) + Word('nan.%' + nums) + Optional(restOfLine)
    sections = []
    try:
        with open(path.join(result_dir, stats_file_name)) as stats_file:
            current = 0
            for raw_line in stats_file:
                if len(sections) <= current:
                    sections.append(collections.OrderedDict())
                try:
                    tokens = stat_rule.parseString(raw_line)
                    sections[current][tokens[0]] = tokens[1]
                except ParseException:
                    # headers / blank lines don't match the rule: skip them
                    pass
                if 'End Simulation Statistics' in raw_line:
                    current += 1
    except Exception as e:
        print(e)
        return None
    else:
        return sections
예제 #7
0
파일: gem5_utils.py 프로젝트: zbw233/BJUT
def read_stats(result_dir, stats_file_name):
    """Read a gem5 stats file and return one OrderedDict per simulation
    section, or None if the file could not be opened/read."""
    # key, numeric-ish value, optional trailing text
    rule = Word(printables) + Word('nan.%' + nums) + Optional(restOfLine)
    result = []
    idx = 0
    try:
        with open(path.join(result_dir, stats_file_name)) as fh:
            for text in fh:
                while len(result) <= idx:
                    result.append(collections.OrderedDict())
                try:
                    parsed = rule.parseString(text)
                except ParseException:
                    pass  # non-stat line
                else:
                    result[idx][parsed[0]] = parsed[1]
                if 'End Simulation Statistics' in text:
                    idx += 1
    except Exception as e:
        print(e)
        return None
    return result
예제 #8
0
    def open(self, id):
        """Fetch message `id`'s BODY structure over IMAP and record its parts
        in self.attachment.

        Each part becomes a dict with a 'type' key ("major/minor", lower-cased)
        plus one key/value pair taken from the part's parameter list.
        NOTE(review): assumes self.imap is a connected imaplib-style client
        whose fetch() returns (typ, data) — confirm against the caller.
        """
        self.id = id
        item = 'BODY'
        typ, data = self.imap.fetch(id, '(' + item + ')')
        assert typ == 'OK'

        # grammar for the parenthesised BODY response: arbitrarily nested
        # lists of NIL keywords, quoted strings and bare numbers
        inline_parse = Forward()
        inline_parse << Suppress("(") + Group(OneOrMore(Or([Keyword("NIL"), QuotedString('"'), Word(nums), inline_parse ]))) + Suppress(")")
        parse = Word(nums) + Suppress("(") + Keyword(item) + Suppress("(") + Group(OneOrMore(inline_parse)) + ZeroOrMore(Or([Keyword("NIL"), QuotedString('"'), Word(nums)])) +  Suppress(")") + Suppress(")")
        p = parse.parseString(data[0])

        #print data[0]
        #print p
        #print

        self.attachment = []
        # p[2] is the list of body parts: i[0:2] holds the major/minor MIME
        # type, i[2] the (attribute, value) parameter pair
        for i in p[2]:
            #while 'NIL' in i:
            #    i.remove('NIL')

            a = {
                'type'          : '/'.join(i[0:2]).lower(),
                i[2][0].lower() : i[2][1],
            }

            self.attachment.append(a)
예제 #9
0
    def read_stats(self):
        """Parse this run's gem5 stats file and return an OrderedDict of the
        key/value pairs from section `self.section_num_to_use`; returns None
        (after printing the traceback) when the file cannot be read."""
        stat_rule = Word(printables) + Word('nan.%' +
                                            nums) + Optional(restOfLine)

        stats = collections.OrderedDict()

        try:
            with open(self.stats_file_name()) as stats_file:
                i = 0
                for stat_line in stats_file:
                    if 'End Simulation Statistics' in stat_line:
                        i += 1
                    elif i == self.section_num_to_use:
                        try:
                            stat = stat_rule.parseString(stat_line)
                            key = stat[0]
                            value = stat[1]
                            stats[key] = value
                        except ParseException:
                            # headers/blank lines don't match the rule
                            pass

            return stats
        except Exception:
            # was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; Exception keeps the intended behavior
            traceback.print_exc()
            return None
def get_modules_footprint(device: Device) -> Dict:
    """Return dictionary of modules and their provisioned resources."""
    # skip the 5 header lines and the trailing line of the tmsh output
    table_rows = device.ssh.run(
        "tmsh show sys provision").stdout.splitlines()[5:-1]
    row_grammar = Word(alphas) + Word(nums) * 4
    # column order in the tmsh table, paired with its unit
    columns = (("cpu", "%"), ("memory", "MB"),
               ("host-memory", "MB"), ("disk", "MB"))
    footprint = {}
    for row in table_rows:
        tokens = row_grammar.parseString(row)
        footprint[tokens[0]] = {
            name: {"value": int(tokens[pos + 1]), "unit": unit}
            for pos, (name, unit) in enumerate(columns)
        }
    return footprint
예제 #11
0
파일: io.py 프로젝트: izquierdo/kr
def load_variables(filename):
    """Load random variable definitions from file (C45 format, class at the end).

    File must contain information in the format 'Variable Name: Values.' as in:
    A: true,false.
    B: 0,1,2.
    C: c1,c2,c3,c4.
    D: one.
    Lines starting with '#' are ignored. Numeric domain values become ints.
    """
    from DataStructures.randomvariables import RandomVariable
    RV = []
    # grammar: name ': ' comma-separated domain values
    variable = Word(caps + lowers + digits).setResultsName("name") + ": " + OneOrMore(
        Word(caps + lowers + digits + ".") + Optional(Suppress(","))).setResultsName("domain")
    # open() replaces the Python-2-only file(); `with` guarantees closure
    with open(filename) as f:
        for line in f:
            if line[0] == '#':
                continue
            dataline = line[0:(len(line) - 2)]  # strip trailing '.' and newline
            rv = variable.parseString(dataline)
            domain = []
            for value in rv.domain:
                value = ''.join(value)
                # numeric labels become ints, everything else stays a string
                domain.append(int(value) if value.isdigit() else value)
            RV.append(RandomVariable(rv.name, domain))
    return RV
 def testParsing(self):
     """Smoke-test the pyparsing Word element on a simple greeting."""
     grammar = Word(alphas) + "," + Word(alphas) + "!"
     sample = "Hello, World!"
     tokens = grammar.parseString(sample)
     # two words plus two punctuation tokens
     self.assertEqual(4, len(tokens))
     print(sample, "->", tokens)
예제 #13
0
 def parse_level(self):
     """Parse the 'level' clause out of self.storm_line and append the token
     list to self.dct['level'], creating the entry on first use."""
     parse_level = Word(alphas) + Suppress('level ') + restOfLine
     value = parse_level.parseString(self.storm_line).asList()
     # setdefault replaces the manual key-exists branching
     self.dct.setdefault('level', []).append(value)
예제 #14
0
 def parse(operator, string, digits=None):
     """Parse a valid interval from strings like '1800-1900' or '12'.

     If *operator* is one of ('<=', '>=', '>', '<'), only a single number is
     parsed and an open interval is constructed. If the optional parameter
     *digits* is given, only numbers with exactly that many digits match.
     Returns None when the string cannot be parsed.
     """
     text = string.strip()
     number = Word(pyparsing.nums) if digits is None else Word(pyparsing.nums, exact=digits)
     try:
         if operator not in ('<=', '>=', '>', '<'):
             # ^ is xor; | does not cooperate with parseAll=True (pyparsing quirk)
             grammar = number ^ (number + Suppress('-') + number)
             bounds = [int(tok) for tok in grammar.parseString(text, parseAll=True).asList()]
             low = bounds[0]
             high = bounds[1] if len(bounds) > 1 else bounds[0]
             return Interval(low, high)
         date = int(number.parseString(text)[0])
         if operator == '>=':
             return Interval(date, None)
         if operator == '<=':
             return Interval(None, date)
         if operator == '>':
             return Interval(date + 1, None)
         if operator == '<':
             return Interval(None, date - 1)
         assert False  # unreachable: operator membership checked above
     except pyparsing.ParseException:
         return None
예제 #15
0
def convert_to_xyz(file_dir):
    """Convert generic .txt coordinate files found in file_dir into .xyz files.

    Each parsable line (element symbol + three floats) is copied into a file
    named <molecule>.xyz; unparsable lines are reported and skipped.
    """
    file_list = os.listdir(file_dir)
    txt = [i for i in file_list if os.path.splitext(i)[1] == '.txt']

    # grammar hoisted out of the per-line loop (it is loop-invariant):
    # element symbol followed by three signed floats
    float_definition = Regex(r'[+-]?\d+\.\d*')
    parse_float = Word(alphas) + float_definition + float_definition + float_definition

    for name in txt:
        file = os.path.join(file_dir, name)
        molecule_name = os.path.splitext(name)[0]

        # `with` fixes the leak: the original never closed its file handles
        with open(file, 'r') as file_object:
            lines = file_object.readlines()

        full_data_set = []
        for line in lines:
            try:
                parsed_line = parse_float.parseString(line)
                full_data_set.append(list(parsed_line))
            except Exception:
                print('invalid data line')

        new_file_name = molecule_name + '.xyz'
        with open(new_file_name, 'w') as new_file:
            # .xyz header: atom count, molecule name, blank padding
            new_file.write(str(len(full_data_set)) + '\n' + molecule_name + '\n' + '\n' + '\n')
            for row in full_data_set:
                new_file.write(row[0] + '     ' + row[1] + '     ' + row[2] + '     ' + row[3])
                new_file.write('\n')

    return
예제 #16
0
파일: OMCEanalyser.py 프로젝트: hfr/OMCE
 def ValidateName(self, Nn, T):
     """Validate that Nn is a legal identifier (letter/underscore followed by
     letters, digits or underscores); on failure, print a caret diagnostic
     and report error 64 through self.ERROR."""
     from pyparsing import Word, alphas, nums, ParseException
     G = Word(alphas + "_", alphas + nums + "_")
     G.setDebug(False)
     try:
         ps = G.parseString(Nn, parseAll=True)
         Nn = ps[0]
     except ParseException as PE:
         # 'except E as e' replaces the Python-2-only 'except E, e' syntax
         self.PRINT(PE.line)
         self.PRINT(" " * (PE.column - 1) + "^")
         self.ERROR(64, T, PE)
예제 #17
0
def pars_data():
    """Parse the last line of test.txt and return its five temperatures as floats.

    The last line is expected as: date time t1,t2,t3,t4,t5 (separators ',' or ';').
    """
    # `with` fixes the original leak: `f.close` (without parentheses) never
    # actually closed the file
    with open("test.txt", 'r') as f:
        all_lines = f.readlines()
    item = all_lines[-1]  # only the final line is parsed

    temperatura = (Word(nums + '+.') | Word(nums + '-.'))
    comma = (Literal(",") | Literal(";")).suppress()
    tem = (temperatura + comma) * 5
    # skip the date and time prefixes, then read the five temperatures
    full_name_temperatura = Word(nums + '.').suppress() + Word(nums + ':').suppress() + tem
    vremenno_temp = full_name_temperatura.parseString(item)

    data_temperatura = [float(t) for t in vremenno_temp]
    return data_temperatura
예제 #18
0
파일: io.py 프로젝트: izquierdo/kr
def load_c45_header(filename):
    """Load random variable definitions from file (C45 format).

    File must contain information in the format 'Variable Name: Values.' as in:
    0,1.
    A: true,false.
    B: 0,1,2.
    C: c1,c2,c3,c4.
    D: one.
    The first data line describes the class variable, which is appended last
    to the returned list. Lines starting with '#' and empty lines are ignored.
    """
    from DataStructures.randomvariables import RandomVariable

    def _to_domain(tokens):
        # numeric labels become ints, everything else stays a string
        domain = []
        for value in tokens:
            value = ''.join(value)
            domain.append(int(value) if value.isdigit() else value)
        return domain

    RV = []
    cvariable = OneOrMore(Word(caps + lowers + digits + ".") + Optional(Suppress(","))).setResultsName("domain")
    variable = Word(caps + lowers + digits).setResultsName("name") + ": " + OneOrMore(
        Word(caps + lowers + digits + ".") + Optional(Suppress(","))).setResultsName("domain")
    class_variable = None
    # open() replaces the Python-2-only file(); `with` guarantees closure
    with open(filename) as f:
        for line in f:
            if line[0] == '#' or len(line) <= 1:
                continue
            dataline = line[0:(len(line) - 2)]  # strip trailing '.' and newline
            if class_variable is None:
                # first data line: the class variable (no name prefix)
                rv = cvariable.parseString(dataline)
                class_variable = RandomVariable('class', _to_domain(rv.domain))
            else:
                rv = variable.parseString(dataline)
                RV.append(RandomVariable(rv.name, _to_domain(rv.domain)))
    RV.append(class_variable)
    return RV
예제 #19
0
def part2():
    """Count the lines of the 2020 day-2 input that satisfy the v2 policy."""
    dash = Suppress(Literal('-'))
    colon = Suppress(Literal(':'))
    # two numbers, the policy letter, then the password
    policy = (Word(nums).setParseAction(fn) + dash +
              Word(nums).setParseAction(fn) + Word(alphas) +
              colon + Word(alphas))

    valid = 0
    with open("../input/2020/day2.txt", 'r') as f:
        for line in f:
            if is_valid_v2(policy.parseString(line)):
                valid += 1

    print(f"Day 2 Part 2: {valid}")
예제 #20
0
    def __init__(self, params):
        """Build a slot definition.

        params: either a bare slot name (string), or a list of the form
        [name, (attr, value, ...), ...] where attr is one of 'default',
        'type' ('string' or 'integer') or 'range' (low, high).

        Raises ValueError when the type is unknown, the range is empty or
        inverted, or the default value violates the declared type/range.
        """
        self.name = ""
        self.type = None            # 'string', 'integer' or None (unconstrained)
        self.default_value = None
        self.range = None           # [low, high] bounds (as given, e.g. strings)

        if isinstance(params, list):
            self.name = params[0]
            for attr in params[1:]:
                if attr[0] == 'default':
                    self.default_value = attr[1]
                elif attr[0] == 'type':
                    # raise instead of assert: asserts vanish under -O
                    if attr[1] not in ('string', 'integer'):
                        raise ValueError('Invalid slot type: {0}'.format(attr[1]))
                    self.type = attr[1]
                elif attr[0] == 'range':
                    self.range = [attr[1], attr[2]]
                    if int(self.range[0]) >= int(self.range[1]):
                        raise ValueError('Invalid range')

            if self.default_value is not None:
                try:
                    # type-consistency check on the default value
                    parser = Word(alphas) if self.type == 'string' else Word(nums)
                    parser.parseString(self.default_value)
                    # if the slot declares a range, the default must fall inside it
                    # (direct integer comparison replaces the old eval() of an
                    # interpolated expression, which was unsafe)
                    if self.range is not None:
                        if not (int(self.range[0]) <= int(self.default_value) <= int(self.range[1])):
                            raise ValueError(
                                'Default value doesn\'t satisfy range constraints'
                            )
                except (ParseException, SyntaxError):
                    raise ValueError('Incorrect default value for slot')
        else:
            self.name = params
예제 #21
0
def check_location(symbol, expected):
    """Verify, against the global linker-map text `contents`, that `symbol`
    was placed in the `expected` section; exits with status 1 on failure."""
    pattern = Word(alphanums + '._').setResultsName('actual') + Word(
        hexnums) + Literal(symbol) + LineEnd()
    pattern = SkipTo(pattern) + pattern

    try:
        results = pattern.parseString(contents)
    except ParseException:
        print("check placement fail: '%s' was not found" % (symbol))
        exit(1)

    if results.actual != expected:
        print("check placement fail: '%s' was placed in '%s', not in '%s'" %
              (symbol, results.actual, expected))
        exit(1)

    # fixed: the first placeholder had been corrupted to '******', leaving
    # one '%s' for two arguments and raising TypeError at runtime
    print("check placement pass: '%s' was successfully placed in '%s'" %
          (symbol, results.actual))
예제 #22
0
파일: io.py 프로젝트: izquierdo/kr
def load_graph(filename):
    """Load a graph from file into an adjacency map.
        input: input filename
        output: graph as adjacency map G

        File must contain the graph as an adjacency list such as
        A: B,C
        B: C
        C: .
        Ending vertices should also be represented explicitly as linking to a dot
    """
    G = {}
    # grammar: node name, ': ', comma-separated neighbour list
    node = Word(caps + lowers + digits).setResultsName("node") + ": " + OneOrMore(Word(caps + lowers + digits + empty) + Suppress(Optional(","))).setResultsName("edges")

    # open() replaces the Python-2-only file(); `with` guarantees closure
    with open(filename) as f:
        for line in f:
            graph = node.parseString(line)
            G[graph.node] = graph.edges

    return G
예제 #23
0
def parseTypes(path):
    """Parse event-type lines from the file at `path`.

    Each line has the shape
      <num> Unique(MsgEvent(src, dst, subtype) | NetworkPartition(Set(...), Set(...)), uid)
    Returns a dict mapping uid -> event tuple.
    """
    msgs = set()
    types = {}

    # the grammar is loop-invariant: build it once instead of per line
    # (the original also created an unused Forward() that was immediately
    # overwritten — removed as dead code)
    number = Word(nums)
    word = Word(alphanums + "-_")
    wordList = word + ZeroOrMore(',' + word)

    par = (Literal('NetworkPartition').setResultsName('type') +
           '(' + Literal('Set') + '(' +
           wordList.setResultsName('p1') + ')' + ',' +
           Literal('Set') + '(' +
           wordList.setResultsName('p2') + ')' +
           ')')

    subType = (word + Optional(nestedExpr('(', ')'))).setResultsName('msg')
    msg = (Literal('MsgEvent').setResultsName('type') +
           '(' + word.setResultsName('src') + ',' +
           word.setResultsName('dst') + ',' +
           subType + ')')

    event = Word(nums) + \
        Literal('Unique') + "(" + (msg | par) + ',' + \
        number.setResultsName('uid') + ')'

    for line in lineGen(path):
        result = event.parseString(line)

        key = result.uid
        if result.type == 'MsgEvent':
            msg_tuple = list2tuple(result.msg.asList())
            value = (result.type, result.src, result.dst, msg_tuple)
            msgs.add(msg_tuple)
        elif result.type == 'NetworkPartition':
            value = (result.type, result.p1, result.p2)

        types[key] = value

    return types
예제 #24
0
def parseTypes(path):
    """Parse event-type lines from the file at `path` into {uid: tuple}.

    Recognises MsgEvent(src, dst, subtype) and
    NetworkPartition(Set(...), Set(...)) events wrapped in Unique(..., uid).
    """
    msgs = set()
    types = {}

    # grammar hoisted out of the loop (loop-invariant); the original also
    # assigned an unused Forward() that was immediately overwritten
    number = Word(nums)
    word = Word(alphanums + "-_")
    wordList = word + ZeroOrMore(',' + word)

    par = (Literal('NetworkPartition').setResultsName('type') +
           '(' + Literal('Set') + '(' +
           wordList.setResultsName('p1') + ')' + ',' +
           Literal('Set') + '(' +
           wordList.setResultsName('p2') + ')' +
           ')')

    subType = (word + Optional(nestedExpr('(', ')'))).setResultsName('msg')
    msg = (Literal('MsgEvent').setResultsName('type') +
           '(' + word.setResultsName('src') + ',' +
           word.setResultsName('dst') + ',' +
           subType + ')')

    event = Word(nums) + \
        Literal('Unique') + "(" + (msg | par) + ',' + \
        number.setResultsName('uid') + ')'

    for line in lineGen(path):
        result = event.parseString(line)

        key = result.uid
        if result.type == 'MsgEvent':
            msg_tuple = list2tuple(result.msg.asList())
            value = (result.type, result.src, result.dst, msg_tuple)
            msgs.add(msg_tuple)
        elif result.type == 'NetworkPartition':
            value = (result.type, result.p1, result.p2)

        types[key] = value

    return types
예제 #25
0
def convert_size_to_bytes(size_str: str):
    """Convert a human-readable size string (e.g. "12gb") to bytes.

    Returns None when the string cannot be parsed. Uses decimal (SI)
    multipliers, so "1kb" is 1000 bytes.
    """
    multipliers = {
        "kb": 10 ** 3,
        "mb": 10 ** 6,
        "gb": 10 ** 9,
        "tb": 10 ** 12,
    }
    amount = Word(nums + ',' + '.').setParseAction(
        lambda toks: float(toks[0])).setResultsName('size')
    unit = (CaselessLiteral('kb') ^ CaselessLiteral('mb')
            ^ CaselessLiteral('gb') ^ CaselessLiteral('tb')).setParseAction(
                lambda toks: multipliers[toks[0]]).setResultsName('mult')
    try:
        # thousands separators are stripped before parsing
        parsed = (amount + unit).parseString(size_str.replace(',', ''))
    except ParseException:
        return None
    return parsed.size * parsed.mult
예제 #26
0
def __ifaceAttributes___storm_check(storm, dct):
    """Parse a storm-control configuration line into dct.

    Tries, in order: a 'level' clause (appended under dct['level']), an
    'action' clause, then a 'type' clause (both via util.int_dict_parse).
    Returns the updated dct, or None when no grammar matches.
    """
    parse_level = Word(alphas) + Suppress('level ') + restOfLine
    parse_action = Suppress('action ') + Word(alphas)
    parse_type = Word(alphas) + Suppress(Optional("include")) + Word(alphas)
    try:
        value = parse_level.parseString(storm).asList()
        # setdefault replaces the manual key-exists branching
        dct.setdefault('level', []).append(value)
        return dct
    except ParseException:
        pass
    try:
        return util.int_dict_parse(parse_action, storm, 'action', dct)
    except ParseException:
        pass
    try:
        return util.int_dict_parse(parse_type, storm, 'type', dct)
    except ParseException:
        pass
    # implicit None: no grammar matched
예제 #27
0
 def parse(operator, string, digits=None):
     """Parse a valid interval from strings like '1800-1900' or '12'.

     If *operator* is one of ('<=', '>=', '>', '<'), only a single number is
     parsed and an open interval is constructed. If the optional parameter
     *digits* is given, only numbers with exactly that many digits match.
     Returns None when parsing fails.
     """
     stripped = string.strip()
     if digits is not None:
         number = Word(pyparsing.nums, exact=digits)
     else:
         number = Word(pyparsing.nums)
     try:
         if operator in ('<=', '>=', '>', '<'):
             date = int(number.parseString(stripped)[0])
             # dispatch table replaces the if/elif chain
             lo, hi = {
                 '>=': (date, None),
                 '<=': (None, date),
                 '>': (date + 1, None),
                 '<': (None, date - 1),
             }[operator]
             return Interval(lo, hi)
         # ^ is xor; | does not cooperate with parseAll=True (pyparsing quirk)
         grammar = number ^ (number + Suppress('-') + number)
         values = [int(v) for v in grammar.parseString(stripped, parseAll=True).asList()]
         # one value -> degenerate interval; two values -> [first, second]
         return Interval(values[0], values[-1])
     except pyparsing.ParseException:
         return None
예제 #28
0
파일: OxCal.py 프로젝트: AlephThot/pyC14
def parse_OxCal_data(oxcal_js_file):
    """Parse an OxCal JS output file, feeding each 'ocd[n]...' / 'calib[n]...'
    line to the matching OxCalData / Calibration object.

    NOTE(review): the parsed dictionaries are local and not returned in the
    visible source — possibly truncated; confirm against the original project.
    """
    myOCD = {}
    myCalib = {}

    # grammar: name [ number ] rest — loop-invariant, built once
    modele = Word(alphas) + '[' + Word(nums) + ']' + Word(printables)

    # `with` guarantees the file is closed
    with open(oxcal_js_file, 'r') as fichier:
        for ligne in fichier:
            try:
                parsed_data = modele.parseString(ligne)
            except ParseException:
                # 'except E:' replaces the Python-2-only 'except E, e:' syntax
                continue
            flg = parsed_data[0]
            nb = int(parsed_data[2])
            if flg == u"ocd":
                if nb not in myOCD:  # dict.has_key() was removed in Python 3
                    myOCD[nb] = OxCalData(nb)
                myOCD[nb].set_param(ligne)
            elif flg == u"calib":
                if nb not in myCalib:
                    myCalib[nb] = Calibration(nb)
                myCalib[nb].set_param(ligne)
예제 #29
0
from pyparsing import Word, alphas, nums, Optional

# --- grammar for a simple greeting --------------------------------------
greet = Word(alphas) + "," + Word(alphas) + "!"

greet_str = "Hello, World!"
print(greet_str, "->", greet.parseString(greet_str))

# --- parsing an assignment of an integer --------------------------------
assgnmt_str = "a = 34"

# option 1: positional tokens
assgnmt = Word(alphas) + "=" + Word(nums)
# float assignment is trickier; the line below works but yields 3 groups :-(
# assgnmt = Word(alphas) + "=" + Word(nums) + Optional("." + Word(nums))

tokens = assgnmt.parseString(assgnmt_str)
print(assgnmt_str, "->", tokens)
# the integer token still needs manual conversion
print(2 * int(tokens[2]))

# option 2: named parser parts
nam = Word(alphas)
integer = Word(nums)

assgnmt_v2 = nam("varname") + "=" + integer("value")
tokens = assgnmt_v2.parseString(assgnmt_str)
print(assgnmt_str, "->", tokens, repr(tokens))
print(tokens["varname"], tokens["value"])

# option 3
예제 #30
0
파일: parser2.py 프로젝트: bijanbina/Ijadi
#!/usr/bin/python
# Returns the executable file name that is merged in makefile.am
import sys
from pyparsing import Word, alphanums

input_text = sys.argv[1]

exec_name = Word(alphanums)

# NOTE(review): Word("bin_PROGRAMS") matches any run of those characters,
# not the literal token — pyparsing.Literal may have been intended; kept as-is.
am_expr = Word("bin_PROGRAMS") + Word("=") + exec_name

am_parsed = am_expr.parseString(str(input_text))

# print() replaces the Python-2-only print statement; for a single value the
# output is identical on Python 2 and 3
print(am_parsed[2])
예제 #31
0
import numpy as np
import matplotlib.pyplot as plt
from pyparsing import Word, alphas, nums

source = open("simpsonsRule_threadstest_100000.out", "r")
# grammar for: "label: int, label: float, label: float, label: int"
line = Word(alphas) + ':' + Word(nums) + ','
line = line + Word(alphas) + ':' + Word(nums + '.') + ','
line = line + Word(alphas) + ':' + Word(nums + '.') + ','
line = line + Word(alphas) + ':' + Word(nums)

# loop variable renamed so the comprehension does not shadow the grammar name
src = [raw.strip() for raw in source]
time = []
threads = []
for entry in src:
    time.append(line.parseString(entry)[10])
for entry in src:
    threads.append(line.parseString(entry)[14])

print(time)
print(threads)

x_10 = np.array(threads, dtype=float)
y_10 = np.array(time, dtype=float)

source = open("simpsonsRule_threadstest_1410065408.out", "r")
line = Word(alphas) + ':' + Word(nums) + ','
line = line + Word(alphas) + ':' + Word(nums + '.') + ','
line = line + Word(alphas) + ':' + Word(nums + '.') + ','
line = line + Word(alphas) + ':' + Word(nums)

src = [raw.strip() for raw in source]
예제 #32
0
def extract_moments(file_directory):
    """Recursively collect and parse every ``.mom`` file under *file_directory*.

    Returns a dict of the form ``{'moments': [{<atom-label>: {...}}, ...]}``
    where each entry records the atom label, the scheme name (file stem), the
    atom type, its rank, a string-formatted multipole-moment table and the
    source file path.
    """

    # list to host directory for each unique .mom file
    mom_files = []
    # search through entire file hierarchy to find all .mom files to parse
    for root, dirs, files in os.walk(file_directory):
        for i in files:
            # select only .mom files and add them to list
            if os.path.splitext(os.path.basename(root + '/' + i))[1] == '.mom':
                mom_files.append(root + '/' + i)

    # define parsing grammar
    # find the df type from .mom file
    df_parser = Keyword("! Based on DF-type :") + Word(alphas)

    # parse floating point numbers
    float_parser = Combine(Optional('-') + Word(nums) + '.' + Word(nums))
    mom_parser = OneOrMore(float_parser)

    # parse a line common to all .mom files with the following structure: ATOM  X   Y   Z   Type <ATOM-TYPE>   Rank K
    atom_line = Word(alphas + nums) + mom_parser + OneOrMore(Word(alphas)) + Word(nums)

    # lists for storing information.
    # BUG FIX: the original used `a, b, c, d = ([],)*4`, which binds all four
    # names to the SAME list object, so every append corrupted the others.
    error_array = []
    df_array = []
    coords = []
    atom = []

    # json array to compile each atom multipole moment info
    json_result = {'moments': []}

    # empty dataframe to host atom information
    df = pd.DataFrame(data={'atom': [], 'type': [], 'rank': []})

    # dictionary to store sorted moment values
    atom_mom = {}

    # using pyparsing's 'search string' method
    for elem in mom_files:

        # open file
        file_object = open(elem, 'r')
        lines = file_object.readlines()

        mom_name = os.path.basename(elem)
        mom_name = os.path.splitext(mom_name)[-2]

        # parse lines for data extraction
        for line in lines:

            # get df type first
            try:
                res1 = df_parser.parseString(line)
                df_array.append(res1[1])
            except Exception:
                print('No DF-Type is specified in file')
                error_array.append('No DF-Type is specified in file')

            # get information about each atom
            try:
                res2 = atom_line.parseString(line)
                atom.append(res2[0])
                # renamed from `type` to avoid shadowing the builtin
                atom_type = res2[5]
                rank = (res2[7])
                df = df.append({'atom': atom[len(atom) - 1], 'type': atom_type, 'rank': rank}, ignore_index=True)

            except Exception:
                print('This is not a atom type line')

            # get moment values
            try:
                res3 = mom_parser.parseString(line)
                for i in res3:
                    coords.append(i)
            except Exception:
                print('This is not a moment value')

        coords_float = []

        for val in coords:
            # BUG FIX: np.float was removed in NumPy 1.24; the builtin float
            # is what that alias always meant.
            coords_float.append(float(val))

        # assign the correct values from coords to the right moment value
        for i in range(len(atom)):

            # get atom information (+1 is so that Q0 is also counted as well as Q4: Q0, Q1, Q2, Q3, Q4)
            name = df.iloc[i]['atom']
            r = int(df.iloc[i]['rank']) + 1

            # BUG FIX: `([],)*3` aliased all three names to one list as well
            array = []
            tot = []
            cum_sum = []

            # find correct number of coordinates to fill each moment configuration with
            for ii in range(r):
                s = 2 * ii + 1
                tot.append(s)
                # NOTE(review): cum_sum is rebound to the last cumulative-sum
                # list each pass and is never read afterwards — looks vestigial.
                cum_sum.append(np.cumsum(tot).tolist())
                cum_sum = cum_sum[len(cum_sum) - 1]

            total = np.sum(tot)

            # value in tot is the number of values stored in each Q layer
            for value in tot:
                temporary_array = []

                # k in range() ensures the correct number of moments is filled in each layer
                for k in range(value):
                    temporary_array.append(coords_float[k])

                array.append(temporary_array)

                for kkk in temporary_array:
                    coords_float.pop(coords_float.index(kkk))

            # fill a dictionary with atoms as keys and moments as values
            atom_mom[atom[i]] = array

        # need to loop over elements in dataframe to add each value for every atom in atom_mom, here we need to do
        # the JSON method too and probably wipe the contents of the dataframe at the beginning of each new loop
        for idx, kk in enumerate(atom_mom):

            # need to reshape lists, first, find max value list can be
            max_len = max(len(i) for i in atom_mom[kk])

            # pad short lists with NaN
            for col in atom_mom[kk]:
                col.extend((max_len - len(col)) * [np.nan])

            # convert to array
            arr = np.asarray(atom_mom[kk]).T

            # create indices depending on rank
            indices = []
            for ind in range(1, len(atom_mom[kk])):
                one = f'{ind}s'
                two = f'{ind}c'
                indices.append(one)
                indices.append(two)
            indices.insert(0, '0')

            # create df depending on rank also
            df_mom = pd.DataFrame(
                arr,
                columns=[f'Q{i}' for i in range(0, len(atom_mom[kk]))],
                index=indices
            )

            moments_string = df_mom.to_string().splitlines()

            mom_data = {

                'atom': atom[idx],
                'scheme': mom_name,
                'type': df['type'][idx],
                'rank': df['rank'][idx],
                'moments': moments_string,
                'file': elem

            }
            json_result['moments'].append({kk: mom_data})

        # save this information to a json file called mom_test.json
        # with open("mom_test.json", "w") as mom_json:
        #     json.dump(json_result, mom_json, indent=4)

    return json_result
예제 #33
0
from pyparsing import Word, Suppress, alphas, nums, alphanums

# example 1: comma-separated greeting; whitespace between tokens is skipped
greeting = Word(alphas) + "," + Word(alphas) + "!"
print(greeting.parseString("Hello,     World!"))  # ['Hello', ',', 'World', '!']
print(greeting.parseString("Hello,John!"))        # ['Hello', ',', 'John', '!']

# example 2: single-letter variable, '=', then a tiny arithmetic expression
equation = Word(alphas, max=1) + "=" + Word(nums) + Word("+-*/", max=1) + Word(nums)
print(equation.parseString("x=2+2"))              # ['x', '=', '2', '+', '2']
print(equation.parseString("r= 1234/ 100000"))    # ['r', '=', '1234', '/', '100000']

# example 3: Suppress drops the matched angle brackets from the result
tag = Suppress('<') + Word(alphanums + '_-') + Suppress('>')
print(tag.parseString("<my_var>"))                # ['my_var']
예제 #34
0
def extract_coordinates(file_directory):
    """Parse atomic coordinates from the ``.clt`` and ``.xyz`` files in
    *file_directory* and return a ``coord_data`` summary dict (file path,
    dataframe JSON, errors, units and a formatted coordinate table).
    """
    # list molecule files: below is an example for arg:
    clt_files = glob.glob(file_directory + '*.clt')
    xyz_files = glob.glob(file_directory + '*.xyz')

    # empty dict to host df's
    molecules_from_files = {}

    # definitions of all the parsing tools used
    # (duplicated characters inside Word(...) are harmless: Word takes a set)
    clt_parser = Word(alphas + nums) + Word(nums + '.' + nums) + Word(printables + '.' + printables) + \
                 Word(printables + '.' + printables) + Word(printables + '.' + printables)

    xyz_parser = Word(alphas) + Word(printables + '.' + printables) + Word(printables + '.' + printables) + \
                 Word(printables + '.' + printables)

    bohr = Keyword('Bohr')
    angstrom = Keyword('Angstrom')
    word = ~bohr + Word(alphas)
    sentence = OneOrMore(word)
    split_bohr = sentence('unit') + bohr + sentence('degree')
    split_angstrom = Keyword('Units') + angstrom
    unit_array = []
    error_array = []

    # empty dataframe to host data
    df = pd.DataFrame(data={'label': [], 'atomic_number': [], 'x': [], 'y': [], 'z': []})

    # NOTE(review): due to operator precedence this reads
    # `len(clt) or len(xyz) or (total > 1)` — it is truthy whenever ANY file
    # exists, so the `else` branch below only runs with zero files, where
    # `unit_array[0]` at the bottom will raise IndexError.  The intended
    # condition was probably `len(clt_files) + len(xyz_files) > 1` — confirm.
    if len(clt_files) or len(xyz_files) or (len(clt_files) + len(xyz_files)) > 1:

        # extract information from .clt files
        for elem in clt_files:

            # open file
            file_object = open(elem, 'r')
            lines = file_object.readlines()

            molecule_name = os.path.basename(elem)
            molecule_name = os.path.splitext(molecule_name)[-2]

            # parse lines for data extraction
            for line in lines:

                # find out units of distance in file
                try:
                    res1 = split_bohr.parseString(line)
                    if res1[1] == 'Bohr':
                        unit_array.append(res1[1])
                except Exception:
                    print('invalid unit line')
                try:
                    res2 = split_angstrom.parseString(line)
                    if res2[1] == 'Angstrom':
                        unit_array.append(res2[1])

                except Exception:
                    print('not valid data line')

                try:
                    parsed_lines = clt_parser.parseString(line)
                    list_conversion = list(parsed_lines)
                    df = df.append(
                        {'label': list_conversion[0][0], 'atomic_number': float(list_conversion[1]),
                         'x': float(list_conversion[2]), 'y': float(list_conversion[3]),
                         'z': float(list_conversion[4])}, ignore_index=True)
                    print('valid data line')
                except Exception:
                    print('not valid data line')

            # perform necessary unit conversions
            # (1.89 is presumably the bohr-per-angstrom factor, 1.8897 — confirm)
            if len(unit_array) == 1:
                if unit_array[0] == 'Angstrom':
                    print('Units are Angstrom')
                elif unit_array[0] == 'Bohr':
                    df['x'] = df['x'] / 1.89
                    df['y'] = df['y'] / 1.89
                    df['z'] = df['z'] / 1.89
            # ambiguous case, just have to assume it's bohr but add line in summary file explaining ambiguity
            elif len(unit_array) == 2:
                print('ambiguous units')
                unit_array.clear()
                unit_array.append('Unknown')
                error_array.append('Ambiguous unit in file.')

            molecules_from_files[str(molecule_name)] = df

            if len(clt_files) > 1:
                print('there are too many clt files, ambiguous')
                error_array.append('There are too many .clt files, see file origin above for file used.')

        # extract information from .xyz files:
        for elem in xyz_files:

            # empty dataframe to host data
            df_2 = pd.DataFrame(data={'label': [], 'atomic_number': [], 'x': [], 'y': [], 'z': []})

            # open file
            file_object = open(elem, 'r')
            lines = file_object.readlines()

            molecule_name = os.path.basename(elem)
            molecule_name = os.path.splitext(molecule_name)[-2]

            # parse lines for data extraction
            for line in lines:

                try:
                    parsed_lines = xyz_parser.parseString(line)
                    list_conversion = list(parsed_lines)
                    atomic_number_df = atom_database.database[atom_database.database['symbol'] == list_conversion[0]]
                    atomic_number = atomic_number_df.iloc[0][0]
                    df_2 = df_2.append(
                        {'label': list_conversion[0], 'atomic_number': atomic_number,
                         'x': float(list_conversion[1]), 'y': float(list_conversion[2]),
                         'z': float(list_conversion[3])}, ignore_index=True)
                    print('valid data line')
                except Exception:
                    print('not valid data line')
            molecules_from_files[str(molecule_name)] = df_2

        if len(error_array) == 0:
            error_array.append('No errors')

        # create a json object that will be saved to a summary file
        # NOTE(review): coord_data is rebuilt on every iteration, so only the
        # last molecule's version survives — and it is overwritten again by
        # the unconditional rebuild just before the return.
        for key, value in molecules_from_files.items():
            coord_string = value.to_string().splitlines()

            coord_data = {

                'coordinates': {

                    'file': file_directory,
                    'data frame': df.to_json(),
                    'errors': error_array,
                    'units': unit_array[0],
                    'coordinates': coord_string

                }

            }

            # save information to json file called coord_test.json
            # with open("coord_test.json", "w") as coord_json:
            #     json.dump(coord_data, coord_json, indent=4)

    else:

        # extract information from .clt files
        # (this branch duplicates the one above but stores no per-molecule data)
        for elem in clt_files:

            # open file
            file_object = open(elem, 'r')
            lines = file_object.readlines()

            molecule_name = os.path.basename(elem)
            molecule_name = os.path.splitext(molecule_name)[-2]

            # parse lines for data extraction
            for line in lines:

                # find out units of distance in file
                try:
                    res1 = split_bohr.parseString(line)
                    if res1[1] == 'Bohr':
                        unit_array.append(res1[1])
                except Exception:
                    print('invalid unit line')
                try:
                    res2 = split_angstrom.parseString(line)
                    if res2[1] == 'Angstrom':
                        unit_array.append(res2[1])

                except Exception:
                    print('not valid data line')

                try:
                    parsed_lines = clt_parser.parseString(line)
                    list_conversion = list(parsed_lines)
                    df = df.append(
                        {'label': list_conversion[0][0], 'atomic_number': float(list_conversion[1]),
                         'x': float(list_conversion[2]), 'y': float(list_conversion[3]),
                         'z': float(list_conversion[4])}, ignore_index=True)
                    print('valid data line')
                except Exception:
                    print('not valid data line')

            # perform necessary unit conversions
            if len(unit_array) == 1:
                if unit_array[0] == 'Angstrom':
                    print('Units are Angstrom')
                elif unit_array[0] == 'Bohr':
                    df['x'] = df['x'] / 1.89
                    df['y'] = df['y'] / 1.89
                    df['z'] = df['z'] / 1.89
            # ambiguous case, just have to assume it's bohr but add line in summary file explaining ambiguity
            elif len(unit_array) == 2:
                print('ambiguous units')
                unit_array.clear()
                unit_array.append('Unknown')
                error_array.append('Ambiguous unit in file.')

        # extract information from .xyz files:
        for elem in xyz_files:

            # empty dataframe to host data
            df_2 = pd.DataFrame(data={'label': [], 'atomic_number': [], 'x': [], 'y': [], 'z': []})

            # open file
            file_object = open(elem, 'r')
            lines = file_object.readlines()

            molecule_name = os.path.basename(elem)
            molecule_name = os.path.splitext(molecule_name)[-2]

            # parse lines for data extraction
            for line in lines:

                try:
                    parsed_lines = xyz_parser.parseString(line)
                    list_conversion = list(parsed_lines)
                    atomic_number_df = atom_database.database[atom_database.database['symbol'] == list_conversion[0]]
                    atomic_number = atomic_number_df.iloc[0][0]
                    df_2 = df_2.append(
                        {'label': list_conversion[0], 'atomic_number': atomic_number,
                         'x': float(list_conversion[1]), 'y': float(list_conversion[2]),
                         'z': float(list_conversion[3])}, ignore_index=True)
                    print('valid data line')
                except Exception:
                    print('not valid data line')

        if len(error_array) == 0:
            error_array.append('No errors')

        # create a json object that will be saved to a summary file

    coord_string = df.to_string().splitlines()

    # NOTE(review): unit_array[0] raises IndexError when no unit line matched
    coord_data = {

        'coordinates': {

            'file': file_directory,
            'data frame': df.to_json(),
            'errors': error_array,
            'units': unit_array[0],
            'coordinates': coord_string

        }

    }

    # with open("coord_test.json", "w") as coord_json:
    #     json.dump(coord_data, coord_json, indent=4)

    return coord_data
예제 #35
0
# Python 2 script: pull "(id {"title":"..."})" records out of /tmp/nohup.out.
# NOTE(review): punc8bit is not exported by modern pyparsing releases — verify.
from pyparsing import punc8bit, OneOrMore, nums, Word, alphas, Group
import pymongo
# define grammar
quotes = '"'
colon = ":"
title = quotes + "title" + quotes

# punctuation characters allowed inside a title value
punc = '/' + "(" + ")" + "." + "-" + "," + "'" + "&" + "?" + "!" + "*" + "+" + "#" + "@"
punc = punc + "%" + "=" + ";" + "~" + "`" + "|" + "$" + "\\"
val = quotes + Group(OneOrMore(Word(alphas + nums + punc))) + quotes

lbracket = '{'
rbracket = '}'
# NOTE(review): rparen is "(" — likely a typo for ")"; both are unused below.
lparen = "("
rparen = "("
# sample line from file
## (12 {"title":"Autism fooo"})

# full record: numeric id, '{', '"title"', ':', quoted word group, '}'
val = Word(nums) + lbracket + title + colon + val + rbracket
for i in open("/tmp/nohup.out"):
    try:
        # drop the surrounding parenthesis / trailing newline
        strng = i[1:-1]
        val_array = val.parseString(strng)
        # Python 2 print; token 0 = id, token 5 = grouped title words
        print val_array[0], " ".join(val_array[5])
    except:
        # bare except: silently skips lines that do not match the grammar
        pass
예제 #36
0
    # list to contain extracted information
    full_data_set = []

    # parse lines for data extraction
    for line in lines:

        try:
            # define how to read floats
            float_definition = Regex(r'[+-]?\d+\.\d*')

            # parse for the element and 3 spatial coords
            parse_float = Word(
                alphas
            ) + float_definition + float_definition + float_definition
            parsed_line = parse_float.parseString(line)
            list_conversion = list(parsed_line)
            full_data_set.append(list_conversion)

        except Exception:
            print('invalid data line')

    # make newfile and prepare to write
    new_file_name = os.path.splitext(txt[elem])[0] + '.xyz'
    new_file = open(new_file_name, 'w')
    new_file.write(
        str(len(full_data_set)) + '\n' + os.path.splitext(txt[elem])[0] +
        '\n' + '\n' + '\n')

    # add atomic coords and elements
    for i in full_data_set:
예제 #37
0
    mom_name = os.path.splitext(mom_name)[-2]

    # parse lines for data extraction
    for line in lines:

        # get df type first
        try:
            res1 = df_parser.parseString(line)
            df_array.append(res1[1])
        except Exception:
            print('No DF-Type is specified in file')
            error_array.append('No DF-Type is specified in file')

        # get information about each atom
        try:
            res2 = atom_line.parseString(line)
            atom.append(res2[0])
            type = res2[5]
            rank = (res2[7])
            df = df.append({'atom': atom[len(atom) - 1], 'type': type, 'rank': rank}, ignore_index=True)

        except Exception:
            print('This is not a atom type line')

        # get moment values
        try:
            res3 = mom_parser.parseString(line)
            for i in res3:
                coords.append(i)
        except Exception:
            print('This is not a moment value')
예제 #38
0
# vim:fileencoding=utf-8 
#
# greetingInGreek.py
#
# Demonstration of the parsing module, on the prototypical "Hello, World!" example
#
# NOTE(review): Python 2 only — relies on unichr/xrange, str.decode and the
# print statement.
from pyparsing import Word 

# define grammar: any word made of letters from the Greek Unicode block
alphas = u''.join(unichr(x) for x in xrange(0x386, 0x3ce)) 
greet = Word(alphas) + u',' + Word(alphas) + u'!' 

# input string (decode the source file's UTF-8 bytes into unicode)
hello = "Καλημέρα, κόσμε!".decode('utf-8') 

# parse input string
print greet.parseString( hello )
예제 #39
0
from pyparsing import punc8bit,OneOrMore,nums,Word, alphas,Group
import pymongo
# define grammar
quotes = '"'
colon = ":"
title = quotes+"title"+quotes


punc = '/'+"("+")"+"."+"-"+","+"'"+"&"+"?"+"!"+"*"+"+"+"#"+"@" 
punc = punc +"%"+"="+";"+"~"+"`"+"|"+"$"+"\\" 
val = quotes+Group(OneOrMore(Word(alphas+nums+punc)))+quotes

lbracket = '{'
rbracket = '}'
lparen = "("
rparen = "("
# sample line from file 
## (12 {"title":"Autism fooo"}) 

val = Word(nums)+lbracket+title+colon+val+rbracket
for i in open("/tmp/nohup.out"):
	try:
		strng = i[1:-1]   
     		val_array= val.parseString(strng)
		print val_array[0]," ".join(val_array[5])
	except:
		pass;
예제 #40
0
파일: parser.py 프로젝트: AsierO/menu_9
def file_len(fname):
    """Return the number of lines in the file *fname*.

    BUG FIX: the original read the last value of the enumerate() loop
    variable, which is never assigned for an empty file and raised
    UnboundLocalError.  This version returns 0 for an empty file.
    """
    count = 0
    with open(fname) as f:
        # enumerate from 1 so `count` ends up equal to the line total
        for count, _ in enumerate(f, start=1):
            pass
    return count


# characters that may appear inside words (currently unused below)
symbols="-'"



# Python 2 script: read the Dale-Chall word list, keep the first parsed word
# of each line, and pickle the result.  File handles are left to the GC.
txt=open("dale_chall.txt")
n_lines=file_len("dale_chall.txt")
n_count=1
word_list=[]
print n_lines

for i in range(n_lines):
    line=txt.readline()
    print 'line', line
    # NOTE(review): the grammar is rebuilt every iteration, and a blank line
    # would raise an uncaught ParseException here — verify input is dense.
    parse1=Word(alphas)
    parsed=parse1.parseString(line)
    print 'parsing', parsed[0],parsed
    word_list.append(parsed[0])

print len(word_list), word_list

# persist the extracted words for later reuse
pickle.dump( word_list, open('dale_chall.p', "wb" ) )


    starting_index = site_contents.find(
        "Fayetteville", site_contents.find("Soybean Processors")
    )  # starting index of section of website to gather data from
    ending_index = site_contents.find(
        "Source", starting_index
    )  # ending index of section of website to gather data from

    city_names = ["FAYETTEVILLE", "RALEIGH"]  # names of two cities
    city_list = []  # will hold data for each city
    line = Word(alphas) + ZeroOrMore(
        Word(printables)
    )  # line starts with a city name and is either followed by numbers or no data
    break_point = site_contents.find("\r\n", starting_index)
    fayetteville = line.parseString(
        site_contents[starting_index:break_point]
    )  # fayetteville data is from the city name to the end of the line
    raleigh = line.parseString(
        site_contents[break_point:ending_index]
    )  # raleigh data is from the end of the previous line to the end of the website section
    fayetteville.insert(1, 0)  # insert 0 after city name in case there is no data for that city
    raleigh.insert(1, 0)
    city_list.append(fayetteville)  # add city data to city_list
    city_list.append(raleigh)

    y = 0
    while y < 2:
        headings = ["Date", "Meal (48% Protein) Dollars per ton"]
        data = {
            "Date": [string_date],
            "Meal (48% Protein) Dollars per ton": [city_list[y][len(city_list[y]) - 1]],
예제 #42
0
    fn = sys.argv[1]
except IndexError:
    raise Exception('Input a filename to translate')

with open(fn, 'r') as fh:
    for line in fh:
        if line.startswith('#'):
            continue
        elements = line.rstrip('\n').split(' ')
        if elements[0] != 'set_property':
            continue
        if elements[1] == 'IOSTANDARD':
            pinline = elements[-1].rstrip(']').rstrip('}').lstrip('{')
            iostd = elements[2]
            try:
                name, foo, idx, bar = vector_pin.parseString(pinline)
                is_vec = True
            except ParseException:
                name = scalar_pin.parseString(pinline)[0]
                is_vec = False
            except:
                raise Exception(pinline)
            if name not in list(pins.keys()):
                pins[name] = {}
            if 'IOSTD' not in list(pins[name].keys()):
                pins[name]['IOSTD'] = {}
            if is_vec:
                pins[name]['IOSTD'][int(idx)] = iostd
            else:
                pins[name]['IOSTD'] = iostd
예제 #43
0
from pyparsing import Literal,CaselessLiteral,Word,Combine,Group,Optional,\
    ZeroOrMore,OneOrMore,Forward,nums,alphas, SkipTo, alphanums, \
    srange, quotedString, dblQuotedString
import operator

# define grammar (Python 2 print statements throughout this script)
greet = Word( alphas ) + "," + Word( alphas ) + "!"

# input string
hello = "Hello, World!"

# parse input string
print hello, "->", greet.parseString( hello )

# read the whole .dot file into one string
dotString = ""
with open("dot_test.dot", 'r') as f:
	dotString = f.read()

# crude check of a DOT header: plain strings act as literals here, so this
# only matches the very beginning of the file ('digraph <name> { <word>')
parsing_test = "digraph" + Word( alphas ) + "{" + Word( alphas )
print dotString, "->", parsing_test.parseString( dotString )

# global stack shared with the parse actions defined below
exprStack = []

def pushFirst( strg, loc, toks ):
	"""Parse action: push the first matched token onto the global exprStack
	(Python 2 print statement used for tracing)."""
	exprStack.append( toks[0] )
	print exprStack
def pushUMinus( strg, loc, toks ):
	"""Parse action: record a unary minus on the global exprStack."""
	leading_minus = bool(toks) and toks[0] == '-'
	if leading_minus:
		exprStack.append('unary -')

# '+' literal for the arithmetic grammar (definition continues past this chunk)
plus  = Literal( "+" )
예제 #44
0
# greeting.py
#
# Demonstration of the parsing module, on the prototypical "Hello, World!" example
#
# Copyright 2003, by Paul McGuire
#
# NOTE(review): Python 2 print statement below; the rest runs on 3 as well.
from pyparsing import Word, alphas

# define grammar: word ',' word '!'
greet = Word(alphas) + "," + Word(alphas) + "!"

# input string
hello = "Hello, World!"

# parse input string
print hello, "->", greet.parseString(hello)
예제 #45
0
# Example in Python 2.x
from pyparsing import Word, alphas

# grammar: word ',' word '!'
greet = Word(alphas) + "," + Word(alphas) + "!"
# NOTE(review): "Una2palabra" contains a digit, so Word(alphas) stops at
# "Una" and the expected ',' is not found — this parse presumably raises
# a ParseException; confirm whether that is the intended demonstration.
greeting = greet.parseString("Una2palabra, World!")
# Python 2 print statement
print greeting
예제 #46
0
파일: parser4.py 프로젝트: bijanbina/Ijadi
def toStr(buf):
    """Concatenate a sequence of strings into a single string.

    Improvements over the original index loop: runs in O(n) via str.join
    instead of O(n^2) repeated concatenation, and accepts an empty
    sequence (returns '') instead of raising IndexError on buf[0].
    """
    return ''.join(buf)

input_file = sys.argv[1]
input_exec_name = sys.argv[2]

# Read the whole Makefile.am into memory, closing the handle afterwards.
# (Renamed `list` -> `lines`: the original shadowed the builtin.)
f = open(input_file, "r")
lines = f.readlines()
f.close()

# BUG FIX: the original looped over range(0, len(input_file)-1) — the length
# of the *filename* string, not the number of lines — so long files were only
# partially searched.  Scan every line instead.
amtext = None
for line in lines:
    if line.find("bin_PROGRAMS") != -1:
        amtext = line
        break

exec_name = Word( alphanums )

am_expr = Word("bin_PROGRAMS") + Word("=") + exec_name

am_parsed = am_expr.parseString(str(amtext))

# Third token is the old executable name after 'bin_PROGRAMS ='.
ptext = am_parsed[2]

# Replace every occurrence of the old name with the requested one.
for i in range(len(lines)):
    lines[i] = lines[i].replace(ptext, input_exec_name)

# single-argument parenthesized print: valid on both Python 2 and 3
print(toStr(lines))
    
예제 #47
0
from pyparsing import Word, alphas, OneOrMore, Literal, oneOf

# define grammar
greet = Word(alphas) + "," + Word(alphas) + "!"

# input string
hello = "Hello, World!"

# parse input string (Python 2 print statement)
print hello, "->", greet.parseString(hello)


# define grammar for a more complex case:
# words may contain apostrophes and periods (e.g. "Mr." or "how's")
word = Word(alphas+"'.")
salutation = OneOrMore(word)
comma = Literal(",")
greete = OneOrMore(word)
endpunc = oneOf("? !")
greeting = salutation + comma + greete + endpunc

test_cases = ["Hello, Sidharth!", "Hello, Sidharth how is your day?"]
# map() applies the parser to every sample (Python 2: map returns a list)
print map(greeting.parseString, test_cases)
예제 #48
0
            try:
                res1 = split_bohr.parseString(line)
                if res1[1] == 'Bohr':
                    unit_array.append(res1[1])
            except Exception:
                print('No units given in this line')
            try:
                res2 = split_angstrom.parseString(line)
                if res2[1] == 'Angstrom':
                    unit_array.append(res2[1])

            except Exception:
                print('No units given in this line')

            try:
                parsed_lines = clt_parser.parseString(line)
                list_conversion = list(parsed_lines)
                df = df.append(
                    {
                        'label': list_conversion[0][0],
                        'atomic_number': float(list_conversion[1]),
                        'x': float(list_conversion[2]),
                        'y': float(list_conversion[3]),
                        'z': float(list_conversion[4])
                    },
                    ignore_index=True)

            except Exception:
                print('Not a valid data line')

        # perform necessary unit conversions
예제 #49
0
# vim:fileencoding=utf-8 
#
# greetingInGreek.py
#
# Demonstration of the parsing module, on the prototypical "Hello, World!" example
#
from pyparsing import Word 

# define grammar: any word made of letters from the Greek Unicode block
alphas = ''.join(chr(x) for x in range(0x386, 0x3ce)) 
greet = Word(alphas) + ',' + Word(alphas) + '!' 

# input string
# BUG FIX: the original called .decode('utf-8') on this literal — a Python 2
# bytes method that raises AttributeError on Python 3, while chr(x) with
# x > 255 is invalid on Python 2.  The script could not run on EITHER
# interpreter; on Python 3 the literal is already text, so no decode needed.
hello = "Καλημέρα, κόσμε!"

# parse input string
print(greet.parseString( hello ))
예제 #50
0
#
# greetingInGreek.py
#
# "Hello, World!" parsing demo using pyparsing's built-in Unicode range sets.
#
# Copyright 2004-2016, by Paul McGuire
#
from pyparsing import Word, pyparsing_unicode as ppu

# the Greek alphabet comes straight from pyparsing's Unicode support
alphas = ppu.Greek.alphas

# grammar: <greek word> ',' <greek word> '!'
greeting = Word(alphas) + ',' + Word(alphas) + '!'

# sample input to parse
hello = "Καλημέρα, κόσμε!"

print(greeting.parseString(hello))
예제 #51
0
input_file = sys.argv[1]
input_version = sys.argv[2]

# Read configure.ac into memory, closing the handle afterwards.
# (Renamed `list` -> `lines`: the original shadowed the builtin.)
f = open(input_file, "r")
lines = f.readlines()
f.close()

# BUG FIX: the original scanned range(0, len(input_file)-1) — the length of
# the *filename*, not the number of lines — so the AC_INIT line could easily
# be missed.  It also relied on the leaked loop variable `i` after the loop;
# the matched index is now remembered explicitly.
ac_index = None
for i in range(len(lines)):
    if lines[i].find("AC_INIT") != -1:
        actext = lines[i]
        ac_index = i
        break

name = Word( alphanums )
version =  Word ( alphanums + '.' )
bug_report = Word ( alphanums )
tar_name = Word ( alphanums )

ac_expr = Word('AC_INIT') + Word('(') + Word('[') + name + Word(']') + Word(',') + Word('[') + version + Word(']') + Optional(Word(',') + Word('[') + bug_report + Word(']')) + Optional(Word(',') + Word('[') + tar_name + Word(']')) + Word(')')

ac_parsed = ac_expr.parseString(str(actext))

# Token 7 is the version field inside AC_INIT([name], [version], ...).
ac_parsed[7] = input_version

#stick list elements together
buffer = toStr(ac_parsed) + "\n"

# write the rewritten AC_INIT line back at its original position
lines[ac_index] = buffer

# single-argument parenthesized print: valid on both Python 2 and 3
print(toStr(lines))
예제 #52
0
파일: ex0.py 프로젝트: hugolu/learn-test
from pyparsing import Word, StringEnd, alphas

# Without StringEnd the grammar matches the leading word and ignores the rest.
noEnd = Word(alphas)
print(noEnd.parseString('Dorking...'))

# With StringEnd the whole input must be consumed, so the trailing '...'
# presumably makes this second parse raise a ParseException — that contrast
# is the point of the demo.
withEnd = Word(alphas) + StringEnd()
print(withEnd.parseString('Dorking...'))
예제 #53
0
 def __init__(self, tempo):
     """Parse an 'HH:MM' time string and store hours and minutes as ints."""
     clock_grammar = Word(nums) + Suppress(':') + Word(nums)
     parts = clock_grammar.parseString(tempo)
     self.horas = int(parts[0])
     self.minutos = int(parts[1])
예제 #54
0
'''
Created on Jun 2, 2015

@author: root
'''
import pyparsing
from pyparsing import Word, alphas
# NOTE(review): this scipy import (the fine-structure constant) is never used.
from scipy.constants.constants import alpha

# Construct the grammar: word ',' word (no trailing '!' in this variant).
greet = Word(alphas)+","+Word(alphas)
greeting = greet.parseString("hello, world")
# Python 2 print statement.
print greeting
예제 #55
0
# -*- coding: utf-8 -*-

# written by Marco Alfonso, November 2004

# import the required symbols from the module
from pyparsing import Word, alphas, oneOf, nums, Group, OneOrMore, pyparsing_unicode as ppu

# use the Latin-1 letters, which include characters such as 'ñ', 'á', 'é', etc.
alphas = ppu.Latin1.alphas

# Here we say that the "saludo" (greeting) grammar MUST contain a word made
# of alphabetic characters (Word(alphas)), plus a ',', plus another
# alphabetic word, plus '!' — and those are our tokens
saludo = Word(alphas) + ',' + Word(alphas) + oneOf('! . ?')
tokens = saludo.parseString("Hola, Mundo !")

# Now we parse the string "Hola, Mundo !"; the parseString method returns
# a list of the tokens found, provided there were no errors...
for i, token in enumerate(tokens):
    print("Token %d -> %s" % (i, token))

# we print each of the tokens, and that's it! Here is the output:
# Token 0 -> Hola
# Token 1 -> ,
# Token 2-> Mundo
# Token 3 -> !

# now change the parser, accepting greetings with more than one word before the ','
saludo = Group(OneOrMore(Word(alphas))) + ',' + Word(alphas) + oneOf('! . ?')
예제 #56
0
from pyparsing import Word, OneOrMore, ZeroOrMore
# NOTE(review): string.lowercase exists only on Python 2
# (it became string.ascii_lowercase on Python 3).
from string import lowercase

# a word is one run of lowercase letters
word = Word(lowercase)

# Python 2 print statements throughout
print word.parseString('hello')

# one or more consecutive words
sentnce = OneOrMore(word)


print sentnce.parseString('hello world')

# exactly two consecutive words
two_words = word + word

print two_words.parseString('cosa loca')
# -*- coding: utf-8 -*-

# http://pythonhosted.org/pyparsing/

from pyparsing import Word, alphas, Suppress, Group, OneOrMore, FollowedBy, ZeroOrMore

# greeting grammar: word ',' word '!'
greeting = Word(alphas) + "," + Word(alphas) + "!"

hello = "Hello, World!"
parsed_hello = greeting.parseString(hello)
print(hello, "->", parsed_hello)
print("length : ", len(parsed_hello))

# Suppress() drops the comma from the result; Group() nests the word list.
comma = Suppress(",")
multi_word = Word(alphas) + comma + Group(OneOrMore(Word(alphas))) + "!"
yes = "Yes, You can!"
parsed_yes = multi_word.parseString(yes)
print(parsed_yes)
print("length : ", len(parsed_yes))

# http://pythonhosted.org/pyparsing/pyparsing.OneOrMore-class.html
#
# OneOrMore: repetition of one or more of the given expression.
#   expr   - expression that must match one or more times
#   stopOn - (default=None) expression for a terminating sentinel (only
#            required if the sentinel would ordinarily match the repetition)
data_word = Word(alphas)
label = data_word + FollowedBy(':')
예제 #58
0
    fn = sys.argv[1]
except IndexError:
    raise Exception('Input a filename to translate')

with open(fn, 'r') as fh:
    for line in fh:
        if line.startswith('#'):
            continue
        elements = line.rstrip('\n').split(' ')
        if elements[0] != 'set_property':
            continue
        if elements[1] == 'IOSTANDARD':
            pinline = elements[-1].rstrip(']').rstrip('}').lstrip('{')
            iostd = elements[2]
            try:
                name, foo, idx, bar = vector_pin.parseString(pinline)
                is_vec = True
            except ParseException:
                name = scalar_pin.parseString(pinline)[0]
                is_vec = False
            except:
                raise Exception(pinline)
            if name not in pins.keys():
                pins[name] = {}
            if 'IOSTD' not in pins[name].keys():
                pins[name]['IOSTD'] = {}
            if is_vec:
                pins[name]['IOSTD'][int(idx)] = iostd
            else:
                pins[name]['IOSTD'] = iostd