def parseImpl(self, instring, loc, doActions=True):
    """Match a uid reference at ``loc`` and resolve it through the db.

    Returns ``(new_loc, db_object)``. Any pyparsing failure is replaced
    with our own ParseException; an unknown uid raises NoSuchUidError.
    """
    try:
        match = self.pattern.parseString(instring[loc:])
        if not match.uid.isdigit():
            raise pyp.ParseException(instring, loc, self.errmsg, self)
        uid_number = int(match.uid)
        # +1 presumably accounts for a one-character sigil before the
        # digits -- TODO confirm against self.pattern.
        return loc + len(match.uid) + 1, db.get(uid_number)
    except pyp.ParseException:
        # Nope! Raise ours instead.
        raise pyp.ParseException(instring, loc, self.errmsg, self)
    except KeyError:
        raise NoSuchUidError("#{}".format(match.uid), loc, self.errmsg, self)
def parseImpl(self, instring, loc, doActions=True):
    """Try to match a datetime starting at ``loc``.

    First attempts the whole remainder of the string; failing that, tries
    successively longer prefixes, each ending at the next delimiter.
    """
    remainder = instring[loc:]
    if self._match_datetime(remainder, self.format):
        return len(instring), remainder
    # Never scan past more delimiters than the format (or the cap) allows.
    delim_budget = max(self.format.count(self.delim), self.max_delim)
    delim_pos = loc - 1
    for _ in range(delim_budget):
        delim_pos = instring.find(self.delim, delim_pos + 1)
        if delim_pos < 0:
            raise pp.ParseException(instring, loc, self.errmsg, self)
        candidate = instring[loc:delim_pos]
        if self._match_datetime(candidate, self.format):
            return delim_pos, candidate
    raise pp.ParseException(instring, loc, self.errmsg, self)
def load_graph(*, filename, graph=None):
    """Parse a graph description and build a Graph object from it.

    ``filename`` is required; when ``graph`` text is not supplied it is
    read from that file. Parse errors are re-raised with the filename
    appended to the message.
    """
    assert filename
    if not graph:
        with open(filename) as fh:
            graph = fh.read()
    try:
        parsed = parse_pp(graph)
    except pyparsing.ParseException as e:
        raise pyparsing.ParseException(
            pstr=e.pstr,
            loc=e.loc,
            msg='{} in file {}'.format(e.msg, filename),
            elem=e.parserElement) from None
    top_level = sort_graph(parsed)
    for custom in top_level['customcomponent']:
        custom.graph = sort_graph(custom.contents)
        # NOTE: registering here severely cripples the import mechanism, by
        # making every component loaded essentially global.
        ConnectableRegistry.register(custom)
    return Graph(**top_level)
def makeDictAction(s, l, t):
    """Parse action: build constructor kwargs from ``args`` and the parse
    results ``t``, then instantiate ``cls`` with them.

    ``args`` maps constructor-keyword -> extraction spec:
      * a 2-tuple ``(start, stop)`` slices the token list,
      * a str/unicode/int indexes into the parse results,
      * anything else is passed through as a literal value.

    NOTE(review): relies on closure variables ``args``, ``cls`` and
    ``self``; Python 2 only (``unicode``, ``e.message``).
    """
    targs = {}
    for (k, v) in args.items():
        if isinstance(v, tuple) and len(v) == 2:
            # Slice spec: prefer the plain-list form of the results.
            try:
                targs[k] = t.asList()[v[0]:v[1]]
            except:
                targs[k] = t[v[0]:v[1]]
        elif not isinstance(v, (str, unicode, int)):
            # Literal value: passed through unchanged.
            targs[k] = v
        else:
            # Index/name lookup into the parse results.
            try:
                targs[k] = t[v].asList()
            except:
                try:
                    targs[k] = t[v]
                except:
                    # Positional (int) keys must resolve; named keys may be
                    # absent and are silently skipped.
                    if isinstance(k, int):
                        raise
    try:
        i = cls(**targs)
        # need to reset last error on first positive match
        self._lastParseError = None
        return i
    except types.TypeRecoverableInstanceCreationError as e:
        self._lastParseError = e.message
        raise pp.ParseException(s, l, self._lastParseError)
    except types.TypeInstanceCreationError as e:
        raise pp.ParseFatalException(s, l, e.message)
def CheckRange(unused_string, unused_location, tokens):
    """Validate the parsed number against the closed-over bounds."""
    try:
        value = tokens[0]
    except IndexError:
        # No token parsed: substitute a sentinel below any sane bound.
        value = -1
    if value < lower_bound:
        raise pyparsing.ParseException(
            u'Value: {0:d} precedes lower bound: {1:d}'.format(
                value, lower_bound))
    if value > upper_bound:
        raise pyparsing.ParseException(
            u'Value: {0:d} exceeds upper bound: {1:d}'.format(
                value, upper_bound))
def ProcessMaxMinSize(tokens):
    """Extract ``(maxSize, minSize)`` from an array parameter's tokens.

    IN arrays may carry both a minSize and a maxSize, whereas OUT arrays
    may only carry a single size (which the grammar parses into
    ``tokens.maxSize``); a second size on an OUT array is a parse error.
    Sizes given as names of DEFINEd values are resolved to their values.
    """
    maxSize = None
    minSize = None
    if tokens.direction == codeTypes.DIR_IN:
        maxSize = tokens.maxSize
        if tokens.minSize != TokenNotSet:
            minSize = tokens.minSize
    else:
        # Must be DIR_OUT: the single parsed size lands in tokens.maxSize.
        minSize = tokens.maxSize
        if tokens.minSize != TokenNotSet:
            # Two sizes on an OUT array is an error.
            # todo: Use empty string for required parameter, but may need to
            # revisit this when better error reporting is added.
            raise pyparsing.ParseException('')
    # Either size could be a previously DEFINEd value, in which case the
    # value is a string naming it; evaluate to get the actual value.
    if isinstance(maxSize, basestring):
        maxSize = EvaluateDefinition(maxSize)
    if isinstance(minSize, basestring):
        minSize = EvaluateDefinition(minSize)
    return maxSize, minSize
def percentage(columns, maximum=100, name=None):
    """
    Builds the grammar for a Numeric (N) field storing a percentage that
    accepts exactly the specified number of characters, the first three of
    which hold the integer part.

    ``maximum`` defaults to 100 (for 100%); if modified it is expected to
    be reduced, not increased. ``columns`` below 3 is rejected.

    :param columns: number of columns for this field
    :param maximum: maximum allowed value
    :param name: name for the field
    :return: grammar for the float numeric field
    """
    if columns < 3:
        message = 'The values can not be lower than 3'
        raise pp.ParseException(message)
    if name is None:
        name = 'Percentage Field'
    field = basic.numeric_float(columns, 3)
    field.addParseAction(lambda v: _assert_is_percentage(v[0], maximum))
    field.setName(name)
    return field
def execute(self, context):
    """Define the function, then run the call; any failure during the call
    is wrapped in a ParseException."""
    self.fundef.execute(context)
    try:
        return self.call.execute(context)
    except Exception as exc:
        raise pp.ParseException(exc)
def convert_to_date(s, loc, tokens):
    """Parse action: turn year/month/day tokens into an ISO date string,
    reporting failures as a ParseException at ``loc``."""
    try:
        parsed = datetime(
            int(tokens.year), int(tokens.month), int(tokens.day)).date()
        return str(parsed)
    except Exception as ex:
        detail = "Error in convert_to_date " + str(ex)
        raise pp.ParseException(s, loc, detail)
def execute(self, context):
    """Left-fold the operator/term pairs over the first term's value."""
    accumulator = self.terms[0].execute(context)
    for operator, term in zip(self.ops, self.terms[1:]):
        try:
            accumulator = ops_table[operator](accumulator, term.execute(context))
        except TypeError as err:
            raise pp.ParseException(err, self.loc)
    return accumulator
def execute(self, context):
    """Left-fold the operator/factor pairs over the first factor's value."""
    accumulator = self.factors[0].execute(context)
    for operator, factor in zip(self.ops, self.factors[1:]):
        try:
            accumulator = ops_table[operator](accumulator, factor.execute(context))
        except TypeError as err:
            raise pp.ParseException(err)
    return accumulator
def execute(self, context):
    """Look up this variable's value, applying an optional array index."""
    value = context.values[self.value]
    if not self.array_access:
        return value
    try:
        index = int(self.array_access.execute(context))
        return value[index]
    except (TypeError, IndexError) as err:
        raise pp.ParseException(err)
def checkUnindent(s, l, t):  # pylint: disable=invalid-name
    """Parse action: succeed (and pop a level) only at a real unindent."""
    del t
    if l >= len(s):
        return
    cur_col = pp.col(l, s)
    # A valid unindent lands strictly left of the current level and at or
    # left of the enclosing level (short-circuit order preserved).
    if not (_indentation_stack
            and cur_col < _indentation_stack[-1]
            and cur_col <= _indentation_stack[-2]):
        raise pp.ParseException(s, l, 'not an unindent')
    _indentation_stack.pop()
def EvaluateDefinition(expr):
    """Evaluate a DEFINE expression via codeTypes, converting evaluation
    failures into pyparsing.ParseException with a descriptive message.

    :param expr: the expression text to evaluate
    :return: the evaluated value
    :raises pyparsing.ParseException: if the expression cannot be evaluated
    """
    try:
        result = codeTypes.EvaluateDefinition(expr)
    except NameError as error:
        raise pyparsing.ParseException("name error in expression '%s' : %s"
                                       % (expr.strip(), error))
    except SyntaxError:
        raise pyparsing.ParseException("syntax error in expression '%s'"
                                       % expr.strip())
    except Exception:
        # Was a bare ``except:``, which would also swallow SystemExit and
        # KeyboardInterrupt; restrict the catch-all to ordinary exceptions.
        raise pyparsing.ParseException("unknown error in expression '%s'"
                                       % expr.strip())
    return result
def convertToDatetime(s, loc, tokens):
    """Parse action turning year/month/day tokens into a ``date``.

    The tokens were already int-converted by the integer expression's own
    parse action, so they feed straight into ``datetime``. Any failure is
    reported as a ParseException at ``loc``.
    """
    try:
        return datetime(tokens.year, tokens.month, tokens.day).date()
    except Exception as exc:
        detail = "'%s/%s/%s' is not a valid date, %s" % (
            tokens.year, tokens.month, tokens.day, exc)
        raise pp.ParseException(s, loc, detail)
def execute(self, player, args):
    """Open a named exit from the player's location to a destination."""
    name_words = args["name"]
    destination = args["destination"]
    connector = name_words.pop()
    name = " ".join(name_words)
    if connector != "to":
        # this is me compensating for pyparsing's failings
        raise pyp.ParseException(name, len(name) - len(connector), None, None)
    new_exit = db.Exit(name, player.location, destination)
    db.store(new_exit)
    player.send("Opened {} to {}.".format(new_exit, destination))
def CheckRange(unused_string, unused_location, tokens):
    """Validate the parsed number against the closed-over bounds.

    Args:
      unused_string (str): original string (unused).
      unused_location (int): location within the string (unused).
      tokens (list[str]): tokens.
    """
    try:
        value = tokens[0]
    except IndexError:
        # No token parsed: substitute a sentinel below any sane bound.
        value = -1
    if value < lower_bound:
        raise pyparsing.ParseException(
            'Value: {0:d} precedes lower bound: {1:d}'.format(
                value, lower_bound))
    if value > upper_bound:
        raise pyparsing.ParseException(
            'Value: {0:d} exceeds upper bound: {1:d}'.format(
                value, upper_bound))
def _assert_is_percentage(value, maximum=100): """ Makes sure the received value is a percentage. Otherwise an exception is thrown. :param value: the value to check """ if value < 0 or value > maximum: message = 'The value on a percentage field should be between 0 and %s' \ % maximum raise pp.ParseException(message)
def guild_factor_action(toks):
    """Parse action resolving a factor to a list with one set of members."""
    if len(toks) == 1:
        # A bare role name: resolve it and return its member set.
        role = role_converter_from_name(guild, toks[0])
        if role is None:
            raise pp.ParseException(
                "Did not find a role with that name")
        return [set(role.members)]
    if toks[0] == self.COMPLEMENT_TOKEN:
        # Complement: everyone in the guild except the inner set.
        return [set(guild.members).difference(toks[1])]
    # Otherwise this is an expression in parentheses; pass it through.
    return [toks[1]]
def parseImpl(self, instring, loc, doActions=True):
    """Match ``self.re`` at ``loc``, requiring at least one non-empty group.

    Named groups are copied into the ParseResults under their names.
    """
    match = self.re.match(instring, loc)
    # No match, no groups, or only empty groups all count as failure
    # (joining zero groups yields "", so one test covers all three).
    if not match or not "".join(match.groups()):
        raise pyparsing.ParseException(instring, loc, self.errmsg, self)
    new_loc = match.end()
    results = ParseResults(match.group())
    for key, value in match.groupdict().items():
        results[key] = value
    return new_loc, results
def CallGraphFromDotSource(dot_source: str) -> nx.MultiDiGraph:
    """Create a call graph from an LLVM-generated dot file.

    Args:
      dot_source: The dot source generated by the LLVM -dot-callgraph pass.

    Returns:
      A directed multigraph, where each node is a function (or the special
      "external node"), and edges indicate calls between functions.

    Raises:
      pyparsing.ParseException: If dotfile could not be parsed.
      ValueError: If dotfile could not be interpretted / is malformed.
    """
    try:
        dots = pydot.graph_from_dot_data(dot_source)
    except TypeError as e:
        raise pyparsing.ParseException("Failed to parse dot source") from e
    if len(dots) != 1:
        raise ValueError(f"Expected 1 Dot in source, found {len(dots)}")
    graph = nx.drawing.nx_pydot.from_pydot(dots[0])
    # pydot assigns fairly arbitrary node names; relabel each node by its
    # label, which (except for the magic "external node") is a function name.
    relabeling = {}
    unlabelled: List[str] = []
    for node, data in graph.nodes(data=True):
        if "label" not in data:
            unlabelled.append(node)
            continue
        label = data["label"]
        if label and not (label.startswith('"{') and label.endswith('}"')):
            raise ValueError(f"Invalid label: `{label}`")
        # Strip the '"{' ... '}"' wrapper around the function name.
        relabeling[node] = label[2:-2]
        # Remove unneeded data attributes.
        labtypes.DeleteKeys(data, {"shape", "label"})
    # Remove unlabelled nodes.
    for node in unlabelled:
        graph.remove_node(node)
    nx.relabel_nodes(graph, relabeling, copy=False)
    return graph
def test_sqlite_construct(self, mock_graph):
    """A CONSTRUCT query returns the mocked result graph; a parse error
    from the underlying query is wrapped in errors.SparqlError."""
    store = TripleStore.connect("SQLITE", "", "")
    sq = """CONSTRUCT ?s ?p ?o WHERE {?o ?p ?s . }"""
    g = Graph()
    g.add((URIRef("http://example.org/doc1"), RDFS.comment, Literal("Hey")))
    g.add((URIRef("http://example.org/doc2"), RDFS.comment, Literal("Ho")))
    # BUG FIX: use a Mock *instance*, not the Mock class itself. Assigning
    # ``res.graph`` on the class would set a class attribute that leaks
    # into every other Mock created during the test run.
    res = Mock()
    res.graph = g
    mock_graph.return_value.query.return_value = res
    self.assertEqual(g, store.construct(sq))
    mock_graph.return_value.query.side_effect = pyparsing.ParseException(
        "Syntax error")
    with self.assertRaises(errors.SparqlError):
        store.construct(sq)
def _check_not_empty(string): """ Checks that the string is not empty. If it is empty an exception is raised, stopping the validation. This is used for compulsory alphanumeric fields. :param string: the field value """ string = string.strip() if len(string) == 0: message = 'The string should not be empty' raise pp.ParseException(message)
def _check_above_value_float(string, minimum): """ Checks that the number parsed from the string is above a minimum. This is used on compulsory numeric fields. If the value is not above the minimum an exception is thrown. :param string: the field value :param minimum: minimum value """ value = float(string) if value < minimum: message = 'The Numeric Field value should be above %s' % minimum raise pp.ParseException(message)
def associate(s, l, t):
    """Fold a flat [operand, op, operand, ...] token list into one value.

    Uses the closed-over ``assoc`` ('none'/'left'/'right'), ``name`` and
    ``dobinop``; 'none' associativity demands exactly one operator
    (i.e. exactly three tokens).
    """
    items = list(t)
    line = pp.lineno(l, s)
    while len(items) > 1:
        if assoc == 'none' and len(items) != 3:
            raise pp.ParseException(s, l, name + ' is not associative')
        if assoc in ('none', 'left'):
            # Reduce the leftmost triple first.
            items[0:3] = [dobinop(line, *items[0:3])]
        elif assoc == 'right':
            # Reduce the rightmost triple first.
            items[-4:-1] = [dobinop(line, *items[-4:-1])]
        else:
            assert not "unhandled associativity"
    return items[0]
def eval(self, vars, state):
    """Roll this dice expression and return a ``(result, desc)`` pair.

    Mutates ``vars`` (per-roll counters) and ``state`` (current roll id,
    per-roll results and, when ``state['desc']`` is set, descriptions).
    NOTE(review): Python 2 only (``iteritems``/``itervalues``).
    """
    # Sanity-check the mods, convert nodes to values.
    for mod in self.mods:
        if mod[0] == 'e':
            # Explode threshold defaults to the die's maximum face.
            explode_min = (int(mod[1]) if len(mod) > 1 else self.max_face(
                vars, state))
            if explode_min <= self.min_face(vars, state):
                # Every face would trigger a re-roll: infinite loop.
                raise pyparsing.ParseException("All rolls explode!")
    name = str(self)
    key = "%s count" % name
    count = vars.get(key, 0) + 1
    vars[key] = count
    # Unique id for this roll instance, e.g. "2d6#1".
    id = "%s#%d" % (name, count)
    state['current roll'] = id
    rolls = self._eval(vars, state)
    if 'rolls' not in state:
        state['rolls'] = {}
    state['rolls'][id] = rolls
    if state.get('desc', False):
        if 'rolldescs' not in state:
            state['rolldescs'] = {}
        state['rolldescs'][id] = OrderedDict(
            (i, str(r)) for i, r in rolls.iteritems())
    if not state.get('maxroll', False) and not state.get('minroll', False):
        # Normal roll: apply modifiers (evaluating EvalNode args first).
        for mod in self.mods:
            args = [
                a.eval(vars, state)[0] if isinstance(a, EvalNode) else a
                for a in mod[1:]
            ]
            self.mod_funcs[mod[0]](self, rolls, vars, state, *args)
        result = Sequence(rolls.itervalues())
    else:
        # Max/min-roll mode: skip modifiers and just total the rolls.
        result = sum(rolls)
    if state.get('desc', False):
        if len(state['rolldescs'][id]) > 1:
            rollsdesc = '+'.join(state['rolldescs'][id].values()) + '='
        else:
            rollsdesc = ''
        rollsdesc += str(sum(result))
        # Show runtime sides value.
        sides = (self.sides.eval(vars, state)[1] if isinstance(
            self.sides, EvalNode) else str(self.sides))
        die = "%sd%s%s" % (self.num_dice, sides, self.mod_desc())
        desc = "%s[%s]" % (die, rollsdesc)
    else:
        desc = ''
    return result, desc
def ConvertInterfaceType(interfaceType):
    """Resolve an interface type name to its defined value.

    Types defined in an imported file are registered with the file name as
    a prefix, so the bare name is tried first and the prefixed name second.
    TODO: there could be some issue with trying both with and without the
    prefix; more testing and thought is needed.
    """
    entry = DefinedInterfaceTypes.get(interfaceType)
    if entry is None and ImportName:
        entry = DefinedInterfaceTypes.get(
            "%s.%s" % (ImportName, interfaceType))
    value = entry['value'] if entry is not None else None
    if value is None:
        raise pyparsing.ParseException("unknown type '%s'" % interfaceType)
    return value
def _to_boolean(string): """ Transforms a string into a boolean value. If a value which is not 'Y' or 'N' is received, a ParseException is thrown. :param: string: the string to transform :return: True if the string is 'Y', False if it is 'N' """ if string == 'Y': result = True elif string == 'N': result = False else: raise pp.ParseException(string, msg='Is not a valid boolean value') return result
def XXXX_create_cast_expression(self, tok):
    """Build a CFunctionCall AST node representing a C cast.

    The cast type comes either from ``typeof(...)`` or from a simple type
    expression; it must resolve to a known type via the type manager.
    """
    if tok.typeof_arg:
        type_expression = self.type_manager.get_type_of(
            tok.typeof_arg.first)
    else:
        type_expression = tok.simple_type
    # Check that casting makes sense.
    target = self.type_manager.get_type_of(type_expression)
    if not target:
        # BUG FIX: report the expression that failed to resolve. ``target``
        # is always falsy here, so the old message printed e.g.
        # "None is not a type" instead of the offending type name.
        raise pyparsing.ParseException("%s is not a type" % type_expression)
    return c_ast.CFunctionCall(
        function_name='()',
        arguments=[
            c_ast.CLiteral(target),
            tok.expression,
        ],
    )
def test_sqlite_select(self, mock_graph):
    """SELECT: 'python' format returns the raw bindings, 'sparql'/'json'
    delegate to query().serialize(), and a parse error raises SparqlError."""
    store = TripleStore.connect("SQLITE", "", "")
    sq = """SELECT ?p FROM <http://example.org/ctx> WHERE {?s ?p ?o . }"""
    res = mock_graph.return_value.get_context.return_value.query.return_value
    want = [{"s": "http://example.org/doc1",
             "p": "http://www.w3.org/2000/01/rdf-schema#comment",
             "o": "Hello"}]
    res.bindings = want
    self.assertEqual(want, store.select(sq, format="python"))
    mock_graph.reset_mock()
    # Each serializing format should be forwarded to query().serialize();
    # ``res`` is the same child mock as the original chained expression.
    store.select(sq, "sparql")
    res.serialize.assert_called_with(format="xml")
    store.select(sq, "json")
    res.serialize.assert_called_with(format="json")
    mock_graph.return_value.get_context.return_value.query.side_effect = \
        pyparsing.ParseException("Syntax error")
    with self.assertRaises(errors.SparqlError):
        store.select(sq)