def root_rule(proto):
    """Parse the given .proto file and return the root of its AST.

    Runs the ANTLR3 pipeline (file stream -> lexer -> token stream ->
    parser) and invokes the top-level ``proto`` grammar rule.
    """
    char_stream = ANTLRFileStream(proto)
    token_stream = CommonTokenStream(proto_lexer.proto_lexer(char_stream))
    proto_rule = proto_parser.proto_parser(token_stream).proto()
    return proto_rule.tree
def parse_file(filename):
    """Parse the file at *filename* with the JEDEC grammar.

    Args:
        filename: path of the file to parse.

    Returns:
        Whatever ``jedParser.eval()`` produces for the file's contents.
    """
    f = open(filename)
    try:
        char_stream = ANTLRInputStream(f)
        lexer = jedLexer(char_stream)
        tokens = CommonTokenStream(lexer)
        parser = jedParser(tokens)
        # parser.eval() may raise; the finally clause guarantees the file
        # handle is released either way (the original closed it only on
        # the success path, leaking it on error).
        return parser.eval()
    finally:
        f.close()
def parse_code(cls):
    """Parse the source code held in ``cls.filestream``.

    Builds the Latte lexer/parser pipeline, stores each stage on the
    class (``lexer``, ``tokens``, ``parser``, ``parsed_prog``) and
    registers the token stream with the global ``Status`` error
    collector. Aborts fatally if any parse errors were recorded.
    """
    cls.lexer = LatteLexer(cls.filestream)
    cls.tokens = CommonTokenStream(cls.lexer)
    # Status needs the token stream to attach positions to error reports.
    Status.set_token_stream(cls.tokens)
    cls.parser = LatteParser(cls.tokens)
    cls.parsed_prog = cls.parser.prog()
    # Syntax errors were accumulated in Status while parsing ran.
    if Status.errors() > 0:
        Status.add_error(LatteError('parsing failed'), fatal=True)
    debug('----------------- AST -------------------------')
    debug('Tree: ', cls.parsed_prog.tree.toStringTree())
def load_file_and_parse_module(filepath): try: char_stream = ANTLRFileStream(filepath) lexer = ScribbleLexer(char_stream) tokens = CommonTokenStream(lexer) parser = ScribbleParser(tokens) tree = parser.module().tree checker = CommonErrorNodeChecker() checker._check_for_errornode(tree) return tree except IOError as e: print e sys.exit(1) except RecognitionException as e: print e sys.exit(1)
def norm(formula):
    '''Computes the bounds of the given TWTL formula and returns a 2-tuple
    containing the lower and upper bounds, respectively.
    '''
    token_stream = CommonTokenStream(twtlLexer(ANTLRStringStream(formula)))
    syntax_tree = twtlParser(token_stream).formula().tree  # CommonTree
    # Walk the AST with the bound tree grammar to evaluate the TWTL bound.
    node_stream = CommonTreeNodeStream(syntax_tree)
    node_stream.setTokenStream(token_stream)
    evaluator = bound(node_stream)
    evaluator.eval()
    return evaluator.getBound()
def _run_twtl2dfa(tree, tokens, alphabet, dfa_kind):
    '''Walks the TWTL syntax tree with the twtl2dfa tree grammar and
    returns the resulting automaton, tagged with the given DFA kind.
    '''
    nodes = CommonTreeNodeStream(tree)
    nodes.setTokenStream(tokens)
    translator = twtl2dfa(nodes)
    translator.props = alphabet
    translator.eval()
    dfa = translator.getDFA()
    dfa.kind = dfa_kind
    return dfa


def _compute_bound(tree, tokens):
    '''Walks the TWTL syntax tree with the bound evaluator and returns
    the (lower, upper) bound 2-tuple of the formula.
    '''
    nodes = CommonTreeNodeStream(tree)
    nodes.setTokenStream(tokens)
    boundEvaluator = bound(nodes)
    boundEvaluator.eval()
    return boundEvaluator.getBound()


def translate(formula, kind='both', norm=False, optimize=True):
    '''Converts a TWTL formula into an FSA. It can returns both a normal FSA or
    the automaton corresponding to the relaxed infinity version of the
    specification.
    If kind is: (a) DFAType.Normal it returns only the normal version;
    (b) DFAType.Infinity it returns only the relaxed version; and
    (c) 'both' it returns both automata versions.
    If norm is True then the bounds of the TWTL formula are computed as well.
    The functions returns a tuple containing in order: (a) the alphabet;
    (b) the normal automaton (if requested); (c) the infinity version automaton
    (if requested); and (d) the bounds of the TWTL formula (if requested).
    The ``optimize'' flag is used to specify that the annotation data should be
    optimized. Note that the synthesis algorithm assumes an optimized automaton,
    while computing temporal relaxations is performed using an unoptimized
    automaton.
    '''
    if kind == 'both':
        kind = [DFAType.Normal, DFAType.Infinity]
    elif kind in [DFAType.Normal, DFAType.Infinity]:
        kind = [kind]
    else:
        raise ValueError('DFA type must be either DFAType.Normal, '
                         + 'DFAType.Infinity or "both"! {} was given!'.format(kind))
    # Lex/parse the formula; the lexer collects the set of atomic
    # propositions (the alphabet) as a side effect of lexing.
    lexer = twtlLexer(ANTLRStringStream(formula))
    lexer.setAlphabet(set())
    tokens = CommonTokenStream(lexer)
    parser = twtlParser(tokens)
    phi = parser.formula()
    t = phi.tree  # CommonTree
    alphabet = lexer.getAlphabet()
    result = [alphabet]
    if DFAType.Normal in kind:
        setDFAType(DFAType.Normal)
        dfa = _run_twtl2dfa(t, tokens, alphabet, DFAType.Normal)
        result.append(dfa)
    if DFAType.Infinity in kind:
        setDFAType(DFAType.Infinity)
        setOptimizationFlag(optimize)
        dfa_inf = _run_twtl2dfa(t, tokens, alphabet, DFAType.Infinity)
        result.append(dfa_inf)
    if norm:
        # compute TWTL bound
        result.append(_compute_bound(t, tokens))
    if logging.getLogger().isEnabledFor(logging.DEBUG):
        for mode, name in [(DFAType.Normal, 'Normal'),
                           (DFAType.Infinity, 'Infinity')]:
            if mode not in kind:
                continue
            pdfa = dfa if mode == DFAType.Normal else dfa_inf
            logging.debug('[spec] spec: {}'.format(formula))
            logging.debug('[spec] mode: {} DFA: {}'.format(name, pdfa))
            if mode == DFAType.Infinity:
                # only the relaxed automaton carries an annotation tree
                logging.debug('[spec] tree:\n{}'.format(pdfa.tree.pprint()))
            logging.debug('[spec] No of nodes: {}'.format(pdfa.g.number_of_nodes()))
            logging.debug('[spec] No of edges: {}'.format(pdfa.g.number_of_edges()))
    return tuple(result)
def learn_policy(self, identifier):
    ''' Learn a policy file.

    Args:
        identifier: a string, either a path to a policy file or the text of
            the policy itself. Keep in mind a policy can be comprised of
            more than one policy file (a file containing valid policy DSL)
            or string containing policy DSL. This way you break your rule
            set, imports, and policy attributes across any number of files.
            See reason-method for more.

    Returns:
        The resulting File Node.

    Raises:
        ValueError: if the policy already exists in knowledge.
        TypeError: if parameter 'identifier' is a NoneType, or is not a
            string representing either a file path to a policy or the text
            of the policy itself.
    '''
    isFile = False
    if identifier:
        if isinstance(identifier, basestring):
            if not os.path.isfile(identifier):
                '''
                Try treating 'identifier' as a String containing the
                text of a policy.
                '''
                stream = ANTLRStringStream(identifier)
                lexer = PolicyLexer(stream)
                tokens = CommonTokenStream(lexer)
                tokens.discardOffChannelTokens = True
                # PolicyTokenSource re-emits the stream with the
                # indentation tokens the parser needs.
                indentedSource = PolicyTokenSource(tokens)
                tokens = CommonTokenStream(indentedSource)
                parser = PolicyParser(tokens)
                with RedirectStdError() as stderr:
                    try:
                        # ANTL3 may raise an exception, and doing so the stderror
                        # will not be printed hiding the underlying problem. GRRR!!!!
                        file_node = parser.file()
                    except Exception as e:
                        if stderr.getvalue().rstrip() != "":
                            # surface the captured stderr as the error,
                            # preserving the original traceback
                            trace = sys.exc_info()[2]
                            raise Exception(stderr.getvalue().rstrip()), None, trace
                        else:
                            raise e
                    # Some times the previous parser.file() will print to stderr,
                    # but not throw an exception. In this case, the parser may
                    # attempt to correct and continue onward, but we should
                    # print the msg to stderr for the benefit of the policy
                    # author
                    if stderr.getvalue().rstrip() != "":
                        print >> sys.stderr, stderr.getvalue().rstrip()
            else:
                '''
                Try treating 'identifier' as a file path
                '''
                if Intellect.filepath_regex.match(identifier):
                    if os.path.exists(identifier):
                        self.log("Learning policy from file path: {0}".format(identifier))
                        stream = FileStream(identifier)
                        isFile = True
                    else:
                        raise IOError, "Policy not found: {0}".format(identifier)
                else:
                    '''
                    assume the intention was to pass 'identifier' as a
                    String containing the text of a policy, and raise
                    the exception.
                    '''
                    # NOTE(review): 'e' is unbound on this path (no
                    # enclosing except clause) — reaching here raises
                    # NameError, not the intended parse error. Confirm.
                    raise e
                lexer = PolicyLexer(stream)
                tokens = CommonTokenStream(lexer)
                tokens.discardOffChannelTokens = True
                indentedSource = PolicyTokenSource(tokens)
                tokens = CommonTokenStream(indentedSource)
                parser = PolicyParser(tokens)
                with RedirectStdError() as stderr:
                    try:
                        # ANTL3 may raise an exception, and doing so the stderror
                        # will not be printed hiding the underlying problem. GRRR!!!!
                        file_node = parser.file()
                    except Exception as e:
                        if stderr.getvalue().rstrip() != "":
                            trace = sys.exc_info()[2]
                            raise Exception(stderr.getvalue().rstrip()), None, trace
                        else:
                            raise e
                    # Some times the previous parser.file() will print to stderr,
                    # but not throw an exception. In this case, the parser may
                    # attempt to correct and continue onward, but we should
                    # print the msg to stderr for the benefit of the policy
                    # author
                    if stderr.getvalue().rstrip() != "":
                        print >> sys.stderr, stderr.getvalue().rstrip()
            # set path attribute
            file_node.path = identifier if isFile else None
            # associate the path to all descendants
            file_node.set_file_on_descendants(file_node, file_node)
            try:
                # determine if the policy already exists in knowledge
                self.policy.files.index(file_node)
                # NOTE(review): this ValueError is swallowed by the bare
                # except below, so duplicate-policy detection never
                # actually fires — confirm intent.
                raise ValueError, "Policy already exists in knowledge: {0}".format(identifier)
            except:
                pass
            # store add the policy file to the policy
            self.policy.append_child(file_node)
            self.log("learned a policy file")
            return file_node
        else:
            raise TypeError, "parameter 'identifier' must be a string, either a file path to a policy or the text of the policy itself"
    else:
        raise TypeError, "parameter 'identifier' cannot be a NoneType."
def learn_policy(self, identifier):
    ''' Learn a policy file.

    Args:
        identifier: a string, either a URL to a policy file or the text of
            the policy itself. Keep in mind a policy can be comprised of
            more than one policy file (a file containing valid policy DSL)
            or string containing policy DSL. This way you break your rule
            set, imports, and policy attributes across any number of files.
            See reason-method for more.

    Returns:
        The resulting File Node.

    Raises:
        ValueError: if the policy already exists in knowledge.
        TypeError: if parameter 'identifier' is a NoneType, or is not a
            string representing either a file path to a policy or the text
            of the policy itself.
    '''
    is_file = False
    if identifier:
        if isinstance(identifier, basestring):
            if urlparse(identifier).scheme:
                # Treat 'identifier' as an URL
                self.log("Learning policy from URL: {0}".format(identifier))
                stream = ANTLRStringStream(Intellect.policy_from(identifier))
                is_file = True
            else:
                # Treat 'identifier' as policy string
                self.log("Learning policy from string")
                stream = ANTLRStringStream(identifier)
            lexer = PolicyLexer(stream)
            tokens = CommonTokenStream(lexer)
            tokens.discardOffChannelTokens = True
            # PolicyTokenSource re-emits the stream with the indentation
            # tokens the parser needs.
            indented_source = PolicyTokenSource(tokens)
            tokens = CommonTokenStream(indented_source)
            parser = PolicyParser(tokens)
            with RedirectStdError() as stderr:
                try:
                    # ANTL3 may raise an exception, and doing so the stderror
                    # will not be printed hiding the underlying problem. GRRR!!!!
                    file_node = parser.file()
                except Exception as e:
                    if stderr.getvalue().rstrip() != "":
                        # surface the captured stderr as the error,
                        # preserving the original traceback
                        trace = sys.exc_info()[2]
                        raise Exception(stderr.getvalue().rstrip()), None, trace
                    else:
                        raise e
                # The ANTLR3 Recognizer class prints a number of ANTLR3 Exceptions to
                # stderr vice throwing an exception, because it will try to recover and
                # continue parsing.
                #
                # In the case of NoViableAltException, I've chosen to raise an
                # exception.
                #
                # Otherwise, all the other error message that Recognizer writes to
                # stderr will be returned for the benefit of the policy author.
                if stderr.getvalue().rstrip() != "":
                    # check for stderror msg indicating an NoViableAltException occured.
                    # if did raise an exception with the stderror message.
                    if "no viable alternative at input" in stderr.getvalue().rstrip():
                        raise Exception("Error parsing policy: {0}\n{1}".format(identifier, stderr.getvalue().rstrip()))
                    else:
                        print >> sys.stderr, stderr.getvalue().rstrip()
            # set path attribute
            file_node.path = identifier if is_file else None
            # associate the path to all descendants
            file_node.set_file_on_descendants(file_node, file_node)
            try:
                # determine if the policy already exists in knowledge
                self.policy.files.index(file_node)
                # NOTE(review): this ValueError is swallowed by the bare
                # except below, so duplicate-policy detection never
                # actually fires — confirm intent.
                raise ValueError("Policy already exists in knowledge: {0}".format(identifier))
            except:
                pass
            # store add the policy file to the policy
            self.policy.append_child(file_node)
            self.log("learned a policy file")
            return file_node
        else:
            raise TypeError("parameter 'identifier' must be a string, either a file path to a policy or the text of the policy itself")
    else:
        raise TypeError("parameter 'identifier' cannot be a NoneType.")
def learn_policy(self, identifier):
    ''' Learn a policy file.

    Args:
        identifier: a string, either a path to a policy file or the text of
            the policy itself. Keep in mind a policy can be comprised of
            more than one policy file (a file containing valid policy DSL)
            or string containing policy DSL. This way you break your rule
            set, imports, and policy attributes across any number of files.
            See reason-method for more.

    Returns:
        The resulting File Node.

    Raises:
        ValueError: if the policy already exists in knowledge.
        TypeError: if parameter 'identifier' is a NoneType, or is not a
            string representing either a file path to a policy or the text
            of the policy itself.
    '''
    if identifier:
        if isinstance(identifier, basestring):
            isFile = False
            try:
                '''
                Try treating 'identifier' as a String containing the
                text of a policy.
                '''
                stream = ANTLRStringStream(identifier)
                lexer = PolicyLexer(stream)
                tokens = CommonTokenStream(lexer)
                tokens.discardOffChannelTokens = True
                # PolicyTokenSource re-emits the stream with the
                # indentation tokens the parser needs.
                indentedSource = PolicyTokenSource(tokens)
                tokens = CommonTokenStream(indentedSource)
                parser = PolicyParser(tokens)
                with IO.capture_stderr() as stderr:
                    file_node = parser.file()
                    # anything the parser wrote to stderr is treated as
                    # a parse failure
                    if stderr.getvalue() != "":
                        raise Exception, "Error in String-based policy: {0}".format(stderr.getvalue())
            except Exception as e:
                # NOTE(review): any failure above — not just "identifier
                # wasn't policy text" — falls through to the file-path
                # attempt below.
                '''
                Try treating 'identifier' as a file path
                '''
                if Intellect.filepath_regex.match(identifier):
                    if os.path.exists(identifier):
                        self.log("Learning policy from file path: {0}".format(identifier))
                        stream = FileStream(identifier)
                        isFile = True
                    else:
                        raise IOError, "Policy not found: {0}".format(identifier)
                else:
                    '''
                    assume the intention was to pass 'identifier' as a
                    String containing the text of a policy, and raise
                    the exception.
                    '''
                    raise e
                lexer = PolicyLexer(stream)
                tokens = CommonTokenStream(lexer)
                tokens.discardOffChannelTokens = True
                indentedSource = PolicyTokenSource(tokens)
                tokens = CommonTokenStream(indentedSource)
                parser = PolicyParser(tokens)
                with IO.capture_stderr() as stderr:
                    file_node = parser.file()
                    if stderr.getvalue() != "":
                        raise Exception, "Error in file-based policy for path {0}: {1}".format(identifier, stderr.getvalue())
            # set path attribute
            file_node.path = identifier if isFile else None
            # associate the path to all descendants
            file_node.set_file_on_descendants(file_node, file_node)
            try:
                # determine if the policy already exists in knowledge
                self.policy.files.index(file_node)
                # NOTE(review): this ValueError is swallowed by the bare
                # except below, so duplicate-policy detection never
                # actually fires — confirm intent.
                raise ValueError, "Policy already exists in knowledge: {0}".format(identifier)
            except:
                pass
            # store add the policy file to the policy
            self.policy.append_child(file_node)
            return file_node
        else:
            raise TypeError, "parameter 'identifier' must be a string, either a file path to a policy or the text of the policy itself"
    else:
        raise TypeError, "parameter 'identifier' cannot be a NoneType."
from antlr3 import ANTLRInputStream, CommonTokenStream, RecognitionException try: print sys.argv[1] f = open(sys.argv[1]) except IOError as e: print "I/O error({0}): {1}".format(e.errno, e.strerror) sys.exit() except: print "Unexpected error:", sys.exc_info()[0] sys.exit() try: char_stream = ANTLRInputStream(f) lexer = jedLexer(char_stream) tokens = CommonTokenStream(lexer) parser = jedParser(tokens) res = parser.eval() bits = res['fuses'] print "0s", bits.count(0) print "1s", bits.count(1) import csv from bitarray import bitarray mapf = open( '/media/F02472C324728BFA/Xilinx/14.7/ISE_DS/ISE/xbr/data/xc2c256.map') reader = csv.reader(mapf, delimiter='\t') mapdata = [row for row in reader] for i in range(len(mapdata[0])):