Example #1
def L_AND_S(rules, text):
    try:
        lex = Lexer(text)
        lex.run(show_states=False, show_spaces=False)
        #lex.show()

        table = Run(rules,
                    lr_letter="LR",
                    ft_letter="FR",
                    word="",
                    show_all=False)

        runner = Runner_cl(1, table, "$")

        for i in lex.list:
            if i[1] != "new_line" and i[1] != "Comment":
                runner.Run(i[1], i[2])
                #print(runner.pos_stack)

        is_end = runner.Run("$", "end_end")
        if not is_end:
            raise Exception("Промахнулись с концом", runner.current_pos,
                            runner.pos_stack)

    except Exception as e:
        print("Не подходит")
        print(e)
    else:
        print("Подходит")
Example #2
    def testParseEdgeList(self):
        # edgeList -> ID | ID '->' edgeList

        # Left off first required ID.
        p = Parser(Lexer(''))
        g = Graph()
        self.assertRaises(SyntaxError, p._parseEdgeList, g)

        # Using non-existing label should create a new vertex.
        g = Graph()
        p = Parser(Lexer('A'))
        p._parseEdgeList(g)
        self.assertEqual(g._vertices['v0'].label, 'A')

        # Using an existing label should not create a new vertex.
        g = Graph()
        g.addVertex(Vertex('u0', 'A'))
        p = Parser(Lexer('A'))
        p._parseEdgeList(g)
        self.assertEqual(g._vertices['u0'].label, 'A')

        # Left off second ID.
        p = Parser(Lexer('A ->'))
        g = Graph()
        self.assertRaises(SyntaxError, p._parseEdgeList, g)

        # Simple transition should create two vertices and connect them.
        g = Graph()
        p = Parser(Lexer('A -> B'))
        p._parseEdgeList(g)
        self.assertEqual(len(g._vertices), 2)
        self.assertEqual(g._vertices['v0'].label, 'A')
        self.assertEqual(g._vertices['v1'].label, 'B')
        self.assertEqual(g._edges['v0'][0].label, 'B')
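
The edgeList rule noted at the top of this test (edgeList -> ID | ID '->' edgeList) is right-recursive, which is why a trailing '->' with no ID fails while a single ID succeeds. The stand-alone sketch below illustrates that behaviour on a plain token list; it is only an illustration and does not use the real Parser/Graph API exercised above.

def parse_edge_list(tokens, pos=0):
    """edgeList -> ID | ID '->' edgeList; returns (list of IDs, next position)."""
    if pos >= len(tokens) or tokens[pos] == '->':
        raise SyntaxError('expected ID')           # e.g. the "Left off first required ID" case
    ids = [tokens[pos]]
    pos += 1
    if pos < len(tokens) and tokens[pos] == '->':  # an optional '->' continues the chain
        rest, pos = parse_edge_list(tokens, pos + 1)
        ids += rest
    return ids, pos

# parse_edge_list(['A', '->', 'B'])  ->  (['A', 'B'], 3)
# parse_edge_list(['A', '->'])       raises SyntaxError, like the "Left off second ID" case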
class Semantica:
    def __init__(self, code):
        self.lex = Lexer()
        self.tokens = self.lex.tokens
        self.parser = Parser(code)
        self.lex.insertSimbols(code, "")
        self.table = self.lex.table
    def buildIncludeStatement(self) -> None:
        self.delmov()
        # verify syntax
        self.checkToks([T_STRING, T_INCLUDER])
        path = self.current_token.value

        if path not in self.includeCache:

            # \see loadRaw
            rawdata = self.loadRaw(path)

            # create tokens from new file, and insert them in this PreProcessor's
            # tokenlist
            lex = Lexer(path, rawdata)

            tokens = lex.getTokens()

            self.includeCache[path] = tokens
            self.includeMulti[path] = False
        elif self.includeMulti[path]:
            tokens = self.includeCache[path]
        else:
            tokens = []
        self.delmov()

        # emplace the new tokens ahead of the current position
        #self.tokens[self.tkidx:self.tkidx] = tokens[:-1]
        self.insertTokens(self.tkidx, self.tkidx, tokens[:-1])
        self.update()
Example #5
 def testConsumeLineEnding(self):
     lex = Lexer("\nhello")
     lex._consume()  # consume the newline
     self.assertEqual(lex.lineNum, 2)  # line has increased
     self.assertEqual(lex.charNum, 1)  # charNum has been reset
     self.assertEqual(lex.p, 1)  # p has advanced
     self.assertEqual(lex.c, 'h')  # c is the next character
Example #6
def update_co_occurrence():
    if len(sys.argv) != 3:
        print('No Co-Occurrence input')
    else:
        tokens_desired = Lexer(sys.argv[2])
        token_stream = Lexer(sys.argv[1])
        co_occurrence(tokens_desired, token_stream)
Example #7
    def rev_input(self, text):
        self.lexer = Lexer(text)
        self.current_token = self.lexer.get_next_token()
        rev_text = ''
        while self.current_token.type is not TokenType.EOF:
            word = self.current_token.value
            # print("Word je " + str(word))
            if self.current_token.type is TokenType.STRING:
                # print('String je')
                self.eat(TokenType.STRING)
                # print("Curr token je " + str(self.current_token.value))
                if word in Library.RESERVED_METHOD_WORDS:
                    # print("Rez rec")
                    try:
                        self.eat(TokenType.LPAREN)
                        word += "(" + self.current_token.value + ")"
                        self.eat(TokenType.STRING)
                        self.eat(TokenType.RPAREN)

                    except Exception:
                        pass
                # print("Appendujem " + str(word))
                rev_text = ' ' + str(word) + rev_text
            else:
                # print("Appendujem " + str(word))
                rev_text = ' ' + str(word) + rev_text
                self.current_token = self.lexer.get_next_token()
        # print(rev_text)
        return rev_text
Example #8
    def testParseConfig(self):
        # config -> ID '=' (ID | NUMBER)

        # Missing first ID raises an error.
        l = Lexer('= 123;')
        p = Parser(l)
        self.assertRaises(SyntaxError, p._parseConfig)

        # Missing '=' raises an error.
        l = Lexer('A 123;')
        p = Parser(l)
        self.assertRaises(SyntaxError, p._parseConfig)

        # Missing right-hand side raises an error.
        l = Lexer('A = ;')
        p = Parser(l)
        self.assertRaises(SyntaxError, p._parseConfig)

        # Simple ID=ID.
        l = Lexer('A = B')
        p = Parser(l)
        p._parseConfig()
        self.assertTrue('A' in p.config)
        self.assertEqual(p.config['A'], 'B')

        # Simple ID=NUMBER.
        l = Lexer('C = 123')
        p = Parser(l)
        p._parseConfig()
        self.assertTrue('C' in p.config)
        self.assertEqual(p.config['C'], '123')
Example #9
 def __init__(self, path):
     lex = Lexer(path)
     lex.tokenize()
     self.TOKENS = lex.tokenList
     self.INDEX = 0
     tree = Tree()
     self.TREE = tree
Example #10
    def testParseConfiguration(self):
        # configuration -> 'configuration' '{' config_list '}'

        # Missing 'configuration' raises an error.
        l = Lexer('{ A = B; }')
        p = Parser(l)
        self.assertRaises(SyntaxError, p._parseConfiguration)

        # Left off '{' raises an error.
        l = Lexer('configuration A = B; }')
        p = Parser(l)
        self.assertRaises(SyntaxError, p._parseConfiguration)

        # Left off '}' raises an error.
        l = Lexer('configuration { A = B;')
        p = Parser(l)
        self.assertRaises(SyntaxError, p._parseConfiguration)

        # Left off config list is completely valid.
        l = Lexer('configuration { }')
        p = Parser(l)
        p._parseConfiguration()
        self.assertEqual(len(p.config), 0)

        # Simple valid configuration.
        l = Lexer('configuration { A = B; }')
        p = Parser(l)
        p._parseConfiguration()
        self.assertEqual(p.config['A'], 'B')
Example #11
def test_math_symbols():
    lexer = Lexer('+ - * /')

    assert lexer.next_token() == Token(TokenTypes.ADD)
    assert lexer.next_token() == Token(TokenTypes.SUB)
    assert lexer.next_token() == Token(TokenTypes.MUL)
    assert lexer.next_token() == Token(TokenTypes.DIV)
Example #12
def test_next_token():
    lexer = Lexer('1+ 3')

    assert lexer.next_token() == Token(TokenTypes.INT, 1)
    assert lexer.next_token() == Token(TokenTypes.ADD)
    assert lexer.next_token() == Token(TokenTypes.INT, 3)
    assert lexer.next_token() == Token(TokenTypes.EOF)
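
test_math_symbols, test_next_token and test_skip_whitespace (below) all assume a Token type with value-based equality and a Lexer whose next_token() walks the input string. The project's real implementation is not shown here; a minimal self-contained sketch that satisfies these assertions, offered purely as an illustration, could look like this:

from enum import Enum, auto

class TokenTypes(Enum):
    INT = auto()
    ADD = auto()
    SUB = auto()
    MUL = auto()
    DIV = auto()
    EOF = auto()

class Token:
    def __init__(self, type_, value=None):
        self.type = type_
        self.value = value

    def __eq__(self, other):
        # Tokens compare equal when both the type and the (optional) value match.
        return isinstance(other, Token) and (self.type, self.value) == (other.type, other.value)

class Lexer:
    SYMBOLS = {'+': TokenTypes.ADD, '-': TokenTypes.SUB, '*': TokenTypes.MUL, '/': TokenTypes.DIV}

    def __init__(self, text):
        self.text = text
        self.pos = 0

    def next_token(self):
        # Skip any whitespace between tokens.
        while self.pos < len(self.text) and self.text[self.pos].isspace():
            self.pos += 1
        if self.pos >= len(self.text):
            return Token(TokenTypes.EOF)
        ch = self.text[self.pos]
        if ch.isdigit():
            # Collect a run of digits into a single INT token.
            start = self.pos
            while self.pos < len(self.text) and self.text[self.pos].isdigit():
                self.pos += 1
            return Token(TokenTypes.INT, int(self.text[start:self.pos]))
        self.pos += 1
        return Token(self.SYMBOLS[ch])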
Example #13
def run_ide():
    print(": Welcome to Dymond V0.0.0!")
    print(
        ": Play around a little in this nice IDE. Type simple statements in and we will process them :)"
    )
    print(": Type exit() to exit")
    user_in = ""
    total_user_in = ""

    while user_in != "exit()":
        user_in = input(">>> ")

        if (user_in == "exit()"):
            break

        try:
            test = total_user_in + user_in

            lexer = Lexer(test)
            se_parser = Parser(lexer, "ide")
            semantic_analyzer = SemanticAnalyzer(se_parser)

            semantic_analyzer.analyze()
            lexer = Lexer(test)
            in_parser = Parser(lexer, "ide")
            semantic_analyzer.current_scope.reset_multi_scope_vars()
            interpreter = Interpreter(in_parser,
                                      semantic_analyzer.current_scope)
            interpreter.interpret()

            if "print(" not in user_in and "input(" not in user_in:
                total_user_in += "\n" + user_in
        except Exception as ex:
            print(ex)
Example #14
 def __init__(self, string: str = None, lexer: Lexer = None):
     if string is not None:
         self.__lexer = Lexer(string)
     elif lexer is not None:
         self.__lexer = lexer
     else:
         raise ValueError("String or Lexer object should be passed")
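
The constructor above accepts either raw source text or a ready-made Lexer and insists on receiving one of them. A self-contained sketch of the same pattern follows; the TinyLexer/TinyParser names are invented for illustration and are not taken from this project.

class TinyLexer:
    def __init__(self, text):
        self.text = text

class TinyParser:
    def __init__(self, string=None, lexer=None):
        # Accept either raw source text or an already-built lexer, but require one of them.
        if string is not None:
            self.lexer = TinyLexer(string)
        elif lexer is not None:
            self.lexer = lexer
        else:
            raise ValueError("String or Lexer object should be passed")

# TinyParser("1 + 2")                     # builds its own lexer from the string
# TinyParser(lexer=TinyLexer("1 + 2"))    # reuses an existing lexer instance
# TinyParser()                            # raises ValueError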
Example #15
	def state_act(self, stat, buff, next_state):

		if stat == 4 or stat == 5 or stat == 6 or stat == 9 or stat == 41:
			buff.add_buff(self.curr_symbs[0])

		#print("buff ", buff.get_buff(), " next state ", next_state)
		if  buff.get_buff() == "-" and next_state == 4:
			list_d = ["+", "*", "-", "/", ",", "(", "[", ">", "<", ":=", "-=", "+=", "*=", "/="] 
			#print("last lexem", self.Lexems_arr[-1].get_orig())
			if not (self.Lexems_arr[-1].get_orig() in list_d):
				self.Lexems_arr.append(Lexer(self.curr_p - len(buff.get_buff())+1, "minus", buff.get_buff(), "name"))
				buff.clear_buff()


		if next_state == 1 or stat == 3:
			if stat == 4:
				type_s = 'number'
			elif stat == 5:
				type_s = 'string'
			elif stat == 6:
				type_s = 'identifier'
			elif stat == 9:
				type_s = 'sign'
			else:
				type_s = stat


			new_lex = buff.get_buff()
			if new_lex != "":
				self.Lexems_arr.append(Lexer(self.curr_p - len(new_lex)+1, type_s, new_lex, "name"))
				buff.clear_buff()
Example #16
def test_input(text: str):
    lexer = Lexer()
    tokens = lexer.tokenize(text)

    print('''
The lexer input was:
{}

The Tokenized output from it was:
{}
'''.format(text, tokens))

    parser = Parser(tokens)
    node_tree = parser.parse()

    print('''
The Parser then created this Node Tree:
{}
'''.format(node_tree))

    runner = Runner(node_tree, infoLevel)
    print('''
The Runner ran the node tree, and came up with this result:
{}
'''.format(runner.run()))
Example #17
def test_skip_whitespace():
    lexer = Lexer('1   +3 9')

    assert lexer.next_token() == Token(TokenTypes.INT, 1)
    assert lexer.next_token() == Token(TokenTypes.ADD)
    assert lexer.next_token() == Token(TokenTypes.INT, 3)
    assert lexer.next_token() == Token(TokenTypes.INT, 9)
    assert lexer.next_token() == Token(TokenTypes.EOF)
Example #18
 def Parse(self, input, **kwargs):
     self.lexer = Lexer()
     self.lexer.Build(input, **kwargs)
     self.parser = yacc.yacc(module=self, **kwargs)
     program = self.parser.parse(lexer=self.lexer.lexer)
     for command in program:
         #print(command)
         command.line(self)
Example #19
 def getTokens(self, filename):
     """ lex file and return list of tokens (tokenizer) """
     lexer = Lexer()
     try:
         tokenizer = lexer.fileScan(filename)
     except Exception as inst:
         sys.stderr.write(str(inst) + '\n')
         sys.exit(1)
     return tokenizer
Example #20
 def __init__(self, filename):
     self._lexer = Lexer(filename)  # Lexical analyzer instance
     self._lexer.analyze()  # Let the lexer do its thing
     self._command_type = None  # The type of command we're parsing now
     self._symbol = None  # The current a- or l-command symbol
     self._dest = None  # The current c-command dest field
     self._comp = None  # The current c-command comp field
     self._jump = None  # The current c-command jump field
 def __init__(self, parent=None):
     """
     Constructor
     
     @param parent parent widget of this lexer
     """
     QsciLexerSQL.__init__(self, parent)
     Lexer.__init__(self)
Example #22
 def __init__(self, content):
     lexer = Lexer()
     lexer.main(content)
     # the tokens to analyze
     self.tokens = lexer.tokens
     # index into the tokens
     self.index = 0
     # the resulting syntax tree
     self.tree = SyntaxTree()
 def evaluate(self):
     #lexer
     lexer = Lexer(self.input)
     token_list = lexer.lex()
 
     #parser
     parser = Parser(token_list)
     value = parser.parse()
     return value
Example #24
def lexer():
    global content
    lexer = Lexer()
    lexer.main(content)
    buffer = []
    for token in lexer.tokens:
        #print ('(%s, %s)' % (token.type, token.value))
        buffer.append('(%s, %s)' % (token.type, token.value))
    return buffer
Example #25
 def __init__(self, file=None, stream=None, encoding="utf-8"):
     self.lexer = Lexer(file, stream, encoding)
     for NodeType in [
             AST.Rule, AST.Define, AST.Pattern, AST.Section,
             AST.SectionReference
     ]:
         setattr(self, NodeType.__name__,
                 self.create_line_wrapper(NodeType))
     self.rules_file = [self.Section('::main::', None)]
Example #26
    def __init__(self, program):
        self.lexer = Lexer(program)
        self.tokens = self.lexer.tokenize()

        self.cursor = 0
        if self.tokens:
            self.current = self.tokens[self.cursor]
        else:
            self.current = None
Example #27
def run_code(code: str):
    lexer = Lexer()
    tokens = lexer.tokenize(code)

    parser = Parser(tokens)
    node_tree = parser.parse()

    runner = Runner(node_tree)
    return runner.run()
Example #28
def main():
    string = sys.stdin.read()
    lexer = Lexer(string)
    while True:
        tmp = lexer.next()
        print(tmp, end=" ")
        if tmp.get_token_type() == Type.EOF:
            break
    print()
Example #29
    def testScanString(self):
        lexer = Lexer()
        lexer.input("def string = \"hello, world!\"")
        (tokens, errors) = lexer.allTokens()

        # assert that no error occurred
        self.assertEqual(errors, [])

        # assert correct token scanning
        self.assertEqual(types(tokens), 'DEF ID = STRING')
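
This test and the scanner tests that follow compare against a types() helper that is not shown in the snippet. A minimal stand-in consistent with the expected strings might look like the following (the Tok namedtuple is purely illustrative; the real token objects presumably carry more information):

from collections import namedtuple

Tok = namedtuple('Tok', 'type value')   # illustrative stand-in for the real token objects

def types(tokens):
    # Join each token's type tag with single spaces, e.g. 'DEF ID = STRING'.
    return ' '.join(str(tok.type) for tok in tokens)

# types([Tok('DEF', 'def'), Tok('ID', 'string'), Tok('=', '='), Tok('STRING', '"hello, world!"')])
# -> 'DEF ID = STRING'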
Example #30
    def testScanFunction(self):
        lexer = Lexer()
        lexer.input("def f(x) = x")
        (tokens, errors) = lexer.allTokens()

        # assert that no error occurred
        self.assertEqual(errors, [])

        # assert correct token scanning
        self.assertEqual(types(tokens), 'DEF ID ( ID ) = ID')
Example #31
    def testScanTuple(self):
        lexer = Lexer()
        lexer.input("def t = (a, b)")
        (tokens, errors) = lexer.allTokens()

        # assert that no error occurred
        self.assertEqual(errors, [])

        # assert correct token scanning
        self.assertEqual(types(tokens), 'DEF ID = ( ID , ID )')
Example #32
    def testScanList(self):
        lexer = Lexer()
        lexer.input("def t = [a, b]")
        (tokens, errors) = lexer.allTokens()

        # assert that no error occurred
        self.assertEqual(errors, [])

        # assert correct token scanning
        self.assertEqual(types(tokens), 'DEF ID = [ ID , ID ]')
Example #33
    def testScanLambda(self):
        lexer = Lexer()
        lexer.input("def f(x) = lambda(y) x")
        (tokens, errors) = lexer.allTokens()

        # assert that no error occurred
        self.assertEqual(errors, [])

        # assert correct token scanning
        self.assertEqual(types(tokens), 'DEF ID ( ID ) = LAMBDA ( ID ) ID')
Example #34
    def testScanNegativeInteger(self):
        lexer = Lexer()
        lexer.input("def number = -102")
        (tokens, errors) = lexer.allTokens()

        # assert that no error occurred
        self.assertEqual(errors, [])

        # assert correct token scanning
        self.assertEqual(types(tokens), 'DEF ID = INT')
Example #35
    def testScanVariable(self):
        lexer = Lexer()
        lexer.input("def x = y")
        (tokens, errors) = lexer.allTokens()

        # assert that no error occurred
        self.assertEqual(errors, [])

        # assert correct token scanning
        self.assertEqual(types(tokens), 'DEF ID = ID')
Example #36
    def testScanBooleanTrue(self):
        lexer = Lexer()
        lexer.input("def boolean = true")
        (tokens, errors) = lexer.allTokens()

        # assert that no error occurred
        self.assertEqual(errors, [])

        # assert correct token scanning
        self.assertEqual(types(tokens), 'DEF ID = BOOL')
 def __init__(self, parent=None):
     """
     Constructor
     
     @param parent parent widget of this lexer
     """
     QsciLexerVHDL.__init__(self, parent)
     Lexer.__init__(self)
     
     self.commentString = QString("--")
 def __init__(self, parent=None):
     """
     Constructor
     
     @param parent parent widget of this lexer
     """
     QsciLexerMakefile.__init__(self, parent)
     Lexer.__init__(self)
     
     self.commentString = QString("#")
     self._alwaysKeepTabs = True
Example #39
    def __init__(self, parent=None):
        """
        Constructor
        
        @param parent parent widget of this lexer
        """
        QsciLexerPOV.__init__(self, parent)
        Lexer.__init__(self)

        self.commentString = QString("//")
        self.streamCommentString = {"start": QString("/* "), "end": QString(" */")}
        self.boxCommentString = {"start": QString("/* "), "middle": QString(" * "), "end": QString(" */")}
 def __init__(self, parent=None):
     """
     Constructor
     
     @param parent parent widget of this lexer
     """
     QsciLexerHTML.__init__(self, parent)
     Lexer.__init__(self)
     
     self.streamCommentString = {
         'start' : QString('<!-- '),
         'end'   : QString(' -->')
     }
class TestLinearEquation():
    @classmethod
    def setupClass(self):
        rules = {
            "Number": r"\d+(\.\d+)?",
            "Operator": r"[-+()*]",
        }
        self.lex = Lexer(rules, False)
        pass
    def TestKnownValues(self):
        """ Tests some linear equation by testing against known output """
        var = ['Cost','PaintA','PaintB','PaintC','Quantity']
        
        tokens = [(group,value) for group, value in self.lex.scan("2PaintA+3PaintB-0.5Cost")]
        par = Parser(tokens, {}, var)
        assert par.parseLinearEquation() == [(2, 1), (3, 2), (-0.5, 0)]
        
        tokens = [(group,value) for group, value in self.lex.scan("5Cost*3.1+(-2)8*Quantity-(-8)PaintA")]
        par = Parser(tokens, {}, var)
        assert par.parseLinearEquation() == [(15.5, 0), (-16, 4), (8, 1)]
        
        tokens = [(group,value) for group, value in self.lex.scan("2(1+0.5)PaintA+33*Quantity-13Cost*(2.5+1)")]
        par = Parser(tokens, {}, var)
        assert par.parseLinearEquation() == [(3.0, 1), (33, 4), (-45.5, 0)]
        
    def TestNonLinearEquations(self):
        """ Tests that non-linear equations are flagged as Malformed Equation """
        var = ['x','y','z']
        test_eqn = [
            "2xy+y-3z",
            "2x+3xyz-8z",
            "x-y-xy+2z"
        ]
        for eqn in test_eqn:
            tokens = [(group,value) for group, value in self.lex.scan(eqn)]
            par = Parser(tokens, {}, var)
            nose.tools.assert_raises(MalformedEquationError, par.parseLinearEquation)
            
    def TestImproperBrackets(self):
        """ Tests that improperly placed brackets in the equation raises MalformedEquationError 
        Though mathematically correct, for the function to work, brackets must be properly placed
        and should not envelop more than one variable"""
        var = ['x','y','z']
        test_eqn = [
            "(2x+y)-3z",
            "(2*4x-(-3)y)-8z",
        ]
        for eqn in test_eqn:
            tokens = [(group,value) for group, value in self.lex.scan(eqn)]
            par = Parser(tokens, {}, var)
            nose.tools.assert_raises(MalformedEquationError, par.parseLinearEquation)
Example #42
 def TestVariablesNaming(self):
     """ Numeric unknowns must be preceded by alphabetic unknowns
     can also be phrased as,
     variable names cannot begin with numerical digit.
     Equations to test:
     23x15sin(zy12*100u) + AF1atan(99c)
     """
     lex = Lexer(self.rules)
     test = []
     for g,v in lex.scan("23x15sin(zy12*100u) + AF1atan(99c)"):
         if g == 'Unknown' and re.match(r"\d+(\.\d+)?", v):
             assert test[-1][0] == 'Unknown' and re.match(r"^[_a-zA-Z]$",test[-1][1])
         test.append((g,v))
     pass
 def __init__(self, parent=None):
     """
     Constructor
     
     @param parent parent widget of this lexer
     """
     QsciLexerCSS.__init__(self, parent)
     Lexer.__init__(self)
     
     self.commentString = QString("#")
     self.streamCommentString = {
         'start' : QString('/* '),
         'end'   : QString(' */')
     }
Example #44
def main():
    var = []
    const = []
    eqn = []
    reserved = ['exp','log','log10','acos','asin','atan','cos','sin','tan','cosh','sinh','tanh']
    
    f = open(r'C:\Stuff\work\inputmv.txt', 'r')
    for line in f:
        if line[-1]=="\n":
            line = line[:-1]
        if line.find('var',0,3) == 0:
            var = line[line.find(' ')+1:].split(',')
        elif line.find('const',0,5) == 0:
            const = line[line.find(' ')+1:].split(',')
        elif len(line) > 0:
            eqn.append(line)
    f.close()

    # Make sure they are sorted by length
    if len(const) > 0:
        const.sort(key=len)
    if len(var) > 0:
        var.sort(key=len)
    for word in reserved:
        if word in var or word in const:
            raise InvalidNameError(word)
    
    rules = {
        "Function": r"(exp|log|log10|acos|asin|atan|cos|sin|tan|cosh|sinh|tanh)\(",
        "Number": r"\d+(\.\d+)?",
        "Operator": r"[-+()*^]", 
    }
 
    lex = Lexer(rules, False)
    out = ['[']
    for equation in eqn:
        tokens = [(group,value) for group, value in lex.scan(equation)]
        par = Parser(tokens, const, var)
        out.append(par.parseTokens())
        out.append(',')
    out.pop()
    out.append(']')
    ret = ''.join(out)
    print(ret)
    # Rewrite code including safe locals and globals dict
    #f = lambda x: eval(ret)
    #result = fsolve(f,[1,1])
    #print result
    pass
Example #45
 def TestFunctionPriority(self):
     """ Function names must be given top preference
          Equation snippets to check:
             exp(check)
             log10(tx)
             sin(ccos(y))
             eexp(22)
     """
     lex = Lexer(self.rules)
     assert [(g,v) for g,v in lex.scan("exp(check)")].pop(0) == ('Function', 'exp(')
     assert [(g,v) for g,v in lex.scan("log10(tx)")].pop(0) == ('Function', 'log10(')
     dualFunc = [(g,v) for g,v in lex.scan("sin(ccos(y))")]
     assert dualFunc[0] == ('Function', 'sin(')
     assert dualFunc[2] == ('Function', 'cos(')
     assert [(g,v) for g,v in lex.scan("eexp(22)")][1] == ('Function', 'exp(')
     pass
Example #46
class TestPyPOS(unittest.TestCase):
	def setUp(self):
		'''Tests Setup'''
		self.lexer = Lexer()
		self.tagger = POSTagger()
		self.start = time.time()
	def stringTest(self,string):
		'''Common Testing Function'''
		self.words = self.lexer.lex(string)
		self.tags = self.tagger.tag(self.words)
		self.end = time.time()
		self.difference = self.end - self.start
		for tag in self.tags:
			print " / ".join(tag)
	def test_1_Short(self):
		'''Test Short String'''
		global shortTestString
		self.stringTest(shortTestString)
	def test_2_Long(self):
		'''Test Long String'''
		global testString
		self.stringTest(testString)
	def tearDown(self):
		print "Tokenized and tagged %s words in %s seconds" % (len(self.words),self.difference)
		print "Running time at test end was: %s seconds" % (time.time() - STARTTIME)
 def setupClass(self):
     rules = {
         "Number": r"\d+(\.\d+)?",
         "Operator": r"[-+()*]",
     }
     self.lex = Lexer(rules, False)
     pass
 def setupClass(self):
     rules = {
         "Function": r"(exp|log|log10|acos|asin|atan|cos|sin|tan|cosh|sinh|tanh)\(",
         "Number": r"\d+(\.\d+)?",
         "Operator": r"[-+()*^]", 
     }
     self.lex = Lexer(rules)
     pass
Example #49
 def TestNumbersTokens(self):
     """ Numeric constants cannot appear immediately after unknowns.
     They must be separated by either * or () or any other operator.
     In other words:
     numerical constants must not be preceded by unknowns;
     they can follow only an operator or a function.
     Equation to test:
     12xy2*15cz(2.54) + tan(23y)18c19(0.01)
     """
     lex = Lexer(self.rules)
     test = []
     for g,v in lex.scan("12xy2*15cz(2.54) + tan(23y)18c19(0.01)"):
         if g == 'Number':
             if len(test) > 0:
                 assert test[-1][0] != 'Unknown'
         test.append((g,v))
     pass
Example #50
 def TestCaseInsensitivity(self):
     """ Function names should be case-insensitive
     exp() = Exp() = EXP()
     Equations to test:
     aExp(bX) + SIN(theta) + cos(z)
     dLog(LOG(x)) + hcosH(y)
     """
     lex = Lexer(self.rules, False)
     t1 = [(g,v) for g,v in lex.scan("aExp(bX) + SIN(theta) + cos(z)")]
     assert t1[1] == ('Function', 'Exp(')
     assert t1[6] == ('Function', 'SIN(')
     assert t1[14] == ('Function', 'cos(')
     t2 = [(g,v) for g,v in lex.scan("dLog(LOG(x)) + hcosH(y)")]
     assert t2[1] == ('Function', 'Log(')
     assert t2[2] == ('Function', 'LOG(')
     assert t2[8] == ('Function', 'cosH(')
     pass
     
 def keywords(self, kwSet):
     """
     Public method to get the list of keywords.
     
     @param kwSet number of keyword set (integer, one based)
     @return list of supported keywords (string) or None
     """
     
     return Lexer.keywords(self, kwSet)
Example #52
 def __init__(self, parent=None):
     """
     Constructor
     
     @param parent parent widget of this lexer
     """
     QsciLexerD.__init__(self, parent)
     Lexer.__init__(self)
     
     self.commentString = QString("//")
     self.streamCommentString = {
         'start' : QString('/+ '),
         'end'   : QString(' +/')
     }
     self.boxCommentString = {
         'start'  : QString('/* '),
         'middle' : QString(' * '),
         'end'    : QString(' */')
     }