def test_parse_will_build_an_if_statement_with_expression_inside(self):
    """Parse 'if ( 2 == 3 ) { x = 2 * 3 ; }' and verify the AST shape.

            if
        /       \
        (       {
       ==       |
      / \       |
    2    3      =
              /   \
             x     *
                 /   \
                2    3
    """
    lexer = LexerStateMachine(' if ( 2 == 3 ) { x = 2 * 3 ; } ', self.context)
    parser = Parser(lexer, self.manager)
    self.manager.setParser(parser)

    ifToken = parser.parse(0)
    self.assertEqual('if', ifToken.id)

    # Condition sub-tree: '(' wrapping '==' comparing the literals 2 and 3.
    condition = ifToken.data[0]
    comparison = condition.data[0]
    self.assertEqual('(', condition.id)
    self.assertEqual('==', comparison.id)
    self.assertEqual(2, comparison.data[0].data[0])
    self.assertEqual(3, comparison.data[1].data[0])

    # Body sub-tree: '{' holding the assignment x = 2 * 3.
    body = ifToken.data[1][0]
    assignment = body.data[0]
    product = assignment.data[1]
    self.assertEqual('{', body.id)
    self.assertEqual('=', assignment.id)
    self.assertEqual('x', assignment.data[0].data[0])
    self.assertEqual('*', product.id)
    self.assertEqual(2, product.data[0].data[0])
    self.assertEqual(3, product.data[1].data[0])
    def test_parse_will_build_an_if_AST_that_contain_expression(self):
        """
        Parse 'if ( 2 == 3 ) { 5 * 6 ; }' into the AST below.

                if
            /       \
            (       {
            |
           ==
          / \
        2    3
        :return:
        """
        lexer = LexerStateMachine(' if ( 2 == 3 ) { 5 * 6 ; } ', self.context)
        parser = Parser(lexer, self.manager)
        self.manager.setParser(parser)

        token = parser.parse(0)

        # Condition sub-tree: '(' wrapping '==' comparing the literals 2 and 3.
        self.assertEqual('if', token.id)
        self.assertEqual('(', token.data[0].id)
        self.assertEqual('==', token.data[0].data[0].id)
        self.assertEqual(2, token.data[0].data[0].data[0].data[0])
        self.assertEqual(3, token.data[0].data[0].data[1].data[0])
        # Body sub-tree: '{' holding the single expression 5 * 6.
        self.assertEqual('{', token.data[1][0].id)
        self.assertEqual('*', token.data[1][0].data[0].id)
        self.assertEqual(5, token.data[1][0].data[0].data[0].data[0])
        self.assertEqual(6, token.data[1][0].data[0].data[1].data[0])
    def test_parse_will_build_an_if_else_AST(self):
        """
        Parse an if/else with two empty brace bodies into the AST below.

                if      -  else
            /       \        |
            (       {        {
            |
           ==
          / \
        2    3
        :return:
        """
        lexer = LexerStateMachine(' if ( 2 == 3 ) { }\
                        else { } ', self.context)
        parser = Parser(lexer, self.manager)
        self.manager.setParser(parser)

        token = parser.parse(0)


        # Condition: '(' wrapping '==' comparing the literals 2 and 3.
        self.assertEqual('if', token.id)
        self.assertEqual('(', token.data[0].id)
        self.assertEqual('==', token.data[0].data[0].id)
        self.assertEqual(2, token.data[0].data[0].data[0].data[0])
        self.assertEqual(3, token.data[0].data[0].data[1].data[0])
        # Empty if-body, then the 'else' token carrying its own empty block.
        self.assertEqual('{', token.data[1][0].id)
        self.assertEqual('else', token.data[2].id)
        self.assertEqual('{', token.data[2].data[0][0].id)
    def test_parse_nested_do_while_loop(self):
        """
        Parse a do-while whose body contains a brace-less inner do-while:
        do { x = 2 + 3 ; do y = 4 + 5 ; while ( 1 ) ; z = 6 - 7 ; } while ( 1 ) ;
        """
        lexer = LexerStateMachine('do { x = 2 + 3 ; do y = 4 + 5 ; while ( 1 ) ; z = 6 - 7 ; } while ( 1 ) ;', self.context)
        parser = Parser(lexer, self.manager)
        self.manager.setParser(parser)

        token = parser.parse(0)

        # Outer loop: condition literal 1 and the '{' body.
        self.assertEqual('do', token.id)
        self.assertEqual(1, token.data[0].data[0])
        self.assertEqual('{', token.data[1][0].id)
        # First body statement: x = 2 + 3.
        self.assertEqual('=', token.data[1][0].data[0].id)
        self.assertEqual('x', token.data[1][0].data[0].data[0].data[0])
        self.assertEqual('+', token.data[1][0].data[0].data[1].id)
        self.assertEqual(2, token.data[1][0].data[0].data[1].data[0].data[0])
        self.assertEqual(3, token.data[1][0].data[0].data[1].data[1].data[0])

        # Second body statement: the inner do-while with y = 4 + 5.
        self.assertEqual('do', token.data[1][0].data[1].id)
        self.assertEqual(1, token.data[1][0].data[1].data[0].data[0])
        self.assertEqual('=', token.data[1][0].data[1].data[1][0].id)
        self.assertEqual('y', token.data[1][0].data[1].data[1][0].data[0].data[0])
        self.assertEqual('+', token.data[1][0].data[1].data[1][0].data[1].id)
        self.assertEqual(4, token.data[1][0].data[1].data[1][0].data[1].data[0].data[0])
        self.assertEqual(5, token.data[1][0].data[1].data[1][0].data[1].data[1].data[0])

        # Third body statement: z = 6 - 7.
        self.assertEqual('=', token.data[1][0].data[2].id)
        self.assertEqual('z', token.data[1][0].data[2].data[0].data[0])
        self.assertEqual('-', token.data[1][0].data[2].data[1].id)
        self.assertEqual(6, token.data[1][0].data[2].data[1].data[0].data[0])
        self.assertEqual(7, token.data[1][0].data[2].data[1].data[1].data[0])
    def test_parse_while_1_do_few_statements(self):
        """
            while
            /   \
           1     {
                 |- 2 + 3
                 |- i
                 |- j
        :return:
        """
        lexer = LexerStateMachine('while ( 1 ) { 2 + 3 ; i ; j ; }', self.context)
        parser = Parser(lexer, self.manager)
        self.manager.setParser(parser)

        token = parser.parse(0)

        # Loop condition: the literal 1.
        self.assertEqual('while', token.id)
        self.assertEqual('(literal)', token.data[0].id)
        self.assertEqual(1, token.data[0].data[0])
        # Body: the expression 2 + 3 followed by the bare identifiers i and j.
        self.assertEqual('{', token.data[1][0].id)
        self.assertEqual('+', token.data[1][0].data[0].id)
        self.assertEqual('(literal)', token.data[1][0].data[0].data[0].id)
        self.assertEqual(2, token.data[1][0].data[0].data[0].data[0])
        self.assertEqual('(literal)', token.data[1][0].data[0].data[1].id)
        self.assertEqual(3, token.data[1][0].data[0].data[1].data[0])
        self.assertEqual('(identifier)', token.data[1][0].data[1].id)
        self.assertEqual('i', token.data[1][0].data[1].data[0])
        self.assertEqual('(identifier)', token.data[1][0].data[2].id)
        self.assertEqual('j', token.data[1][0].data[2].data[0])
    def test_injectRegisterRequired_will_give_min_and_max_register_to_each_of_the_token(self):
        """
        injectRegisterRequired should annotate every node of the AST for
        'x = y + 8 * 16 / 180 - 20' with the minimum and maximum number of
        registers its sub-tree needs.
        """
        lexer = LexerStateMachine('{ x = y + 8 * 16 / 180 - 20 ; }', self.context)
        parser = Parser(lexer, self.manager)
        self.manager.setParser(parser)

        token = parser.parseStatement(0)
        # Unwrap the brace token so injection runs on the assignment itself.
        if token[0].id == '{':
            token = token[0].data[0]
        self.informationInjector.injectRegisterRequired(token)

        # NOTE(review): the expected counts look like a Sethi-Ullman-style
        # numbering (leaves need 1, parents need more as sub-trees tie) —
        # confirm against the injector's implementation.
        # Left child 'x' is a leaf.
        self.assertEqual(1, token.data[0].maxRequiredRegister)
        self.assertEqual(1, token.data[0].minRequiredRegister)
        # The '=' root and the '-' expression below it.
        self.assertEqual(6, token.maxRequiredRegister)
        self.assertEqual(2, token.minRequiredRegister)
        self.assertEqual(5, token.data[1].maxRequiredRegister)
        self.assertEqual(2, token.data[1].minRequiredRegister)
        # Right child of '-' is the leaf 20.
        self.assertEqual(1, token.data[1].data[1].maxRequiredRegister)
        self.assertEqual(1, token.data[1].data[1].minRequiredRegister)
        # Left child of '-' is 'y + 8 * 16 / 180'.
        self.assertEqual(4, token.data[1].data[0].maxRequiredRegister)
        self.assertEqual(2, token.data[1].data[0].minRequiredRegister)
        self.assertEqual(1, token.data[1].data[0].data[0].maxRequiredRegister)
        self.assertEqual(1, token.data[1].data[0].data[0].minRequiredRegister)
        # The '/' sub-tree '8 * 16 / 180' and its children.
        self.assertEqual(3, token.data[1].data[0].data[1].maxRequiredRegister)
        self.assertEqual(2, token.data[1].data[0].data[1].minRequiredRegister)
        self.assertEqual(2, token.data[1].data[0].data[1].data[0].maxRequiredRegister)
        self.assertEqual(2, token.data[1].data[0].data[1].data[0].minRequiredRegister)
        self.assertEqual(1, token.data[1].data[0].data[1].data[1].maxRequiredRegister)
        self.assertEqual(1, token.data[1].data[0].data[1].data[1].minRequiredRegister)
        # Leaves 8 and 16 under the '*' node.
        self.assertEqual(1, token.data[1].data[0].data[1].data[0].data[0].maxRequiredRegister)
        self.assertEqual(1, token.data[1].data[0].data[1].data[0].data[0].minRequiredRegister)
        self.assertEqual(1, token.data[1].data[0].data[1].data[0].data[1].maxRequiredRegister)
        self.assertEqual(1, token.data[1].data[0].data[1].data[0].data[1].minRequiredRegister)
    def test_parse_will_build_an_if_statement_without_the_brace(self):
        """Parse a brace-less if body: 'if ( 2 == 3 ) x = 2 * 3 ;'.

                if
            /       \
           ==       {
          / \       |
        2    3      =
                  /   \
                 x     *
                     /   \
                    2    3
        :return:
        """

        lexer = LexerStateMachine('if ( 2 == 3 ) x = 2 * 3 ; ', self.context)
        parser = Parser(lexer, self.manager)
        self.manager.setParser(parser)
        root = parser.parse(0)

        self.assertEqual('if', root.id)

        # Condition: '(' wrapping '==' comparing the literals 2 and 3.
        condition = root.data[0]
        equality = condition.data[0]
        self.assertEqual('(', condition.id)
        self.assertEqual('==', equality.id)
        self.assertEqual(2, equality.data[0].data[0])
        self.assertEqual(3, equality.data[1].data[0])

        # Brace-less body: the single assignment is still wrapped in a list.
        assignment = root.data[1][0]
        product = assignment.data[1]
        self.assertEqual('=', assignment.id)
        self.assertEqual('x', assignment.data[0].data[0])
        self.assertEqual('*', product.id)
        self.assertEqual(2, product.data[0].data[0])
        self.assertEqual(3, product.data[1].data[0])
Exemple #8
0
def parse_asm_file(file_name):
    """ Translates a .asm file into Hack machine code.

        Two passes over the parsed commands:
          1. record each label's address in the symbols table;
          2. resolve symbolic A-commands (labels/variables) and emit code.
        Input: file_name - the .asm file needed to be translated
        Output: the translated file_name.hack file
    """
    line = 0
    symbols_table = SymbolsTable()
    hack_lines = []
    Parser.parse(file_name)
    # First pass: a label refers to the address of the *next* real command,
    # so only non-label commands advance the line counter.
    for command in Parser.get_commands():
        if command.type == Command.L_COMMAND:
            symbols_table.add_label(command.content, line)
        else:
            line += 1
    # Second pass: resolve symbolic A-commands, skip labels, emit machine code.
    for command in Parser.get_commands():
        if command.type == Command.A_COMMAND:
            if not str(command.content).isnumeric():
                if not symbols_table.contains(command.content):
                    # a symbol that is not a known label: a new variable
                    symbols_table.add_variable(command.content)
                command.content = symbols_table.get_address(command.content)
        elif command.type == Command.L_COMMAND:
            continue
        hack_lines.append(Code.code(command))

    # Write the .hack file.  The loop variable is named hack_line so it no
    # longer shadows the first-pass line counter above.
    with open(file_name[:-ASM_SUFF_LEN] + HACK_SUFF, mode=W_FILE_MODE, encoding=DEF_ENCODING) as hack_file:
        for hack_line in hack_lines:
            hack_file.write('%s\n' % hack_line)
Exemple #9
0
  def testGLHeaderParsing(self):
    """Parse the GL header and cross-check the discovered functions against
    a regex scan of the same header and the ordinals in the .def file."""
    # Use context managers so the file handles are closed (the originals leaked).
    with open("data/gl-cpp.h") as header_file:
      header = header_file.read()

    # Rough regex scan for function names, used as the reference set.
    functions = re.findall(r".+\s(\w+)\s*\(.+\);", header)

    l = Parser.parseSource(header)
    assert len(l.functions) == len(functions)

    # check that all functions are there
    for f in functions:
      assert f in l.functions

    # check that nothing extra is there
    for f in l.functions.keys():
      assert f in functions

    # read in the ordinals
    with open("data/opengles11u.def") as def_handle:
      defFile = def_handle.read()

    # allow missing functions since we don't have the EGL header
    for function, ordinal in Parser.parseDefFile(defFile):
      if function in l.functions:
        l.functions[function].ordinal = ordinal

    # make sure all functions have ordinals
    for f in l.functions.values():
      # extensions don't have ordinals
      if f.name.endswith("OES"): continue
      assert f.ordinal is not None
Exemple #10
0
def initialize():
    """Load annotations and domain maps and return the experiment setup:
    (domain maps, target objects per trial, annotations, attribute names,
    participants)."""
    annotation_data = parser.parseAnnotation()
    domain_maps = parser.parseMapas()
    participants = annotation_data[1]
    # Gold-standard target objects for each of the 20 trials.
    targets = {
        "1": ["rest3"], "2": ["cafe1"], "3": ["drug3"], "4": ["chur3"],
        "5": ["pub1"], "6": ["chur2", "chur3"], "7": ["rest1", "rest2"],
        "8": ["drug2", "drug3"], "9": ["drug3", "drug4"],
        "10": ["rest4", "rest5"], "11": ["rest3"], "12": ["cafe1"],
        "13": ["rest4", "rest5"], "14": ["chur3"], "15": ["pub1"],
        "16": ["chur2", "chur3"], "17": ["rest1", "rest2"],
        "18": ["drug2", "drug3"], "19": ["drug3", "drug4"],
        "20": ["rest4", "rest5"],
    }
    attributes = ["type", "in", "name", "next-to", "in-front-of", "other",
                  "right-to", "left-to", "behind"]
    return domain_maps, targets, annotation_data[0], attributes, participants
Exemple #11
0
class TestParser_Assign(unittest.TestCase):
    """Tests for Parser.assign storing values into the state provider."""

    def setUp(self):
        self.stateProvider = InternalStateProvider()
        self.parser = Parser(self.stateProvider)

    def tearDown(self):
        self.stateProvider.clearState()

    def test_giveniAnd5_Assigns5Toi(self):
        self.parser.assign("i", "5")
        stored = self.stateProvider.internalVariables["i"]
        self.assertEqual(stored.value, 5)

    def test_giveniAnd15_Assigns15Toi(self):
        self.parser.assign("i", "15")
        stored = self.stateProvider.internalVariables["i"]
        self.assertEqual(stored.value, 15)

    def test_givenNameAndString_AssignsStringToName(self):
        self.parser.assign("name", "\"string\"")
        stored = self.stateProvider.internalVariables["name"]
        self.assertEqual(stored.value, "string")

    def test_givenNameAndString_AssignsToNameTypeOfString(self):
        self.parser.assign("name", "\"string\"")
        stored = self.stateProvider.internalVariables["name"]
        self.assertEqual(stored.type, "string")

    def test_givenNameAndDoubleQuotes_AssignsDoubleQuotesToName(self):
        self.parser.assign("name", "\"\\\"\\\"\"")
        stored = self.stateProvider.internalVariables["name"]
        self.assertEqual(stored.value, "\\\"\\\"")

    def run(self):
        # NOTE(review): this REPL loop looks out of place in a TestCase —
        # it overrides unittest's run() and relies on self.interpret, which
        # this class does not define; confirm it belongs here.
        repl_lexer = Lexer("")
        repl_parser = Parser(repl_lexer)

        while True:
            try:
                open_braces = 0
                repl_lexer.flush()
                descend = False
                text = input('mpr> ')
                if '{' in text:
                    descend = True
                    open_braces += 1
                # Keep reading until every '{' is balanced and the line
                # ends a statement (';') or a block ('}').
                while open_braces > 0 or (text != "" and text[-1] != ';' and text[-1] != '}'):
                    continuation = input('...  ')
                    if '{' in continuation:
                        open_braces += 1
                    if '}' in continuation:
                        open_braces -= 1
                    text += continuation
            except EOFError:
                break
            repl_lexer.append(text)
            try:
                self.interpret(descend, repl_parser.compound())
            except ValueError as err:
                print(err)
            except SyntaxError as err:
                print(err)
            except TypeError as err:
                print(err)
            except KeyError as err:
                print("Variable {var} not defined!".format(var=err))
Exemple #13
0
    def test_cmp_lt(self):
        """'a > b' must parse to the pre-computed comparison AST."""
        scanner = Scanner('a > b')
        scanner.lex()

        comparison_ast = Parser(scanner.tokens).expr()
        self.assertEqual(comparison_ast, test_asts['a > b'])
	def testParams(self):
		"""parse_800_params must split a '|'-separated KEY=VALUE string into a
		dict (minus the leading SEC_CODES tag) and map "" to an empty dict.

		NOTE(review): the backslash line continuations embed literal tab/space
		runs inside the input string — presumably the parser strips whitespace
		around keys; confirm against parse_800_params.
		"""
		self.assertEqual(Parser.parse_800_params("SEC_CODES|DEVICE=android| \
						MODEL=sdk|APP_VERSION=1.6.3|RELEASE_MODE=production| \
						OS_VERSION=4-1.6|APP_ID=?"),{'DEVICE':'android', \
						'MODEL':'sdk', 'APP_VERSION':'1.6.3', 'RELEASE_MODE' \
						:'production','OS_VERSION':'4-1.6','APP_ID':'?'})
		# The empty input yields an empty parameter dict.
		self.assertEqual(Parser.parse_800_params(""),{})
    def test_byteCodeGenerator_will_generate_code_for_the_while_loop_that_contain_no_statements(self):
        """
        A while loop with an empty body should emit only the instructions
        that evaluate its condition plus the two loop-control branches.
        """
        lexer = LexerStateMachine('while (x+ (3 - 4 ) * 100 == 200 / 100 * (310 -400) + 120) { } ', self.context)
        parser = Parser(lexer, self.manager)
        self.manager.setParser(parser)
        token = parser.parse(0)
        # 'x' lives at frame offset 4.
        self.byteCodeGenerator.variablesInThisAST['x'] = 4

        self.informationInjector.injectRegisterRequired(token)
        self.byteCodeGenerator.initGeneration()
        byteCodes = token.generateByteCode()
        byteCodes = self.byteCodeGenerator.injectPrologue(byteCodes)
        # Left side of '==': x + (3 - 4) * 100.
        self.assertEqual(self.byteCodeGenerator.loadValue([0, 3]),byteCodes[0])
        self.assertEqual(self.byteCodeGenerator.loadValue([5, 4]),byteCodes[1])
        self.assertEqual(self.byteCodeGenerator.subRegister([0, 0, 5]),byteCodes[2])
        self.assertEqual(self.byteCodeGenerator.loadValue([5, 100]),byteCodes[3])
        self.assertEqual(self.byteCodeGenerator.multiplyRegister([0, 0, 5]),byteCodes[4])
        self.assertEqual(self.byteCodeGenerator.loadRegister([5, 7, 4]),byteCodes[5])
        self.assertEqual(self.byteCodeGenerator.addRegister([0, 5, 0]),byteCodes[6])
        # Right side of '==': 200 / 100 * (310 - 400) + 120.
        self.assertEqual(self.byteCodeGenerator.loadValue([1, 200]),byteCodes[7])
        self.assertEqual(self.byteCodeGenerator.loadValue([5, 100]),byteCodes[8])
        self.assertEqual(self.byteCodeGenerator.divideRegister([1, 1, 5]),byteCodes[9])
        self.assertEqual(self.byteCodeGenerator.loadValue([2, 310]),byteCodes[10])
        self.assertEqual(self.byteCodeGenerator.loadValue([5, 400]),byteCodes[11])
        self.assertEqual(self.byteCodeGenerator.subRegister([5, 2, 5]),byteCodes[12])
        self.assertEqual(self.byteCodeGenerator.multiplyRegister([1, 1, 5]),byteCodes[13])
        self.assertEqual(self.byteCodeGenerator.loadValue([5, 120]),byteCodes[14])
        self.assertEqual(self.byteCodeGenerator.addRegister([5, 1, 5]),byteCodes[15])
        # Compare the two sides and emit the loop-control branches.
        self.assertEqual(self.byteCodeGenerator.compareIfEqual([0, 0, 5]),byteCodes[16])
        self.assertEqual(self.byteCodeGenerator.branchIfTrue([0,1]),byteCodes[17])
        self.assertEqual(self.byteCodeGenerator.branch([0]),byteCodes[18])
Exemple #16
0
def parse_vm_file(file_name):
    """ Translates a .vm file into Hack assembly.

        Dispatches each parsed VM command to the matching CodeWriter routine.
        Once a function declaration is seen, labels/gotos/ifs are prefixed
        with 'funcName:' so they stay unique across functions.
        Input: file_name - the .vm file needed to be translated
        Output: the translated file_name.asm file
    """
    # NOTE(review): clean_file_name is computed but never used here — confirm
    # before removing.
    clean_file_name = file_name.split('/')[-1]
    # func_name/delim scope branching labels to the enclosing VM function.
    func_name = ''
    delim = ''
    Parser.parse(file_name)
    CodeWriter.set_vm_file(file_name)
    for command in Parser.get_commands():
        if command.type == Parser.CommandType.C_ARITHMETIC:
            CodeWriter.write_arithmetic(command.content[0])
        elif command.type == Parser.CommandType.C_PUSH or \
              command.type == Parser.CommandType.C_POP:
            CodeWriter.write_push_pop(command.type, command.content[1], command.content[2])
        elif command.type == Parser.CommandType.C_LABEL:
            CodeWriter.writeLabel(func_name + delim + command.content[1])
        elif command.type == Parser.CommandType.C_GOTO:
            CodeWriter.writeGoto(func_name + delim + command.content[1])
        elif command.type == Parser.CommandType.C_IF:
            CodeWriter.writeIf(func_name + delim + command.content[1])
        elif command.type == Parser.CommandType.C_CALL:
            CodeWriter.writeCall(command.content[1], command.content[2])
        elif command.type == Parser.CommandType.C_RETURN:
            CodeWriter.writeReturn()
        elif command.type == Parser.CommandType.C_FUNCTION:
            # Entering a new function: subsequent labels are scoped to it.
            func_name = command.content[1]
            delim = ':'
            CodeWriter.writeFunction(func_name, command.content[2])
Exemple #17
0
def main():
    """Compile the source file named on the command line.

    Pipeline: lex -> parse -> CST -> AST -> symbol table -> code generation.
    Pass -v/--verbose as the first argument to dump each stage.
    """
    verbosity = False
    args = sys.argv
    if len(args) == 3 and args[1] in ["-v", "--verbose"]:
        verbosity = True
        args.pop(1)
    # Read the whole program; 'with' guarantees the file is closed.
    with open(args[1], 'r') as f:
        program = f.read()
    token_list = Lexer.lex_program(program)
    if verbosity:
        print("Token list:\n", [x["type"] for x in token_list])
    Parser.parse_program(token_list, verbosity)
    cst_root = Tree.Tree({"type": "Program"})
    cst_root.generate_cst(token_list)
    if verbosity:
        print("CST:")
        cst_root.print_tree(1)
        print()
    ast_root = Tree.Tree({"type": "Block"})
    ast_root.generate_ast(cst_root.children[0])
    if verbosity:
        print("AST:")
        ast_root.print_tree(1)
        print()
    symbol_table = Semantics.Scope(ast_root)
    if verbosity:
        symbol_table.print_table(0)
    code = CodeGen.ExecEnv(ast_root, symbol_table)
    print("\nCompilation successful!")
    def test_generateByteCode_will_generate_code_for_do_while_loop_with_statements_inside(self):
        """
        A do-while with three assignments in its body should emit the three
        store sequences, then the condition check and the backward branch.
        """
        lexer = LexerStateMachine('do{x = 1500;\
                                    y = 2500;\
                                    z = 5500;\
                                    }while(x == 321); ', self.context)
        parser = Parser(lexer, self.manager)
        self.manager.setParser(parser)
        token = parser.parse(0)
        # Frame offsets for the three variables.
        self.byteCodeGenerator.variablesInThisAST['x'] = 4
        self.byteCodeGenerator.variablesInThisAST['y'] = 8
        self.byteCodeGenerator.variablesInThisAST['z'] = 12

        self.informationInjector.injectRegisterRequired(token)
        self.byteCodeGenerator.initGeneration()
        byteCodes = token.generateByteCode()
        byteCodes = self.byteCodeGenerator.injectPrologue(byteCodes)
        # Body: each assignment loads the slot, loads the value, stores it back.
        self.assertEqual(self.byteCodeGenerator.loadRegister([0, 7, 4]),byteCodes[0])
        self.assertEqual(self.byteCodeGenerator.loadValue([5, 1500]),byteCodes[1])
        self.assertEqual(self.byteCodeGenerator.storeRegister([5, 7, 4]),byteCodes[2])
        self.assertEqual(self.byteCodeGenerator.loadRegister([0, 7, 8]),byteCodes[3])
        self.assertEqual(self.byteCodeGenerator.loadValue([5, 2500]),byteCodes[4])
        self.assertEqual(self.byteCodeGenerator.storeRegister([5, 7, 8]),byteCodes[5])
        self.assertEqual(self.byteCodeGenerator.loadRegister([0, 7, 12]),byteCodes[6])
        self.assertEqual(self.byteCodeGenerator.loadValue([5, 5500]),byteCodes[7])
        self.assertEqual(self.byteCodeGenerator.storeRegister([5, 7, 12]),byteCodes[8])
        # Condition x == 321: branch out when true, else jump back 14 codes.
        self.assertEqual(self.byteCodeGenerator.loadRegister([0, 7, 4]), byteCodes[9])
        self.assertEqual(self.byteCodeGenerator.loadValue([5, 321]), byteCodes[10])
        self.assertEqual(self.byteCodeGenerator.compareIfEqual([0, 0, 5]), byteCodes[11])
        self.assertEqual(self.byteCodeGenerator.branchIfTrue([0, 1]), byteCodes[12])
        self.assertEqual(self.byteCodeGenerator.branch([-14]), byteCodes[13])
Exemple #19
0
def save_features(features_filename,category_location,technique_name,technique_class,technique_method):
    """Validate the requested feature entry against the existing data and,
    only when the verifier answers "OK", persist it to the XML file.
    Returns the verifier code either way."""
    existing_data = load_features(features_filename)
    verifier_code = vr.verify_write_xml_features(
        existing_data, category_location, technique_name, technique_class, technique_method)
    if verifier_code == "OK":
        pr.write_xml_features(
            features_filename, category_location, technique_name, technique_class, technique_method)
    return verifier_code
    def test_default_context(self):
        """A default context that merely registers the keywords must reject
        both a bare 'if' statement and an 'if' following an expression."""
        manager = ContextManager()
        context = Context(manager)
        defaultContext = DefaultContext(manager)
        flowControlContext = FlowControlContext(manager)
        expressionContext = ExpressionContext(manager)

        manager.addContext('Default', defaultContext)
        manager.addContext('FlowControl', flowControlContext)
        manager.addContext('Expression', expressionContext)
        defaultContext.addKeyword('if')
        defaultContext.addKeyword('while')
        manager.setCurrentContexts([defaultContext])

        def assertParseFails(code):
            # Helper (replaces the duplicated try/fail/except blocks):
            # parsing `code` under the default context must raise SyntaxError.
            lexer = LexerStateMachine(code, context)
            parser = Parser(lexer, manager)
            manager.setParser(parser)
            with self.assertRaises(SyntaxError):
                parser.parseStatement(0)

        assertParseFails('if')
        assertParseFails('123 if')
 def test_parse_will_build_an_AST_for_longer_expression_in_the_brace(self):
     """
     Parse '{ 2 + 3 * 8 / 9 ; }' into:
         {
         |
         +
       /   \
      2     /
          /  \
         *   9
       /  \
      3    8
     :return:
     """
     lexer = LexerStateMachine(' { 2 + 3 * 8 / 9 ; }', self.context)
     parser = Parser(lexer, self.manager)
     self.manager.setParser(parser)
     token = parser.parseStatements(0)
     # Single statement: '+' with 2 on the left and the '/' chain on the right.
     self.assertEqual('{', token[0].id)
     self.assertEqual('+', token[0].data[0].id)
     self.assertEqual(2, token[0].data[0].data[0].data[0])
     # '/' binds 3 * 8 on its left and 9 on its right.
     self.assertEqual('/', token[0].data[0].data[1].id)
     self.assertEqual('*', token[0].data[0].data[1].data[0].id)
     self.assertEqual(3, token[0].data[0].data[1].data[0].data[0].data[0])
     self.assertEqual(8, token[0].data[0].data[1].data[0].data[1].data[0])
     self.assertEqual(9, token[0].data[0].data[1].data[1].data[0])
 def test_parse_will_build_an_ast_for_expressions_in_the_brace(self):
     """
     Parse three ';'-separated expressions inside one brace block:
     {----------------------
         |       |         |
         +       *         /
       /   \    /   \    /   \
      2     3  3     4  5     9
     :return:
     """
     lexer = LexerStateMachine('{ 2 + 3 ; \
                     3 * 4 ; \
                     5 / 9 ; \
                     }', self.context)
     parser = Parser(lexer, self.manager)
     self.manager.setParser(parser)
     token = parser.parseStatements(0)
     # The '{' token holds the three expression trees in source order.
     self.assertEqual('{', token[0].id)
     self.assertEqual('+', token[0].data[0].id)
     self.assertEqual(2, token[0].data[0].data[0].data[0])
     self.assertEqual(3, token[0].data[0].data[1].data[0])
     self.assertEqual('*', token[0].data[1].id)
     self.assertEqual(3, token[0].data[1].data[0].data[0])
     self.assertEqual(4, token[0].data[1].data[1].data[0])
     self.assertEqual('/', token[0].data[2].id)
     self.assertEqual(5, token[0].data[2].data[0].data[0])
     self.assertEqual(9, token[0].data[2].data[1].data[0])
def command_Processer(command):
    """Dispatch a console command to its handler; print usage help for
    anything unrecognized."""
    if command in ('get information', 'get help', 'help'):
        get_help()

    elif command in ('start new process', 'startApp'):
        start_Process()

    elif command == 'parse file':
        file = input(colored.cyan('Enter Name of File: '))
        Parser.parse_file(file)

    elif command in ('config settings', 'config auto start', 'open settings'):
        settings_config.start_process()

    else:
        # Unknown command: echo it in red, then list the valid commands.
        puts(
            colored.red(command) +
            colored.cyan(' is not a valid command, please enter a command from the list below:')
        )

        puts(
            colored.green('Valid Commands:') +
            colored.green('\n1. \"get help\" or \"help\" or \"get information\"') +
            colored.cyan('\n      opens the help menu and provides information on the app') +
            colored.green('\n2. \"start new process\" or \"startApp\"') +
            colored.cyan('\n      displays incoming data from the gps and allows the user to record incoming data') +
            colored.green('\n3. \"config settings\" or \"open settings\" or \"config auto start\"') +
            colored.cyan('\n      allows the user to change whether or not data is recorded automatically when the pi boots up') +
            colored.green('\n4. \"exit\"') +
            colored.cyan('\n      exits out of the Ultimate GPS Tracker')
        )
Exemple #24
0
def plot_convergence_iterations_for_data(parsed_data, exp_num):
    """Plot the average number of iterations before convergence (and its
    standard deviation) by set size, for experiment bucket `exp_num`."""
    convergence_data, _ = Parser.get_convergence_and_distinct_patterns_from_log_v1(parsed_data)

    # Slice each of the four series into 4 experiment buckets of 100 points.
    buckets = [
        [convergence_data[series][i * 100:(i + 1) * 100] for series in range(4)]
        for i in range(4)
    ]
    average_iters_convergence, sigmas = Parser.get_average_recall_ratios(buckets[exp_num])

    # Plotting:
    x = np.asarray([2, 3, 4, 5])
    plt.rcParams.update({'font.size': 25})

    p1 = plt.plot(x, average_iters_convergence, color='b', marker='o', linestyle='--', linewidth=3.0)
    p2 = plt.plot(x, sigmas, color='r', marker='^', linestyle='--', linewidth=3.0)

    plt.ylabel('Average #iterations before convergence')
    plt.xlabel('Set size')
    plt.title('Average #iterations before convergence by set size')
    plt.xticks(x, ('2', '3', '4', '5'))

    plt.legend((p1[0], p2[0]), ('Sync., turnover for every learnt set', 'Standard deviation'))
    plt.grid(True)
    plt.show()
Exemple #25
0
def plot_perfect_recall_rates_for_dg_weightings_no_err_bars(parsed_data, additional_plot_title):
    """Plot the average convergence ratio by DG-weighting for set sizes 2-5.

    The four copy-pasted result/plot line pairs are collapsed into one loop.
    Each result tuple is (x, y_iters, std_iters, y_ratios, std_ratios);
    index 0 is the x-axis and index 3 the convergence ratio that is plotted.
    """
    set_size_buckets = Parser.get_dictionary_list_of_convergence_and_perfect_recall_for_dg_weightings(parsed_data)

    x = range(30)
    set_sizes = (2, 3, 4, 5)
    results = [Parser.get_avg_convergence_for_x_and_set_size(s, set_size_buckets, x)
               for s in set_sizes]

    plt.rcParams.update({'font.size': 25})
    plt.ylabel('Convergence ratio')
    plt.xlabel('Turnover rate')
    plt.title('Average convergence rate by DG-weighting, ' + additional_plot_title)

    # One curve per set size; keep the first line handle of each for the legend.
    handles = [plt.plot(result[0], result[3])[0] for result in results]

    plt.legend(handles, ('2x5', '3x5', '4x5', '5x5'))
    plt.grid(True)
    plt.margins(0.01)

    plt.yticks(np.arange(0, 1.1, .1))

    plt.show()
Exemple #26
0
def tester(testfile):
    """Run every test in the given test file and report failed tests.

    File format, one directive per line:
      =regex=expected_postfix  switch to a new regex (postfix check optional)
      +string                  string must match the current regex
      -string                  string must not match the current regex
    Any other line is counted as skipped.
    """
    skipped_lines = 0
    total_lines = 0
    failed_tests = 0

    matcher = None
    regexstr = None
    # 'with' closes the file even on an early exception (the original leaked it).
    with open(testfile, 'r') as file:
        for line in file:
            total_lines += 1
            arg = line[1:].strip()
            if line[0] == '=':  # switch the regex under test
                (regexstr, expected_postfix) = arg.split('=')

                try:
                    parser = Parser(regexstr)
                    matcher = DfaMatcher(parser.parse())
                except Exception as e:
                    print(str.format("Parsing /{0}/ failed unexpectedly: {1}", regexstr, str(e)))
                    failed_tests += 1
                    continue
                if expected_postfix != '' and parser.postfix != expected_postfix:
                    print(str.format("Parsing regex '{0}' failed: expected postfix '{1}', got '{2}'", regexstr, expected_postfix, parser.postfix))
                    failed_tests += 1
            elif line[0] == '+' or line[0] == '-':  # match the string with the current regex
                expected = line[0] == '+'
                if expected != bool(matcher.match(arg)):
                    print(str.format("Matching '{0}' against /{1}/ caused unexpected result '{2}' instead of '{3}'", arg, regexstr, expected, not expected))
                    failed_tests += 1
            else:
                skipped_lines += 1
    total_tests = total_lines - skipped_lines
    print(str.format("{0}/{1} tests passed.", total_tests - failed_tests, total_tests))
Exemple #27
0
def test_hasVariables():
    """A predicate with an uppercase argument has variables; one with only
    lowercase constants does not."""
    import Parser
    with_variable = Parser._parse_pred("A(X,a,b,c)")
    constants_only = Parser._parse_pred("A(a,d,e)")
    assert with_variable.hasVariables() == True
    assert constants_only.hasVariables() == False
 def test_parseStatement_call_buildScope_when_declaration(self):
     """Parsing the declaration 'int x;' must hand the resulting token to
     ScopeBuilder.buildScope exactly once."""
     lexer = LexerStateMachine('int x;', self.context)
     parser = Parser(lexer, self.contextManager)
     self.contextManager.setParser(parser)
     # Wrap a real ScopeBuilder so behavior is preserved but calls are recorded.
     mockScopeBuilder = MagicMock(wraps=ScopeBuilder())
     parser.scopeBuilder = mockScopeBuilder
     token = parser.parseStatement(0)
     mockScopeBuilder.buildScope.assert_called_once_with(token[0])
Exemple #29
0
def main():
    """Report the most watched movie, the most active user, and genre stats."""
    most_watch_movie_name = Parser.get_most_watch_movie()
    print("most watch movie")
    print(most_watch_movie_name)

    most_active_user = Parser.get_most_active_user()
    print("most active user")
    print(most_active_user)

    getdata.most_watch_genre()
def run_validation(dominios, folds, featureVector, participantesInput, incluirParticipante):
    """Cross-validate SVM hyper-parameters over the given folds.

    For each fold, builds the SVM inputs, runs the SVM, and accumulates
    hits (acertos) and totals per (C, gamma) combination and per SVM.
    Returns a dict mapping each SVM to its best {"C": ..., "gamma": ...}.

    NOTE(review): *dominios* and *participantesInput* are accepted but never
    used in this function — confirm against callers before removing.
    """
    inputs, frequencias, participantes = {}, {}, {}

    # Build feature vectors / SVM input for every fold.
    for fold in folds:
        frequencias[fold] = utils.countAttributeFrequencyIndividual(folds, fold)
        participantes[fold] = parser.descriptionsMeans(folds, fold)
        inputs[fold] = parser.parseSVMInput(folds[fold], featureVector, participantes[fold], frequencias[fold], incluirParticipante)

    acertos, total = {}, {}

    # BUGFIX: the original did `keys = folds.keys(); keys.sort()`, which raises
    # AttributeError on Python 3 (dict views have no .sort()) and the result
    # was never used anyway — removed.
    for fold in folds:
        resultados = svm.run(inputs, fold)

        for combination in resultados:
            if combination not in acertos:
                acertos[combination] = {}
                total[combination] = {}

            for svm1 in resultados[combination][0]:
                if svm1 not in acertos[combination]:
                    acertos[combination][svm1] = 0.0
                    total[combination][svm1] = 0.0

                acertos[combination][svm1] += resultados[combination][0][svm1]
                total[combination][svm1] += resultados[combination][1][svm1]

    combinacoes = {}
    acuracias = {}
    for combination in acertos:
        # Combination keys look like "C,gamma".
        aux = combination.split(",")
        C = float(aux[0])
        gamma = float(aux[1])

        for svm1 in acertos[combination]:
            # Guard against division by zero when a combination saw no samples.
            if total[combination][svm1] == 0:
                acuracia = 0
            else:
                acuracia = acertos[combination][svm1] / total[combination][svm1]
            if svm1 not in combinacoes:
                acuracias[svm1] = acuracia
                combinacoes[svm1] = {}
                combinacoes[svm1]["C"] = C
                combinacoes[svm1]["gamma"] = gamma
            elif acuracia > acuracias[svm1]:
                acuracias[svm1] = acuracia
                combinacoes[svm1]["C"] = C
                combinacoes[svm1]["gamma"] = gamma
    return combinacoes
Exemple #31
0
 l = logging.getLogger('locales.' + loc)
 try:
     lst = open(
         Paths.get_path('browser', loc, 'searchplugins/list.txt'),
         'r')
 except IOError:
     l.error("Locale " + loc + " doesn't have search plugins")
     details[Paths.get_path('browser', loc,
                            'searchplugins/list.txt')] = {
                                'error': 'not found'
                            }
     continue
 sets[loc] = {'list': []}
 regprop = Paths.get_path(
     'browser', loc, 'chrome/browser-region/region.properties')
 p = Parser.getParser(regprop)
 p.read(regprop)
 orders = {}
 for key, val in p:
     m = re.match('browser.search.order.([1-9])', key)
     if m:
         orders[val.strip()] = int(m.group(1))
     elif key == 'browser.search.defaultenginename':
         sets[loc]['default'] = val.strip()
 sets[loc]['orders'] = orders
 for fn in lst:
     name = fn.strip()
     if len(name) == 0:
         continue
     leaf = 'searchplugins/' + name + '.xml'
     _path = Paths.get_path('browser', 'en-US', leaf)
Exemple #32
0
def secondPass():
  """Second assembler pass: delegate variable-symbol fixing to the Parser module."""
  Parser.fixVariables()
Exemple #33
0
def main():
    """Translate the VM file or directory of VM files at sys.argv[1] to assembly.

    Collects the .vm files (either one file or every .vm file in a directory),
    changes the working directory to the project location, then feeds each
    command to the module-level `translator`.
    """

    ################################################################################
    # finding the .vm files on the given filepath

    # Filepath of VM code
    filepath = sys.argv[1]

    # list of project files
    vmFiles = []

    # name of VM project folder
    projectName = ""

    # if the provided filepath refers to a directory
    if os.path.isdir(filepath):

        # for each item in the directory,
        for item in os.scandir(filepath):

            # if the item is a file
            if item.is_file():

                # see if the filename ends in .vm
                file = item.name
                i = file.find(".vm")
                if i > 0:
                    # if it is, append the file to the list of project files
                    vmFiles.append(item.name)

        # change the current working directory to the project file and store
        # the name of the directory as the projectName
        os.chdir(filepath)
        projectName = os.path.basename(filepath)

    # if the provided path refers to a file
    else:
        # change the current working directory to the parent directory
        os.chdir(os.path.dirname(filepath))

        # append the name of the vm file to the vm file list
        vmName = os.path.basename(filepath)
        vmFiles.append(vmName)

        # make the project name the name of the vm file and remove ".vm"
        # extention
        projectName = vmName[:len(vmName) - 3]

    # store the assembly file name
    # NOTE(review): asmFile is computed but never passed anywhere visible here;
    # presumably `translator` opens the output elsewhere — confirm.
    asmFile = projectName + ".asm"

    ################################################################################
    # actual translating of VM code

    # for each file in the project
    for file in vmFiles:

        # write code comments for debugging that say what file we are opening
        translator.change_vmFile(file)

        # initialize a Parser object to parse the VM code
        vmFile = Parser(file)

        # while there is more commands
        # NOTE(review): hasMoreCommands is read as an attribute (no call) —
        # confirm it is a property/bool on Parser.
        while vmFile.hasMoreCommands:

            # advance to the next command
            vmFile.advance()

            # if we are not out of commands
            if vmFile.hasMoreCommands is True:

                # returns the command type of the current command
                commandCode = vmFile.commandType()
                cmdStr = commandList[commandCode]

                # if the command type is arithmetic, finish parsing and write
                # out the command to the assembly file
                if cmdStr == "arithmetic":
                    cmd = vmFile.arg1()
                    translator.writeArithmetic(cmd)

                # if the command type is push/pop, finish parsing and write
                # out the command to the assembly file
                elif cmdStr == "push" or cmdStr == "pop":
                    # NOTE(review): the numeric command-type code (not cmdStr)
                    # is passed to writePushPop — confirm that is the expected API.
                    cmd = vmFile.commandType()
                    seg = vmFile.arg1()
                    ind = vmFile.arg2()
                    translator.writePushPop(cmd, seg, ind)

                # if label, finish parsing and write
                # out the command to the assembly file
                elif cmdStr == "label":
                    label = vmFile.arg1()
                    translator.writeLabel(label)

                # if goto, finish parsing and write
                # out the command to the assembly file
                elif cmdStr == "goto":
                    label = vmFile.arg1()
                    translator.writeGoto(label)

                # if if-goto, finish parsing and write
                # out the command to the assembly file
                elif cmdStr == "if-goto":
                    label = vmFile.arg1()
                    translator.writeIf(label)

                # if function, finish parsing and write
                # out the command to the assembly file
                elif cmdStr == "function":
                    functionName = vmFile.arg1()
                    numLocals = vmFile.arg2()
                    translator.writeFunction(functionName, numLocals)

                # if return, finish parsing and write
                # out the command to the assembly file
                elif cmdStr == "return":
                    translator.writeReturn()

                # if call, finish parsing and write
                # out the command to the assembly file
                elif cmdStr == "call":
                    functionName = vmFile.arg1()
                    numArgs = vmFile.arg2()
                    translator.writeCall(functionName, numArgs)

        # close the current VM file
        vmFile.close()

    # close the assembly file
    translator.close()
Exemple #34
0
 def __init__(self):
     """Create the helper objects: a Parser and the shared common utilities."""
     self.parser = Parser.Parser()
     self.cm = pCommon.common()
Exemple #35
0
 def __init__(self):
     """Start with an empty clause list and a fresh Parser instance."""
     self.clauses = []
     self.parser = Parser()
def getDay():
    """Prompt for a date on stdin and return it parsed by Parser.parseDate."""
    print("Enter a date (YYYY-MM-DD):")
    raw_line = sys.stdin.readline()
    return Parser.parseDate(raw_line)
Exemple #37
0
                fileList.append(os.path.join(os.getcwd(), file))

        outputFileName = userInput
        os.chdir('..')

    else:
        if isValidFile(userInput):
            fileList.append(userInput)
            outputFileName = userInput[0:-3]

else:
    print('Your input is invalid. Try again')
    sys.exit()

#numOfLine = 0
ps = Parser.Parser()
cw = CodeWriter.CodeWriter()
outputFile = open(outputFileName + '.asm', 'a')

#Add bootstrap codes first
bootStrapCode = cw.writeBootstrapCode()
for codes in bootStrapCode:
    outputFile.write(codes + '\n')

for file in fileList:
    currentFileContent = open(file).readlines()
    currentFileName = getCurrentFileName(file)
    cw.fileName = currentFileName

    for line in currentFileContent:
        commandType, command = ps.parse(line)
Exemple #38
0
def main(file_name='/Users/wen/github/Nand2tetris/nand2tetris/projects/06/pong/Pong.asm'):
    """Assemble the Hack .asm file *file_name* into a .hack file alongside it.

    Pass 1 records every (LABEL) in the symbol table with the address of the
    instruction that follows it; pass 2 emits one 16-bit word per instruction.

    The hard-coded path is now just a backward-compatible default, and the
    output file is closed via a context manager (the original leaked it).
    """
    new_file_name = file_name[:file_name.rfind('.')] + '.hack'

    parser1 = Parser(file_name)  # pass 1: label collection
    parser2 = Parser(file_name)  # pass 2: code generation
    code = Code()
    table = SymbolTable()
    PC = 0
    begin = 16  # first RAM address available for new variables

    # For each label declaration(LABEL) that appears in the source code, add the pair<LABEL, n> to the symbol table.
    while parser1.advance():
        if parser1.command_type() == 'L_COMMAND':
            table.symbol_table[parser1.symbol()] = str(PC)
        else:
            PC += 1

    # March again through the source code, and process each line.
    with open(new_file_name, 'w') as new_file:
        while parser2.advance():
            if parser2.command_type() == 'L_COMMAND':
                continue
            symbol = parser2.symbol()
            if symbol:
                # A-command: resolve symbolic addresses through the table,
                # allocating a new variable slot on first sight.
                if not symbol.isdigit():
                    if symbol in table.symbol_table:
                        symbol = table.symbol_table[symbol]
                    else:
                        table.symbol_table[symbol] = str(begin)
                        symbol = str(begin)
                        begin += 1
                line = str('{:016b}'.format(int(symbol))) + '\n'
                new_file.write(line)
            else:
                # C-command: 111 + comp + dest + jump bit fields.
                line = '111' + code.comp(parser2.comp()) + code.dest(
                    parser2.dest()) + code.jump(parser2.jump()) + '\n'
                new_file.write(line)
Exemple #39
0
 def __init__(self):
     """Set up the wlacz.tv service helpers: parser, common utils, navigation, errors."""
     log.info("Loading wlacz.tv")
     self.parser = Parser.Parser()
     self.common = pCommon.common()
     self.navigation = Navigation.RecordNav()
     self.exception = Errors.Exception()
                        #print(f"Error: {match2}")
                        return
        # Catch all for errors
        elif errors.search(match):
            #print(f"Error: {match}")
            return
    return tokenized_list

# Opening the file: read Test.txt, tokenize it, and hand the tokens to Parser.
try:
    with open("Test.txt") as file:
        characters = file.read()
        # `output` is only consumed by the commented-out debug dump below.
        output = re.split(r'\n', characters)

        #for x in range(len(output)):
            #if output[x]:
                #print(fr"Input: {output[x]}")

except FileNotFoundError:
    print("File was not found")
    sys.exit()

except IndexError:
    print("Index Error")
    sys.exit()

# Puts the tokenized list into the token_list variable
token_list = tokenizer(characters)

Parser.main(token_list)
Exemple #41
0
import CSVDBErrors

test_dir = "./csvdb_tests"

tests = os.listdir(test_dir)
num_total = len(tests)
num_passed = 0

for test in tests:
    print("Running test " + test + "...")
    curr_test_dir = os.path.join(test_dir, test)

    is_exception = False
    with open(os.path.join(curr_test_dir, 'test.sql'), 'r') as f:
        command_text = f.read()
        command_parser = Parser.Parser(command_text)
        while True:
            try:
                command_node = command_parser.parse_command()
            except CSVDBErrors.CSVDBSyntaxError as e:
                print("Syntax error in test " + test + ": " + str(e))
                is_exception = True
                break
            if command_node is None:
                break

            # try:
            command_node.execute(curr_test_dir)
            # except Exception as e:
            #     print("Runtime error in test " + test + ": " + str(e))
            #     is_exception = True
Exemple #42
0
# Smoke-test Parser.parse_test_file on a sample event file and dump each event.
import Parser
test_json = "data/a2_b21/a31.json"
t_list = Parser.parse_test_file(test_json)
for t in t_list:
    print(t['event_type'], t)
Exemple #43
0
import optparse
import Parser
import Serializer

if __name__ == '__main__':
    # Positional args: input document path and XML output path.
    p = optparse.OptionParser()
    document, xml_output = p.parse_args()[1]

    # Read the input document; `with` closes the handle even on error
    # (the original used manual open/close).
    with open(document, "r") as f:
        document = f.read()

    # Parse the document into a model, then serialize the model to XML.
    p = Parser.Parser()
    m = p.parse(document)

    s = Serializer.Serializer()
    xml = s.serialize(m)

    with open(xml_output, "w") as f:
        f.write(xml)
Exemple #44
0
def main():
    """Interactively test input strings against a DFA loaded from a file.

    Prompts until a DFA description file opens successfully, parses it, then
    repeatedly reads strings and reports acceptance until the user types
    "quit".
    """
    # get filename to open; retry until the file opens
    running = True
    while (running):
        uInput = input("Please enter filename:")
        try:
            file = open(uInput)
            running = False
        except OSError:  # was a bare except:, which also swallowed KeyboardInterrupt
            print("Invalid file")

    # parse file and get alphabet, nodes, and final states
    fileParser = Parser(file)
    fileParser.parse()

    # parse file and store states
    nodes = fileParser.getNodes()
    finalStates = fileParser.getFinalStates()
    alphabet = fileParser.getAlphabet()

    # get string to check against
    running = True
    while (running):
        uInput = input("Please enter string or quit: ")
        if (uInput == "quit"):
            running = False
        else:
            # we always begin at 0
            # NOTE(review): the last letter is duplicated on purpose so the
            # final-state check below runs after the real last transition —
            # an empty input string would raise IndexError here; confirm
            # whether that is acceptable for this tool.
            testCase = uInput
            testCase = testCase + testCase[-1]
            currentNode = 0
            error = False
            counter = 0
            for letter in testCase:
                # check if letter is in alphabet
                if letter in alphabet:
                    # check if letter belongs to current node
                    for node in nodes:
                        if node.mValue == currentNode and node.mLetter == letter:
                            print("{} in state {}".format(
                                node.mLetter, node.mValue))
                            # if letter is last letter
                            if (counter == len(testCase) - 1):
                                # and it is in a final state then string is accepted
                                if (currentNode in finalStates):
                                    break
                                else:
                                    print(
                                        "Error last letter not in final state")
                                    error = True
                                    break
                            # advance cursor
                            currentNode = node.mGoto
                            break
                # if letter not in language set error flag and quit
                else:
                    print("Error letter not in alphabet")
                    error = True
                    break
                counter += 1
            if (error):
                print("Not valid!")
            else:
                print("String accepted")
Exemple #45
0
# João Paulo de Souza    - 0035329                                     #
# Leandro Souza Pinheiro - 0015137                                     #
# Trabalho Compiladores  - Front-end do compilador para a linguagem P  #
# Data: 31/10/2019                                                     #
########################################################################

#Importacoes
import Scanner as scanner
import Type as type
import Parser as parser
import sys as s

#MAIN
if __name__ == "__main__":
    arg = s.argv  # recebe os parametros passados por linha de comando
    parser = parser.Parser()  #chama o parser
    if (
            len(arg) == 2
    ):  # caso a quantidade de argumentos seja 2 indica que nao precisa printar a tabela de simbolos
        parser.interpreter(
            arg[1]
        )  # chama metodo para realizar interpretacao do arquivo passado
        #arquivo a ser interpretado se encontra na posicao 1 do vetor de argumentos
    elif (
            len(arg) == 4
    ):  #caso a quantidade de argumentos seja 4 indica que precisa printar a tabela de simbolos no arquivo
        if (
                arg[2] == '-t'
        ):  # obrigatoriamente na posicao 2 dos argumentos necessariamente deve ser -t
            parser.interpreter(
                arg[1]
def Train():
    """Train the configured discourse-parsing model, evaluating every epoch.

    Loads word embeddings and (coarse or fine) training data per Flags,
    builds one of the three model variants, trains with Adam via an
    arc-eager parser, saves a checkpoint per epoch, then runs Dev/Test.

    NOTE(review): depends on module-level Flags, model classes, pkl/np/tf
    and Parser imports defined elsewhere in the file.
    """
    # NOTE(review): the message says ./data but the file actually loaded is
    # ./tmp_data/vec.npy — confirm which path is intended.
    print('reading word embedding from ./data/vec.npy')
    word_embedding = np.load('./tmp_data/vec.npy')
    print('reading training data')
    if Flags.data_mode_setting != 'LAS_Coarse':
        train_data = pkl.load(open('./tmp_data/fine_train.pkl', 'rb'))
    else:
        train_data = pkl.load(open('./tmp_data/coarse_train.pkl', 'rb'))
    print('train discourse numbers: ', len(train_data))
    with tf.Graph().as_default():
        # config GPU mode of TensorFlow
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        with sess.as_default():
            initializer = tf.truncated_normal_initializer(
                mean=0.0, stddev=Flags.stddev_setting)
            print('build model begin')
            with tf.variable_scope("model",
                                   reuse=None,
                                   initializer=initializer):
                if Flags.neural_model_setting == 'Basic_model':
                    m = Basic_model(is_training=True,
                                    word_embedding=word_embedding)
                elif Flags.neural_model_setting == 'Basic_model_ex':
                    m = Basic_model_ex(is_training=True,
                                       word_embedding=word_embedding)
                else:
                    m = Refine_mem_model(is_training=True,
                                         word_embedding=word_embedding)
            print('build model over')
            global_step = tf.Variable(0, name="global_step", trainable=False)
            # set learning rate
            learning_rate = Flags.learning_rate
            # TODO try other Optimizer
            optimizer = tf.train.AdamOptimizer(learning_rate)
            train_op = optimizer.minimize(m.en_loss, global_step=global_step)
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(max_to_keep=10)
            arc_eager_parser = Parser.Arc_Eager_Parser()
            for one_epoch in range(Flags.epoch_nums):
                correct_num, all_num = 0.0, 0.0
                time_str = datetime.datetime.now().isoformat()
                print(str(time_str) + ': epoch ' + str(one_epoch) + ' starts')
                # shuffle the discourse order each epoch
                temp_order = list(range(len(train_data)))
                random.shuffle(temp_order)
                discourse_count = 0
                for discourse_index in temp_order:
                    train_discourse = train_data[discourse_index]
                    print('deal with discourse ' + str(discourse_count))
                    discourse_count += 1
                    correct, all = arc_eager_parser.train(
                        train_discourse, sess, m, train_op)
                    correct_num += correct
                    all_num += all
                print('acc', correct_num / all_num)
                print('##################saving model#######################')
                path = saver.save(
                    sess,
                    Flags.model_save_path + 'ATT_GRU_model-' + str(one_epoch))
                time_str = datetime.datetime.now().isoformat()
                tempstr = "{}: have saved model to ".format(time_str) + path
                print(tempstr)
                Dev(one_epoch)
                Test(one_epoch)
Exemple #47
0
 def __init__(self):
     """Create the service helpers: a Parser, the channel list and the player."""
     self.parser = Parser.Parser()
     self.channel = Channels()
     self.player = Player()
Exemple #48
0
from Board import Board
import Parser
import sys

# Load a sudoku board from the file named on the command line and print it.
if len(sys.argv) < 2:
    print(
        "Needs reference to file containing sudoku board as an argument.\nExiting..."
    )
    sys.exit()
else:
    print("Using sudoku from file %s" % sys.argv[1])
    # `with` guarantees the board file is closed (the original leaked it).
    with open(sys.argv[1], "r") as file:
        board = Board()
        Parser.fill_board(file, board)
    print(board)
Exemple #49
0
def main(protein_fasta_open_file,
         list_codon_usage_open_files,
         output_destination,
         thresh=0.05,
         restriction_enzymes="",
         run_from_server=True,
         protein_file_is_string=False):
    """Codon-optimize a protein against one or more organisms' codon tables.

    Builds a DNA sequence for the protein using the supplied Kazusa codon
    usage tables, verifies it translates back to the input, and (optionally)
    retries up to 10000 times to avoid the given restriction enzyme sites,
    returning the least-cut sequence otherwise.

    Returns a FASTA string when run_from_server is True; otherwise writes the
    record to output_destination and returns a status message.

    NOTE(review): depends on module-level Parser, AminoAcid, Calculator,
    Biopython (Seq/SeqRecord/SeqIO/ProtParam/RestrictionBatch), numpy and the
    COMBO_RSTRICTION constants defined elsewhere in the file. User-facing
    strings contain typos ("Ouput", "Sucsessful") left untouched to preserve
    behavior.
    """
    # parse protein
    if protein_file_is_string:
        sequence = Seq(protein_fasta_open_file)
    else:
        record = Parser.parse_fasta_file(protein_fasta_open_file)
        name, id, sequence = record.name, record.id, record.seq
    creatures = {}
    # parse table
    if len(list_codon_usage_open_files) == 0:
        raise Exception("Error: Empty codon table filnames")
    # parses organism files , assuming they are already open

    for fname, open_file in list_codon_usage_open_files:
        creature_name = fname.split('.')[0]
        codon_usage_dict, codon_to_protein_dict, AA_list = Parser.parse_kazusa_codon_usage_table(
            open_file)
        creatures[
            creature_name] = codon_usage_dict, codon_to_protein_dict, AA_list

    # creates AA
    Amino_Acids_obj_list = []
    AA_LIST = creatures[creature_name][2]
    codon_to_protein_dict = creatures[creature_name][1]
    for aa in AA_LIST:
        AA = AminoAcid.AminoAcid(aa, codon_to_protein_dict)
        Amino_Acids_obj_list.append(AA)
    for creature_name, creature_tuple in creatures.items():
        codon_usage_dict, codon_to_protein_dict, AA_list = creature_tuple
        for AA in Amino_Acids_obj_list:
            AA.add_organism_codons(codon_usage_dict, creature_name)

    prot_analisys = ProtParam.ProteinAnalysis(sequence._data)
    aa_count_dict = prot_analisys.count_amino_acids()

    # replaces aa with codons from codon pool
    ouput_protein_list = Calculator.compute_and_Switch(Amino_Acids_obj_list,
                                                       sequence, aa_count_dict,
                                                       thresh)
    final_sequence = "".join(ouput_protein_list)
    final_sequence = final_sequence.replace("U", "T")
    # analyse final sequance
    if len(final_sequence) != len(sequence) * 3:
        raise Exception(
            "final sequance length does not match input sequence length")
    # output_file_name = os.path.join(output_destination, "Ouput.fasta")
    record = SeqRecord.SeqRecord(Seq(final_sequence))
    if record.translate().seq != sequence:
        raise Exception(
            "error- resulting DNA does not translate back to protein")

    # restriction enzymes- verifies they do not cut the sequence. if they do, pick the least cut sequence
    if restriction_enzymes != "":
        restriction_enzymes_list = restriction_enzymes.replace(
            ",", " ").replace('\n', ' ').replace("\t", " ").split()
        sys.stdout.write(str(restriction_enzymes_list))
        batch = RestrictionBatch(restriction_enzymes_list)
        num_cutting = len(
            check_restriction(Seq(final_sequence, generic_dna), batch))

        # CHECK FOR SpeI-XbaI
        if COMBO_RSTRICTION in final_sequence or COMP_COMBO_RSTRICTION in final_sequence:
            num_cutting += 1
        best_num_cutting = np.inf
        best_sequ = final_sequence
        iterations = 10000
        no_enzymes_cut = num_cutting == 0
        # if the original sequence had a restriction site, repeat the sequence building 100 times , or until
        # a non- cut sequence is found
        while iterations > 0 and num_cutting > 0:
            ouput_protein_list = Calculator.compute_and_Switch(
                Amino_Acids_obj_list, sequence, aa_count_dict, thresh)
            final_sequence = "".join(ouput_protein_list)
            final_sequence = final_sequence.replace("U", "T")
            # analyse final sequance
            if len(final_sequence) != len(sequence) * 3:
                raise Exception(
                    "final sequance length does not match input sequence length"
                )
            # output_file_name = os.path.join(output_destination, "Ouput.fasta")
            record = SeqRecord.SeqRecord(Seq(final_sequence, generic_dna))
            if record.translate().seq != sequence:
                raise Exception(
                    "error- resulting DNA does not translate back to protein")
            # if achieved non cutting sequence, save and return
            num_cutting = len(
                check_restriction(Seq(final_sequence, generic_dna), batch))
            # CHECK FOR SpeI-XbaI
            if COMBO_RSTRICTION in final_sequence or COMP_COMBO_RSTRICTION in final_sequence:
                num_cutting += 1
            if num_cutting == 0:
                if run_from_server:
                    return record.format("fasta")
                else:
                    check_restriction(Seq(final_sequence, generic_dna),
                                      batch,
                                      to_print=True)
                    SeqIO.write(record, output_destination, "fasta")
                    return "Output Sucsessful"
            best_num_cutting = min(best_num_cutting, num_cutting)
            if best_num_cutting == num_cutting:
                best_sequ = final_sequence

            iterations -= 1
        # return best sequence, as in one that is cut by the least amount of restriction enzymes
        if best_num_cutting > 0:
            cutting = check_restriction(Seq(best_sequ, generic_dna),
                                        batch,
                                        to_print=True)
            record = SeqRecord.SeqRecord(Seq(best_sequ, generic_dna))
            if run_from_server:
                return record.format("fasta")
            SeqIO.write(record, output_destination, "fasta")
            return "The enzymes the cut the sequence are:" + str(
                cutting) + "\n Output printed to specified location."

    SeqIO.write(record, output_destination, "fasta")
    if run_from_server:
        return record.format("fasta")
    return "ouput sucsessful"
Exemple #50
0
class Assembler(object):
    """Two-pass Hack assembler: reads a .asm file and emits a .hack file.

    Pass 1 records label addresses in the symbol table; pass 2 translates
    every A/C command into a 16-bit binary word.
    """

    ##########################################
    #Constructor

    def __init__(self, fileName):
        """Validate the .asm file name and set up the parser, code table and symbol table."""
        index = fileName.find('.asm')
        if (index < 1):
            raise RuntimeError("error, cannot use the filename: " + fileName)

        self.inputFileName = fileName
        self.outputFileName = self.inputFileName[:index] + '.hack'

        self.parser = Parser(self.inputFileName)

        self.code = Code()
        self.st = SymbolTable()

##########################################
#public methods

    def assemble(self):
        '''Does the assembly and creates the file of machine commands,
           returning the name of that file '''
        self.__firstPass__()
        return self.__output__(self.__secondPass__())

##########################################
#private/local methods

    def __output__(self, codeList):
        ''' Write the machine-code codeList into a file and return the filename'''

        file = open(self.outputFileName, "w")
        file.write("\n".join(codeList))
        file.close()
        return self.outputFileName

    def __firstPass__(self):
        ''' Passes over the file contents to populate the symbol table'''

        dictOfLables = self.parser.processLabels()  #clearly name object

        for key in dictOfLables:  # process labels
            if not self.st.contains(key):  # if label isn't in symbol table
                self.st.addEntry(
                    key, dictOfLables[key])  #     add it to symbol table

    def __secondPass__(self):
        ''' Manage the translation to machine code, returning a list of machine instructions'''

        machineCode = []

        command = self.parser.advance(
        )  # progress through each command in file
        while (command):  # while there is

            # parse each command based on given logic
            if (self.parser.commandType(command) == self.parser.A_COMMAND):
                bitString = self.__assembleA__(command)
            elif (self.parser.commandType(command) == self.parser.C_COMMAND):
                bitString = self.__assembleC__(command)
            else:
                symStr = self.parser.symbol(command)
                raise RuntimeError(
                    'There should be no labels on second pass, errant symbol is '
                    + symStr)

            machineCode.append(
                bitString)  # add each computed bit string to list object
            command = self.parser.advance()  # increment/go to next command

        return machineCode  # return all of the bit strings

    def __assembleC__(self, command):
        ''' Do the mechanical work to translate a C_COMMAND, returns a string representation
            of a 16-bit binary word.'''

        jumpValue = self.code.jump(
            self.parser.jump(command))  #parse jump command, do jump lookup
        mRegister = self.code.dest(
            self.parser.dest(command))  #parse dest command, do dest lookup
        aluValue = self.code.comp(
            self.parser.comp(command))  #parse comp command, do comp lookup

        return '111' + aluValue + mRegister + jumpValue  # put all parst of command together

    def __assembleA__(self, command):
        '''
         Do the mechanical work to translate an A_COMMAND, returns a string representation
            of a 16-bit binary word.
        '''

        command = self.parser.symbol(command)

        if self.st.contains(command):
            dictValue = self.st.getAddress(
                command)  # if command is in symbol table
            return '0' + "{0:015b}".format(
                int(dictValue))  #    look up value, turn it to binary
        elif command.isdigit():  # if its a digit
            return '0' + "{0:015b}".format(
                int(command))  #    convert to binary
        else:  # otherwise, add the entry to symbol table
            self.st.addEntry(command, self.st.getNextVariableAddress()
                             )  #    with the next available memory address
            return '0' + "{0:015b}".format(self.st.getAddress(
                command))  #    format that newly generated address to binary
Exemple #51
0
import scanner
import codegen
import Parser
import sys
import os

# Compile the expression given on the command line and run the generated code.
if __name__ == '__main__':
    if len(sys.argv) == 1:
        print("No argument")
        sys.exit()
    expression = sys.argv[1]
    scanned_tokens = scanner.scan(expression)
    rpn_string = ''.join(Parser.expr(scanned_tokens))
    codegen.gen(rpn_string)
    os.system("python3 ./a.out")
 def translate(self, inputfile, code_writer):
     """Translate every VM command in *inputfile*, then emit the end loop."""
     parser = Parser.Parser(inputfile)
     while parser.has_more_commands():
         parser.advance()
         self._write_code(parser, code_writer)
     code_writer.write_endloop()
Exemple #53
0
def parse(tokens, varss, vectors, semi):
    """Construct a Parser over *tokens*, *varss* and *semi*, then run it.

    NOTE(review): *vectors* is accepted but never used; it is kept so
    existing callers remain valid.
    """
    Parser.Parser(tokens, varss, semi).run()
Exemple #54
0
    vars = {
        'distance_table': file,
        'group_file': params['group_file'],
        'pdf_file': pdf_file,
        'distance_name': distance_name
    }
    r_job.format(vars)
    r_job.write(R_file)
    r_job.run()
    image_trans(pdf_file, png_file)


if __name__ == '__main__':
    params = read_params(sys.argv)
    mkdir(params['out_dir'])
    r_job = rp.Rparser()
    if params["two_legend"]:
        if params['with_boxplot']:
            r_job.open(this_script_path +
                       '/../src/template/04_beta_pcoa_two.Rtp')
        else:
            r_job.open(this_script_path +
                       '/../src/template/04_beta_pcoa_two.Rtp')
    else:
        if params['with_boxplot']:
            r_job.open(this_script_path +
                       '/../src/template/04_beta_pcoa_with_boxplot.Rtp')
        else:
            r_job.open(this_script_path + '/../src/template/04_beta_pcoa.Rtp')

    for name in ['weighted_unifrac', 'unweighted_unifrac']:
Exemple #55
0
def firstPass():
  """Assembler pass one: strip whitespace from the source, then resolve labels."""
  for step in (Parser.removeWhiteSpace, Parser.fixLabels):
    step()
Exemple #56
0
    #    ]
    #ruleSet = [
    #    Rule('S', ['i', 'S', 'e', 'S'], 1),
    #    Rule('S', ['i', 'S'], 2),
    #    Rule('S', ['a'], 3),
    #    ]
    t, nt = siftTokens(ruleSet)

    lr1 = items(ruleSet, t, nt)
    print 'LR1 Items'
    for i in range(len(lr1)):
        print 'state %d' % i
        for j in lr1[i]:
            print '        ', j

    sd, gd = generateStateTable(lr1, ruleSet, t)
    print
    print
    printStateTable(lr1, t, nt, sd, gd)

    import Parser
    p = Parser.ListTokenSource([
        Parser.Token('id'),
        Parser.Token('+'),
        Parser.Token('id'),
        Parser.Token('*'),
        Parser.Token('id')
    ])
    r = Parser.parse(sd, gd, p, ruleSet)
    print r
Exemple #57
0
def analyze(infiles):
    """Feed each input file to the Parser in turn (side effects only)."""
    for source in infiles:
        Parser(source)
Exemple #58
0
                        action="store_true",
                        help="Do NOT prepend statistic names to output.")
    parser.add_argument(
        "stats",
        type=str,
        nargs="+",
        help="One or more statistics to calculate. Currently may be any of: " +
        ", ".join(statFuncs.keys()) + ". " +
        "Some statistics accept numerical arguments.  These should be appended as "
        + "a colon-delimited list to the statistic name.")

    # Parse arguments
    args = parser.parse_args(argv[1:])

    # Parse graph file
    graphs = Parser.readFile(args.graphfile)

    # Calculate and display stats:
    for stat in args.stats:
        statName = stat.split(':')[0]
        if statName in statFuncs.keys():
            if not args.n:
                print statName,
        else:
            raise Exception("Unsupported statistic {}".format(stat))
    if not args.n:
        print

    for i, graph in enumerate(graphs):
        for stat in args.stats:
            a = stat.split(':')
def mainModule():
    """Top-level driver for one sampling/scraping test run.

    Prompts for a test ID, fetches the candidate site list, randomly
    designates some sites as "red herrings", scrapes each site's internal
    links (replacing dead/link-free sites with fresh random ones), records
    everything in the database, then runs the parse / data-manipulation /
    presentation pipeline once and exits.

    NOTE(review): most SQL below is built by string concatenation —
    injection-prone; the parameterized style used for the INSERT would be
    safer throughout.
    """

    ######################################################################################
    #  System Init
    ######################################################################################

    # NOTE(review): 'true'/'false' strings stand in for booleans here;
    # `testing` and `status` are assigned but never read in this function.
    testing = 'false'
    print("This is the Main Module")
    status = 'false'
    controlFlag = 'true'
    configID = 1
    testID = None  # set by userInput() below

    random.seed()
    internalLinks = []

    # Project DB wrapper — presumably opens the backing SQL store; confirm API.
    connection = GeneralProcesses.sql()

    ######################################################################################
    #            Main program loop
    ######################################################################################

    while (controlFlag == 'true'):
        # Keep prompting until a usable test ID is supplied.
        while not testID:
            testID = userInput(connection, configID)
        cue = websiteAquisition(connection, testID)  # candidate site numbers

        # get configuraton details for sample size
        query = "SELECT * FROM samples WHERE configID = " + str(configID)
        result = connection.getOne(query)
        websiteSampleSize = int(result[0])
        # NOTE(review): webpageSampleSize is read but never used below.
        webpageSampleSize = int(result[1])
        redherringSize = int(result[2])
        # choose the redherrings
        redHerrings = random.sample(cue, redherringSize)
        print(redHerrings)
        for siteNo in cue:
            internalLinks = []
            query = "SELECT URL FROM websiteList WHERE siteNo = '" + str(
                siteNo) + "'"
            result = connection.getOne(query)
            URL = result[0]
            print("the siteURL is: ", URL)
            # get a siteID for the new websiteSample
            siteID = connection.getID('websiteSamples')
            query = """INSERT INTO websiteSamples (siteID,URL,testID) VALUES (:siteID,:URL,:testID)"""
            connection.query(query, (str(siteID), URL, str(testID)))
            # Retry until the scraper yields internal links; a site that
            # yields none is purged and replaced by a fresh random site.
            while not internalLinks:
                internalLinks = WebScraper.webScraper(connection, testID, URL,
                                                      siteID)
                if not internalLinks:
                    print("no internal Links")
                    # no internal links maybe 404 error or no links either way need a new website
                    # pop siteNo from cue
                    # drop the database entries for this site
                    query = """SELECT pageID FROM webpages WHERE siteID = """ + str(
                        siteID)
                    pageID = connection.getOne(query)
                    query = """DELETE FROM webpages WHERE siteID = """ + str(
                        siteID)
                    connection.getOne(query)

                    if pageID:
                        # Cascade-delete rows tied to the dead site's page.
                        # NOTE(review): getOne() is used to execute DELETEs —
                        # confirm the wrapper supports that.
                        query = """DELETE FROM display  WHERE pgID = """ + str(
                            pageID[0])
                        connection.getOne(query)
                        query = """DELETE FROM tags  WHERE pageID = """ + str(
                            pageID[0])
                        connection.getOne(query)
                        query = """DELETE FROM content  WHERE pageID = """ + str(
                            pageID[0])
                        connection.getOne(query)
                    # Draw a replacement site number not already in the cue.
                    query = """SELECT COUNT(*) FROM websiteList"""
                    result = connection.getOne(query)
                    maxSample = int(result[0])
                    newSiteNo = random.randint(1, maxSample)
                    while newSiteNo in cue:
                        newSiteNo = random.randint(1, maxSample)
                    # A replaced red herring stays a red herring.
                    if siteNo in redHerrings:
                        redHerrings.remove(siteNo)
                        redHerrings.append(newSiteNo)
                    query = "SELECT URL FROM websiteList WHERE siteNo = '" + str(
                        newSiteNo) + "'"
                    result = connection.getOne(query)
                    URL = result[0]
                    # update the website information
                    # NOTE(review): this UPDATE has no WHERE clause — it rewrites
                    # URL on every websiteSamples row; confirm that is intended.
                    query = """UPDATE  websiteSamples SET URL = '""" + URL + """'"""
                    print("new website is: ", URL)
                    connection.update(query)
                    internalLinks = WebScraper.webScraper(
                        connection, testID, URL, siteID)
            # set the site type and sample size
            if siteNo in redHerrings:
                print("RedHerring")
                query = """UPDATE websiteSamples SET type = 'redHerring' WHERE siteID = """ + str(
                    siteID)
                connection.update(query)
            elif siteNo not in redHerrings:
                print("sample")
                query = """UPDATE websiteSamples SET type = 'sample' WHERE siteID = """ + str(
                    siteID)
                connection.update(query)
                # Only non-red-herring sites contribute sampled pages.
                websiteDataCollection(connection, websiteSampleSize,
                                      internalLinks, siteID, testID)
        # Post-processing pipeline for the completed test run.
        Parser.Parser(connection, testID)
        # NOTE(review): commit/close receive `connection` as an argument even
        # though they are called on it — confirm the GeneralProcesses.sql API.
        connection.commit(connection)
        connection.close(connection)
        DataManipulation.dataManipulation(testID)
        Presentation.presentation(testID)
        controlFlag = 'false'  # single pass: terminate the outer loop
Exemple #60
0
            if cost < min_cost:
                min_cost = cost
                opt_path = path

        end_time = time.time()
        return min_cost, opt_path, (end_time - start_time)


if __name__ == '__main__':
    # Run Example
    import csv
    import Parser
    from os import listdir
    from os.path import isfile, join

    parser = Parser.Parser()
    with open('greedy_results_symmetric.csv', mode='w') as results_file:
        results_writer = csv.writer(results_file,
                                    delimiter=',',
                                    quotechar='"',
                                    quoting=csv.QUOTE_MINIMAL)
        results_writer.writerow([
            'Problem Name', ' Dimensions', ' Solution Cost',
            ' Run Time (Milliseconds)'
        ])
        directory = 'tsp_symmetric'

        tsp_files = [
            f for f in listdir(directory) if isfile(join(directory, f))
        ]