def test_if(self):
    """A bare `if` evaluates its body when true and yields Null when false."""
    taken = parser.parse('if true: true end', self.s).eval(self.e)
    self.assertEqual(taken.to_string(), 'true')
    skipped = parser.parse('if false: true end', self.s).eval(self.e)
    self.assertEqual(type(skipped), parser.Null)
def check(self, text, ast, dedent=True, files=None):
    """Parse *text* (dedented unless told otherwise) and compare its AST to *ast*."""
    source = textwrap.dedent(text) if dedent else text
    self.assertEqual(parse(source, 'build.ninja'), ast)
def test_multiples(self):
    """Variables persist across statements and can reference each other."""
    cases = (
        ('let m = 50', '50'),
        ('let n = m + 5', '55'),
        ('n', '55'),
    )
    for source, expected in cases:
        value = parser.parse(source, self.s).eval(self.e)
        self.assertEqual(value.to_string(), expected)
def test_print_variable(self):
    """print() writes a variable's value to captured stdout."""
    with captured_output() as (out, err):
        parser.parse('let a = 50.0', self.s).eval(self.e)
        parser.parse('print(a)', self.s).eval(self.e)
    self.assertEqual(out.getvalue().strip(), '50.0')
def test_inequality(self):
    """`!=` is false for equal booleans and true for differing ones."""
    cases = (
        ('true != true', 'false'),
        ('false != false', 'false'),
        ('true != false', 'true'),
    )
    for source, expected in cases:
        value = parser.parse(source, self.s).eval(self.e)
        self.assertEqual(value.to_string(), expected)
def test_strings(self):
    """String comparison operators behave reflexively."""
    cases = (
        ('"5" == "5"', 'true'),
        ('"a" >= "a"', 'true'),
        ('"6" <= "6"', 'true'),
    )
    for source, expected in cases:
        value = parser.parse(source, self.s).eval(self.e)
        self.assertEqual(value.to_string(), expected)
def main(argv):
    """Parse each source file in *argv* and dump a threatspec report as JSON.

    argv: iterable of file paths to feed to the parser.
    """
    ts_parser = PyThreatspecParser()
    for path in argv:
        ts_parser.parse(path)
    reporter = PyThreatspecReporter(ts_parser, "project", "default")
    # Fix: `print` was used as a Python 2 statement, which is a SyntaxError
    # under Python 3 (the rest of this codebase uses py3 syntax).
    print(json.dumps(reporter.export_to_json(), indent=2, separators=(',', ': ')))
def setPeriod(trait):
    """Validate the two ISO-8601 datetimes in *trait.args* and set plot.period."""
    enforce(len(trait.args) == 2, 'A period needs two datetime arguments.')
    start = iso8601.parse(trait.args[0])
    stop = iso8601.parse(trait.args[1])
    enforce(start < stop, 'The datetime range must be positive and nonempty.')
    plot.period = start, stop
def get_AB():
    '''gets all AB pairs from test data s.t. A in one and B in one'''
    c = configure.cfg(sys.argv[1])
    with open("files/test") as test_files:
        test_list = [x.strip() for x in test_files]
    test_data = parser.parse(c.dir, test_list, c.plus, c.minus, c.cluster)
    with open("files/one") as train_files:
        train_list = [x.strip() for x in train_files]
    train_data = parser.parse(c.dir, train_list, c.plus, c.minus, c.cluster)
    # Keep only A words that also occur in the training vocabulary
    # (membership on the dict directly; `.keys()` was redundant).
    items = [(A, ptr) for A, ptr in test_data.hashwords.items()
             if A in train_data.hashwords]
    with open("files/AB.tmp", "w") as out:
        for A, A_ptr in items:
            A_obj = test_data.lookup(A_ptr)
            for B_ptr, count in A_obj.data.items():
                if count <= 1:
                    continue
                # Fix: the original bare `except: pass` silently swallowed
                # every error; only a missing key is an expected condition.
                try:
                    B = test_data.wordshash[B_ptr]
                except KeyError:
                    continue
                if B in train_data.hashwords:
                    out.write(A + " " + B + "\n")
def test_numbers(self):
    """Numeric comparisons evaluate correctly with trailing comments."""
    cases = (
        ('5 == 5 # nice', 'true'),
        ('5 >= 5 # ooh', 'true'),
        ('true != false # woop', 'true'),
    )
    for source, expected in cases:
        value = parser.parse(source, self.s).eval(self.e)
        self.assertEqual(value.to_string(), expected)
def test_if_else(self):
    """`if/else` picks the branch matching the condition's truth value."""
    cases = (
        ('if true: true else: false end', 'true'),
        ('if 5 == 4: true else: false end', 'false'),
    )
    for source, expected in cases:
        value = parser.parse(source, self.s).eval(self.e)
        self.assertEqual(value.to_string(), expected)
def test_doctypes(self):
    """Each doctype shorthand expands to the matching declaration."""
    cases = (
        ('!!! xml', '<?xml version="1.0" encoding="utf-8" ?>'),
        ('doctype html', '<!DOCTYPE html>'),
        ('doctype BaSiC',
         '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic 1.1//EN" '
         '"http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd">'),
        ('!!! 5', '<!DOCTYPE html>'),
        ('!!!',
         '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" '
         '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'),
        ('!!! html', '<!DOCTYPE html>'),
    )
    for source, expected in cases:
        self.assertEqual(expected, parser.parse(source))
def _rewrite(ast):
    """Recursively lower sugared AST forms into primitive opcode lists.

    Leaves (strings) pass through; unmatched node types fall through to a
    plain recursive map. NOTE: this is Python 2 code (`unicode`, list-returning
    `map`) and is kept as such.
    """
    if isinstance(ast, (str, unicode)):
        return ast
    elif ast[0] == 'set':
        if ast[1][0] == 'access':
            if ast[1][1] == 'contract.storage':
                # Assignment into contract storage becomes an sstore.
                return ['sstore', _rewrite(ast[1][2]), _rewrite(ast[2])]
            else:
                # Assignment into an ordinary array slot.
                return ['arrset', _rewrite(ast[1][1]), _rewrite(ast[1][2]),
                        _rewrite(ast[2])]
    elif ast[0] == 'if':
        # Four elements means an else-branch is present.
        return ['ifelse' if len(ast) == 4 else 'if'] + map(_rewrite, ast[1:])
    elif ast[0] == 'access':
        if ast[1] == 'msg.data':
            return ['calldataload', _rewrite(ast[2])]
        elif ast[1] == 'contract.storage':
            return ['sload', _rewrite(ast[2])]
    elif ast[0] == 'array_lit':
        # Build the array by chained set_and_inc, then rewind the pointer.
        acc = ['array', str(len(ast[1:]))]
        for elem in ast[1:]:
            acc = ['set_and_inc', _rewrite(elem), acc]
        return ['-', acc, str(len(ast[1:]) * 32)]
    elif ast[0] == 'code':
        return ['code', rewrite(ast[1])]
    elif ast[0] == 'return':
        if len(ast) == 2 and ast[1][0] == 'array_lit':
            return ['return', _rewrite(ast[1]), str(len(ast[1][1:]))]
    # Import is to be used specifically for creates
    elif ast[0] == 'import':
        return ['code', rewrite(parse(open(ast[1]).read()))]
    # Inset is to be used like a macro in C++
    elif ast[0] == 'inset':
        return _rewrite(parse(open(ast[1]).read()))
    return map(_rewrite, ast)
def test():
    """Smoke-test the Parser on a handful of arithmetic expressions."""
    p = Parser()
    expressions = ("1 + 1", "1 * 7 * 9", "1 + 2 * 3", "int(2)", "abs(2 - 7)")
    for expression in expressions:
        print(p.parse(expression))
def main():
    """Drive the interpreter from a script file (argv[1]) or interactively.

    Fixes: Python 2 `print` statements and `raw_input` were SyntaxErrors /
    NameErrors under Python 3; the bare `except` around input now names the
    exceptions that actually end an interactive session.
    """
    pbook = []
    if len(sys.argv) > 1:  # if a file is an argument
        for line in open(sys.argv[1], 'r'):
            l = line.strip('\n')
            # `print prompt,` + `print l` printed them space-separated.
            print(prompt, l)
            succ, tree = parser.parse(parser.lexer(l))
            if succ:
                pbook = exe(pbook, tree)
            else:
                print(tree)
    else:
        while True:
            try:
                a = input(prompt)
            except (EOFError, KeyboardInterrupt):
                break
            succ, tree = parser.parse(parser.lexer(a))
            if len(tree) == 0:
                print("not a keyword")
            elif succ:
                pbook = exe(pbook, tree)
            elif not succ:
                print(tree)
def load(filename):
    """Load a database by generating code for the interpreter to execute.

    Return a new phonebook (an empty list on any read or parse failure).

    Fixes: Python 2 `print` statements; bare `except` narrowed to OSError;
    the file handle leaked on the early error returns — `with` closes it
    on every path now.
    """
    try:
        f = open(str(filename), 'r')
    except OSError:
        print("oops, file not found")
        return []
    with f:
        pbook = []
        for entry in f.readlines():
            values = entry.split(';')
            lexed = parser.lexer("add %s %s" % (values[1], values[0]))
            succ, parsed = parser.parse(lexed)
            if succ:
                pbook = exe(pbook, parsed)
            else:
                print("error, bad filetype")
                return []
            # range() is already empty when len(values) <= 2, so the old
            # explicit length guard was redundant.
            for i in range(len(values) - 2):
                lexed = parser.lexer("alias %s %s" % (values[1], values[i + 2]))
                succ, parsed = parser.parse(lexed)
                if succ:
                    pbook = exe(pbook, parsed)
                else:
                    print("error, bad filetype")
                    return []
    return pbook
def test_precedence(self):
    """Multiplication binds tighter than addition; parens override."""
    cases = (
        ('5 * 3 + 4', '19'),
        ('5 + 3 * 4', '17'),
        ('5 * (3 + 4)', '35'),
    )
    for source, expected in cases:
        value = parser.parse(source, self.s).eval(self.e)
        self.assertEqual(value.to_string(), expected)
def run_test(package, test):
    """Run one template test, with and without the clean option.

    Returns (passed, message): *passed* is True only when BOTH renderings
    match their expected files; *message* names the test and carries a diff
    for each pass that failed.
    """
    message = package + "/" + test + "\n"

    # Paths
    data_path = "tests/{0}/{1}/data.json".format(package, test)
    template_path = "tests/{0}/{1}/template.html".format(package, test)
    expected_path = "tests/{0}/{1}/expected.html".format(package, test)
    expected_clean_path = "tests/{0}/{1}/expected.clean.html".format(package, test)
    result_path = "tests/{0}/{1}/result.html".format(package, test)
    result_clean_path = "tests/{0}/{1}/result.clean.html".format(package, test)

    with open(template_path) as template_file:
        template = template_file.read()

    if os.path.isfile(data_path):
        with open(data_path) as data_file:
            data = json.load(data_file, object_pairs_hook=OrderedDict)
    else:
        data = []

    passed = True

    # -------------------------------------------- #
    # WITHOUT CLEAN OPTION
    # -------------------------------------------- #
    result = parse(template, data)

    # Store the result for easier viewing when a test fails.
    with open(result_path, "w") as result_file:
        result_file.write(str(result))

    with open(expected_path) as expected_file:
        expected = expected_file.read()
    if result != expected:
        passed = False
        message += compare(expected, result)

    # -------------------------------------------- #
    # WITH CLEAN OPTION
    # -------------------------------------------- #
    result = parse(template, data, True)

    with open(result_clean_path, "w") as result_file:
        result_file.write(str(result))

    with open(expected_clean_path) as expected_file:
        expected = expected_file.read()
    if result != expected:
        # Bug fix: the original reset `passed = True` whenever the clean pass
        # matched, silently masking a failure in the first (non-clean) pass.
        passed = False
        message += "--clean\n"
        message += compare(expected, result)

    return passed, message
def test_parse_exception_extra_paren():
    """A trailing close-paren makes the input more than one expression.

    `parse` accepts exactly one expression, so the surplus `)` must raise
    the appropriate error.
    """
    with assert_raises_regexp(LispError, 'Expected EOF'):
        parse('(foo (bar x y)))')
def test_parse_boolean():
    """The symbols #t and #f parse to Python's True and False."""
    assert_equals(True, parse('#t'))
    assert_equals(False, parse('#f'))
def clean_username(self):
    """Run the parser, deliberately ignoring cassandra DatabaseError; returns None."""
    try:
        parser.parse()
    except cass.DatabaseError:
        pass
    return
def test_neg(self):
    """Unary minus works on constants and on the variable x."""
    # Fix: assertAlmostEquals is a deprecated alias (removed in Python 3.12);
    # assertAlmostEqual is the supported spelling.
    f = parse('-2')
    self.assertAlmostEqual(-2, f({'x': 0}))
    self.assertAlmostEqual(-2, f({'x': 1}))
    f = parse('-x')
    self.assertAlmostEqual(-3, f({'x': 3}))
    self.assertAlmostEqual(-4, f({'x': 4}))
def test_simple(self):
    """List literals render canonically, including trailing commas."""
    cases = (
        ('[5]', '[5]'),
        ('let b = [5,]', '[5]'),
        ('[5,6]', '[5, 6]'),
    )
    for source, expected in cases:
        value = parser.parse(source, self.s).eval(self.e)
        self.assertEqual(value.to_string(), expected)
def test_parse_only_text(self):
    """Plain text yields exactly one text token, whatever its length."""
    for text in ("text0", "text0 text1 text2"):
        tokens = parser.parse(text)
        self.assertEqual(1, len(tokens))
        self._assert_text_token(tokens[0], text)
def test_nested(self):
    """Nested list literals round-trip with canonical spacing."""
    cases = (
        ('[5, [6]]', '[5, [6]]'),
        ('[5, [6, 7]]', '[5, [6, 7]]'),
        ('let a = [5,[6,[7]]]', '[5, [6, [7]]]'),
    )
    for source, expected in cases:
        value = parser.parse(source, self.s).eval(self.e)
        self.assertEqual(value.to_string(), expected)
def testSafeHTML(self):
    """ Test the stripping of unsafe HTML code """
    # Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    # assertEqual is the supported spelling.
    text = "<script>alert('hi');</script>"
    self.assertEqual(parse(text), '<p><script>alert(\'hi\');</script>\n</p>')
    text = '<form method="post">Hi</form>'
    self.assertEqual(parse(text), '<p><form method="post">Hi</form>\n</p>')
def test_raise(self):
    """Every malformed postfix expression must raise ParserError."""
    for bad in ('4 5+ -', '1 2 3*', '- 4 5'):
        with self.assertRaises(ParserError):
            parse(bad)
def testParagraphs(self):
    """ Test the parsing of newlines into paragraphs and linebreaks """
    # Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    # assertEqual is the supported spelling.
    text = "hello\nhow are you\n\ngoodbye"
    self.assertEqual(parse(text), '<p>hello\nhow are you\n</p><p>goodbye\n</p>')
    text = "hello<br />how are you\n\ngoodbye"
    self.assertEqual(parse(text), '<p>hello<br>how are you\n</p><p>goodbye\n</p>')
def test_parse_integer():
    """Single integers parse to plain Python ints."""
    assert_equals(42, parse('42'))
    assert_equals(1337, parse('1337'))
def main(argv):
    """Tokenize and parse the configured files, exiting quietly on failure.

    Fix: `print` was a Python 2 statement (SyntaxError under Python 3); the
    unused `as e` bindings are dropped.
    """
    parseInput(argv)
    try:
        symbolTable = tokenizer.tokenize(files, user_options)
        parser.parse(files, user_options, symbolTable)
        print("Done")
    except TokenizerException:
        # NOTE(review): exiting with status 0 on failure looks wrong —
        # confirm no caller depends on it before changing to exit(1).
        exit(0)
    except ParserException:
        exit(0)
def repl():
    """Read-eval loop over a fresh Env; terminates on the literal QUIT!."""
    env = Env()
    while True:
        line = input("> ")
        if line == "QUIT!":  # or whatever
            break
        parser.parse(line).execute(env)  # TODO
def parse(p, options):
    """Run the parser over *p* for its side effects and hand *p* back."""
    parser.parse(p)
    return p
from sklearn.feature_extraction.text import CountVectorizer import numpy as np import tensorflow as tf import parser ################### ### IMPORT DATA ### ################### # Insert a filename here data = parser.parse('lotsodata.txt') clean = [] for element in data: clean.append(element["url"].replace('http://', '')) vectorizer = CountVectorizer(max_features=1000) X = vectorizer.fit_transform(clean) arrayX = np.array(X.toarray()) trainTestXArray = np.array_split(arrayX, 2) trainX = trainTestXArray[0] testX = trainTestXArray[1] Y = [] # Make trainY matrix for i in range(len(data)): urlResult = [] if data[i]['result'] == 'malicious':
import sys
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np


def scatterPlot(d):
    """Scatter-plot the first three columns of *d* and save the figure."""
    data = d
    # assign specific variables for the 3d
    x = data[:, 0]
    y = data[:, 1]
    z = data[:, 2]
    fig = plt.figure()
    # create subplot to allow access to specific properties
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(x, y, z)
    plt.savefig('DataGraphs/test.png')  # can change to just plt.show()


# Bug fix: removed the unresolved git merge-conflict markers
# (<<<<<<< HEAD / ======= / >>>>>>>) that were committed into the file,
# along with the duplicated parse() call the conflict reintroduced.
parsed = parse(sys.argv[1])
data = parsed.getDataSet()
scatterPlot(data)
def test_evaluate_not_operator_with_argument():
    """A number in operator position is rejected even when given arguments."""
    expression = parse(tokenize("(3 4)"))
    with raises(InvalidOperator):
        evaluate(expression)
def test_evaluate_s_expressions(source, want):
    """Each parametrized s-expression evaluates to its expected value."""
    got = evaluate(parse(tokenize(source)))
    assert got == want
#!/usr/local/bin/python3
import sys

import parser
import DAO

# Translate the command line into a command object and execute it.
command = parser.parse(args=sys.argv)
DAO.execute(command)
def sol(exprstr):
    """Parse *exprstr* into an AST and decide satisfiability via DPLL."""
    return dpll(parse2ast(parse(exprstr)))
from parser import parse

if __name__ == '__main__':
    # Download and save every page of the sample book.
    url = 'http://loveread.ec/read_book.php?id=7468'
    title = 'test_book'
    pages = 294
    parse(url, title, pages)
# condition(s) depending on the requirement. if line.startswith(group_by): if data: yield data data = [] data.append(line) if data: yield data with open("one.pgn", encoding="ISO-8859-1") as f: for i, group in enumerate(get_groups(f, "[Event "), start=1): print("Group #{}".format(i)) #print (group) datapgn = "".join(group) print(datapgn) game = parser.parse(datapgn, actions=pgn.Actions()) print("Move 1:") print(game.move(1)) print("Move Black.san:") print(game.move(1).black.san) print("Move 5:") print(game.move(5)) print("Move Text:") print(game.movetext) print("Tag Pairs:") print(game.tag_pairs) print("Score:") print(game.score)
variables.update({p[1]: var}) # Error rule for syntax errors def p_error(p): print("Khata2 f ktaba !!!") # Build the parser parser = yacc.yacc() #data = open('file.txt').read().replace('\n', '') filename = sys.argv[1] file_handle = open(filename, "r") file_contents = file_handle.read() result = parser.parse(file_contents) # Tokenize '''calclex.lexer.input(file_contents)d while True: # lexer.token() :Returns a special LexToken instance on success or None if the end of the input text has been reached. tok =calclex.lexer.token() if not tok: break # No more input print(tok)''' if result is not None: for r in result: if r == None: continue else: print(r)
def test_jan_2019(self):
    """Parsing the Jan/2019 TJRJ spreadsheets yields one matching record."""
    self.maxDiff = None
    expected = {
        'reg': '',
        'name': 'ADRIANO CELSO GUIMARAES',
        'role': '',
        'type': 'membro',
        'workplace': '',
        'active': True,
        'income': {
            'total': 55241.15,
            'wage': 35462.22,
            'perks': {
                'total': 1825.0,
                'food': 1825.0,
                'pre_school': 0.0,
                'health': 0,
                'birth_aid': 0,
                'housing_aid': 0.0,
                'subsistence': 0.0
            },
            'other': {
                'total': 17953.93,
                'daily': 0.0,
                'others_total': 17953.93,
                'others': {
                    'Abono de permanência': 5054.28,
                    'ARTIGO 95, III da CF': 639.81,
                    'Abono constitucional de 1/3 de férias': 0.0,
                    'Indenização de férias': 0.0,
                    'Antecipação de férias': 0,
                    'Gratificação natalina': 0.0,
                    'Antecipação de gratificação natalina': 0.0,
                    'Substituição': 0.0,
                    'Gratificação por exercício cumulativo': 12259.84,
                    'Gratificação por encargo Curso/Concurso': 0,
                    'Pagamentos retroativos': 0.0,
                    'JETON': 0
                }
            }
        },
        'discounts': {
            'total': 17620.29,
            'prev_contribution': 5054.28,
            'ceil_retention': 0.0,
            'income_tax': 9006.56,
            'others_total': 3559.45,
            'others': {
                'Descontos Diversos': 3559.45
            }
        }
    }
    sheets = (
        './src/output_test/TJRJ-contracheque.xlsx',
        './src/output_test/TJRJ-direitos-eventuais.xlsx',
        './src/output_test/TJRJ-direitos-pessoais.xlsx',
        './src/output_test/TJRJ-indenizações.xlsx',
    )
    parser.parse('TJRJ', "2019", sheets, '/src/output_test', 'teste')
    with open('./src/output_test/TJRJ-1-2019.json') as json_file:
        employees = json.load(json_file)['cr']['employees']
    # Verifications
    self.assertEqual(1, len(employees))
    self.assertDictEqual(employees[0], expected)
wire [{LPM_WIDTHP}-1:0] unsignedoutputP; wire gated_clock; assign unsignedinputA = dataa; assign unsignedinputB = datab; assign unsignedoutputP = unsignedinputA * unsignedinputB; assign gated_clock = clock & clken; always @(posedge gated_clock)begin if(aclr)begin result <= 0; end else result <= unsignedoutputP; end endmodule''' return str.format(LPM_WIDTHA=lpm_widtha, LPM_WIDTHB=lpm_widthb, LPM_WIDTHP=lpm_widthp) def write(self, lpm_widtha, lpm_widthb, lpm_widthp): self.fp.write(self.make_str(lpm_widtha, lpm_widthb, lpm_widthp)) if __name__ == '__main__': fp = open(parser.parse(), "w") mult1 = local_mult(fp) mult1.write(64, 64, 128) fp.close()
import parser

# Sum the number of distinct characters in each parsed string.
strings = parser.parse('data.txt')
print(sum(len(set(s)) for s in strings))
def test_evaluate_not_operator():
    """A bare number in operator position is rejected."""
    expression = parse(tokenize("(2)"))
    with raises(InvalidOperator):
        evaluate(expression)
try: log.msg('Rotating log %s' % self.log_filename) log.removeObserver(self.log_observer.emit) self.log_file.close() self.setup_log(self.log_filename) except: msg = "Error in signal_handler:\n%s" % traceback.format_exc() print msg mail.error(msg) if __name__ == '__main__': # Read config import parser config = parser.parse() config['hostname'] = socket.gethostname() config['version'] = __version__ # Log log_file = config.get('log', 'stdout') if log_file != 'stdout': logger = Logger(log_file) else: log.startLogging(sys.stdout) # Set up reactor try: from twisted.internet import epollreactor epollreactor.install() log.msg('Using epoll')
def test_evaluate_not_operator_message():
    """The InvalidOperator error message names the offending value."""
    expression = parse(tokenize("(5 6)"))
    with raises(InvalidOperator) as excinfo:
        evaluate(expression)
    assert str(excinfo.value) == "Invalid operator: 5."
def _test(self):
    """The snippet must parse without raising; its stdout is suppressed."""
    try:
        with redirect_stdout(StringIO()):
            parse(source)
    except Exception as err:
        self.fail('Exception: {}'.format(err))
def runbypattern(pattern, inputstring, index=0, flags=Flags()):
    """Compile *pattern* to bytecode, normalise its labels, and execute it.

    NOTE(review): the `Flags()` default is evaluated once and shared across
    calls — confirm Flags instances are never mutated before changing this.
    """
    program = relabel(parse(runpattern(pattern)))
    return run(program, inputstring, index, flags)
"""
MediaWiki-style markup

parse(text) -- returns safe-html from wiki markup
code based off of mediawiki
"""

import re, random, math, locale
from base64 import b64encode, b64decode

from trac.core import *
from trac.wiki.api import IWikiMacroProvider

from parser import parse


class MediaWikiRenderer(Component):
    """Renders plain text in MediaWiki format as HTML."""

    implements(IWikiMacroProvider)

    def get_macros(self):
        """Return a list of provided macros"""
        yield 'mediawiki'

    def get_macro_description(self, name):
        """Short human-readable description of the macro."""
        return '''desc'''

    def expand_macro(self, formatter, name, content):
        """Render *content* when invoked as the `mediawiki` macro."""
        if name == 'mediawiki':
            return parse(content)

    # deprecated interface prior trac 0.11
def test_parsing_result(self):
    """parse() returns the expected value for each stored case.

    Bug fix: assertTrue(x, y) treats its second argument as the failure
    *message*, so the original assertions only checked truthiness and never
    compared against the expected values (-2, 3, -9).
    """
    self.assertEqual(parse(self.case_one), -2)
    self.assertEqual(parse(self.case_two), 3)
    self.assertEqual(parse(self.case_three), -9)
def expand_macro(self, formatter, name, content):
    """Render *content* as MediaWiki markup when invoked as `mediawiki`.

    Returns None for any other macro name.
    """
    if name != 'mediawiki':
        return None
    return parse(content)
settings.DISPLAY_ASM = "A" in options.keys() settings.DISPLAY_OPTIMIZATION = "O" in options.keys() settings.DISPLAY_TREE = "t" in options.keys() settings.DISPLAY_INTERMEDIATE = "I" in options.keys() settings.USE_COLORS = "C" not in options.keys() settings.SHOW_INCREMENTAL_CHANGES = "c" in options.keys() settings.SHOW_FINE_CHANGES = "cf" in options.keys() preprocessed, line_map, preprocessor_context = preprocessor.preprocess( input_file_data, input_file_name) tokens = tokenizer.tokenize(preprocessed, line_map, input_file_name) tokens = tokenizer.macros(tokens, preprocessor_context) tree, context = parser.parse(tokens) if settings.DISPLAY_TREE: tree.display() prog = generation.generate_program(tree, context) funcs = [str(p) for p in prog.functions] optimzied = optimize.optimize(prog) if settings.DISPLAY_OPTIMIZATION: for p_func, o_func in zip(funcs, optimzied.functions): print("\n") utils.compare(p_func, str(o_func))
async def solve(expression):
    """Asynchronously evaluate *expression* via the parser."""
    result = parse(expression)
    return result
def parse_ingredients():
    """Parse the `ingredients` field of the JSON request body."""
    payload = request.json
    return parser.parse(payload['ingredients'])
def test_computation(inputstring, expected_output):
    """compute() over the parsed input must equal the expected output."""
    outcome = compute.compute(parser.parse(inputstring))
    print(outcome)
    assert outcome == expected_output
def test_evaluate_number_literals(source, want):
    """Number literals evaluate to themselves."""
    got = evaluate(parse(tokenize(source)))
    assert got == want
def test_evaluate_builtin():
    """A bare `+` evaluates to the builtin addition Operator."""
    got = evaluate(parse(tokenize("+")))
    assert got == Operator(symbol="+", function=operator.add)
def main():
    """Read a term from stdin, evaluate it, and print the result."""
    tokens = lex(input())
    stack = parse(tokens)
    print(eval_term(stack))
def loop():
    """Interactive REPL: read source, parse, compile-and-interpret, print.

    Special commands: `:a` dumps the last AST, `:b` the last bytecode,
    `:q` quits. An UnexpectedEndError keeps accumulating continuation lines
    until the expression closes; other parser errors reset the buffer.
    """
    interp = interpreter.Interpreter()
    last_ast = parser.Null()
    last_bytecode = ''
    pending = 0  # how many continuation lines we are waiting on
    buffer = ''
    try:
        while True:  # loop forever until KeyboardInterrupt or other break
            if pending > 0:
                buffer += '\n' + readline('... ')
            else:
                buffer = readline('>>> ')
            command = buffer.strip(' \t\r\n')
            if command == '':
                continue
            if command == ':a':
                print(last_ast.rep())
                continue
            if command == ':b':
                print(last_bytecode)
                continue
            if command == ':q':
                os.write(1, "\n")
                break
            try:
                ast = parser.parse(buffer)  # at this point we get AST
                last_ast = ast  # store AST for later inspection
                result = interp.compile_interpret(ast)
                last_bytecode = interp.last_bc
                printresult(result, "= ")
                interp.context.instructions = []
                pending = 0
            except parser.UnexpectedEndError:
                # Incomplete input: keep reading continuation lines.
                pending += 1
                continue
            except parser.LogicError as e:
                pending = 0  # reset
                os.write(2, "ERROR: Cannot perform that operation (%s)\n" % e)
                continue
            except parser.ImmutableError as e:
                pending = 0  # reset
                os.write(2, "ERROR: Cannot reassign that (%s)\n" % e)
                continue
            except parser.UnexpectedTokenError as e:
                pending = 0  # reset
                os.write(2, "ERROR: Unexpected '%s'\n" % e.token)
                continue
            except Exception as e:
                pending = 0  # reset
                os.write(2, "ERROR: %s %s\n" % (e.__class__.__name__, str(e)))
                continue
    except KeyboardInterrupt:
        os.write(1, "\n")