def test_collates_words(self):
    """Repeated wildcard matches are tallied and ranked by frequency."""
    parser = Parser()
    meme = Meme("all your * -base")
    sources = [
        Source("", "all your cake", ""),
        Source("", "all your cake", ""),
        Source("", "all your data", ""),
    ]
    collated = parser.collate_words(meme, sources)
    self.assertEqual(collated.get_list(), [("cake", 2), ("data", 1)])
def testFixtureOne(self):
    """Fixture one parses into a full address, suite and postcode."""
    parser = Parser()
    fixture1 = parser.fixture1()
    (address, suite, postcode, description) = parser.parse(fixture1)
    expectedAddress = "Crown House, Toutley Road, Wokingham, Berkshire"
    # assertEqual replaces the deprecated assertEquals alias
    # (removed outright in Python 3.12).
    self.assertEqual(address, expectedAddress)
    self.assertEqual(postcode, "RG41 1QW")
    self.assertEqual(suite, "Suite 2")
def testFixtureTwo(self):
    """Fixture two has no suite or postcode; both should parse as None."""
    parser = Parser()
    fixture2 = parser.fixture2()
    (address, suite, postcode, description) = parser.parse(fixture2)
    expectedAddress = "329 bracknell, Doncastle Road, Bracknell, Berkshire"
    # assertEqual replaces the deprecated assertEquals alias
    # (removed outright in Python 3.12).
    self.assertEqual(address, expectedAddress)
    self.assertEqual(postcode, None)
    self.assertEqual(suite, None)
def etokenize(text):
    """Split *text* on '.' and return each sentence as a token list.

    Tokens are produced unstemmed and then stripped of stopwords;
    sentences that end up empty at either stage are dropped.
    """
    parser = Parser()
    sentences = []
    for sentence in text.split('.'):
        tokens = parser.tokenise(sentence, stem=False)
        if tokens:
            tokens = parser.removeStopwords(tokens)
            if tokens:
                sentences.append(tokens)
    return sentences
def index():
    """Render the forecast page, flagging whether strong jugo wind is due."""
    SITE = 'http://www.yr.no/place/Croatia/Istria/Medulin/forecast.xml'
    response = requests.get(SITE)
    yr_parser = Parser.get_yr_parser()
    forecasts = yr_parser.parse(response.text)
    # True when at least one parsed forecast entry reports strong jugo.
    has_waves = any(entry.is_strong_jugo() for entry in forecasts)
    return render_template("index.html", results=forecasts, has_waves=has_waves)
def check():
    """Poll the forecast feed and email an alert when strong jugo is expected.

    Returns a tuple describing the outcome: (timestamp, headline, details)
    when an alert fires, or (timestamp, headline) when there is no surf.
    """
    page = requests.get(SITE)
    parser = Parser.get_yr_parser()
    parsed = parser.parse(page.text)
    output_str = "On: {time} (ws: {windSpeed}, wd: {windDirection})"
    # Only the next 12 forecast slots are considered for the alert window.
    result = [output_str.format(**p.__dict__)
              for p in parsed[:12] if p.is_strong_jugo()]
    # Idiomatic truthiness check instead of len(result) > 0.
    if result:
        send_email_alert("\n".join(result))
        return datetime.now(), "> Surf! Strong WIND:", ", ".join(result)
    else:
        return datetime.now(), "> No surf :/"
def memepie():
    """Render a pie chart of the words collected for the requested meme.

    Falls back to the error page when the meme fails validation; returns
    None (implicitly) when no 'meme' parameter is present at all.
    """
    if 'meme' in request.values:
        meme = Meme(request.values['meme'])
        if meme.is_valid():
            sources = Twitter().get_sources(meme, 20)
            result = Parser().collate_words(meme, sources)
            g.clean_query = meme.get_clean_meme()
            g.meme_exceptions = meme.get_exceptions()
            g.meme_parts = meme.get_parts()
            g.pie_data = GChart().generate_pie_data(result)
            return render_template("memepie.htm")
        else:
            # Show our understanding of the meme alongside the problem.
            g.clean_query = meme.get_clean_meme()
            g.error_message = meme.get_problem()
            return render_template("error.htm")
# Python 2 script: report who paid the most across the food orders.
import json
import urllib2

from src.item import Item
from src.order import Order
from src.parser import Parser
from src.user_list import UserList

# Live endpoint, kept for reference; local backups are used instead:
# orders_json = urllib2.urlopen('http://tw-food-for-thought.herokuapp.com/information/orders.js')

with open('orders_json_backup', 'r') as f:
    orders_json = f.read()
parser = Parser(orders_json)

with open('users_json_backup', 'r') as f:
    users_json = f.read()
user_list = UserList(users_json)

# NOTE(review): the original statement here was corrupted ("...******...")
# and referenced an undefined name with a trailing dot — presumably the
# user-id -> amount mapping comes from the parser. Confirm the accessor
# name against src/parser.py before relying on this reconstruction.
users_who_have_paid_highest = parser.users_who_have_paid_highest()

for key, value in users_who_have_paid_highest.iteritems():
    print ("The person who has paid the highest amount of money is: "
           + user_list.user_by_id(key)["username"]
           + " Amount paid: " + str(value))
import sys

from src.assembler import Assembler
from src.parser import Parser

if __name__ == '__main__':
    # Usage: python <this script> <program_name>
    program_name = sys.argv[1]

    # 'with' guarantees the source file is closed after reading.
    with open(program_name, 'r') as program:
        parser = Parser(program.read())
    parsed_output = parser.parse()

    assembler = Assembler()
    # Renamed from 'hex': that name shadowed the built-in hex().
    hex_output = assembler.assemble(parsed_output)

    with open(program_name + '.hex', 'w') as hex_file:
        hex_file.write(hex_output)
    # Parenthesised single expression: valid as both a Python 2 print
    # statement and a Python 3 print() call.
    print("Written to " + program_name + '.hex')
def test_generates_valid_regex(self):
    """The meme's wildcard expands into a word-capturing regex group."""
    parser = Parser()
    meme = Meme("all your * -base")
    expected = "all your ([\\w'\\-]+)"
    self.assertEqual(parser._format_regex(meme), expected)
def test_converts_to_lowercase(self):
    """Matched words are collated case-insensitively (stored lowercase)."""
    parser = Parser()
    meme = Meme("all your * -base")
    sources = [Source("", "ALL YOUR DATA", "")]
    collated = parser.collate_words(meme, sources)
    self.assertEqual(collated.get_list(), [("data", 1)])
def test_handles_no_matches(self):
    """Sources with no meme match collate to an empty word list."""
    parser = Parser()
    meme = Meme("all your * -base")
    sources = [
        Source("", "foo", ""),
        Source("", "bar", ""),
        Source("", "baz", ""),
    ]
    self.assertEqual(parser.collate_words(meme, sources).get_list(), [])
def test_generates_valid_weird_regex(self):
    """Apostrophes and hyphens in the meme survive regex generation."""
    parser = Parser()
    meme = Meme("amn't foo-bar * -isn't")
    expected = "amn't foo-bar ([\\w'\\-]+)"
    self.assertEqual(parser._format_regex(meme), expected)
from src.tokenizer import Tokenizer
from src.parser import Parser

# Tokenise the sample PKB source with the shared syntax configuration.
tokenizer = Tokenizer()
tokenizer.load_config('./syntaxConfig.json')
# 'with' closes the file handle — the original open(...).read() leaked it.
with open('../Parser/test.pkb') as source_file:
    tokenizer.scan_source(source_file.read())

# Feed the first 100 scanned tokens into the parser.
parser = Parser()
parser.load_config('./syntaxConfig.json')
parser.scan_tokens(tokenizer.tokens[0:100])