def store_sentences(max_item):
    """Parse review texts from one Mongo collection and store the
    sentence-split documents into another.

    max_item -- stop after this many documents have been processed.
    """
    coll_load = "test_corpus"
    coll_store = "test_optimized"
    p = Parser()
    db = MongoORM()
    # Skip the first 50000 documents of the source collection --
    # presumably already processed in an earlier run; TODO confirm.
    prefix = 50000
    count = 0
    t = Timer()
    for item in db.get_collection(coll_load):
        if prefix > 0:
            prefix -= 1
            continue
        sentences = p.parse(item["text"])
        # Replace the raw text with its parsed sentence list before storing.
        del item["text"]
        item["sentences"] = sentences
        db.insert_item(coll_store, item)
        count += 1
        if count % 1000 == 0:
            print count, " reviews processed."
        if count == max_item:
            break
    t.measure("Collection parsed.")
def clause_form(in_string, trace=False):
    """Convert a first-order logic formula (given as text) to clause form.

    in_string -- the formula to parse.
    trace     -- when True, log each transformation step at INFO level.

    Returns the root node of the transformed parse tree.
    """
    parser = Parser()
    parser.build()
    tree = Tree(parser.parse(in_string))
    print tree.root
    if trace:
        # Flush pending log output before switching the level.
        # NOTE(review): logging._handlerList is a private API -- confirm it
        # behaves as expected on the targeted Python version.
        [h_weak_ref().flush() for h_weak_ref in logging._handlerList]
        logging.getLogger().setLevel(logging.INFO)
    else:
        [h_weak_ref().flush() for h_weak_ref in logging._handlerList]
        logging.getLogger().setLevel(logging.ERROR)
    if tree is not None:
        # Standard clause-form pipeline, applied in the conventional order.
        logging.info("Eliminating equivalence")
        equiv_elimination(tree.root, tree)
        logging.info("Eliminating implication")
        implic_elimination(tree.root, tree)
        logging.info("Pushing negation")
        push_negation(tree.root, tree)
        logging.info("Standarize Apart")
        standarize_apart(tree.root)
        logging.info("Skolemizing")
        skolemize(tree.root, set(), dict(), tree)
        logging.info("Discarding ForAll quantifiers")
        discard_for_all(tree.root, tree)
        logging.info("Distributing ands and ors")
        distribute(tree.root, tree, transform_and=False)
    return tree.root
def __init__(self, handler):
    """Register the block rules and inline filters for this parser.

    handler -- the output handler the base Parser dispatches to.
    """
    Parser.__init__(self, handler)
    ## ul, li list handling
    self.addRule(ListRule())
    self.addRule(ListItemRule())
    ## Document title
    self.addRule(TitleRule())
    ## First-level headers
    self.addRule(Header1Rule())
    ## Plain paragraphs
    self.addRule(ParagraphRule())
    ###############################################################
    ## Inline markup filters
    ###############################################################
    ## Emphasis: *text*
    self.addFilter(r'\*(.+?)\*', 'emphasis')
    ## URLs
    self.addFilter(r'(http://[\.a-zA-Z/]+)', 'url')
    ## E-mail addresses.
    ## Fixed character class: the original '[a-zA-z]' range also matched
    ## the ASCII punctuation characters between 'Z' and 'a'.
    self.addFilter(r'([\.a-zA-Z]+@[\.a-zA-Z]+[a-zA-Z]+)', 'mail')
def main(filename):
    """Parse *filename* and evaluate the resulting tree in a fresh environment."""
    source_parser = Parser(filename)
    tree = source_parser.parse()
    environment = create(None)
    evaluate(tree, environment)
def onMessage(self, msg, binary): """ call when message receives to listning websocket message will be parse by QueryParser and delegate task to appropriate manages according to QueryParser resuslt(result will be Query object), following tasks can be perform according to QueryParser result 1. Login - Login user to system 2. Share - Share data between users, manage Share request 3. Get - Manage get query requests 4. Create - Create users, manage create requests """ # parse message and get query result # currently we support fro only valid queries # we assume incoming 'msg' is valid message print msg print '-----------------------' parser = Parser() query = parser.parse(message=msg) if query.command == 'LOGIN': # delegate to handle_login self.handle_login(binary=binary, query=query) elif query.command == 'GET' or query.command == 'DATA': # delegate task to handle_get self.handle_get(query) elif query.command == 'SHARE': # delegate to handle_share self.handle_share(binary=binary, query=query)
def __load_files(self): try: for file_name in os.listdir(self.config_location): if file_name.endswith(".json"): self.file_names.append(file_name) except OSError as e: print e return file_name = "" # silences ide warning try: for file_name in self.file_names: parser = Parser(file_name=self.config_location+file_name, simple_value=True) parser.parse_json() result = parser.result result[self.file_name_key] = file_name self.parsed_values.append(result) except UnexpectedTokenException as e: print "Error in file {0}:\n".format(file_name) print e return except TokenMismatchException as e: print "Error in file {0}:\n".format(file_name) print e return
class Reader(object):
    '''
    Reads a measurement file through Parser and exposes its fields,
    its values and simple plotting.
    '''

    def __init__(self):
        '''
        Constructor
        '''
        self._parser = None       # set by parse()
        self._version = '1.0.0'   # reader version string

    def parse(self, mfile):
        # Build a Parser for mfile and run it; results are queried via
        # getFields()/plot() afterwards.
        self._parser = Parser(mfile)
        self._parser.parse()

    def getFields(self):
        # Field names discovered by the last parse() call.
        return self._parser.getFields()

    def getVersion(self):
        return self._version

    def plot(self, ylabel, xlabel=None):
        # Plot the column named ylabel, optionally against xlabel.
        import matplotlib.pyplot as plt
        yvalues=self._parser.getValues(ylabel)
        xvalues=self._parser.getValues(xlabel) if xlabel else None
        if xvalues:
            plt.plot(xvalues, yvalues)
            plt.xlabel(xlabel)
        else:
            plt.plot(yvalues)
        plt.ylabel(ylabel)
        plt.show()

    version = property(fget=getVersion)
def main():
    """Run the benchmark suite on two git branches inside Docker and write
    a CSV comparing the results."""
    DockerExecutor.set_environment_variables()
    args = parse_args()
    final_result_file = args.output
    origin = GitRepository(args.original_repo, args.original_branch, "original")
    external = GitRepository(args.external_repo, args.external_branch, "external")
    tests_to_execute = get_benchmark_list(args.benchmarks)
    if len(tests_to_execute) == 0:
        exit(200)
    if not final_result_file.endswith(".csv"):
        print("Please specify a CSV file as output")
        exit(201)
    # download firstly the image
    # NOTE(review): image/tag/tmpdir are module-level names defined
    # elsewhere in the file.
    print("Downloading the image " + image + ":" + tag)
    c = Client(base_url=args.socket_url, version=args.docker_api_version)
    c.pull(repository=image, tag=tag, insecure_registry=True)
    # stop all running containers before
    for container in c.containers():
        print(" Stopping container " + container["Id"])
        c.remove_container(container["Id"], force=True)
    print("Done downloading the image")
    origin_exec = DockerExecutor(origin, args.socket_url, args.docker_api_version)
    external_exec = DockerExecutor(external, args.socket_url, args.docker_api_version)
    # Run the suite once per branch, collecting parsed results for each.
    origin_exec.run(tests_to_execute)
    parsers_origin = parse_files()
    shutil.rmtree(tmpdir)  # clean up before the next execution
    os.mkdir(tmpdir)
    external_exec.run(tests_to_execute)
    parsers_external = parse_files()
    Parser.to_csv_multiple_comparison(parsers_origin, parsers_external, final_result_file)
    print("Done. File " + final_result_file + " created")
def test_parser(self):
    # Write a small Python snippet into the fixture file, then check that
    # Parser classifies each recognised token.
    # NOTE(review): the snippet is written to path_to_new_file but the
    # parser reads 'tests.html' -- confirm the two are meant to match.
    with open(path_to_new_file, 'r+') as new_file:
        new_file.write("for i in range(100): print i")
    p = Parser('tests.html')
    parse = p.parse_file()
    self.assertEqual(parse, [('for', 'keyword'),
                             ('in', 'keyword'),
                             ('range', 'built_in_function'),
                             ('print', 'keyword')
                             ])
def main(args):
    """Entry point: tokenize and parse the single input file given in args.

    Options:
        -o FILE   write output to FILE instead of stdout
        -T/-P/-X  select the target stage (default: P)
    """
    try:
        (opts, args) = getopt(args, "o:TPX")
    except GetoptError:
        usage()
    if len(args) != 1:
        usage()
    from tokenizer import Tokenizer
    from parser import Parser
    from error import JtError
    import context
    from os.path import abspath
    filename = abspath(args[0])
    # open() instead of the removed file() builtin; works on Python 2 and 3.
    stdin = open(filename, "r")
    target = "P"
    stdout = sys.stdout
    for (ok, ov) in opts:
        if ok in ("-T", "-P", "-X"):
            target = ok[1]
        elif ok == "-o":
            stdout = open(ov, "w")
    contents = stdin.read()
    tokenizer = Tokenizer()
    tokenizer.build()
    tokenizer.input(contents)
    parser = Parser(tokenizer)
    result_tree = None
    try:
        result_tree = parser.parse()
    # "except E as e" replaces the legacy "except E, e" (Python 2.6+/3
    # compatible spelling).
    except JtError as error:
        failure(error)
def run_repl(self):
    """Run the REPL loop."""
    color_output(intro)
    load_namespace()
    while True:
        try:
            val = prompt(
                'Mini Matlab >> ',
                lexer=MathematicaLexer,
                history=self.hist,
                completer=self.autocomplete(),
                display_completions_in_columns=True,
                mouse_support=True
            )
            if val == 'exit':
                # Show the workspace summary before leaving.
                self.workspace()
                break
            elif val == 'help':
                color_output(intro)
            else:
                # Everything else is handed to the expression parser.
                parser = Parser(val)
                parser.save_retrieve_args()
        except (KeyboardInterrupt, SystemExit, EOFError):
            self.workspace()
            break
def test_nowiki(self):
    """<nowiki> escapes formatting"""
    p = Parser()
    text = "<nowiki>\n\n# item '''1'''\nitem ''2''\n\n</nowiki>"
    assumed = u"<p>\n\n# item '''1'''\nitem ''2''\n\n\n</p>"
    # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
    self.assertEqual(p.parse(text), assumed)
def fetch_contents(self, source):
    """Recursively fetches Markdown contents from a single file or
    directory containing itself Markdown files
    """
    contents = ""
    if os.path.isdir(source):
        self.log(u"Entering %s" % source)
        for entry in os.listdir(source):
            contents += self.fetch_contents(os.path.join(source, entry))
    else:
        try:
            # Parser is chosen from the file extension; unsupported
            # extensions raise NotImplementedError and are skipped.
            parser = Parser(os.path.splitext(source)[1], self.encoding)
        except NotImplementedError:
            return contents
        self.log(u"Adding %s (%s)" % (source, parser.format))
        file_contents = codecs.open(source, encoding=self.encoding).read()
        contents = parser.parse(file_contents)
        if self.embed:
            contents = self.embed_images(contents, source)
    if not contents.strip():
        self.log(u"No contents found in %s" % source, 'warning')
    elif not re.match(r'.*?<hr\s?/?>$', contents.strip()):
        # Ensure each unit of content ends with a slide separator.
        contents += u'<hr />'
    return contents
def compile(source):
    """Parse *source*, run the Transform visitor over the AST and generate
    code constants from it.

    NOTE(review): this shadows the builtin compile() in its module.
    """
    source_parser = Parser(source)
    transform = Transform()
    tree = source_parser.exprs()
    traverse(tree, transform)
    return code.generate(tree)
def main():
    """Parse a .gdf graph file (path given on the command line) and print
    popularity, influence and recommendation statistics."""
    filename = sys.argv[1]
    graph = Graph()
    parser = Parser()
    #parse gdf file
    parser.parse(filename, graph)
    # print graph.__str__()
    stats = Stats(graph)
    #compute popularity
    popularities = stats.computePopularity()
    print "Popularidad"
    print "********************************"
    stats.showPopularities(popularities)
    #compute influences
    influences = stats.computeInfluences()
    print ""
    print "Influencias"
    print "********************************"
    stats.showInfluences(influences)
    #obtain recomendations
    print ""
    print "Recomendaciones"
    print "********************************"
    recommendations = stats.computeRecommendations()
    stats.showRecommendations(recommendations)
def list_all(self):
    """Build a gtk.ListStore of every DVD (title, id, stock status and the
    computed selling price); returns None when the catalogue is empty."""
    from parser import Parser
    parse = Parser()
    dvds = sc.list_all_dvd()
    if len(dvds) == 0:
        return None
    store = gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_UINT,
                          gobject.TYPE_STRING, gobject.TYPE_DOUBLE)
    for dvd in dvds:
        # Column 3 of a row is the out-of-stock flag.
        stock_text = "In Stock" if dvd[3] == 0 else "Out of Stock"
        store.append((
            dvd[0],
            dvd[1],
            stock_text,
            round(dvd[5] * parse.get_profit_rate(), 2)
        ))
    return store
def main(args):
    """Read a SPICE-like netlist (args[1]), build the MNA stamp matrices
    and solve the system; diodes are handled by fixed-point iteration.

    Writes the solution to args[2] when given, otherwise prints it.
    """
    if len(args) < 2:
        print('Usage: python ' + args[0] + ' input.txt [output.txt]')
        sys.exit()
    parser = Parser()
    comps_linear = []
    comps_diodes = []
    # Split the netlist into linear components and diodes.
    with open(args[1]) as file:
        for line in file:
            comp = parser.next_entry(line)
            if comp is not None:
                if isinstance(comp, spicemix.Diode):
                    comps_diodes.append(comp)
                else:
                    comps_linear.append(comp)
    n, m = find_n_m_size(comps_linear + comps_diodes)
    builder = StampBuilder(n, m)
    for comp in comps_linear:
        builder.add_component(comp)
    # Start every diode at the initial approximation; iterate until the
    # first diode's value stops changing (within eps).
    prev = start_appr
    for d in comps_diodes:
        d.set_vol_zero(start_appr)
    if len(comps_diodes) > 0:
        # the_p = comps_diodes[len(comps_diodes) - 1].get_p_node()
        min = start_appr  # NOTE(review): shadows the builtin min()
        while True:
            # Re-stamp from a copy of the linear-only builder each pass.
            prepared_builder = deepcopy(builder)
            for d in comps_diodes:
                prepared_builder.add_component(d)
            prepared_builder.clear_zer()
            a, z = prepared_builder.get_a_z()
            solution = gaussian_elimintaion(a, z)
            # Track the smallest node voltage seen at any diode.
            for d in comps_diodes:
                kek = d.get_p_node() - 1
                if solution[kek] < min:
                    min = solution[kek]
            for d in comps_diodes:
                d.set_vol_zero(min)
            dx = fabs(comps_diodes[0].get_value() - prev)
            prev = comps_diodes[0].get_value()
            if dx < eps:
                break  # why python hasnt do while ?
    else:
        # No diodes: a single linear solve suffices.
        builder.clear_zer()
        a, z = builder.get_a_z()
        solution = gaussian_elimintaion(a, z)
    if len(args) > 2:
        form_output(args[2], solution, n, m)
    else:
        print_matrix(solution)
def process(src, filename=None):
    """Parse *src* into a block tree and compile it to output."""
    from parser import Parser
    from compiler import Compiler
    block_tree = Parser(src, filename=filename).parse()
    return Compiler(block_tree).compile()
class TestParser(TestCase):

    def setUp(self):
        # Create an empty .asm fixture, hand it to Parser, then delete it
        # (Parser is expected to read the file during construction).
        file_name = path.join(environ['HOME'], 'tmp', 'tmp.asm')
        f = open(file_name, 'w')
        f.close()
        self.parser = Parser(file_name)
        remove(file_name)

    def test_comment_strip(self):
        """ test comment stripping """
        commands = """// this is a comment
this is not a comment
here is a trailing comment // comment
"""
        # Load the commands into the parser's buffer via a StringIO so no
        # real file is needed.
        s = StringIO()
        s.write(commands)
        s.seek(0)
        self.parser.buff = s.readlines()
        self.parser.file_clean()
        # Full-line comments disappear; trailing comments are stripped.
        l = ['this is not a comment', 'here is a trailing comment']
        self.assertListEqual(l, self.parser.buff)

    def test_false(self):
        # NOTE(review): intentionally failing placeholder -- presumably a
        # reminder that the suite is incomplete; confirm before removing.
        self.assertTrue(False)
class TestParser(unittest.TestCase):

    def setUp(self):
        # Fresh Parser for every test.
        self.p = None
        self.p = Parser()

    def test_simple_file(self):
        # Parsing the fixture should drive the visitor over both of its
        # functions, collecting their return identifiers.
        test_metric= TestMetric()
        self.p.add_visitor(test_metric)
        self.p.parse_files("test_files/simple.py")
        self.assertListEqual(test_metric.returns,['a','b'])

    def test_add_visitor(self):
        """ Adding a real MetricVisitor Object """
        mv = [TestMetric(), TestMetric()]
        self.p.add_visitor(mv[0])
        self.p.add_visitor(mv[1])
        self.assertListEqual(self.p._visitor_list, mv)

    def test_add_visitor_2(self):
        """ Adding a MetricVisitor by String"""
        self.p.add_visitor("McCabeMetric")
        from metrics.mccabemetric import McCabeMetric
        self.assertEqual(self.p._visitor_list[0].metric_name,
                         McCabeMetric().metric_name)

    def test_add_visitor_3(self):
        """ Adding a list of metrics by string """
        self.p.add_visitor(["McCabeMetric","McCabeMetric"])
        from metrics.mccabemetric import McCabeMetric
        self.assertEqual(self.p._visitor_list[0].metric_name,
                         McCabeMetric().metric_name)
        self.assertEqual(self.p._visitor_list[1].metric_name,
                         McCabeMetric().metric_name)
def get_page_patents(keyword, page_num):
    """Fetch one Soopat result page for *keyword* and return its patents."""
    # Result pages hold ten patents each.
    offset = page_num * 10
    html = SoopatSpider().soopat_search(keyword, offset)
    page_parser = Parser(html)
    logger.info("get page %s patents ok" % page_num)
    return page_parser.get_patents()
def test_validate_and_parse_valid_params(self):
    """validate_and_parse returns the tuple unchanged for valid input, and
    differs from any tuple with a single mismatching element."""
    parser = Parser()
    # assertEqual/assertNotEqual: the -Equals spellings are deprecated
    # aliases (removed in Python 3.12).
    self.assertEqual(parser.validate_and_parse(t, d, e, c), (t, d, e, c))
    self.assertNotEqual(parser.validate_and_parse(t, d, e, c),
                        ('2016-01-26 16:08:41', 'device-3', 'enter', 2))
    self.assertNotEqual(parser.validate_and_parse(t, d, e, c),
                        (t, 'device-3', e, c))
    self.assertNotEqual(parser.validate_and_parse(t, d, e, c),
                        (t, d, 'enter', c))
    self.assertNotEqual(parser.validate_and_parse(t, d, e, c),
                        (t, d, e, 2))
def __load_files(self): try: for file_name in os.listdir(self.file_location): if file_name.endswith(".json"): self.file_names.append(file_name) except OSError as e: print e return file_name = "" # silences ide warning try: for file_name in self.file_names: parser = Parser( file_name=self.file_location+file_name, simple_value=False, predict_references=self.predict_references) parser.parse_json() if len(self.cdm_entities) > 0: for cdm_entity in self.cdm_entities: if cdm_entity.value.name == file_name[0:-5]: raise DataExampleException("Found same entity twice while loading! Entity: {0}". format(file_name[0:-5])) cdm_entity = JSONValueModel(file_name[0:-5], parser.result) self.cdm_entities.append(CDMEntity(cdm_entity, file_name)) except UnexpectedTokenException as e: print "Error in file {0}:\n".format(file_name) print e #return except TokenMismatchException as e: print "Error in file {0}:\n".format(file_name) print e
def fetch_contents(self, source):
    """Recursively fetches Markdown contents from a single file or
    directory containing itself Markdown files
    """
    slides = []
    if type(source) is list:
        # A list of sources: concatenate the slides of each entry.
        for entry in source:
            slides.extend(self.fetch_contents(entry))
    elif os.path.isdir(source):
        self.log(u"Entering %s" % source)
        for entry in os.listdir(source):
            slides.extend(self.fetch_contents(os.path.join(source, entry)))
    else:
        try:
            # Parser is chosen from the file extension; unsupported
            # extensions raise NotImplementedError and are skipped.
            parser = Parser(os.path.splitext(source)[1], self.encoding)
        except NotImplementedError:
            return slides
        self.log(u"Adding %s (%s)" % (source, parser.format))
        file_contents = codecs.open(source, encoding=self.encoding).read()
        # Every <hr> in the parsed output starts a new slide.
        inner_slides = re.split(r'<hr.+>', parser.parse(file_contents))
        for inner_slide in inner_slides:
            slides.append(self.get_slide_vars(inner_slide, source))
    if not slides:
        self.log(u"Exiting %s: no contents found" % source, 'notice')
    return slides
def testRelations(self):
    """A '<=' between two globals loads both operands into registers and
    leaves a Compare-class Boolean item describing the relation."""
    class MySymtab(object):
        # Minimal symbol table: p at offset 8, q at 16, anything else Unknown.
        def lookup(self, i, name):
            i.typ = Item.Integer; i.cls = Item.Global
            if name == 'p':
                i.a = 8
            elif name == 'q':
                i.a = 16
            else:
                i.typ = Item.Unknown

    class MyCG(object):
        # Minimal code generator: allocates registers upward from x2.
        def __init__(self):
            self.rh = 2

        def load(self, i):
            if i.cls == Item.Register:
                return  # already loaded.
            elif i.cls == Item.Global:
                print("\tld\tx{}, {}(gp)".format(self.rh, i.a))
                i.cls = Item.Register
                i.a = self.rh
                self.rh = self.rh + 1
            else:
                print("Unknown load {}".format(i))

    s = scannerFor("p <= q"); st = MySymtab(); cg = MyCG()
    p = Parser(scanner=s, symtab=st, cg=cg); p.scan()
    i = Item()
    p.Expression(i)
    # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
    self.assertEqual(i.typ, Item.Boolean)
    self.assertEqual(i.cls, Item.Compare)
    self.assertEqual(i.a, 2)
    self.assertEqual(i.b, 3)
    self.assertEqual(i.op, LessEq)
def it_register_methods(self):
    # register() should store the pattern and action in parallel lists and
    # get() should resolve a matching string back to its action.
    parser = Parser()
    foo = lambda: "foo"
    parser.register(r"foo", foo)
    # |should| is the should-dsl matcher syntax.
    parser.patterns |should| equal_to(["foo"])
    parser.actions |should| equal_to([foo])
    parser.get("foo") |should| equal_to(foo)
class ParseTest(unittest.TestCase):
    # Shared fixtures, populated in setUp().
    parser = None      # Parser under test
    response = None    # decoded JSON response
    resp_file = None   # open handle on resp.json

    def setUp(self):
        print("\nsetUp: preparing tests")
        self.resp_file= open("resp.json", "r")
        self.response = json.load(self.resp_file)
        # pprint(self.response)
        self.parser = Parser(self.response)

    def tearDown(self):
        print("\ntearDown: closing response.json file")
        self.resp_file.close()

    """ Test methods """

    def test_read_all_items(self):
        # The fixture response is expected to contain 7 tasks.
        items = self.parser.get_all_tasks()
        if items is not None:
            self.assertEqual(7, len(items))

    def test_today_items(self):
        # Placeholder assertion -- the real check is commented out below.
        today_items = self.parser.get_today_tasks()
        # for item in today_items:
        #     print("due today: " + item['content'])
        self.assertEqual(0, 0)
class ParserTestCase(unittest.TestCase):
    """This test check to make sure the internal representation matches
    the file read"""
    # NOTE(review): self.file is expected to be supplied by subclasses.

    def setUp(self):
        # Parse the grammar file, print it back out, and re-parse the
        # printed form; the two representations should agree.
        f = open("../grammars/" + self.file)
        G = f.read()
        f.close()
        self.grammar_one = Parser(G)
        expected_s = StringIO()
        self.grammar_one.printer(expected_s)
        expected_s.seek(0, 0)
        self.grammar_two = Parser(expected_s.read())

    # assertEqual throughout: assertEquals is a deprecated alias (removed
    # in Python 3.12).

    def test_start_token(self):
        """%start nonterminal"""
        self.assertEqual(self.grammar_one.grammar.start_token,
                         self.grammar_two.grammar.start_token)

    def test_terminals(self):
        """%token terminals"""
        self.assertEqual(len(self.grammar_one.grammar.terminal_name),
                         len(self.grammar_two.grammar.terminal_name))
        for (a, b) in zip(self.grammar_one.grammar.terminal_name,
                          self.grammar_two.grammar.terminal_name):
            self.assertEqual(a, b)

    def test_productions(self):
        """Parser Production Count"""
        self.assertEqual(len(self.grammar_one.grammar.production_list),
                         len(self.grammar_two.grammar.production_list))
        for (a, b) in zip(self.grammar_one.grammar.production_list,
                          self.grammar_two.grammar.production_list):
            self.assertEqual(a.ls, b.ls)
class TestCircuitTree(unittest.TestCase):

    def setUp(self):
        self.parser = Parser(CircuitTree)

    def testCircuitTree(self):
        # Each case maps a circuit expression ('+' series, '|' parallel) to
        # (params, omega, expected equivalent complex impedance).
        testcases = {
            'R+R': ([0.1, 1.5], 10, 0.1 + 1.5),
            'R|R': ([0.1, 1.5], 10, 1.0 / (1.0 / 0.1 + 1.0 / 1.5)),
            'R+C': ([0.1, 1.5], 10, 0.1 + 1 / (1j * 10 * 1.5)),
            'R|C': ([0.1, 1.5], 10, 0.1 / (1.0 + 1j * 10 * 0.1 * 1.5)),
            'R|L': ([0.1, 1.5], 10, 0.1 / (1.0 + 0.1 / (1j * 10 * 1.5))),
        }
        for case in testcases:
            circuit = self.parser.parse(case).collapseCircuit()
            p, w, res = testcases[case]
            self.assertAlmostEqual(circuit.eqc(w, p), res, msg=case)

    def testJacobian(self):
        # Expected partial derivatives of the equivalent circuit with
        # respect to each parameter.
        testcases = {
            'R+R': ([2, 4], 10, [1, 1]),
            'R|R': ([2, 4], 10, [4**2 / (2.0 + 4.0)**2, 2**2 / (2.0 + 4.0)**2]),
        }
        for case in testcases:
            circuit = self.parser.parse(case).collapseCircuit()
            p, w, jac = testcases[case]
            for i, v in enumerate(jac):
                self.assertAlmostEqual(circuit.jac[i](w, p), v, msg=case)

    def tearDown(self):
        self.parser = None
def check():
    """Flask view: fetch HTML (POSTed directly, or downloaded from a
    GET-supplied URL), run it through Parser and render the results page."""
    #get html
    error = False
    html = None
    url = None
    if request.method == 'POST':
        html = request.values['html']
    if request.method == 'GET':
        url = request.values['url']
        try:
            # NOTE: verify=False disables TLS certificate checks.
            html = requests.get(url, verify=False).content
        except requests.exceptions.ConnectionError:
            error = "Sorry, that URL does not exist"
        except (requests.exceptions.MissingSchema,
                requests.exceptions.InvalidSchema):
            # Both schema problems produce the same user-facing message.
            error = "Sorry, that is not a valid URL"
        except requests.exceptions.HTTPError:
            error = "Sorry, something went wrong"
        except requests.exceptions.Timeout:
            error = "Sorry, there was a timeout when trying to visit that URL"
    #parse
    parser = Parser()
    # `error` is either False or a non-empty message string, so truthiness
    # replaces the original `error == False` comparison.
    if not error:
        parser.parse(html)
    return render_template('check.html', menu_item="tools", parser=parser,
                           error=error, url=url)
def run_ML_pipeline(fname):
    """Run the full field-classification pipeline on the config in *fname*:
    summary stats and diagnostic plots, DataFrame construction for the
    train/test fields, then (per config flags) logistic regression, SVM
    and random-forest models with hyperparameter tuning.
    """
    bu = Parser(fname)
    _ = compute_tif_summaryStats(bu.datasetTrain)
    _ = compute_tif_summaryStats(bu.datasetTest)
    # make overlay plot and save to plotField/
    # visualize; include NIR
    plot_false_RGB(bu.train_img1, bu.train_img3, bu.train_img4,
                   savedir=bu.plotdir, tag='train', savefig=bu.saveFig)
    plot_false_RGB(bu.test_img1, bu.test_img3, bu.test_img4,
                   savedir=bu.plotdir, tag='test', savefig=bu.saveFig)
    plot_field_all_bands_hist(bu.datasetTrain, bu.saveFig, bu.plotdir, tag='train')
    plot_field_all_bands_hist(bu.datasetTest, bu.saveFig, bu.plotdir, tag='test')
    plot_truthPoints(bu.truth, bu.saveFig, bu.plotdir)
    overplot_truthPoints_trainField(bu.datasetTrain, bu.truth, bu.saveFig,
                                    bu.plotdir, bu.train_xmin, bu.train_xmax,
                                    bu.train_ymin, bu.train_ymax)
    # calc vegetation indices, create pd
    bbb = CreateDFsML(test_size=0.3)
    bbb.outdir = bu.MLplotdir
    bbb.saveFig = bu.saveFig
    bbb.logreg = bu.logreg
    bbb.SVM = bu.SVM
    bbb.RFC = bu.RFC
    bbb.trainndvi = bu.trainndvi
    bbb.trainendvi = bu.trainendvi
    bbb.traincvi = bu.traincvi
    bbb.trainng = bu.trainng
    bbb.trainnnir = bu.trainnnir
    bbb.trainnr = bu.trainnr
    bbb.traintvi = bu.traintvi
    if bu.trainhue:
        # train also on Hue image
        from split_fields_to_DF import create_rgb
        train_RGB = create_rgb(bu.train_img1, bu.train_img3, bu.train_img4)
        train_hsv_img = rgb2hsv(train_RGB, savedir=bu.plotdir, tag='train',
                                plothist=True, saveFig=bu.saveFig)
        del train_RGB
    else:
        train_hsv_img = None
    bbb.build_DF_trainField(bu.train_img1, bu.train_img2, bu.train_img3,
                            bu.train_img4, bu.truthfile, bu.datasetTrain,
                            train_hsv_img=train_hsv_img, verbose=bu.verbose)
    del train_hsv_img
    bbb.split_data_for_ML()
    # similarly for test
    if bu.trainhue:
        test_RGB = create_rgb(bu.test_img1, bu.test_img3, bu.test_img4)
        test_hsv_img = rgb2hsv(test_RGB, savedir=bu.plotdir, tag='test',
                               plothist=True, saveFig=bu.saveFig)
        del test_RGB
    else:
        test_hsv_img = None
    bbb.build_DF_testField(bu.test_img1, bu.test_img2, bu.test_img3,
                           bu.test_img4, test_hsv_img, bu.verbose)
    del test_hsv_img
    del bu
    # ML pipeline
    if bbb.logreg:
        ## 0 and 1, so start w/ logistic
        logreg = LogisticRegression(penalty='l2', class_weight='balanced')
        logreg.fit(bbb.X_train_small, bbb.y_train_small)
        scores(logreg, 'Logistic Regression', bbb.X_train_small, bbb.y_train_small,
               bbb.X_test_small, bbb.y_test_small)
        confusion_scores(logreg, bbb.X_test_small, bbb.y_test_small, 'logreg',
                         bbb.outdir, saveFig=bbb.saveFig)
        ROC(logreg, 'LogisticRegression', bbb.X_test_small, bbb.y_test_small,
            bbb.outdir, '', bbb.saveFig)
        # hyperparameter Tuning
        gsc, grid_result = GridSearch_logreg(bbb.X_train_small, bbb.y_train_small)
        # apply
        logreg_tuned = LogisticRegression(penalty='l2', class_weight='balanced',
                                          **grid_result.best_params_)
        logreg_tuned.fit(bbb.X_train_small, bbb.y_train_small)
        scores(logreg_tuned, 'Logistic Regression, Tuned', bbb.X_train_small,
               bbb.y_train_small, bbb.X_test_small, bbb.y_test_small)
        confusion_scores(logreg_tuned, bbb.X_test_small, bbb.y_test_small,
                         'logregtuned', bbb.outdir, saveFig=bbb.saveFig)
        ROC(logreg_tuned, 'LogisticRegressionTuned', bbb.X_test_small,
            bbb.y_test_small, bbb.outdir, '', bbb.saveFig)
        # run model on the test field
        yyy_predict_logreg = logreg_tuned.predict(bbb.XXX)
        print("number of plants in test field: ", yyy_predict_logreg.sum())
        # Fixed: the format string was passed as a plain print argument
        # (printing a tuple) instead of being .format()-ed.
        print("{:.2f}% of field".format(
            (yyy_predict_logreg.sum() / len(yyy_predict_logreg)) * 100))
    if bbb.SVM:
        # SVM
        from sklearn.svm import SVC
        SVM = SVC(kernel='linear', class_weight='balanced')
        SVM.fit(bbb.X_train_small, bbb.y_train_small)
        scores(SVM, 'SVM linear', bbb.X_train_small, bbb.y_train_small,
               bbb.X_test_small, bbb.y_test_small)
        confusion_scores(SVM, bbb.X_test_small, bbb.y_test_small, 'SVM linear',
                         bbb.outdir, saveFig=bbb.saveFig)
        # SVM poly
        SVM = SVC(kernel='poly', class_weight='balanced')
        SVM.fit(bbb.X_train_small, bbb.y_train_small)
        scores(SVM, 'SVM poly', bbb.X_train_small, bbb.y_train_small,
               bbb.X_test_small, bbb.y_test_small)
        confusion_scores(SVM, bbb.X_test_small, bbb.y_test_small, 'SVM poly',
                         bbb.outdir, saveFig=bbb.saveFig)
        # hypertuning for poly
        gsc, grid_result = GridSearch_SVMpoly(bbb.X_train_small, bbb.y_train_small)
        # apply
        SVMpoly_tuned = SVC(kernel='poly', class_weight='balanced',
                            **grid_result.best_params_)
        SVMpoly_tuned.fit(bbb.X_train_small, bbb.y_train_small)
        scores(SVMpoly_tuned, 'SVM poly, Tuned', bbb.X_train_small,
               bbb.y_train_small, bbb.X_test_small, bbb.y_test_small)
        confusion_scores(SVMpoly_tuned, bbb.X_test_small, bbb.y_test_small,
                         'SVMpolytuned', bbb.outdir, saveFig=bbb.saveFig)
        ROC(SVMpoly_tuned, 'SVMpolyTuned', bbb.X_test_small, bbb.y_test_small,
            bbb.outdir, '', bbb.saveFig)
        # run model on the test field
        yyy_predict_poly = SVMpoly_tuned.predict(bbb.XXX)
        print("number of plants in test field: ", yyy_predict_poly.sum())
        print("{:.2f}% of field".format(
            (yyy_predict_poly.sum() / len(yyy_predict_poly)) * 100))
        # SVM rbf
        SVM = SVC(kernel='rbf', class_weight='balanced')
        SVM.fit(bbb.X_train_small, bbb.y_train_small)
        scores(SVM, 'SVM rbf', bbb.X_train_small, bbb.y_train_small,
               bbb.X_test_small, bbb.y_test_small)
        confusion_scores(SVM, bbb.X_test_small, bbb.y_test_small, 'SVM rbf',
                         bbb.outdir, saveFig=bbb.saveFig)
        # hypertuning for rbf
        gsc, grid_result = GridSearch_SVMrbf(bbb.X_train_small, bbb.y_train_small)
        # apply
        SVMrbf_tuned = SVC(kernel='rbf', class_weight='balanced',
                           **grid_result.best_params_)
        SVMrbf_tuned.fit(bbb.X_train_small, bbb.y_train_small)
        scores(SVMrbf_tuned, 'SVM rbf, Tuned', bbb.X_train_small,
               bbb.y_train_small, bbb.X_test_small, bbb.y_test_small)
        confusion_scores(SVMrbf_tuned, bbb.X_test_small, bbb.y_test_small,
                         'SVMrbftuned', bbb.outdir, saveFig=bbb.saveFig)
        ROC(SVMrbf_tuned, 'SVMrbfTuned', bbb.X_test_small, bbb.y_test_small,
            bbb.outdir, '', bbb.saveFig)
        # run model on the test field
        yyy_predict_rbf = SVMrbf_tuned.predict(bbb.XXX)
        print("number of plants in test field: ", yyy_predict_rbf.sum())
        print("{:.2f}% of field".format(
            (yyy_predict_rbf.sum() / len(yyy_predict_rbf)) * 100))
    if bbb.RFC:
        ### RFC
        rfc = RandomForestClassifier(class_weight='balanced')
        # Fixed: the training arrays were referenced without the `bbb.`
        # prefix (NameError at runtime).
        rfc.fit(bbb.X_train_small, bbb.y_train_small)
        # Look at parameters used by our current forest
        from pprint import pprint
        print('Parameters currently in use:\n')
        pprint(rfc.get_params())
        scores(rfc, 'RFC', bbb.X_train_small, bbb.y_train_small,
               bbb.X_test_small, bbb.y_test_small)
        confusion_scores(rfc, bbb.X_test_small, bbb.y_test_small, 'RFC',
                         bbb.outdir, saveFig=bbb.saveFig)
        # hypertuning for RFC
        gsc, grid_result = GridSearch_RFC(bbb.X_train_small, bbb.y_train_small)
        # apply
        # Fixed: the tuned model was built as RandomForestRegressor, which
        # does not accept class_weight and is the wrong estimator for this
        # classification task.
        rfc_tuned = RandomForestClassifier(class_weight='balanced',
                                           **grid_result.best_params_)
        rfc_tuned.fit(bbb.X_train_small, bbb.y_train_small)
        scores(rfc_tuned, 'RFC Tuned', bbb.X_train_small, bbb.y_train_small,
               bbb.X_test_small, bbb.y_test_small)
        confusion_scores(rfc_tuned, bbb.X_test_small, bbb.y_test_small,
                         'RFCtuned', bbb.outdir, saveFig=bbb.saveFig)
        ROC(rfc_tuned, 'RFCTuned', bbb.X_test_small, bbb.y_test_small,
            bbb.outdir, '', bbb.saveFig)
        # run model on the test field
        yyy_predict_rfc = rfc_tuned.predict(bbb.XXX)
        print("number of plants in test field: ", yyy_predict_rfc.sum())
        print("{:.2f}% of field".format(
            (yyy_predict_rfc.sum() / len(yyy_predict_rfc)) * 100))
def main():
    """SAT-based planner entry point.

    Pipeline: discover encoding/solver plugins, parse command-line args,
    parse and ground the PDDL problem, then for each horizon generate a
    CNF encoding, run the SAT solver, and on SAT extract, print, save and
    validate the plan.  Temporary files are removed when --remove-tmp is
    set.  Exit codes: 1 on setup/parsing/planning errors, 0 otherwise.
    """
    start_time = time.time()
    print "Starting SAT-based planner..."
    print "Checking for plugins..."
    try:
        encoding_wrapper = EncodingWrapper()
        encoding_wrapper.read_encoding_list()
        solver_wrapper = SolverWrapper()
        solver_wrapper.read_solver_list()
    except (EncodingException, SolvingException) as e:
        print e.message
        sys.exit(1)
    print "Encodings registered: ", len(encoding_wrapper.valid_encodings)
    print "Solvers registered: ", len(solver_wrapper.valid_solvers)
    args = parse_cmd_line_args(\
        encoding_wrapper.valid_encodings, encoding_wrapper.default_encoding,
        solver_wrapper.valid_solvers, solver_wrapper.default_solver)
    if args is None:
        sys.exit(1)
    arg_processing_time = time.time()
    print "Command line arg processing time:", (arg_processing_time - start_time)
    #Ensure that the tmp_dir exists
    try:
        os.makedirs(tmp_path)
    except OSError as exception:
        # An already-existing directory is fine; anything else is fatal.
        if exception.errno != errno.EEXIST:
            print "Error: could not create temporary directory:", tmp_path
            sys.exit(1)
    #Parse the input PDDL
    try:
        parser = Parser(args.domain_file_name, args.problem_file)
        print "Parsing the PDDL domain..."
        parser.parse_domain()
        print "Parsing the PDDL problem..."
        parser.parse_problem()
        print "Simplifying the problem representation..."
        problem = parser.problem
        problem.simplify()
        problem.assign_cond_codes()
        end_parsing_time = time.time()
        print "Parsing time:", (end_parsing_time - arg_processing_time)
        print "Grounding the problem..."
        pre_file_name = os.path.join(tmp_path, args.exp_name + PRE_SUFFIX)
        ground_file_name = os.path.join(tmp_path, args.exp_name + GROUND_SUFFIX)
        grounder = Grounder(problem, pre_file_name, ground_file_name)
        grounder.ground()
        end_grounding_time = time.time()
        print "Grounding time:", (end_grounding_time - end_parsing_time)
        print "Simplifying the ground encoding..."
        # Post-grounding simplification passes, in dependency order.
        problem.compute_static_preds()
        problem.link_groundings()
        problem.make_flat_preconditions()
        problem.make_flat_effects()
        problem.get_encode_conds()
        problem.make_cond_and_cond_eff_lists()
        problem.link_conditions_to_actions()
        problem.make_strips_conditions()
        problem.compute_conflict_mutex()
        end_linking_time = time.time()
        print "Simplify time:", (end_linking_time - end_grounding_time)
        object_invariants = []
        if args.plangraph:
            print "Generating Plangraph invariants..."
            plangraph_preprocessor = PlangraphPreprocessor(problem)
            object_invariants = plangraph_preprocessor.run()
            # run() signals failure by returning False.
            if object_invariants == False:
                raise PreprocessingError()
        end_plangraph_time = time.time()
        if args.plangraph:
            print "Plangraph invariants time:", (end_plangraph_time - end_linking_time)
        strips_problem = problem.make_strips_problem()
    except (ParsingException, PreprocessingException, ProblemException) as e:
        print e
        sys.exit(1)
    finally:
        # Best-effort cleanup of the grounding scratch files.
        if args.remove_tmp:
            try:
                os.system("rm " + pre_file_name)
            except:
                pass
            try:
                os.system("rm " + ground_file_name)
            except:
                pass
    print "Planning..."
    try:
        for horizon in args.horizons:
            print "Step:", horizon
            print "-------------------------------------------------"
            step_start_time = time.time()
            try:
                print "Generating base encoding:", args.encoding, "..."
                encoding_wrapper.instantiate_encoding(args.encoding, strips_problem)
                encoding = encoding_wrapper.encoding
                encoding.encode(horizon, args.exec_semantics, args.plangraph_constraints)
                end_encoding_base_time = time.time()
                print "Encoding generation time:", (end_encoding_base_time - step_start_time)
                print "Writing CNF file..."
                cnf_file_name = os.path.join(
                    tmp_path, args.exp_name + "_" + str(horizon) + ".cnf")
                encoding.write_cnf(cnf_file_name)
                end_writing_cnf_time = time.time()
                print "Writing time:", (end_writing_cnf_time - end_encoding_base_time)
            except Exception as e:
                print "Exception while generating the CNF!\n"
                print traceback.format_exc()
                try:
                    os.system("rm " + cnf_file_name)
                except:
                    pass
                sys.exit(0)
            if args.debug_cnf:
                print "Writing debug CNF..."
                encoding.write_debug_cnf(cnf_file_name + "_dbg")
                end_writing_dbg_cnf_time = time.time()
            if args.debug_cnf:
                print "Writing time:", (end_writing_dbg_cnf_time - end_writing_cnf_time)
            try:
                print "Solving..."
                solver_wrapper.instantiate_solver(args.solver, cnf_file_name,\
                    tmp_path, args.exp_name, args.time_out)
                (sln_res, sln_time, true_vars) = solver_wrapper.solver.solve()
                print "SAT" if sln_res else "UNSAT"
                print "Solution time:", sln_time
            except SolvingException as e:
                raise PlanningException(e.message, solving_error_code)
            finally:
                # Remove the CNF and solution scratch files regardless of
                # the solver outcome.
                if args.remove_tmp:
                    try:
                        os.system("rm " + cnf_file_name)
                    except:
                        pass
                    try:
                        os.system("rm " + solver_wrapper.solver.sln_file_name)
                    except:
                        pass
            if sln_res:
                encoding.set_true_variables(true_vars)
                try:
                    print "Extracting the plan..."
                    encoding.build_plan(horizon)
                    plan = problem.make_plan_from_strips(encoding.plan)
                except:
                    print "Exception while extracting the plan!\n"
                    print traceback.format_exc()
                    sys.exit(0)
                output_file = None
                if args.output_file_name is not None:
                    try:
                        output_file = file(args.output_file_name, "w")
                    except:
                        print "Error: could not open plan file! Not saving plan."
                num_actions = 0
                print "Plan:"
                # Print (and optionally save) each step's actions.
                for step, s_actions in enumerate(plan):
                    for (action, a_args) in s_actions:
                        a_str = action.name
                        if a_args:
                            a_str += " " + " ".join(a_args)
                        print str(step) + ": " + a_str
                        if output_file is not None:
                            output_file.write(str(step) + ": " + a_str + "\n")
                        num_actions += 1
                if output_file is not None:
                    output_file.close()
                print "Simulating plan for validation."
                sim_res, plan_cost = problem.simulate_plan(plan)
                if sim_res:
                    print "Plan valid.", num_actions, "actions."
                else:
                    raise PlanningException("INVALID PLAN!", solving_error_code)
                step_end_time = time.time()
                print "Step time:", step_end_time - step_start_time
                # A valid plan was found: stop trying larger horizons.
                break
            step_end_time = time.time()
            print "Step time:", step_end_time - step_start_time
        end_time = time.time()
        print "Total time:", end_time - start_time
    except PlanningException as e:
        print "Planning Error: ", e.message
        sys.exit(1)
    sys.exit(0)
def validate(args):
    """Validate ``args.infile`` as JSON against the module-level taxonomy.

    TODO: Add support for XML once the XML import is completed.
    """
    json_parser = Parser(tax)
    json_parser.validate(args.infile, FileFormat.JSON)
def convert(args):
    """Convert ``args.infile`` (JSON) into ``args.outfile``.

    TODO: Add support for XML to JSON once the XML import code is completed.
    """
    json_parser = Parser(tax)
    json_parser.convert(args.infile, args.outfile, FileFormat.JSON)
from parser import Parser
from statistics import Statistics
from visualisation import Visualisation

# Parse the project's git log into a data container.
parser = Parser('logs/project_gitlog.log')
data_container = parser.create_data_container()

# Create and print statistics over the parsed log.
statistics = Statistics(data_container)
statistics.generate_statistics()
statistics.print_statistics()

visualisation = Visualisation(statistics)

# Serialize the statistics to JSON.
# BUG FIX: this assignment was commented out, so the following
# `serializer.serialize()` call raised a NameError at runtime.
serializer = statistics.general_serializer
json_data = serializer.serialize()
from lexer import Lexer
from parser import Parser

lexer = Lexer().build()

# FIX: the original assigned the handle to `file` (shadowing the builtin)
# and closed it manually; a `with` block guarantees the handle is released
# even if reading fails.
with open('testCase.txt') as source:
    text_input = source.read()

lexer.input(text_input)

# Debug helper: uncomment to dump the raw token stream.
# while True:
#     tok = lexer.token()
#     if not tok:
#         break
#     print(tok)

parser = Parser()
parser.build().parse(text_input, lexer, False)
from profile import Profile
from parser import Parser


def run_operational(reference_data):
    """Run the operational driver, but only when reference data exists."""
    if not reference_data:
        return
    operationalDriver = Operational(OPER_URL, reference_data)
    operationalDriver.run()


def run_profiles(reference_data):
    """Run the profile driver, but only when reference data exists."""
    if not reference_data:
        return
    profileDriver = Profile(reference_data)
    profileDriver.run()


if __name__ == "__main__":
    parser = Parser(BASE_URL)
    reference_data = parser.get_deputy_reference_data()

    # Dispatch table: run mode name -> driver entry point.
    modes = {
        'operational': run_operational,
        'profiles': run_profiles,
    }

    mode = 'profiles'  # Run mode
    modes[mode](reference_data)
def main(arguments=None):
    """CLI entry point: convert pep8/flake8 output into an HTML/TeamCity report.

    Input comes from the optional *filename* argument or from piped stdin;
    the report goes to ``--output-file`` when given, otherwise to stdout.
    """
    args = arguments or argv[1:]
    fileparser = None
    argparser = argparse.ArgumentParser(
        description='Convert pep8 or flake8 output to HTML',
        prog=NAME,
        # BUG FIX: `+` binds tighter than `%`, so the original interpolated
        # %(name)s only into the second string and --help showed the first
        # one verbatim.  Interpolate the concatenated epilog as a whole.
        epilog=(
            '%(name)s accepts input either from stdin or from a filename argument.\n'
            'Unless specified otherwise with -o OUTPUT_FILE, %(name)s outputs to stdout.'
        ) % {'name': NAME})
    argparser.add_argument(
        'filename',
        nargs='?',
        type=str,
        help='Path to file containing pep8 or flake8 results.')
    argparser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        help='Enable verbose output (only if --output-file is specified)')
    # FIX: help text said "exists" where "exits" was meant.
    argparser.add_argument('--version',
                           action='store_true',
                           help='Prints %s version and exits' % NAME)
    argparser.add_argument(
        '-o',
        '--output-file',
        type=str,
        help=
        'Outputs the HTML data to the specified file and enables the use of the --verbose option.'
    )
    argparser.add_argument('-g',
                           '--generator',
                           choices=GENERATOR_CHOICES.keys(),
                           help='Selects the generator Html or TeamCity')
    argparser.add_argument('-r',
                           '--report-name',
                           type=str,
                           default=DEFAULT_REPORT_NAME,
                           help='Name for the report.')

    # Fetch the provided arguments from sys.argv
    args = argparser.parse_args(args)

    if args.version:
        print('%s version %s' % (NAME, VERSION))
        exit(0)

    if args.filename:
        try:
            f = open(args.filename)
            fileparser = Parser(f)
        except IOError as e:
            stderr.write('Could not open file: %s' % e)
            stderr.flush()
            exit(1)
    else:
        # We need to check if stdin is piped or read from file, since we dont want
        # stdin to hang at terminal input
        mode = fstat(0).st_mode
        if S_ISFIFO(mode) or S_ISREG(mode):
            fileparser = Parser(stdin)
        else:
            # stdin is terminal input at this point
            argparser.print_help()
            exit(0)

    # Generate the HTML report to output_file if not None, else print to stdout
    generator = GeneratorBase.create_generator(args.generator, fileparser,
                                               args.report_name)
    if generator is None:
        stderr.write('Unsupported generator: %s' % args.generator)
        stderr.flush()
        exit(1)
    generator.analyze(output_file=args.output_file)
def test_parse_entry():
    """A two-column row is mapped onto the configured field names."""
    parser = Parser(["day", "value"])
    expected = {
        "day": "2020-01-01",
        "value": "01",
    }
    assert parser.parse(["2020-01-01", "01"]) == expected
def test_parse_entry_invalid_date():
    """A dd/mm/yyyy date value is normalised to ISO yyyy-mm-dd."""
    parser = Parser(["day"])
    result = parser.parse(["01/02/2020"])
    assert result == {"day": "2020-02-01"}
from loader import Connector

# Bot token is read from the environment; TOKEN is None if unset.
TOKEN = os.getenv('TELEGRAM_BOT_TOKEN')
bot = telebot.TeleBot(TOKEN)
# Drop any previously registered webhook so long-polling below works.
bot.remove_webhook()


@bot.message_handler(func=lambda message: True,
                     commands=['firstnews', 'lastnews', 'listnews'])
def update_news_list(message):
    """Fetch the latest news items, persist them, and reply per command.

    Uses the module-level `parser` and `connector` created in the
    __main__ guard below.
    """
    # Scrape the detail URLs and resolve each into a news object dict
    # with at least 'title', 'date' and 'url' keys.
    urls = parser.get_detailed()
    objs = [parser.follow(u) for u in urls]
    print(objs)
    # Persist every fetched object.
    connector.insert(objs)
    command = message.html_text
    # NOTE(review): '/firstnews' answers with objs[-1] and '/lastnews'
    # with objs[0] — this assumes the parser returns newest-first order;
    # confirm against Parser.get_detailed().
    if command == '/firstnews':
        payload = f"Title: {objs[-1]['title']}\nDate: {objs[-1]['date']}\nUrl: {objs[-1]['url']}\n"
    elif command == '/lastnews':
        payload = f"Title: {objs[0]['title']}\nDate: {objs[0]['date']}\nUrl: {objs[0]['url']}\n"
    elif command == '/listnews':
        # One "date\ntitle" entry per item, blank line between entries.
        payload = '\n\n'.join(
            ['\n'.join([str(obj['date']), obj['title']]) for obj in objs])
    else:
        payload = 'wtf??'
    bot.send_message(message.chat.id, payload)


if __name__ == '__main__':
    connector = Connector()
    parser = Parser()
    # Block forever, polling Telegram for updates.
    bot.polling(none_stop=True)
#!/usr/bin/env python3
# 2019-3-31
import sys

from parser import Parser

sys.path.append('.')
from LinkedObjectGraph.ObjectGraph import ObjectGraph

# Build one ObjectGraph per graph command found in the fixture file and
# print each graph between divider lines.
parser = Parser('./test/graphs.txt')
for graph_command in parser.graph_commands:
    print('NEW GRAPH')
    print(ObjectGraph(graph_command))
    print('-----------------------')
class CodeWriter:
    """Translates parsed VM commands into assembly written to a .asm file.

    Emits stack push/pop sequences and arithmetic/logic sequences using the
    @SP / @LCL / @ARG / @THIS / @THAT symbols; output goes to a file derived
    from the input path.
    """

    def __init__(self, path):
        # Parser over the input VM file; the writer pulls one command at a
        # time from it in createOutput().
        self.parser = Parser(path)
        # just perform the logic of the recommended setFileName constructor here
        ind1 = path.find('/')
        ind2 = path.find('.')
        # e.g. "dir/Foo.vm" -> writefile "dir/Foo", output file "dir/Foo.asm".
        self.writefile = path[:ind1] + "/" + path[ind1 + 1:ind2]
        self.filename = self.writefile + '.asm'
        self.file = open(self.filename, 'w')
        self.writefile_ind = self.writefile.rfind('/')
        self.static_var = self.writefile[
            self.writefile_ind + 1:]  # useful in declaring static variables

    def writePushPop(self):  # no need to pass in command as an argument
        """Emit assembly for the current C_PUSH or C_POP command."""
        assert self.parser.commandType() in ['C_PUSH', 'C_POP']
        arg1 = self.parser.arg1()  # segment name (or 'constant')
        arg2 = self.parser.arg2()  # index within the segment
        if self.parser.commandType() == 'C_PUSH':
            # stack operation
            if arg1 == 'constant':  # e.g. push constant 7
                self.file.write('@%s\n' % arg2)
                self.file.write('D=A\n')  # D = 7
                self.file.write('@SP\n')
                self.file.write('A=M\n')
                self.file.write('M=D\n')  # M[M[base_address]] = 7
            elif arg1 in [
                    'temp', 'pointer', 'local', 'argument', 'this', 'that'
            ]:
                # Compute the source address: fixed base (temp=5, pointer=3)
                # uses D+A, pointer-backed segments dereference with D+M.
                self.file.write('@%s\n' % arg2)
                self.file.write('D=A\n')
                if arg1 == 'temp':
                    self.file.write('@5\n')
                    self.file.write('A=D+A\n')
                elif arg1 == 'pointer':
                    self.file.write('@3\n')
                    self.file.write('A=D+A\n')
                elif arg1 == 'local':
                    self.file.write('@LCL\n')
                    self.file.write('A=D+M\n')
                elif arg1 == 'argument':
                    self.file.write('@ARG\n')
                    self.file.write('A=D+M\n')
                elif arg1 == 'this':
                    self.file.write('@THIS\n')
                    self.file.write('A=D+M\n')
                elif arg1 == 'that':
                    self.file.write('@THAT\n')
                    self.file.write('A=D+M\n')
                else:
                    pass
                # Copy segment[index] onto the top of the stack.
                self.file.write('D=M\n')
                self.file.write('@SP\n')
                self.file.write('A=M\n')
                self.file.write('M=D\n')
            elif arg1 == 'static':
                # declare a new symbol file.j in "push static j"
                self.file.write('@%s.%s\n' % (self.static_var, arg2))
                self.file.write('D=M\n')
                # push D's value to the stack
                self.file.write('@SP\n')
                self.file.write('A=M\n')
                self.file.write('M=D\n')
            else:
                # TODO
                pass
            # increase address of stack top
            self.file.write('@SP\n')
            self.file.write('M=M+1\n')  # M[base_address] = M[base_address] + 1
        elif self.parser.commandType() == 'C_POP':
            # pop the stack value and store it in segment[index]
            # use general purpose RAM[13] to store the value of 'segment_base_address + index'
            self.file.write('@%s\n' % arg2)
            self.file.write('D=A\n')
            if arg1 in [
                    'temp', 'pointer', 'local', 'argument', 'this', 'that'
            ]:
                if arg1 == 'local':
                    self.file.write('@LCL\n')
                    self.file.write('D=D+M\n')
                elif arg1 == 'argument':
                    self.file.write('@ARG\n')
                    self.file.write('D=D+M\n')
                elif arg1 == 'this':
                    self.file.write('@THIS\n')
                    self.file.write('D=D+M\n')
                elif arg1 == 'that':
                    self.file.write('@THAT\n')
                    self.file.write('D=D+M\n')
                elif arg1 == 'temp':
                    self.file.write('@5\n')
                    self.file.write('D=D+A\n')
                elif arg1 == 'pointer':
                    self.file.write('@3\n')
                    self.file.write('D=D+A\n')
                else:
                    # TODO
                    pass
                # self.file.write('D=D+M\n')
                # Stash the target address in R13, pop the stack into D,
                # then store D at the stashed address.
                self.file.write('@13\n')  # general purpose register
                self.file.write('M=D\n')
                self.file.write('@SP\n')
                self.file.write('A=M-1\n')
                self.file.write('D=M\n')  # pop command
                self.file.write('@13\n')
                self.file.write('A=M\n')
                self.file.write('M=D\n')  # write to appropriate address
                self.file.write('@SP\n')
                self.file.write('M=M-1\n')  # adjust address of stack top
            elif arg1 == 'static':
                self.file.write('@SP\n')
                self.file.write('A=M-1\n')
                self.file.write('D=M\n')  # pop command
                self.file.write('@%s.%s\n' % (self.static_var, arg2))
                self.file.write('M=D\n')  # write to appropriate address
                self.file.write('@SP\n')
                self.file.write('M=M-1\n')  # adjust address of stack top
            else:
                # TODO
                pass

    def writeArithmetic(self):  # no need to pass in command as an argument
        """Emit assembly for the current C_ARITHMETIC command.

        Comparison commands (eq/gt/lt) emit branch labels parameterised by
        self.parser.i so labels stay unique across multiple commands.
        NOTE(review): createOutput() initialises parser.i to -1; uniqueness
        relies on the parser advancing i — confirm in Parser.advance().
        """
        assert self.parser.commandType() == 'C_ARITHMETIC'
        command = self.parser.arg1()
        if command == 'add':  # stack operation
            self.file.write('@SP\n')
            self.file.write('A=M-1\n')
            self.file.write('D=M\n')
            self.file.write('A=A-1\n')
            self.file.write('D=D+M\n')
            self.file.write('M=D\n')
            self.file.write('@SP\n')
            self.file.write('M=M-1\n')
        elif command == 'sub':  # stack operation
            self.file.write('@SP\n')
            self.file.write('A=M-1\n')
            self.file.write('D=M\n')
            self.file.write('A=A-1\n')
            self.file.write('D=M-D\n')
            self.file.write('M=D\n')
            self.file.write('@SP\n')
            self.file.write('M=M-1\n')
        elif command == 'eq':  # stack operation
            self.file.write('@SP\n')
            self.file.write('A=M-1\n')
            self.file.write('D=M\n')
            self.file.write('A=A-1\n')
            self.file.write('D=M-D\n')
            self.file.write(
                '@IF_TRUE_%s\n' %
                self.parser.i)  # there could be more than one 'eq' command
            self.file.write('D;JEQ\n')
            self.file.write('@SP\n')
            self.file.write('A=M-1\n')
            self.file.write('A=A-1\n')
            self.file.write('M=0\n')
            self.file.write(
                '@END_%s\n' %
                self.parser.i)  # there could be more than one 'eq' command
            self.file.write('0;JMP\n')
            self.file.write('(IF_TRUE_%s)\n' % self.parser.i)
            self.file.write('@SP\n')
            self.file.write('A=M-1\n')
            self.file.write('A=A-1\n')
            self.file.write('M=-1\n')
            self.file.write('(END_%s)\n' % self.parser.i)
            self.file.write('@SP\n')
            self.file.write('M=M-1\n')
        elif command == 'gt':  # stack operation
            self.file.write('@SP\n')
            self.file.write('A=M-1\n')
            self.file.write('D=M\n')
            self.file.write('A=A-1\n')
            self.file.write('D=M-D\n')
            self.file.write(
                '@IF_TRUE_%s\n' %
                self.parser.i)  # there could be more than one 'gt' command
            self.file.write('D;JGT\n')
            self.file.write('@SP\n')
            self.file.write('A=M-1\n')
            self.file.write('A=A-1\n')
            self.file.write('M=0\n')
            self.file.write(
                '@END_%s\n' %
                self.parser.i)  # there could be more than one 'gt' command
            self.file.write('0;JMP\n')
            self.file.write('(IF_TRUE_%s)\n' % self.parser.i)
            self.file.write('@SP\n')
            self.file.write('A=M-1\n')
            self.file.write('A=A-1\n')
            self.file.write('M=-1\n')
            self.file.write('(END_%s)\n' % self.parser.i)
            self.file.write('@SP\n')
            self.file.write('M=M-1\n')
        elif command == 'lt':  # stack operation
            self.file.write('@SP\n')
            self.file.write('A=M-1\n')
            self.file.write('D=M\n')
            self.file.write('A=A-1\n')
            self.file.write('D=M-D\n')
            self.file.write(
                '@IF_TRUE_%s\n' %
                self.parser.i)  # there could be more than one 'lt' command
            self.file.write('D;JLT\n')
            self.file.write('@SP\n')
            self.file.write('A=M-1\n')
            self.file.write('A=A-1\n')
            self.file.write('M=0\n')
            self.file.write(
                '@END_%s\n' %
                self.parser.i)  # there could be more than one 'lt' command
            self.file.write('0;JMP\n')
            self.file.write('(IF_TRUE_%s)\n' % self.parser.i)
            self.file.write('@SP\n')
            self.file.write('A=M-1\n')
            self.file.write('A=A-1\n')
            self.file.write('M=-1\n')
            self.file.write('(END_%s)\n' % self.parser.i)
            self.file.write('@SP\n')
            self.file.write('M=M-1\n')
        elif command == 'and':  # stack operation
            self.file.write('@SP\n')
            self.file.write('A=M-1\n')
            self.file.write('D=M\n')
            self.file.write('A=A-1\n')
            self.file.write('M=D&M\n')
            self.file.write('@SP\n')
            self.file.write('M=M-1\n')
        elif command == 'or':  # stack operation
            self.file.write('@SP\n')
            self.file.write('A=M-1\n')
            self.file.write('D=M\n')
            self.file.write('A=A-1\n')
            self.file.write('M=D|M\n')
            self.file.write('@SP\n')
            self.file.write('M=M-1\n')
        elif command == 'neg':  # stack operation
            self.file.write('@SP\n')
            self.file.write('A=M-1\n')
            self.file.write('M=-M\n')
        elif command == 'not':  # stack operation
            self.file.write('@SP\n')
            self.file.write('A=M-1\n')
            self.file.write('M=!M\n')
        else:
            raise ValueError(
                "Unrecognized command for C_ARITHMETIC command type")

    def createOutput(self):
        """Write bootstrap segment bases, translate every command, close file."""
        # initially set the SP address to 256 (the address for the stack)
        self.file.write('@256\n')
        self.file.write('D=A\n')
        self.file.write('@SP\n')
        self.file.write('M=D\n')
        # set the local address to 300
        self.file.write('@300\n')
        self.file.write('D=A\n')
        self.file.write('@LCL\n')
        self.file.write('M=D\n')
        # set the argument address to 400
        self.file.write('@400\n')
        self.file.write('D=A\n')
        self.file.write('@ARG\n')
        self.file.write('M=D\n')
        # set the this address to 3000
        self.file.write('@3000\n')
        self.file.write('D=A\n')
        self.file.write('@THIS\n')
        self.file.write('M=D\n')
        # set the that address to 3010
        self.file.write('@3010\n')
        self.file.write('D=A\n')
        self.file.write('@THAT\n')
        self.file.write('M=D\n')
        # Command counter used for unique branch labels in writeArithmetic.
        self.parser.i = -1
        while self.parser.hasMoreCommands():
            self.parser.advance()
            c_type = self.parser.commandType()
            if c_type in ['C_PUSH', 'C_POP']:
                self.writePushPop()
            elif c_type == 'C_ARITHMETIC':
                self.writeArithmetic()
        # close file
        self.file.close()
def generate(self, try_parallelize):
    '''
    Get the parsed code from the input file and write equivalent C++ and
    CUDA code to the output file. If the try_parallelize parameter is
    false, the code is just converted to C++ without any CUDA code to
    parallelize it.

    Return true if the code was parallelized and false otherwise.
    '''
    parallelized = False  # Nothing was parallelized so far.

    # Get the parsed version of the code.
    p = Parser(self.in_filename)  # Type list of Expr's
    try:
        # Parse the code.
        parsed_exprs = p.parse()

        # Typecheck the code. This also updates the type of some of the
        # parsed expressions (updates from unknown type to known type) and
        # adds environment data to the expressions.
        type_checker = TypeChecker(parsed_exprs)
        type_checker.validate_exprs()

        if try_parallelize:
            # Analyze the code to see if some parts can be marked to run in
            # parallel.
            analyzer = Analyzer(parsed_exprs)
            parallelized = analyzer.analyze()
    except error.Error as e:
        e.print()
        exit(1)

    # Include some useful libraries.
    cpp_file = open(self._filename_no_ext + '.cpp', 'w')
    if parallelized:
        cpp_file.write('#include <cuda_runtime.h>\n')
    cpp_file.write('#include <stdio.h>\n')
    cpp_file.write('#include <stdlib.h>\n')
    cpp_file.write('#include <time.h>\n')
    cpp_file.write('\n')
    cpp_file.write(f'#include "{self._base_filename_no_ext + ".hpp"}"\n')
    if parallelized:
        cpp_file.write(f'#include "{self._base_filename_no_ext}.cuh"\n')
    cpp_file.write('\n')

    # Only make a CUDA file if necessary.
    if parallelized:
        cuda_file = open(self._filename_no_ext + '.cu', 'w')
        cuda_file.write('#include <cuda_runtime.h>\n')
        cuda_file.write('\n')
        cuda_file.write(f'#include "{self._base_filename_no_ext}.cuh"\n')
        cuda_file.write('\n')

    # Convert each expression to C++ and CUDA code, writing the result to
    # the output file.
    for expr in parsed_exprs:
        (cpp, cuda) = self.__translate_expr(expr)
        cpp_file.write(cpp)
        if parallelized:
            cuda_file.write(cuda)
    cpp_file.close()
    if parallelized:
        cuda_file.close()

    # Write the C++ header file.
    hpp_file = open(self._filename_no_ext + '.hpp', 'w')
    hpp_file.write(f'#ifndef {self._base_filename_no_ext.upper()}_HPP\n')
    hpp_file.write(f'#define {self._base_filename_no_ext.upper()}_HPP\n\n')
    hpp_file.write('struct int_list {\n')
    hpp_file.write('    int size;\n')
    hpp_file.write('    int *data;\n')
    hpp_file.write('};\n\n')
    hpp_file.write('struct float_list {\n')
    hpp_file.write('    int size;\n')
    hpp_file.write('    float *data;\n')
    hpp_file.write('};\n\n')
    hpp_file.write('struct char_list {\n')
    hpp_file.write('    int size;\n')
    hpp_file.write('    char *data;\n')
    hpp_file.write('};\n\n')
    for proto in self.cpp_prototypes:
        hpp_file.write(proto)
    hpp_file.write('\n')
    hpp_file.write(f'#endif\n')
    hpp_file.close()

    # Write the CUDA header file.
    if parallelized:
        cuh_file = open(self._filename_no_ext + '.cuh', 'w')
        guard = f'{self._base_filename_no_ext.upper()}_CUH'
        cuh_file.write(f'#ifndef {guard}\n')
        cuh_file.write(f'#define {guard}\n\n')
        cuh_file.write(f'#include "{self._base_filename_no_ext}.hpp"\n\n')
        for proto in self.cuda_prototypes:
            cuh_file.write(proto)
        cuh_file.write('\n')
        cuh_file.write(f'#endif\n')
        cuh_file.close()

    # Write the Makefile.
    makefile = open(self._path + '/Makefile', 'w')
    base_name = self._base_filename_no_ext
    cpp_name = self._base_filename_no_ext + '.cpp'
    hpp_name = self._base_filename_no_ext + '.hpp'
    cu_name = self._base_filename_no_ext + '.cu'
    cuh_name = self._base_filename_no_ext + '.cuh'
    m = ''
    if parallelized:
        m += f'CUDA_PATH = /usr/local/cuda\n'
        m += f'CUDA_INC_PATH = $(CUDA_PATH)/include\n'
        m += f'CUDA_BIN_PATH = $(CUDA_PATH)/bin\n'
        m += f'CUDA_LIB_PATH = $(CUDA_PATH)/lib64\n'
        m += f'NVCC = $(CUDA_BIN_PATH)/nvcc\n'
        m += f'CUDAFLAGS = -g -dc -Wno-deprecated-gpu-targets \\\n'
        m += f'            --std=c++11 --expt-relaxed-constexpr\n'
        m += f'CUDA_LINK_FLAGS = -dlink -Wno-deprecated-gpu-targets\n'
        m += f'\n'
    m += f'GPP=g++\n'
    m += f'CXXFLAGS = -g -Wall -D_REENTRANT -std=c++0x -pthread\n'
    if parallelized:
        m += f'INCLUDE = -I$(CUDA_INC_PATH)\n'
        m += f'LIBS = -L$(CUDA_LIB_PATH) -lcudart -lcufft -lsndfile\n'
    m += f'\n'
    m += f'all: {base_name}\n'
    m += f'\n'
    m += f'{cpp_name}.o: {cpp_name}\n'
    m += f'\t$(GPP) $(CXXFLAGS) -c -o $@ $(INCLUDE) $<\n'
    m += f'\n'
    if parallelized:
        m += f'{cu_name}.o: {cu_name}\n'
        m += f'\t$(NVCC) $(CUDAFLAGS) -c -o $@ $<\n'
        m += f'\n'
        m += f'cuda.o: {cu_name}.o\n'
        m += f'\t$(NVCC) $(CUDA_LINK_FLAGS) -o $@ $^\n'
        m += f'\n'
    if parallelized:
        m += f'{base_name}: {cpp_name}.o {cu_name}.o cuda.o\n'
    else:
        m += f'{base_name}: {cpp_name}.o\n'
    m += f'\t$(GPP) $(CXXFLAGS) -o $@ $(INCLUDE) $^ $(LIBS)\n'
    m += f'\n'
    m += f'clean:\n'
    m += f'\trm -f {base_name} *.o\n'
    m += f'\n'
    m += f'full_clean: clean\n'
    m += f'\trm -f {cpp_name} {hpp_name} {cu_name} {cuh_name} Makefile\n'
    m += f'\n'
    m += f'.PHONY: all clean full_clean\n'
    makefile.write(m)
    # BUG FIX: the Makefile handle was never closed, so its buffered
    # contents could be lost and the descriptor leaked.
    makefile.close()

    return parallelized
def readloop(path, parser):
    """Read *path* as little-endian 16-bit words, appending each to parser.bytes.

    The final read at EOF returns b'' (decoded as 0) but is never appended,
    because f.tell() stops advancing and the loop terminates first.
    FIX: removed the unused local `i = 0` from the original.
    """
    with open(path, 'rb') as f:
        byte = int.from_bytes(f.read(2), byteorder='little')
        last = 0
        while last != f.tell():
            parser.bytes.append(byte)
            last = f.tell()
            byte = int.from_bytes(f.read(2), byteorder='little')


def executeLoop(parser):
    """Dispatch instructions from parser.bytes until exhaustion, halt, or error.

    parseIns returns < 0 on an unrecognised instruction (exit code 1) and
    0 to request a clean halt.
    """
    while parser.offset < len(parser.bytes):
        r = parseIns(parser.bytes[parser.offset], parser)
        if r < 0:
            print('ERROR')
            print('Exiting on instruction %s' % parser.bytes[parser.offset])
            sys.exit(1)
        if r == 0:
            sys.exit()
        parser.offset += 1
    print('End of instructions')


if __name__ == '__main__':
    p = Parser()
    readloop('challenge.bin', p)
    executeLoop(p)
# Require exactly one .asm file argument.
if len(sys.argv) == 1:
    print('You need to pass an .asm file as an argument')
    sys.exit()

filename = sys.argv[1]
basename = os.path.basename(filename)
if os.path.splitext(basename)[1] != '.asm':
    print('The argument provided does not have the .asm extension')
    sys.exit()

symbol_table = SymbolTable()

# first pass: record each label's address; labels themselves do not
# occupy an instruction line, so current_line only advances on real
# commands.
parser = Parser(filename)
current_line = 0
while parser.hasMoreCommands():
    label = parser.getLabel()
    if label:
        symbol_table.updateTable(label, current_line)
    else:
        current_line += 1
    parser.advance()
del(parser)

# second pass
parser = Parser(filename)
def __init__(self, verbose, repository_tag, repository_chunk, airr_map, repository):
    """Initialize by delegating all configuration directly to the Parser base class."""
    Parser.__init__(self, verbose, repository_tag, repository_chunk, airr_map, repository)
f.write('%s %s %s %s' % ( args.dataset, args.learn_eps, args.neighbor_pooling_type, args.graph_pooling_type )) f.write("\n") f.write("%f %f %f %f" % ( train_loss, train_acc, valid_loss, valid_acc )) f.write("\n") lrbar.set_description( "Learning eps with learn_eps={}: {}".format( args.learn_eps, [layer.eps.data(args.device).asscalar() for layer in model.ginlayers])) tbar.close() vbar.close() lrbar.close() if __name__ == '__main__': args = Parser(description='GIN').args print('show all arguments configuration...') print(args) main(args)
from frontier import Frontier
from parser import Parser
from graph import Graph
from pagerank import Ranker
from indexer import Indexer
from scorer import Scorer

# Seed URLs for the crawl.
frontier = Frontier([
    'http://mysql12.f4.htw-berlin.de/crawl/d01.html',
    'http://mysql12.f4.htw-berlin.de/crawl/d06.html',
    'http://mysql12.f4.htw-berlin.de/crawl/d08.html'
])
parser = Parser()
indexer = Indexer()
web_graph = Graph()

for url in frontier:
    # get outgoing links for the graph and content for tokenization
    body, outgoing_links = parser.parse(url)

    # add document to indexer
    indexer.add_document(url, body)

    # build our webgraph: ensure a node exists for this URL, then add
    # one edge per outgoing link
    node = web_graph.get_node(url)
    if node is None:
        node = web_graph.add_node(url)
    for out_link in outgoing_links:
        web_graph.add_edge(url, out_link)
from generator import Generator
from cache import Cache

# Command-line interface: every flag is an optional toggle.
argparser = ArgumentParser()
argparser.add_argument("-t",
                       "--targets",
                       help="specify any target files",
                       nargs='+')
argparser.add_argument("-c",
                       "--crawl",
                       help="enable image crawling",
                       action="store_true")
argparser.add_argument("-d",
                       "--download",
                       help="enable image downloading",
                       action="store_true")
argparser.add_argument("-v",
                       "--views",
                       help="enable generation of views",
                       action="store_true")
args = argparser.parse_args()

# Parse the explicit target files when given, otherwise the default set.
parser = Parser(args.targets) if args.targets else Parser()

# --crawl implies the default downloader; --download alone disables the
# path-length limit; with neither flag no downloader is created.
if args.crawl:
    downloader = Downloader()
elif args.download:
    downloader = Downloader(max_path_len=None)
else:
    downloader = None

cache = Cache()
def check_equation_raises_error(equation: str, error_type):
    """Assert that parsing *equation* raises *error_type*."""
    equation_parser: Parser = Parser(equation, False)
    with pytest.raises(error_type):
        equation_parser.parse_equation()
import numpy as np
import tensorflow as tf
import random
import sys, os
import json
import argparse
from parser import Parser
from datamanager import DataManager
from actor import ActorNetwork
from LSTM_critic import LSTM_CriticNetwork

# Silence TensorFlow's logging noise.
tf.logging.set_verbosity(tf.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# get parse
argv = sys.argv[1:]
parser = Parser().getParser()
args, _ = parser.parse_known_args(argv)
# Seed only Python's RNG; NOTE(review): TF/numpy RNGs are not seeded here,
# so runs are not fully reproducible.
random.seed(args.seed)

# get data
dataManager = DataManager(args.dataset)
train_data, dev_data, test_data = dataManager.getdata(args.grained,
                                                      args.maxlenth)
word_vector = dataManager.get_wordvector(args.word_vector)
print("train_data ", len(train_data))
print("dev_data", len(dev_data))
print("test_data", len(test_data))

# Fast-test mode: shrink every split for a quick smoke run.
if args.fasttest == 1:
    train_data = train_data[:100]
    dev_data = dev_data[:20]
    test_data = test_data[:20]
class SemanticScale(object):
    """Scales a seed stream of semantic observations in speed and in time.

    speedScale() densifies observations between consecutive timestamps;
    timeScale() replicates each observation across extra days.  Output is
    one JSON object per line.

    NOTE(review): the range() calls below rely on integer division
    (extendSpeed / origSpeed, extendDays / origDays) — this is Python 2
    semantics; under Python 3 these produce floats and range() raises.
    """

    def __init__(self, dataDir, seedFile, outputFile, origDays, extendDays,
                 origSpeed, extendSpeed, payloadName, speedScaleNoise,
                 timeScaleNoise, type):
        # type is int or float and controls how random payloads are drawn.
        self.outputFile = outputFile
        self.dataDir = dataDir
        self.seedFile = seedFile
        self.origDays = origDays
        self.origSpeed = origSpeed
        self.extendDays = extendDays
        self.extendSpeed = extendSpeed
        self.type = type
        self.payloadName = payloadName
        self.speedScaleNoise = speedScaleNoise
        self.timeScaleNoise = timeScaleNoise
        # NOTE(review): this handle is re-assigned without being closed in
        # speedScale()/timeScale(), leaking the file opened here.
        self.writer = open(self.outputFile, "w")

    def getRandAroundPayload(self, payload, scaleNoise):
        # Draw a random value within +/- scaleNoise of payload, typed to
        # match self.type.  (The locals shadow the min/max builtins.)
        if self.type == int:
            min = (int)(payload * (1 - scaleNoise))
            max = (int)(payload * (1 + scaleNoise))
            return random.randint(0, max - min) + min
        elif self.type == float:
            min = payload * (1 - scaleNoise)
            max = payload * (1 + scaleNoise)
            return min + (max - min) * random.random()

    def getRandBetweenPayloads(self, payload1, payload2, scaleNoise):
        # Draw a random value between the two payloads, widened by
        # scaleNoise, typed to match self.type.
        # NOTE(review): in both payload1 >= payload2 branches the bounds are
        # computed from payload2 only — by symmetry with the other branch
        # this looks like it should be payload2..payload1; confirm intent.
        if self.type == int:
            if payload1 < payload2:
                min = (int)(payload1 * (1 - scaleNoise))
                max = (int)(payload2 * (1 + scaleNoise))
                return random.randint(0, max - min) + min
            else:
                min = (int)(payload2 * (1 - scaleNoise))
                max = (int)(payload2 * (1 + scaleNoise))
                return random.randint(0, max - min) + min
        elif self.type == float:
            if payload1 < payload2:
                min = payload1 * (1 - scaleNoise)
                max = payload2 * (1 + scaleNoise)
                return random.random() * (max - min) + min
            else:
                min = payload2 * (1 - scaleNoise)
                max = payload2 * (1 + scaleNoise)
                return random.random() * (max - min) + min

    def writeObject(self, timestamp, payload, sensor, entity):
        # Serialize one observation as a JSON line to self.writer.
        # (The local `object` shadows the builtin.)
        try:
            object = {
                "id": str(uuid.uuid4()),
                "semanticEntity": entity,
                "virtualSensor": sensor,
                "type_": sensor['type_']['semanticObservationType'],
                "timeStamp": timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                "payload": {
                    self.payloadName: payload
                }
            }
            self.writer.write(json.dumps(object) + '\n')
        except Exception as e:
            print (e)
            print ("IO error")

    def speedScale(self):
        """Interpolate extra observations between consecutive seed readings.

        Writes to SPEED_SCALED_FILE, which timeScale() later reads.
        """
        self.seedData = Parser(self.seedFile)
        self.writer = None
        try:
            self.writer = open(SPEED_SCALED_FILE, "w")
            prevObservation = self.seedData.getNext()
            prevTimestamp = datetime.datetime.strptime(
                prevObservation['timeStamp'], DATE_FORMAT)
            while prevObservation:
                # Always emit the seed observation itself.
                self.writeObject(
                    datetime.datetime.strptime(prevObservation['timeStamp'], DATE_FORMAT),
                    prevObservation['payload'][self.payloadName],
                    prevObservation['virtualSensor'],
                    prevObservation['semanticEntity'])
                currentObservaton = self.seedData.getNext()
                if currentObservaton is None:
                    prevObservation = currentObservaton
                    continue
                currentTimeStamp = datetime.datetime.strptime(
                    currentObservaton['timeStamp'], DATE_FORMAT)
                # Skip interpolation across out-of-order timestamps.
                if prevTimestamp > currentTimeStamp:
                    prevObservation = currentObservaton
                    continue
                prevPayload = prevObservation['payload'][self.payloadName]
                timestamp = prevTimestamp
                # Emit (extendSpeed/origSpeed - 1) interpolated readings.
                for i in range(self.extendSpeed / self.origSpeed - 1):
                    timestamp += datetime.timedelta(seconds=self.extendSpeed)
                    payload = self.getRandBetweenPayloads(
                        prevPayload,
                        currentObservaton['payload'][self.payloadName],
                        self.speedScaleNoise)
                    self.writeObject(timestamp, payload,
                                     prevObservation['virtualSensor'],
                                     prevObservation['semanticEntity'])
                    prevPayload = payload
                prevObservation = currentObservaton
        except KeyError as e:
            # NOTE(review): message says "IO error" but this catches KeyError.
            print("Speed IO error", e)
        finally:
            try:
                self.writer.close()
            except Exception as e:
                print("IO error")

    def timeScale(self):
        """Replicate each speed-scaled observation across extra days."""
        self.seedData = Parser(SPEED_SCALED_FILE)
        self.writer = None
        try:
            self.writer = open(self.outputFile, "w")
            currentObservation = self.seedData.getNext()
            while currentObservation:
                # Emit the original observation first.
                self.writeObject(
                    datetime.datetime.strptime(currentObservation['timeStamp'], DATE_FORMAT),
                    currentObservation['payload'][self.payloadName],
                    currentObservation['virtualSensor'],
                    currentObservation['semanticEntity'])
                timestamp = datetime.datetime.strptime(
                    currentObservation['timeStamp'], DATE_FORMAT)
                # One noisy copy per extra origDays-sized block of days.
                for i in range(1, self.extendDays / self.origDays):
                    payload = self.getRandAroundPayload(
                        currentObservation['payload'][self.payloadName],
                        self.timeScaleNoise)
                    timestamp += datetime.timedelta(days=i * self.origDays)
                    self.writeObject(timestamp, payload,
                                     currentObservation['virtualSensor'],
                                     currentObservation['semanticEntity'])
                currentObservation = self.seedData.getNext()
        except KeyError as e:
            # NOTE(review): message says "IO error" but this catches KeyError.
            print("Time IO error", e)
        finally:
            try:
                self.writer.close()
            except Exception as e:
                print("IO error")
def check_equation(equation: str, correct_multipliers: Dict[float, float]):
    """Parse *equation* and compare its three multipliers with the expected ones."""
    equation_parser: Parser = Parser(equation, False)
    equation_parser.parse_equation()
    parsed_multipliers = equation_parser.multipliers
    for index in range(3):
        assert compare_floats_with_epsilon(correct_multipliers[index],
                                           parsed_multipliers[index])
class CodeWriter:
    """Translate parsed VM commands into Hack assembly (nand2tetris VM translator).

    When ``isfile`` is True a single .vm file is translated; otherwise a whole
    directory is translated and :meth:`writeBootstrap` emits startup code.

    FIX: the original ``__init__`` referenced an undefined name ``path`` in both
    branches; it clearly meant the ``filepath`` parameter.
    """

    # Segment name -> (@-symbol of its base, register holding the base value).
    # 'temp'/'pointer' are fixed RAM addresses (use A); the rest are pointers (use M).
    _SEGMENT_BASE = {
        'local': ('LCL', 'M'),
        'argument': ('ARG', 'M'),
        'this': ('THIS', 'M'),
        'that': ('THAT', 'M'),
        'temp': ('5', 'A'),
        'pointer': ('3', 'A'),
    }

    def __init__(self, filepath, isfile=True):
        """Open the output .asm file derived from *filepath*."""
        self.parser = Parser(filepath)
        self.isfile = isfile
        # Performs the logic of the recommended setFileName constructor here.
        if self.isfile:
            ind1 = filepath.find('/')
            ind2 = filepath.find('.')
            self.writefile = filepath[:ind1] + "/" + filepath[ind1 + 1:ind2]
            self.filename = self.writefile + '.asm'
            self.file = open(self.filename, 'w')
            self.writefile_ind = self.writefile.rfind('/')
            # useful in declaring static variables
            self.static_var = self.writefile[self.writefile_ind + 1:]
            self.function_list = []
        else:
            inds = [i for i, x in enumerate(filepath) if x == '/']
            self.writefolder = filepath[inds[-2] + 1:inds[-1]]
            self.filename = self.writefolder + '.asm'
            writefile_ind = filepath.rfind('/')
            filepath_ = filepath[:writefile_ind]
            self.file = open(filepath_ + '/' + self.filename, 'w')
            self.static_var_dict = {}  # useful in declaring static variables
            self.function_list = []

    def _writeLines(self, lines):
        """Emit each assembly instruction in *lines* on its own line."""
        for line in lines:
            self.file.write(line + '\n')

    def _writePushD(self):
        """Push the current value of D onto the stack and advance SP."""
        self._writeLines(('@SP', 'A=M', 'M=D', '@SP', 'M=M+1'))

    def _staticSymbol(self, arg2):
        """Return the file-qualified symbol used for ``static`` slot *arg2*."""
        if self.isfile:
            return '@%s.%s\n' % (self.static_var, arg2)
        return '@%s.%s\n' % (self.static_var_dict[self.parser.i], arg2)

    def writePushPop(self):
        """Emit assembly for the current C_PUSH / C_POP command."""
        # no need to pass in command as an argument
        assert self.parser.commandType() in ['C_PUSH', 'C_POP']
        arg1 = self.parser.arg1()
        arg2 = self.parser.arg2()
        if self.parser.commandType() == 'C_PUSH':
            self.file.write('// push %s %s\n' % (arg1, arg2))
            if arg1 == 'constant':
                # e.g. push constant 7: load the literal into D, store at *SP
                self._writeLines(('@%s' % arg2, 'D=A', '@SP', 'A=M', 'M=D'))
            elif arg1 in self._SEGMENT_BASE:
                base, reg = self._SEGMENT_BASE[arg1]
                # A = base + index, then push *(base + index)
                self._writeLines(('@%s' % arg2, 'D=A',
                                  '@%s' % base, 'A=D+%s' % reg,
                                  'D=M', '@SP', 'A=M', 'M=D'))
            elif arg1 == 'static':
                # declare a new symbol file.j in "push static j"
                self.file.write(self._staticSymbol(arg2))
                self._writeLines(('D=M', '@SP', 'A=M', 'M=D'))
            else:
                # TODO
                pass
            # increase address of stack top
            self._writeLines(('@SP', 'M=M+1'))
        elif self.parser.commandType() == 'C_POP':
            # pop the stack value and store it in segment[index];
            # general-purpose RAM[13] holds 'segment_base_address + index'
            self.file.write('// pop %s %s\n' % (arg1, arg2))
            self._writeLines(('@%s' % arg2, 'D=A'))
            if arg1 in self._SEGMENT_BASE:
                base, reg = self._SEGMENT_BASE[arg1]
                self._writeLines(('@%s' % base, 'D=D+%s' % reg,
                                  '@13', 'M=D',
                                  '@SP', 'A=M-1', 'D=M',     # pop into D
                                  '@13', 'A=M', 'M=D',       # write to target
                                  '@SP', 'M=M-1'))           # adjust stack top
            elif arg1 == 'static':
                self._writeLines(('@SP', 'A=M-1', 'D=M'))    # pop into D
                self.file.write(self._staticSymbol(arg2))
                self._writeLines(('M=D', '@SP', 'M=M-1'))
            else:
                # TODO
                pass

    def _writeCompare(self, jump):
        """Emit eq/gt/lt: pop two values, compare, push -1 (true) or 0 (false).

        *jump* is the Hack jump mnemonic (JEQ/JGT/JLT).  The parser's command
        index makes the labels unique because the command can occur repeatedly.
        """
        i = self.parser.i
        self._writeLines(('@SP', 'A=M-1', 'D=M', 'A=A-1', 'D=M-D',
                          '@IF_TRUE_%s' % i, 'D;%s' % jump,
                          '@SP', 'A=M-1', 'A=A-1', 'M=0',
                          '@END_%s' % i, '0;JMP',
                          '(IF_TRUE_%s)' % i,
                          '@SP', 'A=M-1', 'A=A-1', 'M=-1',
                          '(END_%s)' % i,
                          '@SP', 'M=M-1'))

    def writeArithmetic(self):
        """Emit assembly for the current C_ARITHMETIC command."""
        # no need to pass in command as an argument
        assert self.parser.commandType() == 'C_ARITHMETIC'
        command = self.parser.arg1()
        self.file.write('// %s\n' % command)
        comparisons = {'eq': 'JEQ', 'gt': 'JGT', 'lt': 'JLT'}
        if command in comparisons:
            self._writeCompare(comparisons[command])
            return
        sequences = {
            'add': ('@SP', 'A=M-1', 'D=M', 'A=A-1', 'D=D+M', 'M=D', '@SP', 'M=M-1'),
            'sub': ('@SP', 'A=M-1', 'D=M', 'A=A-1', 'D=M-D', 'M=D', '@SP', 'M=M-1'),
            'and': ('@SP', 'A=M-1', 'D=M', 'A=A-1', 'M=D&M', '@SP', 'M=M-1'),
            'or':  ('@SP', 'A=M-1', 'D=M', 'A=A-1', 'M=D|M', '@SP', 'M=M-1'),
            'neg': ('@SP', 'A=M-1', 'M=-M'),
            'not': ('@SP', 'A=M-1', 'M=!M'),
        }
        try:
            self._writeLines(sequences[command])
        except KeyError:
            raise ValueError(
                "Unrecognized command for C_ARITHMETIC command type")

    def writeInit(self):
        """Emit test-harness initialization of SP/LCL/ARG/THIS/THAT."""
        self.file.write('// init\n')
        # initially set the SP address to 256 (the address for the stack)
        self._writeLines(('@256', 'D=A', '@SP', 'M=D'))
        # set the local address to 300
        self._writeLines(('@300', 'D=A', '@LCL', 'M=D'))
        # set the argument address to 400
        self._writeLines(('@400', 'D=A', '@ARG', 'M=D'))
        # set the this address to 3000
        self._writeLines(('@3000', 'D=A', '@THIS', 'M=D'))
        # set the that address to 3010
        self._writeLines(('@3010', 'D=A', '@THAT', 'M=D'))

    def _funcPrefix(self):
        """Return 'FunctionName$' when inside a function, '' at top level.

        FIX: replaces a bare ``except:`` that only ever needed to catch the
        IndexError from indexing an empty function list.
        """
        if self.function_list:
            return self.function_list[-1] + "$"
        return ''

    def writeLabel(self):
        """Emit a label, scoped to the enclosing function if any."""
        self.file.write('// label\n')
        label_name = self._funcPrefix() + self.parser.arg1()
        self.file.write('(%s)\n' % label_name)

    def writeGoto(self):
        """Emit an unconditional jump to a (function-scoped) label."""
        self.file.write('// goto\n')
        label_name = self._funcPrefix() + self.parser.arg1()
        self._writeLines(('@%s' % label_name, '0;JMP'))

    def writeIf(self):
        """Emit a conditional jump: pop the stack, jump if the value != 0."""
        self.file.write('// if-goto\n')
        label_name = self._funcPrefix() + self.parser.arg1()
        self._writeLines(('@SP', 'A=M-1', 'D=M',
                          '@SP', 'M=M-1',          # adjust stack top
                          '@%s' % label_name, 'D;JNE'))

    def writeFunction(self):
        """Emit a function entry: declare label, zero-init num_locals slots."""
        func_name = self.parser.arg1()
        self.function_list.append(func_name)
        num_locals = self.parser.arg2()
        self.file.write('// function %s %s\n' % (func_name, num_locals))
        self.file.write('(%s)\n' % func_name)
        # RAM[13] counts remaining locals to initialize
        self._writeLines(('@%s' % num_locals, 'D=A', '@13', 'M=D'))
        self._writeLines(('(LOOP_%s)' % func_name,
                          '@13', 'D=M',
                          '@END_%s' % func_name, 'D;JEQ',
                          # push 0 for one local slot
                          '@SP', 'A=M', 'M=0',
                          '@SP', 'M=M+1',
                          '@13', 'M=M-1',
                          '@LOOP_%s' % func_name, '0;JMP',
                          '(END_%s)' % func_name))

    def writeReturn(self):
        """Emit the standard VM return sequence (frame teardown)."""
        self.file.write('// return\n')
        # FRAME = LCL : store FRAME in temp variable RAM[13]
        self._writeLines(('@LCL', 'D=M', '@13', 'M=D'))
        # RET = *(FRAME - 5) : store return address in temp variable RAM[14]
        self._writeLines(('@13', 'D=M', '@5', 'D=D-A', 'A=D', 'D=M',
                          '@14', 'M=D'))
        # *ARG = pop()
        self._writeLines(('@SP', 'A=M-1', 'D=M', '@ARG', 'A=M', 'M=D',
                          '@SP', 'M=M-1'))
        # SP = ARG + 1
        self._writeLines(('@ARG', 'D=M+1', '@SP', 'M=D'))
        # THAT = *(FRAME - 1)
        self._writeLines(('@13', 'A=M-1', 'D=M', '@THAT', 'M=D'))
        # THIS = *(FRAME - 2)
        self._writeLines(('@13', 'D=M', '@2', 'A=D-A', 'D=M', '@THIS', 'M=D'))
        # ARG = *(FRAME - 3)
        self._writeLines(('@13', 'D=M', '@3', 'A=D-A', 'D=M', '@ARG', 'M=D'))
        # LCL = *(FRAME - 4)
        self._writeLines(('@13', 'D=M', '@4', 'A=D-A', 'D=M', '@LCL', 'M=D'))
        # goto RET
        self._writeLines(('@14', 'A=M', '0;JMP'))

    def writeCall(self):
        """Emit a call: save caller frame, reposition ARG/LCL, jump to callee."""
        func_name = self.parser.arg1()
        num_args = self.parser.arg2()
        self.file.write('// call %s %s\n' % (func_name, num_args))
        # push return-address (label declared below); the parser index keeps it
        # unique across multiple calls
        self.file.write('// call : push return-address\n')
        s = 'RETURN_ADDRESS_' + str(self.parser.i)
        self._writeLines(('@%s' % s, 'D=A'))
        self._writePushD()
        # push LCL
        self.file.write('// call : push LCL\n')
        self._writeLines(('@LCL', 'D=M'))
        self._writePushD()
        # push ARG
        self.file.write('// call : push ARG\n')
        self._writeLines(('@ARG', 'D=M'))
        self._writePushD()
        # push THIS
        self.file.write('// call : push THIS\n')
        self._writeLines(('@THIS', 'D=M'))
        self._writePushD()
        # push THAT
        self.file.write('// call : push THAT\n')
        self._writeLines(('@THAT', 'D=M'))
        self._writePushD()
        # ARG = SP - n - 5
        self.file.write('// call : ARG = SP - n - 5\n')
        self._writeLines(('@SP', 'D=M', '@%s' % num_args, 'D=D-A',
                          '@5', 'D=D-A', '@ARG', 'M=D'))
        # LCL = SP
        self.file.write('// call : LCL = SP\n')
        self._writeLines(('@SP', 'D=M', '@LCL', 'M=D'))
        # goto f
        self.file.write('// call : goto f\n')
        self._writeLines(('@%s' % func_name, '0;JMP'))
        # declare a label for the return-address
        self.file.write('// call : declare label for return-address\n')
        self.file.write('(%s)\n' % s)

    def writeBootstrap(self):
        """Emit bootstrap code: SP = 256, then call Sys.init with 0 args."""
        self.file.write('// boostrap\n')
        # SP = 256
        self._writeLines(('@256', 'D=A', '@SP', 'M=D'))
        # call Sys.init : call Sys.init 0
        # push return-address
        sys_init_ret_add = 'return-address-sysinit'
        self._writeLines(('@%s' % sys_init_ret_add, 'D=A'))
        self._writePushD()
        # push LCL
        self._writeLines(('@LCL', 'D=M'))
        self._writePushD()
        # push ARG
        self._writeLines(('@ARG', 'D=M'))
        self._writePushD()
        # push THIS
        self._writeLines(('@THIS', 'D=M'))
        self._writePushD()
        # push THAT
        self._writeLines(('@THAT', 'D=M'))
        self._writePushD()
        # ARG = SP - n - 5 (n is 0 here, so only subtract 5)
        self._writeLines(('@SP', 'D=M', '@5', 'D=D-A', '@ARG', 'M=D'))
        # LCL = SP
        self._writeLines(('@SP', 'D=M', '@LCL', 'M=D'))
        # goto f
        func_name = 'Sys.init'
        self._writeLines(('@%s' % func_name, '0;JMP'))
        # declare a label for the return-address
        self.file.write('(%s)\n' % sys_init_ret_add)

    def createOutput(self):
        """Drive the translation: dispatch every parsed command, then close."""
        if not self.isfile:
            self.writeBootstrap()
        else:
            pass  # self.writeBootstrap()
        self.parser.i = -1
        while self.parser.hasMoreCommands():
            self.parser.advance()
            c_type = self.parser.commandType()
            if c_type in ['C_PUSH', 'C_POP']:
                self.writePushPop()
            elif c_type == 'C_ARITHMETIC':
                self.writeArithmetic()
            elif c_type == 'C_FUNCTION':
                self.writeFunction()
            elif c_type == 'C_LABEL':
                self.writeLabel()
            elif c_type == 'C_GOTO':
                self.writeGoto()
            elif c_type == 'C_IF':
                self.writeIf()
            elif c_type == 'C_RETURN':
                self.writeReturn()
            elif c_type == 'C_CALL':
                self.writeCall()
        # close file
        self.file.close()
class Scale(object):
    """Scale a seed sensor-observation stream along speed, device-count and time.

    The pipeline reads JSON-lines observations via ``Parser`` and writes scaled
    streams: :meth:`speedScale` densifies samples in time, :meth:`deviceScale`
    clones observations onto simulated sensors, :meth:`timeScale` extends the
    covered day range.

    FIXES vs. original:
    - ``range(x / y)`` raises TypeError on Python 3 (the class already uses
      ``print(...)`` calls); integer division ``//`` is used in all three loops.
    - ``timeScale`` used ``deviceScaleNoise`` while the stored
      ``timeScaleNoise`` was never read anywhere — a copy-paste slip (the
      parallel implementation elsewhere in this file uses timeScaleNoise).
    """

    def __init__(self, dataDir, seedFile, outputFile, origDays, extendDays,
                 origSpeed, extendSpeed, origSensor, extendSensor, payloadName,
                 speedScaleNoise, timeScaleNoise, deviceScaeNoise, type):
        # NOTE(review): parameter names `deviceScaeNoise` (typo) and `type`
        # (shadows the builtin) are kept for caller compatibility.
        self.outputFile = outputFile
        self.dataDir = dataDir
        self.seedFile = seedFile
        self.origDays = origDays
        self.origSpeed = origSpeed
        self.origSensor = origSensor
        self.extendDays = extendDays
        self.extendSpeed = extendSpeed
        self.extendSensor = extendSensor
        self.type = type
        # payloadName may be a list: first entry is the scaled field, the
        # remainder get random filler values in writeObject
        if isinstance(payloadName, list):
            self.payloadName = payloadName[0]
            self.payloadList = payloadName
        else:
            self.payloadName = payloadName
            self.payloadList = None
        self.speedScaleNoise = speedScaleNoise
        self.timeScaleNoise = timeScaleNoise
        self.deviceScaleNoise = deviceScaeNoise
        self.writer = open(self.outputFile, "w")
        # index simulated sensors by name for getCopyOfSensor lookups
        with open(self.dataDir + 'sensor.json') as data_file:
            data = json.load(data_file)
        self.sensorMap = {}
        for sensor in data:
            self.sensorMap[sensor['name']] = sensor

    def getRandAroundPayload(self, payload, scaleNoise):
        """Return a random value within ±scaleNoise (relative) of *payload*.

        Result type follows ``self.type`` (int or float); floats are rounded
        to 2 decimals.  Returns None for any other configured type.
        """
        if self.type == int:
            lo = int(payload * (1 - scaleNoise))
            hi = int(payload * (1 + scaleNoise))
            return random.randint(0, hi - lo) + lo
        elif self.type == float:
            lo = payload * (1 - scaleNoise)
            hi = payload * (1 + scaleNoise)
            return round(lo + (hi - lo) * random.random(), 2)

    def getRandBetweenPayloads(self, payload1, payload2, scaleNoise):
        """Return a random value in the noise-widened span of two payloads.

        NOTE(review): when payload1 >= payload2 both bounds derive from
        payload2 only — possibly intentional "hover around payload2", possibly
        a bug; preserved as-is.
        """
        if self.type == int:
            if payload1 < payload2:
                lo = int(payload1 * (1 - scaleNoise))
                hi = int(payload2 * (1 + scaleNoise))
            else:
                lo = int(payload2 * (1 - scaleNoise))
                hi = int(payload2 * (1 + scaleNoise))
            return random.randint(0, hi - lo) + lo
        elif self.type == float:
            if payload1 < payload2:
                lo = payload1 * (1 - scaleNoise)
                hi = payload2 * (1 + scaleNoise)
            else:
                lo = payload2 * (1 - scaleNoise)
                hi = payload2 * (1 + scaleNoise)
            return round(random.random() * (hi - lo) + lo, 2)

    def writeObject(self, timestamp, payload, sensor):
        """Serialize one observation as a JSON line to the current writer."""
        try:
            if self.payloadList is not None:
                record = {
                    "id": str(uuid.uuid4()),
                    "sensor": sensor,
                    "timeStamp": timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                    "payload": {
                        self.payloadList[0]: payload,
                        # secondary payload field gets random filler
                        self.payloadList[1]: random.randint(0, 100)
                    }
                }
            else:
                record = {
                    "id": str(uuid.uuid4()),
                    "sensor": sensor,
                    "timeStamp": timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                    "payload": {
                        self.payloadName: payload
                    }
                }
            self.writer.write(json.dumps(record) + '\n')
        except Exception as e:
            print(e)
            print("IO error")

    def speedScale(self):
        """Densify the seed stream: interpolate extra samples every extendSpeed seconds."""
        self.seedData = Parser(self.seedFile)
        self.writer = None
        try:
            self.writer = open(SPEED_SCALED_FILE, "w")
            prevObservation = self.seedData.getNext()
            prevTimestamp = datetime.datetime.strptime(
                prevObservation['timeStamp'], DATE_FORMAT)
            while prevObservation:
                self.writeObject(
                    datetime.datetime.strptime(prevObservation['timeStamp'], DATE_FORMAT),
                    prevObservation['payload'][self.payloadName],
                    prevObservation['sensor'])
                currentObservaton = self.seedData.getNext()
                if currentObservaton is None:
                    prevObservation = currentObservaton
                    continue
                currentTimeStamp = datetime.datetime.strptime(
                    currentObservaton['timeStamp'], DATE_FORMAT)
                # skip out-of-order observations
                if prevTimestamp > currentTimeStamp:
                    prevObservation = currentObservaton
                    continue
                prevPayload = prevObservation['payload'][self.payloadName]
                timestamp = prevTimestamp
                # FIX: integer division — '/' yields a float on Python 3 and
                # range() would raise TypeError.
                for i in range(self.origSpeed // self.extendSpeed - 1):
                    timestamp += datetime.timedelta(seconds=self.extendSpeed)
                    payload = self.getRandBetweenPayloads(
                        prevPayload,
                        currentObservaton['payload'][self.payloadName],
                        self.speedScaleNoise)
                    self.writeObject(timestamp, payload, prevObservation['sensor'])
                    prevPayload = payload
                prevObservation = currentObservaton
        except KeyError as e:
            print("Speed IO error", e)
        finally:
            try:
                self.writer.close()
            except Exception as e:
                print("IO error")

    def getCopyOfSensor(self, sensor, numCopy):
        """Return the numCopy-th simulated clone of *sensor* from the sensor map."""
        return self.sensorMap['simSensor_{}_{}'.format(sensor['id'], numCopy)]

    def deviceScale(self):
        """Clone each speed-scaled observation onto extendSensor/origSensor - 1 simulated sensors."""
        self.seedData = Parser(SPEED_SCALED_FILE)
        self.writer = None
        try:
            self.writer = open(SENSOR_SCALED_FILE, "w")
            currentObservation = self.seedData.getNext()
            while currentObservation:
                self.writeObject(
                    datetime.datetime.strptime(currentObservation['timeStamp'], DATE_FORMAT),
                    currentObservation['payload'][self.payloadName],
                    currentObservation['sensor'])
                # FIX: integer division (see speedScale)
                for i in range(self.extendSensor // self.origSensor - 1):
                    payload = self.getRandAroundPayload(
                        currentObservation['payload'][self.payloadName],
                        self.deviceScaleNoise)
                    self.writeObject(
                        datetime.datetime.strptime(currentObservation['timeStamp'], DATE_FORMAT),
                        payload,
                        self.getCopyOfSensor(currentObservation['sensor'], i))
                currentObservation = self.seedData.getNext()
        except KeyError as e:
            print("Device IO error", e)
        finally:
            try:
                self.writer.close()
            except Exception as e:
                print("IO error")

    def timeScale(self):
        """Replicate each device-scaled observation forward every origDays, up to extendDays."""
        self.seedData = Parser(SENSOR_SCALED_FILE)
        self.writer = None
        try:
            self.writer = open(self.outputFile, "w")
            currentObservation = self.seedData.getNext()
            while currentObservation:
                self.writeObject(
                    datetime.datetime.strptime(currentObservation['timeStamp'], DATE_FORMAT),
                    currentObservation['payload'][self.payloadName],
                    currentObservation['sensor'])
                timestamp = datetime.datetime.strptime(
                    currentObservation['timeStamp'], DATE_FORMAT)
                # FIX: integer division (see speedScale)
                for i in range(1, self.extendDays // self.origDays):
                    # FIX: use timeScaleNoise here — deviceScaleNoise was a
                    # copy-paste slip and timeScaleNoise was otherwise unused.
                    payload = self.getRandAroundPayload(
                        currentObservation['payload'][self.payloadName],
                        self.timeScaleNoise)
                    timestamp += datetime.timedelta(days=i * self.origDays)
                    self.writeObject(timestamp, payload, currentObservation['sensor'])
                currentObservation = self.seedData.getNext()
        except KeyError as e:
            print("Time IO error", e)
        finally:
            try:
                self.writer.close()
            except Exception as e:
                print("IO error")
else: y = self.sentiment model = MLPClassifier(max_iter=100) param_grid = { 'hidden_layer_sizes': [(50, 50, 50), (50, 100, 50), (100, )], 'activation': ['tanh', 'relu'], 'solver': ['sgd', 'adam'], 'alpha': [0.0001, 0.05], 'learning_rate': ['constant', 'adaptive'], } clf = GridSearchCV(model, param_grid, n_jobs=-1, cv=3) clf.fit(self.X, y) # Best parameter set print('Best parameters found:\n', clf.best_params_) if __name__ == '__main__': corpus = Parser.parsing_vector_corpus_pandas( os.path.join(ROOT_DIR, 'corpus/iot-tweets-vector-v3.tsv')) model = ModelPrediction(corpus=corpus) model.tweak_hyperparameters("SVM") model.tweak_hyperparameters("MLP") # print(model.gender_model()) # print(model.sentiment_model()) # print(model.country_model()) # print(model.gender_model().predict(np.zeros(300).reshape(1, -1))) # print(model.sentiment_model().predict(np.zeros(300).reshape(1, -1))) # print(model.country_model().predict(np.zeros(300).reshape(1, -1)))
def load_corpus(self):
    """Lazily populate ``self.tweets`` from the vectorized tweet corpus.

    A no-op when the corpus has already been loaded.
    """
    if self.tweets is not None:
        return
    corpus_path = os.path.join(ROOT_DIR, "corpus/iot-tweets-vector-v3.tsv")
    self.tweets = Parser.parsing_vector_corpus_pandas(corpus_path)
def main():
    """Two-pass Hack assembler driver.

    Pass 1 records label (ROM) addresses; pass 2 resolves symbols, emits the
    16-bit machine words to ``<name>.hack`` and an annotated listing (plus the
    final symbol table) to ``<name>.ann``.

    FIXES vs. original:
    - string comparisons used ``is`` (identity), which only works by accident
      of CPython interning; replaced with ``==``.
    - output names were built with ``filename.replace('asm', ...)`` which also
      rewrites 'asm' occurring inside directory names; only the extension is
      swapped now.
    """
    filename = os.path.join(os.getcwd(), Util.getCommandLineArg(1))
    first_parser = Parser(filename)
    second_parser = Parser(filename)
    symbol_table = SymbolTable()
    base, _ = os.path.splitext(filename)
    hack_file = open(base + '.hack', 'w')
    ann_file = open(base + '.ann', 'w')
    rom_address = 0
    ram_address = 16  # variables are allocated from RAM[16] upward
    assembly = ''
    # First pass: map each label to the ROM address of the next instruction.
    while first_parser.has_more_commands():
        first_parser.advance()
        if first_parser.command_type() == 'A_COMMAND' or \
                first_parser.command_type() == 'C_COMMAND':
            rom_address += 1
        elif first_parser.command_type() == 'L_COMMAND':
            symbol_table.add_entry(first_parser.symbol(), rom_address, 'LAB')
    # Second pass: translate commands, allocating RAM slots for new variables.
    while second_parser.has_more_commands():
        second_parser.advance()
        machine_command = ''
        if second_parser.command_type() == 'A_COMMAND':
            if second_parser.symbol()[0].isdigit():
                binary = second_parser.symbol()
            else:
                if symbol_table.contains(second_parser.symbol()):
                    binary = symbol_table.get_address(second_parser.symbol())
                else:
                    binary = ram_address
                    symbol_table.add_entry(second_parser.symbol(),
                                           ram_address, 'VAR')
                    ram_address += 1
            machine_command = '{0:016b}\n'.format(int(binary))
            hack_file.write(machine_command)
        elif second_parser.command_type() == 'C_COMMAND':
            dest = Code.dest(second_parser.dest())
            comp = Code.comp(second_parser.comp())
            jump = Code.jump(second_parser.jump())
            machine_command = '111{0}{1}{2}\n'.format(comp, dest, jump)
            hack_file.write(machine_command)
        # Annotated listing: source, symbol-resolved form, nibble-grouped word.
        assembly = second_parser.original_command().strip()
        mc = machine_command.strip()
        annotated_machine = '{} {} {} {}'.format(mc[0:4], mc[4:8],
                                                 mc[8:12], mc[12:16])
        symbolless_command = ''
        if second_parser.command_type() == 'L_COMMAND':
            symbolless_command = symbol_table.get_address(
                second_parser.symbol())
        elif second_parser.command_type() == 'A_COMMAND' and \
                not second_parser.symbol().isdigit():
            symbolless_command = '@{}'.format(
                symbol_table.get_address(second_parser.symbol()))
        else:
            symbolless_command = second_parser.command
        annotated_command = '{:<39} {} {:<11} {}\n'.format(
            assembly, '//' if second_parser.command_type() else '',
            symbolless_command, annotated_machine)
        ann_file.write(annotated_command)
    ann_file.write('\n// Symbol Table:\n')
    for symbol, address in symbol_table.symbol_table.items():
        ann_file.write('// {}: {:<30} -> {}\n'.format(
            address[1], symbol, address[0]))
    hack_file.close()
    ann_file.close()
def testChars(): print ("=== char based ===") # creation a = Char('a') aaa = OneOrMore(a) klass = Klass(r"a..z A..Z 0..9 +-*/ ]\t\\ !!kq B..Y 0") klassN = Klass("a..zA..Z") string = String(klassN, numMin=1, numMax=9) parser = Parser(vars()) # char matches print ("=== char matches") a.test('az') a.test('za') aaa.test('aaaz') # klass matches print ("=== klass matches") print "pattern %s" % klass for source in ("a", "q", "A", "B", "1", "0", "*", "\\", "]", "[", "&", ""): try: result = klass.match(source) except EndOfText: result = "<EndOfText>" except MatchFailure: result = "<MatchFailure>" print " %5s --> %s" % (repr(source), result) # string expression print ("=== numbered string format expression") k = Klass('z') print "False, False -->", String(k, False, False)._format() print "0, False -->", String(k, 0, False)._format() print "False, 0 -->", String(k, False, 0)._format() print "0, 0 -->", String(k, 0, 0)._format() print "1, False -->", String(k, 1, False)._format() print "1, 0 -->", String(k, 1, False)._format() print "3, False -->", String(k, 3, False)._format() print "3, 0 -->", String(k, 3, False)._format() print "3, 3 -->", String(k, 3, 3)._format() print "3, 7 -->", String(k, 3, 7)._format() print "0, 7 -->", String(k, 0, 7)._format() # string matches print ("=== string matches") String(klassN, 0, 0).test("abc#") String(klassN, 0, 0).test("#abc#") String(klassN, 1, 0).test("abc#") String(klassN, 1, 0).test("#abc#") String(klassN, 5, 0).test("abcefghij#") String(klassN, 5, 0).test("abc#") String(klassN, 5, 0).test("abc") for source in ( "Yabc#", "abc", "abcqkabc", "a\\\]\t789", "1+2*3", "abc#", "#abc", "abcdefghijklmno", "a", "" ): string.test(source)