def read(path: str): """ `path` to image file Errors ------ IOError is raised when no image can be found under `path`. """ image = cv2.imread(path) # image is None if no image is found/ opened if image is None: raise IOError("Could not open image file under: {}".format(path)) image = preprocess(image) text = ocr(image, language="pol") print(text) receipt = parse(text) return receipt
def read(path: str): """ `path` to image file """ image = cv2.imread(path) # cv2.imshow("original", image) # cv2.waitKey(0) image = preprocess(image) # cv2.imshow("processed", image) # cv2.waitKey(0) text = ocr(image) # print(text) receipt = parse(text) return receipt
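# Both read() variants delegate to preprocess() and ocr(), which are not shown
# here. Below is a minimal sketch of what they might look like, assuming OpenCV
# for the cleanup step and pytesseract for recognition; the Otsu thresholding
# and the pytesseract dependency are illustrative assumptions, not the original
# implementation.
import cv2
import pytesseract


def preprocess(image):
    # Hypothetical cleanup: grayscale, then binarize, which typically helps
    # OCR on photographed receipts.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return binary


def ocr(image, language="pol"):
    # Hypothetical recognizer: Tesseract with the Polish language pack,
    # matching the language="pol" argument used in read().
    return pytesseract.image_to_string(image, lang=language)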
# Fragment of GreedyLocalSearch.run(); `depth`, `file`, `iter_num` and `start`
# come from the elided setup of the method.
while True:
    changes_found = False
    generator = RandomChanges()
    for new_assignments in generator.iterate_chained(self.current_assignments, depth=depth):
        new_cost = self.cost(new_assignments)
        self.save_if_best_solution(new_assignments, new_cost)
        if self.current_cost < new_cost:
            # Greedy step: accept the first neighbour that improves on the
            # current solution and restart the neighbourhood scan.
            self.current_assignments = new_assignments
            self.current_cost = new_cost
            changes_found = True
            break
    row = "{0};\t\t\t{1};\t{2}\n".format(iter_num, time.time() - start, self.current_cost)
    file.write(row)
    print(row)
    iter_num += 1
    if not changes_found:
        # Local optimum: no neighbour improves on the current solution.
        break
return self.current_assignments, self.current_cost, self.best_assignments, self.best_cost


configuration, assignments = parse()
engine = GreedyLocalSearch(configuration, list(assignments))
assignments, current_cost, best_assignments, best_cost = engine.run()
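# save_if_best_solution() is called above but not shown. A minimal sketch of the
# method, under the assumption that larger cost values are better, which is what
# the acceptance test `self.current_cost < new_cost` implies; the best_*
# attribute names mirror the tuple returned by run().

# Inside GreedyLocalSearch:
def save_if_best_solution(self, assignments, cost):
    # Remember the best solution seen across all iterations, independently of
    # whether the greedy step accepts this neighbour.
    if cost > self.best_cost:
        self.best_assignments = list(assignments)
        self.best_cost = cost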
def generateStateMachines(logger, num, analysisType, mode):
    logger.info("Reading application file...")
    with open("application.s", 'r') as file:
        stream = parser.parse(file.readlines())
    with open("application_parsed.s", 'w') as file:
        file.write(str(stream))
    logger.info("Read " + str(stream.instructionCount()) + " instructions, " +
                str(stream.labelCount()) + " labels, " +
                str(stream.directiveCount()) + " directives.")

    # Get basic blocks from stream.
    blocks = basicblocks.extractBasicBlocks(logger, stream, mode)

    # Filter any blocks we don't want to convert.
    blocks = list(filter(lambda b: b.cost() != math.inf, blocks))

    # Each analysis type ranks blocks by a different metric; the four branches
    # of the selection logic differed only in the description, the metric label
    # and the key function, so they are folded into a single table here.
    analyses = {
        "hybrid": ("hybrid cost function", "cost",
                   lambda b: b.cost()),
        "avgwidth": ("potential parallelism (computation width)", "average width",
                     lambda b: b.averageComputationWidth()),
        "memdensity": ("memory access density", "memory access density",
                       lambda b: b.memoryAccessDensity()),
        "overhead": ("I/O overhead", "I/O overhead",
                     lambda b: b.ioOverhead()),
    }
    description, metricName, metric = analyses[analysisType]
    logger.info("Selecting blocks based on " + description + ".")

    blocksSorted = sorted(blocks, key=metric)
    if len(blocksSorted) <= num:
        logger.debug("Number specified is lower than or equal to number of blocks. Selecting all.")
        selected = blocksSorted
    else:
        selected = blocksSorted[:num]

    stateMachines = [statemachine.getStateMachine(b) for b in selected]

    # Emit info about selected cores in cost order.
    for sm in stateMachines:
        logger.info("Selected: " + sm.name() +
                    " (" + metricName + ": " + str(round(metric(sm.block()), 4)) +
                    ", states: " + str(len(sm)) +
                    ", inputs: " + str(len(sm.inputRegisters())) +
                    ", outputs: " + str(len(sm.outputRegisters())) + ")")

    # Sum the lengths of all basic blocks and of the selected ones, to report
    # what fraction of the application the selection covers.
    sumAll = sum(len(b) for b in blocks)
    sumSelected = sum(len(b) for b in selected)

    # Sort in textual order.
    stateMachines = sorted(stateMachines, key=lambda sm: sm.block().startLine())

    id = 0
    change = 0
    for sm in stateMachines:
        sm.setId(id)
        with open(sm.name() + "_temp.vhd", 'w') as file:
            logger.debug("Writing definition for " + sm.name() +
                         " to file " + sm.name() + "_temp.vhd.")
            file.write(translator.translateStateMachine(sm))
        # Replacing lines shifts everything below; carry the offset forward.
        change += stream.replaceLines(sm.block().lines()[0] + change,
                                      sm.block().lines()[-1] + change,
                                      sm.replacementInstructions())
        id += 1

    with open("application_new.s", 'w') as file:
        file.write(str(stream))

    return (blocks, stateMachines, (sumSelected / sumAll))
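# A hypothetical invocation of generateStateMachines(), to illustrate the
# expected arguments and the returned tuple. The logging setup is standard
# library; num=4, the "hybrid" analysis and the mode value are illustrative
# assumptions, and "application.s" must exist in the working directory.
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("generate")

blocks, stateMachines, coverage = generateStateMachines(
    logger, num=4, analysisType="hybrid", mode="default")
logger.info("Selected blocks cover {:.1%} of basic-block instructions.".format(coverage))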
# Additionally, make sure that the trees are really lossless representations of source strings.

import re
from parsing import TagTree, parser
from pithy.io import err_progress
from pithy.loader import load

tag_start_re = re.compile(r'[[({]|<([^/>]*)>')
tag_end_re = re.compile(r'[])}]|</([^>]*)>')

for record in err_progress(load('wb/scan.jsons')):
    for para in record:
        tree = parser.parse(para)
        assert isinstance(tree, TagTree)

        def checkF(cond:bool, msg:str) -> None:
            if not cond:
                exit(f'{msg}\n{para}\n\n{tree.structured_desc()}')  # type: ignore

        checkF(not tree.is_flawed, 'flawed paragraph:')
        # The remaining checks are meant to validate the implementation of tag_tree.
        s = str(tree)
        checkF(s == para, f'bad tree str:\n{s}')
        for leaf in tree.walk_contents():
            checkF(not tag_start_re.search(leaf), f'leaf token looks like tag start: {leaf}')
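# A quick illustration of what the two regexes treat as tag delimiters: opening
# brackets and HTML-style opening tags count as starts, closing brackets and
# closing tags count as ends. These asserts are illustrative additions, not
# part of the original check script.
assert tag_start_re.search('some <i> text') is not None    # opening tag
assert tag_start_re.search('(parenthesized)') is not None  # bare open bracket
assert tag_start_re.search('</i>') is None                 # a closing tag is not a start
assert tag_end_re.search('</i>') is not None               # closing tag
assert tag_end_re.search('a ] b') is not None              # bare close bracket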
from tokenizer import tokenizer
from parsing.parser import parse
from scoring.scorer import score

# Pipeline: tokenize the input, parse the tokens into an AST, then score both.
tokens = tokenizer.tokenizer()
AST = parse(tokens)
score(tokens, AST)
def test_evaluation(self):
    configuration, assignments = parse()
    value, completeness = evaluate_goal(configuration, assignments)
    self.assertLess(value, .7719)
    self.assertAlmostEqual(value, .7711, delta=0.0001)
    self.assertAlmostEqual(completeness, 100)
# Variant of test_evaluation expecting a negative goal value and an incomplete
# assignment.
def test_evaluation(self):
    configuration, assignments = parse()
    value, completeness = evaluate_goal(configuration, assignments)
    self.assertLess(value, 0)
    self.assertLess(completeness, 100)
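# Both variants of test_evaluation are unittest.TestCase methods, so they need
# a surrounding test class to run. A minimal, self-contained harness sketch;
# the class name is hypothetical, and parse()/evaluate_goal() come from the
# project under test.
import unittest


class EvaluationTest(unittest.TestCase):

    def test_evaluation(self):
        configuration, assignments = parse()
        value, completeness = evaluate_goal(configuration, assignments)
        self.assertLess(value, .7719)
        self.assertAlmostEqual(value, .7711, delta=0.0001)
        self.assertAlmostEqual(completeness, 100)


if __name__ == '__main__':
    unittest.main()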