def ap_substitute(self, ap_subs):
    """Rename atomic propositions of this automaton according to ap_subs.

    ap_subs maps old AP names to new AP names. Identity substitutions are
    dropped first; if nothing remains, the automaton is returned unchanged.
    Returns a new BuchiAutomaton with a rebuilt var_map.
    """
    # If we try something like [x/x]P, just don't do anything
    ap_subs = {k: v for k, v in ap_subs.items() if k != v}
    if not ap_subs:
        return self
    # Translate the substitution keys into BDD variable numbers, which is what
    # the low-level Substitution transform operates on.
    bdd_subs = {self.aut.register_ap(k): v for k, v in ap_subs.items()}
    settings.log(3, lambda: 'ap_subs: {}'.format(ap_subs))
    if settings.get_simplication_level() > 0:
        self.postprocess()
    # Rebuild the variable -> APs mapping under the renaming, collecting every
    # new AP so it can be registered on the transformed automaton below.
    new_var_map = VarMap()
    to_register = []
    for v, aps in self.var_map.items():
        new_var_map[v] = []
        for ap in aps:
            # Get the new name of this ap, or keep the ap itself if it wasn't renamed
            new_ap = ap_subs.get(ap, ap)
            new_var_map[v].append(new_ap)
            to_register.append(new_ap)
    settings.log(3, lambda: 'ap_subs: {}, {}, {}'.format(ap_subs, self.var_map, new_var_map))
    new_aut = buchi_transform(self.aut, Substitution(bdd_subs))
    for new_ap in to_register:
        new_aut.register_ap(new_ap)
    return BuchiAutomaton(new_aut, new_var_map)  # .postprocess()
def postprocess(self):
    """Simplify the automaton via spot's postprocessor, skipping automata already in SBA form."""
    if self.aut.is_sba():
        return self
    # Use 'BA' in the option list to ensure that the automata we have is a
    # Buchi (possibly nondeterministic) automata.
    if not settings.use_heuristics():
        postprocess_settings = ['BA']
    else:
        # With heuristics enabled, spend less determinization effort on
        # bigger automata.
        state_count = self.aut.num_states()
        if state_count > 300:
            effort = 'Low'
        elif state_count > 100:
            effort = 'Medium'
        else:
            effort = 'High'
        postprocess_settings = ['BA', 'Deterministic', effort]
    settings.log(3, lambda: 'Postprocessing (before) using {}: {} states and {} edges'.format(postprocess_settings, self.num_states(), self.num_edges()))
    self.aut = self.aut.postprocess(*postprocess_settings)
    settings.log(3, lambda: 'Postprocessing (after): {} states and {} edges'.format(self.num_states(), self.num_edges()))
    return self
def show_aut_stats(self, prog, aut, desc=None):
    """Log (level 1) the state and edge counts of aut, optionally tagged with desc."""
    state_count, edge_count = aut.num_states(), aut.num_edges()
    if desc is None:
        message = 'Automaton has {} states and {} edges'.format(state_count, edge_count)
    else:
        message = 'Automaton has {} states and {} edges {}'.format(state_count, edge_count, desc)
    settings.log(1, lambda: self.indented(prog, message))
def evaluate(self, prog):
    """Load an automaton for self.pred from self.filename in self.aut_format and register it in prog.preds."""
    # TODO: Support argument restrictions on loaded automata
    start_time = time.time()
    realpath = prog.locate_file(self.filename)
    settings.log(lambda: f'[INFO] Loading {self.pred} from {realpath} in "{self.aut_format}" format.')
    fmt = self.aut_format
    if fmt == 'hoa':
        # TODO: Rename the APs of the loaded automaton to be the same as the args specified
        aut = load_hoa(realpath)
    else:
        # The remaining formats all take the predicate argument names.
        arg_names = [v.var_name for v in self.pred.args]
        if fmt == 'walnut':
            aut = convert_aut(realpath, arg_names)
        elif fmt == 'pecan':
            aut = convert_labeled_aut(realpath, arg_names)
        elif fmt == 'fsa-dict':
            aut = load_finite(realpath, arg_names)
        else:
            raise Exception('Unknown format: {}'.format(self.aut_format))
    end_time = time.time()
    settings.log(0, lambda: '[INFO] Loaded {} in {:.2f} seconds ({} states, {} edges).'.format(self.pred, end_time - start_time, aut.num_states(), aut.num_edges()))
    prog.preds[self.pred.name] = NamedPred(self.pred.name, self.pred.args, {}, AutLiteral(aut))
    return None
def run_definition(self, i, d):
    """Type-check, lower, optionally optimize, and evaluate definition d (index i in self.defs).

    NamedPred definitions go through the typed pipeline; anything else is
    evaluated directly and its result returned.
    """
    from pecan.lang.typed_ir_lowering import TypedIRLowering
    from pecan.lang.optimizer.optimizer import UntypedOptimizer, Optimizer
    if isinstance(d, NamedPred):
        settings.log(1, lambda: '[DEBUG] Type inference and IR lowering for: {}'.format(d.name))
        transformed_def = TypedIRLowering(self).transform(self.type_infer(d))
        if settings.opt_enabled():
            settings.log(1, lambda: '[DEBUG] Performing typed optimization on: {}'.format(d.name))
            transformed_def = Optimizer(self).optimize(transformed_def)
            # Re-lower: the optimizer may have introduced nodes that need lowering again.
            transformed_def = TypedIRLowering(self).transform(transformed_def)
        settings.log(1, lambda: 'Lowered IR:')
        settings.log(1, lambda: transformed_def)
        # Register the processed definition both by index and by name before evaluating it.
        self.defs[i] = transformed_def
        self.preds[d.name] = self.defs[i]
        self.preds[d.name].evaluate(self)
        settings.log(0, lambda: self.preds[d.name])
    else:
        return d.evaluate(self)
def run_repl(env):
    """Run the interactive Pecan read-eval-print loop, returning the final environment.

    History is persisted via readline to settings.get_history_file().
    Commands: 'exit' quits; ':set debug' toggles the debug level; anything
    else is parsed and evaluated as a Pecan program in the current env.
    """
    utility.touch(settings.get_history_file())
    readline.read_history_file(settings.get_history_file())
    while True:
        try:
            prog_str = input('> ').strip()
            if prog_str.lower() == 'exit':
                break
            if prog_str.startswith(':set'):
                parts = prog_str.split(' ')
                if len(parts) > 1:
                    if parts[1] == 'debug':
                        # Toggle: any non-positive level becomes 1, anything else becomes 0.
                        settings.set_debug_level(1 if settings.get_debug_level() <= 0 else 0)
            else:
                prog = program.from_source(prog_str)
                settings.log(0, lambda: str(prog))
                # Carry over definitions/restrictions from the running session.
                prog.include_with_restrictions(env)
                env = prog.evaluate()
        except KeyboardInterrupt:
            print('')  # newline to go "below" the prompt
            print("Use 'exit' to exit Pecan.")
        except EOFError:
            print('exit')
            break
        except UnexpectedToken as e:
            # Parse errors: show the parser's message but keep the REPL alive.
            print(e)
        except Exception as e:
            # Catch-all so a failed evaluation never kills the REPL.
            print('An exception occurred:', e)
    readline.write_history_file(settings.get_history_file())
    return env
def from_source(source_code, *args, **kwargs):
    """Parse source_code into a Pecan program, run the front-end pipeline, and return it.

    Pipeline: parse -> (optional) implication extraction -> AST-to-IR ->
    stdlib inclusion -> (optional) untyped optimization.
    """
    prog = pecan_parser.parse(source_code)
    settings.log(4, lambda: 'Parsed program:')
    settings.log(4, lambda: prog)
    prog.search_paths = make_search_paths(filename=kwargs.get('filename', None))
    prog.loader = load
    if settings.get_extract_implications():
        prog.extract_implications()
    prog = ASTToIR().transform(prog)
    settings.log(0, lambda: 'Search path: {}'.format(prog.search_paths))
    # Load the standard library
    prog = settings.include_stdlib(prog, load, args, kwargs)
    if settings.opt_enabled():
        prog = UntypedOptimizer(prog).optimize()
        settings.log(1, lambda: '(Untyped) Optimized program:')
        settings.log(1, lambda: prog)
    return prog
def evaluate(self, prog):
    """Evaluate the named predicate and save the resulting automaton to self.filename."""
    settings.log(lambda: f'[INFO] Saving {self.pred_name} as {self.filename}')
    prog.add_generated_file(self.filename)
    automaton = prog.call(self.pred_name)
    automaton.save(self.filename)
    return None
def evaluate(self, prog):
    """Fetch plot parameters from the Praline environment and plot the bound automaton."""
    # Praline-level bindings: plotting options, numeration systems, and the automaton term.
    options = dict(as_python(prog.praline_lookup('options').evaluate(prog)))
    num_systems = dict(as_python(prog.praline_lookup('numSystems').evaluate(prog)))
    term = as_python(prog.praline_lookup('aut').evaluate(prog), PralinePecanLiteral)
    settings.log(lambda: '[INFO] Plotting {} using numeration systems {} with options: {}'.format(term, num_systems, options))
    buchi_aut = term.evaluate(prog)
    BuchiPlotter(prog, num_systems, buchi_aut, **options).plot()
    return PralineBool(True)
def evaluate(self, prog):
    """Render the named predicate's automaton and write it to self.filename as SVG."""
    # TODO: Support formats other than SVG?
    settings.log(lambda: f'[INFO] Saving {self.pred_name} as an SVG in {self.filename}')
    rendered = prog.call(self.pred_name).show()
    # Write the raw svg data into the file
    with open(self.filename, 'w') as out_file:
        out_file.write(rendered.data)
    return None
def optimize(self, node, pred):
    """Run this pass's pre/transform/post phases on node.

    Returns (changed, new_node) where changed reports whether any phase
    modified the tree (phases set self.changed as a side effect).
    Note: the intermediate logs are gated on self.changed, so they only
    appear once some earlier phase has reported a change.
    """
    self.changed = False
    self.pred = pred
    settings.log(3, lambda: 'Before pre-optimize {}: {}'.format(type(self).__name__, node))
    # Phases may return None to mean "no replacement"; keep the old node then.
    res = self.pre_optimize(node)
    if res is not None:
        node = res
    if self.changed:
        settings.log(3, lambda: 'Before optimize {}: {}'.format(type(self).__name__, node))
    new_node = self.transform(node)
    if self.changed:
        settings.log(3, lambda: 'Before post-optimize {}: {}'.format(type(self).__name__, new_node))
    res = self.post_optimize(new_node)
    if res is not None:
        new_node = res
    if self.changed:
        settings.log(3, lambda: 'After post-optimize {}: {}'.format(type(self).__name__, new_node))
    return self.changed, new_node
def simplify_states(self):
    """Shrink the underlying automaton: purge useless states, filter SCCs,
    SAT-minimize small deterministic automata, and merge equivalent states.

    Returns self for chaining.
    """
    self.get_aut().purge_dead_states()
    settings.log(3, lambda: 'after purge_dead_states: {}'.format(self.num_states()))
    self.get_aut().purge_unreachable_states()
    settings.log(3, lambda: 'after purge_unreachable_states: {}'.format(self.num_states()))
    self.aut = self.get_aut().scc_filter()
    settings.log(3, lambda: 'after scc_filter: {}'.format(self.num_states()))
    # BUGFIX: this previously read `self.num_states() < 10 & self.get_aut().is_deterministic()`.
    # `&` binds tighter than `<`, so it parsed as `num_states() < (10 & bool)`,
    # i.e. `num_states() < 0`, which is always False -- sat_minimize never ran.
    # SAT minimization is expensive, so only attempt it on small deterministic automata.
    if self.num_states() < 10 and self.get_aut().is_deterministic():
        self.aut = spot.sat_minimize(self.get_aut())
        settings.log(3, lambda: 'after sat_minimize: {}'.format(self.num_states()))
    if settings.use_heuristics():
        self.merge_states()
    else:
        # Without heuristics, only merge when the automaton is small enough
        # for the merge pass to be affordable.
        if self.num_states() < 50000:
            self.merge_states()
    return self
def run_optimizations(self, node, pred):
    """Apply every optimization pass to node, repeating until a full round changes nothing."""
    settings.log(2, lambda: f'Optimizing: {node}')
    passes = [ArithmeticOptimizer(self), CSEOptimizer(self), BooleanOptimizer(self), RedundantVariableOptimizer(self), UnusedVariableOptimizer(self)]
    new_node = node
    # Iterate to a fixed point: stop once a complete round reports no changes.
    while True:
        round_changed = False
        for opt_pass in passes:
            changed, new_node = opt_pass.optimize(new_node, pred)
            round_changed = round_changed or changed
        if not round_changed:
            break
    settings.log(2, lambda: f'Optimized node: {new_node}')
    return new_node
def postprocess(self):
    """Normalize the automaton to Buchi form via spot's postprocessor, unless it is already SBA.

    Returns self for chaining.
    """
    if not self.aut.is_sba():
        settings.log(3, lambda: 'Postprocessing (before): {} states and {} edges'.format(self.num_states(), self.num_edges()))
        # Ensure that the automata we have is a Buchi (possible nondeterministic) automata
        self.aut = self.aut.postprocess('BA')
        # Size-based determinization heuristics, currently disabled:
        # if self.aut.num_states() > 300:
        #     self.aut = self.aut.postprocess('BA', 'Deterministic', 'Low')
        # elif self.aut.num_states() > 100:
        #     self.aut = self.aut.postprocess('BA', 'Deterministic', 'Medium')
        # else:
        #     self.aut = self.aut.postprocess('BA', 'Deterministic', 'High')
        settings.log(3, lambda: 'Postprocessing (after): {} states and {} edges'.format(self.num_states(), self.num_edges()))
    return self
def evaluate(self, prog):
    """Check whether the named predicate has the expected truth value and return a Result."""
    settings.log(lambda: f'[INFO] Checking if {self.pred_name} is {self.display_truth_val()}.')
    matched = self.pred_truth_value(prog) == self.truth_val
    message = (f'{self.pred_name} is {self.display_truth_val()}.'
               if matched
               else f'{self.pred_name} is not {self.display_truth_val()}.')
    result = Result(message, matched)
    settings.log(lambda: result.result_str())
    return result
def ap_project(self, aps):
    """Return a new BuchiAutomaton with the given atomic propositions projected away."""
    if not aps:
        return self
    settings.log(3, lambda: 'ap_project: {}'.format(aps))
    # Emptiness can be decided in linear time, which is very cheap next to the
    # postprocessing cost a projection would otherwise incur (which depends on
    # the underlying automaton, but is generally atrocious). So check first:
    # if we're already empty, return a canonical empty automaton immediately.
    if self.aut.is_empty():
        return self.make_empty_aut()
    ap_remover = spot.remove_ap()
    for prop in aps:
        ap_remover.add_ap(prop)
    stripped = ap_remover.strip(self.get_aut())
    return BuchiAutomaton(stripped, self.get_var_map())
def evaluate(self, prog):
    """Capture the restrictions in scope at definition time into self.restriction_env.

    Also warns about free variables in the body that are not among the
    declared arguments.
    """
    # Here we keep track of all restrictions that were in scope when we are evaluated;
    # this essentially builds a closure. Otherwise, if we forget a variable after the declaration of this predicate,
    # then we will lose the restriction when we are called. This would cause our behavior to depend on lexically
    # where this predicate is used in the program, which would be confusing.
    prog.enter_scope()
    try:
        for _, arg_restriction in self.arg_restrictions.items():
            arg_restriction.evaluate(prog)
        self.restriction_env = prog.get_restriction_env()
        from pecan.lang.optimizer.tools import FreeVars
        # Any free variable not bound by an argument is probably a mistake; warn.
        free_vars = FreeVars().analyze(self.body)
        diff = free_vars - set(arg.var_name for arg in self.args)
        if len(diff) > 0:
            settings.log(lambda: "[WARN] Free variables found in {}: {}".format(self.name, diff))
    finally:
        # Always leave the scope we entered, even if evaluation above raised.
        prog.exit_scope()
def plot(self):
    """Plot the automaton's hit-cell bitmaps for the configured layer(s).

    For 1-D plots either a single layer or a (layer_from, layer_to) range must
    be given; higher-dimensional plots require a single layer. Optionally
    saves to self.save_to and/or shows the figure.
    """
    dim = len(self.dimensions)
    if dim == 1:
        assert (self.layer is not None or (self.layer_from is not None and self.layer_to is not None)), \
            "one of layer or (layer_from, layer_to) must be specified"
    else:
        assert self.layer is not None, "layer must be specified for higher-dimensional plots"
    # layers is a list of tuple [(layer_num, bitmap), (layer_num, bitmap), ...]
    # that records the cell bitmap at each layer
    layers = []
    if dim == 1 and (self.layer_from is not None and self.layer_to is not None):
        for layer in range(self.layer_from, self.layer_to + 1):
            layers.append((layer, self.get_hit_cell_bitmap(self.buchi_aut, layer)))
    else:
        layers = [(self.layer, self.get_hit_cell_bitmap(self.buchi_aut, self.layer))]
    # plot all layers in the specified method
    for layer, cell_bitmap in layers:
        # NOTE(review): the comprehension variable `dim` shadows the outer
        # `dim` count above; it is only used after the outer value is no
        # longer needed, but a different name would be clearer.
        self.plot_method.plot_layer([self.alphabet_sizes[dim] for dim in self.dimensions], layer, cell_bitmap, self.dimensions, color_by_axis=self.color_by_axis)
    if self.save_to:
        settings.log(lambda: '[INFO] Saving plot to {}'.format(self.save_to))
        self.plot_method.save(self.save_to)
        self.prog.add_generated_file(self.save_to)
    if self.show:
        self.plot_method.show()
    self.plot_method.cleanup()
def postprocess(self, level=None): settings.log(3, lambda: 'Empty: {}'.format(self.is_empty())) # settings.log(3, lambda: 'Universal: {}'.format(spot.is_universal(self.get_aut()))) postprocess_settings = ['BA'] if level is not None: postprocess_settings.append(level) if not self.aut.is_sba(): # Use 'BA' in the option list to ensure that the automata we have is a Buchi (possible nondeterministic) automata if settings.use_heuristics(): postprocess_settings.append('Deterministic') if level is None: if self.aut.num_states() > 300: postprocess_settings.append('Low') elif self.aut.num_states() > 100: postprocess_settings.append('Medium') else: postprocess_settings.append('High') settings.log( 1, lambda: 'Postprocessing (before) using {}: {} states and {} edges'. format(postprocess_settings, self.num_states(), self.num_edges( ))) self.aut = self.aut.postprocess(*postprocess_settings) settings.log( 1, lambda: 'Postprocessing (after): {} states and {} edges'. format(self.num_states(), self.num_edges())) return self
def evaluate(self, old_env=None):
    """Evaluate every definition in the program, collecting failures into self.result.

    Installs the Praline builtins, optionally merges an old environment, then
    walks self.defs by index (not a for-loop, because evaluation can append
    new definitions). Returns self.
    """
    from pecan.lib.praline.builtins import builtins
    for builtin in builtins:
        builtin.evaluate(self)
    if old_env is not None:
        self.include(old_env)
    succeeded = True
    msgs = []
    self.idx = 0
    # Don't use a for, because Praline code can insert new definitions dynamically
    while self.idx < len(self.defs):
        self.enter_var_map_scope()
        # emit_offset counts definitions emitted during this step, so the
        # index can skip past them below.
        self.emit_offset = 0
        d = self.defs[self.idx]
        settings.log(0, lambda: '[DEBUG] Processing: {}'.format(d))
        result = self.run_definition(self.idx, d)
        if result is not None and type(result) is Result:
            if result.failed():
                succeeded = False
                msgs.append(result.message())
        self.idx += 1 + self.emit_offset
        self.exit_var_map_scope()
    # Clear all restrictions. All relevant restrictions will be held inside the restriction_env of the relevant predicates.
    # Having them also in our restrictions list just leads to double restricting, which is a waste of computation time
    self.restrictions.clear()
    self.idx = None
    self.result = Result('\n'.join(msgs), succeeded)
    return self
def relabel(self):
    """Rename every atomic proposition to a fresh name and return the relabeled automaton."""
    # Temporarily disable simplification during the substitution; restore after.
    # NOTE(review): the getter is spelled `get_simplication_level` while the
    # setter is `set_simplification_level` -- both spellings come from the
    # settings module; confirm against it before renaming either.
    saved_level = settings.get_simplication_level()
    settings.set_simplification_level(0)
    existing_names = set(map(str, self.aut.ap()))
    renaming = {}
    for prop in self.aut.ap():
        # Make sure that we don't try to relabel with an AP that's already in the automaton.
        # This can happen when we load an automaton from a file.
        candidate = self.fresh_ap()
        while candidate in existing_names:
            candidate = self.fresh_ap()
        renaming[prop.ap_name()] = candidate
    settings.log(3, lambda: 'Relabeling: {}'.format(renaming))
    relabeled = self.ap_substitute(renaming)
    settings.set_simplification_level(saved_level)
    return relabeled
def evaluate(self, prog):
    """Evaluate this node, simplify the resulting automaton, and (in debug mode) time it.

    The result may be a bare automaton or a (automaton, extra) tuple; only the
    automaton component is simplified.
    """
    prog.eval_level += 1
    if settings.get_debug_level() > 0:
        start_time = time.time()
    # settings.log(0, lambda: self.indented(prog, 'Evaluating {}'.format(self))
    result = self.evaluate_node(prog)
    if type(result) is tuple:
        result = (self.simplify(prog, result[0]), result[1])
    else:
        result = self.simplify(prog, result)
    prog.eval_level -= 1
    # NOTE(review): start_time is only bound when the debug level was > 0
    # before evaluation; if the level were raised *during* evaluation this
    # branch would hit a NameError. Assumes the level is stable per call.
    if settings.get_debug_level() > 0:
        if type(result) is tuple:
            sn, en = result[0].num_states(), result[0].num_edges()
        else:
            sn, en = result.num_states(), result.num_edges()
        end_time = time.time()
        settings.log(0, lambda: self.indented(prog, '{} has {} states and {} edges ({:.2f} seconds)'.format(self.get_display_node(prog), sn, en, end_time - start_time)))
    return result
def plot_layer(self, alphabet_sizes, layer, cell_bitmap, labels, color_by_axis=None, **kwargs):
    """Render a 3-D voxel plot of cell_bitmap at the given layer.

    alphabet_sizes: the three per-axis alphabet sizes (k1, k2, k3); the voxel
    grid has shape (k1**layer, k2**layer, k3**layer).
    labels: the three axis labels; color_by_axis, if given, must be one of
    them and selects a jet-colormap gradient along that axis.
    """
    assert len(alphabet_sizes) == 3
    k1, k2, k3 = alphabet_sizes
    voxels = np.zeros((k1**layer, k2**layer, k3**layer), dtype=np.uint8)
    settings.log(lambda: "Preparing voxel map...")
    for x in range(k1**layer):
        for y in range(k2**layer):
            for z in range(k3**layer):
                if cell_bitmap[x, y, z]:
                    voxels[x, y, z] = 1
    if color_by_axis is not None:
        settings.log(lambda: "Preparing color map...")
        colors = np.empty(np.shape(voxels), dtype=object)
        cmap = self.pt.get_cmap("jet")
        axis_index = labels.index(color_by_axis)
        for x in range(k1**layer):
            for y in range(k2**layer):
                for z in range(k3**layer):
                    if axis_index == 0:
                        colors[x, y, z] = cmap(x / k1**layer)
                    elif axis_index == 1:
                        colors[x, y, z] = cmap(y / k2**layer)
                    elif axis_index == 2:
                        colors[x, y, z] = cmap(z / k3**layer)
    settings.log(lambda: "Drawing voxels...")
    fig = self.pt.figure()
    # BUGFIX: fig.gca(projection="3d") was deprecated in matplotlib 3.4 and
    # removed in 3.6; add_subplot(projection="3d") is the supported,
    # backward-compatible way to obtain a 3-D Axes.
    ax = fig.add_subplot(projection="3d")
    if color_by_axis is not None:
        ax.voxels(voxels, facecolors=colors)
    else:
        ax.voxels(voxels)
    assert len(labels) == 3
    ax.set_xlabel(labels[0])
    ax.set_ylabel(labels[1])
    ax.set_zlabel(labels[2])
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])
    ax.zaxis.set_ticklabels([])
def merge_states(self):
    """Merge equivalent states: to a fixed point at simplification level > 1, else one pass."""
    if settings.get_simplification_level() > 1:
        passes_run = 0
        while self.get_aut().merge_states() > 0:
            passes_run += 1
            settings.log(3, lambda: 'after merge_states: {}'.format(self.num_states()))
        # Even when no pass merged anything, still report the count once.
        if passes_run == 0:
            settings.log(3, lambda: 'after merge_states: {}'.format(self.num_states()))
    else:
        self.get_aut().merge_states()
        settings.log(3, lambda: 'after merge_states: {}'.format(self.num_states()))
    return self
def evaluate(self, prog):
    """Look up the Praline-bound 'pecanTerm' and emit it as a new program definition."""
    bound_value = prog.praline_lookup('pecanTerm').evaluate(prog)
    pecan_term = bound_value.get_term()
    settings.log(0, lambda: '[DEBUG] Emitted: "{}"'.format(pecan_term))
    prog.emit_definition(pecan_term)
    return PralineBool(True)
def merge_states(self):
    """Run a single state-merging pass on the wrapped automaton; returns self."""
    underlying = self.get_aut()
    underlying.merge_states()
    settings.log(3, lambda: 'after merge_states: {}'.format(self.num_states()))
    return self