def get_loop_var_analysis_at (p, n):
    """Return the loop-variable analysis for loop head n of problem p.

    Registered 'loop_var_analysis' hooks get first chance to supply the
    analysis; otherwise fall back to the problem's own analysis built
    from its variable dependencies."""
    for hook in target_objects.hooks ('loop_var_analysis'):
        res = hook (p, n)
        # hooks signal "no answer" with None; use an identity test so a
        # falsy-but-valid analysis result is not skipped
        if res is not None:
            return res
    var_deps = p.compute_var_dependencies ()
    return p.get_loop_var_analysis (var_deps, n)
def preserves_sp(fname):
    """Decide whether fname can be assumed to leave the stack pointer
    unchanged: all functions preserve sp, whether or not they have
    pairing partners."""
    sp_assumed = bool(target_objects.hooks('assume_sp_equal'))
    # lazily seed the module-level extra_symbols set, adding a
    # '.' -> '_' normalised alias for every known symbol
    if not extra_symbols:
        for sym in target_objects.symbols:
            extra_symbols.add(sym)
            extra_symbols.add(sym.replace('.', '_'))
    return (get_asm_calling_convention(fname) or sp_assumed
        or fname in extra_symbols)
def fast_const_ret (self, n, nm, typ):
    """Heuristically decide whether the return value (nm, typ) of the
    Call node n may be treated as equal to one of the call's inputs
    (e.g. memory). Only consulted in the heuristic "fast" analysis
    mode; known for some function returns via hooks."""
    if not self.fast:
        return False
    node = self.p.nodes[n]
    assert node.kind == 'Call'
    # any 'rep_unsafe_const_ret' hook may claim the value is constant
    hooks = target_objects.hooks ('rep_unsafe_const_ret')
    return any (hook (node, nm, typ) for hook in hooks)
def get_loop_var_analysis_at (p, n):
    """Return the loop-variable analysis for loop head n of problem p,
    caching the result in p.cached_analysis.

    Registered 'loop_var_analysis' hooks get first chance to supply the
    analysis; otherwise fall back to the problem's own analysis built
    from its variable dependencies."""
    k = ('search_loop_var_analysis', n)
    if k in p.cached_analysis:
        return p.cached_analysis[k]
    for hook in target_objects.hooks ('loop_var_analysis'):
        res = hook (p, n)
        # hooks signal "no answer" with None; use an identity test so a
        # falsy-but-valid analysis result is not skipped
        if res is not None:
            p.cached_analysis[k] = res
            return res
    var_deps = p.compute_var_dependencies ()
    res = p.get_loop_var_analysis (var_deps, n)
    p.cached_analysis[k] = res
    return res
def build_compound_problem_with_links(call_stack, f):
    """Build a compound problem over the functions on call_stack plus
    the final function f, adding hypotheses that link each call site to
    its callee plus any extra WCET assertions supplied by hooks.

    Returns (problem, hypotheses, address map)."""
    funs = [get_body_addrs_fun(addr) for addr in call_stack] + [f]
    (p, hyps, addr_map, tag_pairs) = build_compound_problem(funs)
    # pair consecutive tags: entry i is the caller of entry i + 1
    call_tags = zip(tag_pairs[:-1], tag_pairs[1:])
    call_hyps = [
        get_call_link_hyps(p, addr_map[n], from_tp, to_tp)
        for (n, (from_tp, to_tp)) in zip(call_stack, call_tags)
    ]
    wcet_hyps = []
    from rep_graph import eq_hyp
    for (entry, tag, _, inputs) in p.entries:
        entry_vis = ((entry, ()), tag)
        # loop variable renamed from 'f': it previously shadowed the
        # function parameter 'f', a latent bug if code is reordered
        for hook in target_objects.hooks("extra_wcet_assertions"):
            for assn in hook(inputs):
                wcet_hyps.append(eq_hyp((assn, entry_vis),
                    (syntax.true_term, entry_vis)))
    return (p, hyps + [h for hs in call_hyps for h in hs] + wcet_hyps,
        addr_map)
def candidate_additional_eqs(p, split):
    """Generate candidate equality expressions for the loop at 'split'
    in problem p, to be offered to the split search: oddball
    comparisons derived from equalities seen in the loop body, plus any
    assertions contributed by 'extra_wcet_assertions' hooks."""
    eq_vals = set()
    # collect (lhs, rhs) and (rhs, lhs) for every word-typed equality
    # appearing anywhere in the loop body
    def visitor(expr):
        if expr.is_op('Equals') and expr.vals[0].typ.kind == 'Word':
            [x, y] = expr.vals
            eq_vals.update([(x, y), (y, x)])
    for n in p.loop_body(split):
        p.nodes[n].visit(lambda x: (), visitor)
    # rewrite equalities with zero: 0 == a + b gives a == -b,
    # 0 == a - b gives a == b (iterate over a snapshot while adding)
    for (x, y) in list(eq_vals):
        if is_zero(x) and y.is_op('Plus'):
            [x, y] = y.vals
            eq_vals.add((x, syntax.mk_uminus(y)))
            eq_vals.add((y, syntax.mk_uminus(x)))
        elif is_zero(x) and y.is_op('Minus'):
            [x, y] = y.vals
            eq_vals.add((x, y))
            eq_vals.add((y, x))
    # '%i' is the symbolic loop iteration counter; minus_loop_step
    # rewinds a linear series by one iteration
    loop = syntax.mk_var('%i', syntax.word32T)
    minus_loop_step = syntax.mk_uminus(loop)
    vas = search.get_loop_var_analysis_at(p, split)
    # keep only variables classified as linear series of the loop,
    # mapping each to its (rewrite fn, offset) data
    ls_vas = dict([(var, [data]) for (var, data) in vas
        if data[0] == 'LoopLinearSeries'])
    cmp_series = [(x, y, rew, offs) for (x, y) in eq_vals
        for (_, rew, offs) in ls_vas.get(x, [])]
    odd_eqs = []
    for (x, y, rew, offs) in cmp_series:
        # compare x against its value one loop step earlier, and
        # against the equality partner y, in both directions
        x_init_cmp1 = syntax.mk_less_eq(x, rew(x, minus_loop_step))
        x_init_cmp2 = syntax.mk_less_eq(rew(x, minus_loop_step), x)
        fin_cmp1 = syntax.mk_less(x, y)
        fin_cmp2 = syntax.mk_less(y, x)
        odd_eqs.append(syntax.mk_eq(x_init_cmp1, fin_cmp1))
        odd_eqs.append(syntax.mk_eq(x_init_cmp2, fin_cmp1))
        odd_eqs.append(syntax.mk_eq(x_init_cmp1, fin_cmp2))
        odd_eqs.append(syntax.mk_eq(x_init_cmp2, fin_cmp2))
    ass_eqs = []
    # hooks may contribute additional assertions over the variables
    # live at the split point
    var_deps = p.compute_var_dependencies()
    for hook in target_objects.hooks('extra_wcet_assertions'):
        for assn in hook(var_deps[split]):
            ass_eqs.append(assn)
    return odd_eqs + ass_eqs
def build_compound_problem_with_links(call_stack, f):
    """Build a compound problem over the functions on call_stack plus
    the final function f, adding hypotheses that link each call site to
    its callee plus any extra WCET assertions supplied by hooks.

    Returns (problem, hypotheses, address map)."""
    funs = [get_body_addrs_fun(addr) for addr in call_stack] + [f]
    (p, hyps, addr_map, tag_pairs) = build_compound_problem(funs)
    # pair consecutive tags: entry i is the caller of entry i + 1
    call_tags = zip(tag_pairs[:-1], tag_pairs[1:])
    call_hyps = [
        get_call_link_hyps(p, addr_map[n], from_tp, to_tp)
        for (n, (from_tp, to_tp)) in zip(call_stack, call_tags)
    ]
    wcet_hyps = []
    from rep_graph import eq_hyp
    for (entry, tag, _, inputs) in p.entries:
        entry_vis = ((entry, ()), tag)
        # loop variable renamed from 'f': it previously shadowed the
        # function parameter 'f', a latent bug if code is reordered
        for hook in target_objects.hooks('extra_wcet_assertions'):
            for assn in hook(inputs):
                wcet_hyps.append(
                    eq_hyp((assn, entry_vis),
                        (syntax.true_term, entry_vis)))
    return (p, hyps + [h for hs in call_hyps for h in hs] + wcet_hyps,
        addr_map)
def candidate_additional_eqs(p, split):
    """Generate candidate equality expressions for the loop at 'split'
    in problem p, to be offered to the split search: oddball
    comparisons derived from equalities seen in the loop body, plus any
    assertions contributed by 'extra_wcet_assertions' hooks."""
    eq_vals = set()
    # collect (lhs, rhs) and (rhs, lhs) for every word-typed equality
    # appearing anywhere in the loop body
    def visitor(expr):
        if expr.is_op("Equals") and expr.vals[0].typ.kind == "Word":
            [x, y] = expr.vals
            eq_vals.update([(x, y), (y, x)])
    for n in p.loop_body(split):
        p.nodes[n].visit(lambda x: (), visitor)
    # rewrite equalities with zero: 0 == a + b gives a == -b,
    # 0 == a - b gives a == b (iterate over a snapshot while adding)
    for (x, y) in list(eq_vals):
        if is_zero(x) and y.is_op("Plus"):
            [x, y] = y.vals
            eq_vals.add((x, syntax.mk_uminus(y)))
            eq_vals.add((y, syntax.mk_uminus(x)))
        elif is_zero(x) and y.is_op("Minus"):
            [x, y] = y.vals
            eq_vals.add((x, y))
            eq_vals.add((y, x))
    # '%i' is the symbolic loop iteration counter; minus_loop_step
    # rewinds a linear series by one iteration
    loop = syntax.mk_var("%i", syntax.word32T)
    minus_loop_step = syntax.mk_uminus(loop)
    vas = search.get_loop_var_analysis_at(p, split)
    # keep only variables classified as linear series of the loop,
    # mapping each to its (rewrite fn, offset) data
    ls_vas = dict([(var, [data]) for (var, data) in vas
        if data[0] == "LoopLinearSeries"])
    cmp_series = [(x, y, rew, offs) for (x, y) in eq_vals
        for (_, rew, offs) in ls_vas.get(x, [])]
    odd_eqs = []
    for (x, y, rew, offs) in cmp_series:
        # compare x against its value one loop step earlier, and
        # against the equality partner y, in both directions
        x_init_cmp1 = syntax.mk_less_eq(x, rew(x, minus_loop_step))
        x_init_cmp2 = syntax.mk_less_eq(rew(x, minus_loop_step), x)
        fin_cmp1 = syntax.mk_less(x, y)
        fin_cmp2 = syntax.mk_less(y, x)
        odd_eqs.append(syntax.mk_eq(x_init_cmp1, fin_cmp1))
        odd_eqs.append(syntax.mk_eq(x_init_cmp2, fin_cmp1))
        odd_eqs.append(syntax.mk_eq(x_init_cmp1, fin_cmp2))
        odd_eqs.append(syntax.mk_eq(x_init_cmp2, fin_cmp2))
    ass_eqs = []
    # hooks may contribute additional assertions over the variables
    # live at the split point
    var_deps = p.compute_var_dependencies()
    for hook in target_objects.hooks("extra_wcet_assertions"):
        for assn in hook(var_deps[split]):
            ass_eqs.append(assn)
    return odd_eqs + ass_eqs
def get_extra_sp_defs (rep, tag): """all functions will keep the stack pointer equal, whether they have pairing partners or not. add these extra defs/equalities for the purposes of stack depth analysis.""" # FIXME how to parametrise this? sp = mk_var ('r13', syntax.word32T) defs = {} assume_sp_equal = bool (target_objects.hooks ('assume_sp_equal')) items = [(n_vc, x) for (n_vc, x) in rep.funcs.iteritems () if logic.is_int (n_vc[0]) if get_asm_calling_convention (rep.p.nodes[n_vc[0]].fname) or assume_sp_equal or rep.p.nodes[n_vc[0]].fname in target_objects.symbols] for ((n, vc), (inputs, outputs, _)) in items: if rep.p.node_tags[n][0] == tag: inp_sp = solver.smt_expr (sp, inputs, rep.solv) inp_sp = solver.parse_s_expression (inp_sp) out_sp = solver.smt_expr (sp, outputs, rep.solv) out_sp = solver.parse_s_expression (out_sp) if inp_sp != out_sp: defs[out_sp] = inp_sp return defs
# NOTE(review): the first two statements are the tail of a method whose
# start lies outside this chunk, and the trailing 'def emit_node' header
# has its body outside this chunk; both are kept verbatim.
self.arc_pc_envs[(n, vcount)] = arcs
return arcs.get (n2[0])

def add_local_def (self, n, vname, name, val, env):
    """Introduce an SMT-level name for local variable 'vname' at node
    n, bound to expression val in environment env; returns the SMT
    name."""
    if self.local_defs_unsat:
        # encode the definition as an asserted equality so it can be
        # tracked in unsat cores under the tag ('Def', n, vname)
        smt_name = self.solv.add_var (name, val.typ)
        eq = mk_eq (mk_smt_expr (smt_name, val.typ), val)
        self.solv.assert_fact (eq, env, unsat_tag = ('Def', n, vname))
    else:
        smt_name = self.solv.add_def (name, val, env)
    return smt_name

def var_rep_request (self, (nm, typ), kind, n_vc, env):
    """Give 'problem_var_rep' hooks a chance to pick a special SMT
    representation for variable (nm, typ) at node n_vc; returns the
    chosen solver variable, or None if no hook applies."""
    assert type (n_vc[0]) != str
    for hook in target_objects.hooks ('problem_var_rep'):
        z = hook (self.p, (nm, typ), kind, n_vc[0])
        # None from a hook means "no opinion"; try the next hook
        if z == None:
            continue
        if z[0] == 'SplitMem':
            # split-memory representation: the hook supplies an address
            # at which the memory variable is split
            assert typ == builtinTs['Mem']
            (_, addr) = z
            addr = smt_expr (addr, env, self.solv)
            name = '%s_for_%s' % (nm, self.node_count_name (n_vc))
            return self.solv.add_split_mem_var (addr, name, typ,
                mem_name = 'SplitMemNonsense')
        else:
            # 'SplitMem' is the only request kind understood here
            assert z == None

def emit_node (self, n):
def should_avoid_fun(fname):
    """Report whether any 'wcet_functions_to_avoid' hook lists fname."""
    avoid_sets = target_objects.hooks("wcet_functions_to_avoid")
    return any(fname in avoided for avoided in avoid_sets)
def function_limit(fname):
    """Return the first limit any 'wcet_function_limits' hook assigns
    to fname, or None when no hook covers it."""
    return next(
        (table[fname]
            for table in target_objects.hooks("wcet_function_limits")
            if fname in table),
        None)
def should_avoid_fun(fname):
    """Report whether any 'wcet_functions_to_avoid' hook lists fname."""
    return any(fname in avoided
        for avoided in target_objects.hooks('wcet_functions_to_avoid'))
def function_limit(fname):
    """Return the first limit any 'wcet_function_limits' hook assigns
    to fname, or None when no hook covers it."""
    matching = (table[fname]
        for table in target_objects.hooks('wcet_function_limits')
        if fname in table)
    return next(matching, None)