def solve(self): #mpp gen
    ts = [t for t in self.terms if t != 1]

    if self.subset_siz is None:
        subset_siz = len(ts)
    else:
        subset_siz = self.subset_siz

    def wprocess(tasks, Q):
        rs = []
        for ts_subset in tasks:
            rs.extend(IeqMPPGen.build_vts_poly(
                list(ts_subset), self.tcs, self.mpp_opt))
        if Q is None:  #no multiprocessing
            return rs
        else:
            Q.put(rs)

    from vu_common import get_workloads
    tasks = list(itertools.combinations(ts, subset_siz))
    Q = mp.Queue()
    workloads = get_workloads(tasks,
                              max_nprocesses=mp.cpu_count(),
                              chunksiz=2)
    logger.debug("workloads 'build_vts_poly' {}: {}"
                 .format(len(workloads), map(len, workloads)))

    workers = [mp.Process(target=wprocess, args=(wl, Q))
               for wl in workloads]

    for w in workers:
        w.start()

    wrs = []
    for _ in workers:
        wrs.extend(Q.get())

    self.sols = map(InvMPP, wrs)
def runMP(taskname, tasks, wprocess, chunksiz, doMP):
    """
    Run wprocess on tasks in parallel
    """
    if doMP:
        from vu_common import get_workloads
        from multiprocessing import (Process, Queue, cpu_count)

        Q = Queue()
        workloads = get_workloads(tasks,
                                  max_nprocesses=cpu_count(),
                                  chunksiz=chunksiz)

        logger.debug("workloads '{}' {}: {}".format(
            taskname, len(workloads), map(len, workloads)))

        workers = [Process(target=wprocess, args=(wl, Q))
                   for wl in workloads]

        for w in workers:
            w.start()

        wrs = []
        for _ in workers:
            wrs.extend(Q.get())
    else:
        wrs = wprocess(tasks, Q=None)

    return wrs
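# Hedged usage sketch (not part of the original sources): how a caller's
# worker function plugs into runMP.  The worker must accept (tasks, Q),
# return its results when Q is None, and Q.put() them otherwise -- the
# same contract the wprocess helpers in solve() and rfilter() follow.
# The name _demo_runMP and the toy per-task work are made up for the demo.
def _demo_runMP():
    def wprocess(tasks, Q):
        rs = [t * t for t in tasks]   #toy per-task work
        if Q is None:  #no multiprocessing
            return rs
        else:
            Q.put(rs)

    #doMP=False exercises the sequential fallback; with doMP=True the same
    #call fans the tasks out over cpu_count() worker processes.
    wrs = runMP('demo', range(10), wprocess, chunksiz=2, doMP=False)
    assert sorted(wrs) == [t * t for t in range(10)]

# Note that runMP, like the inlined blocks around it, does one Q.get() per
# worker before ever joining (in fact it never joins).  Draining the Queue
# first is the safe order with multiprocessing.Queue: joining a process
# whose queue still holds unflushed data can deadlock.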
def rfilter(self, tcs, do_parallel=True):
    if is_empty(self.ps) or is_empty(tcs):
        logger.debug('rfilter skips (|ps|={}, |tcs|={})'.format(
            len(self.ps), len(tcs)))
        return None

    logger.debug('rfilter(|ps|={}, |tcs|={})'.format(
        len(self.ps), len(tcs)))

    if not isinstance(self.ps[0], InvExp):
        from dig_miscs import Miscs
        tcs = Miscs.keys_to_str(tcs)

    def wprocess(tasks, Q):
        rs = [p for p in tasks if all(p.seval(tc) for tc in tcs)]
        if Q is None:  #no multiprocessing
            return rs
        else:
            Q.put(rs)

    tasks = self.ps

    if do_parallel:
        from vu_common import get_workloads
        from multiprocessing import (Process, Queue,
                                     current_process, cpu_count)

        Q = Queue()
        workloads = get_workloads(tasks,
                                  max_nprocesses=cpu_count(),
                                  chunksiz=2)

        logger.debug("workloads 'refine' {}: {}".format(
            len(workloads), map(len, workloads)))

        workers = [Process(target=wprocess, args=(wl, Q))
                   for wl in workloads]

        for w in workers:
            w.start()

        wrs = []
        for _ in workers:
            wrs.extend(Q.get())
    else:
        wrs = wprocess(tasks, Q=None)

    self.ps = wrs
    Refine.print_diff('rfilter', len(tasks), len(self.ps))
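# Hypothetical refactoring sketch (not in the original code): with the runMP
# helper above, the hand-rolled parallel branch of rfilter() reduces to a
# single call.  `wprocess`, `tasks`, and `do_parallel` are the locals and
# argument already defined in rfilter(); only the wiring is new.
#
#     wrs = runMP('refine', tasks, wprocess, chunksiz=2, doMP=do_parallel)
#     self.ps = wrs
#     Refine.print_diff('rfilter', len(tasks), len(self.ps))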
def tb(src, combs, no_bugfix, no_parallel, no_stop):
    tasks = []
    for sid, tpl, tpl_level, l in combs:
        rs_ = get_data(tpl, l)
        rs_ = [(src, sid, tpl, tpl_level) + r for r in rs_]
        tasks.extend(rs_)

    from random import shuffle
    shuffle(tasks)
    print "KR: tasks {}".format(len(tasks))

    if no_parallel:
        wrs = wprocess(0, tasks, no_stop, no_bugfix, V=None, Q=None)
    else:  #parallel
        from vu_common import get_workloads
        from multiprocessing import (Process, Queue, Value,
                                     current_process, cpu_count)

        Q = Queue()
        V = Value("i", 0)
        workloads = get_workloads(tasks,
                                  max_nprocesses=cpu_count(),
                                  chunksiz=2)
        print ("workloads {}: {}".format(len(workloads), map(len, workloads)))

        workers = [Process(target=wprocess,
                           args=(i, wl, no_stop, no_bugfix, V, Q))
                   for i, wl in enumerate(workloads)]

        for w in workers:
            w.start()

        wrs = []
        for i, _ in enumerate(workers):
            wrs.extend(Q.get())

    wrs = [r for r in wrs if r]
    rs = "\n".join(["{}. {}".format(i, r) for i, r in enumerate(wrs)])

    print ("KR: summary "
           "(bugfix: {}, stop after a repair found: {}, parallel: {}), "
           "'{}', {} / {}\n"
           "{}"
           .format(not no_bugfix, not no_stop, not no_parallel,
                   src, len(wrs), len(tasks), rs))
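# Hedged sketch (assumed semantics -- the real wprocess is not shown here):
# the Value("i", 0) that tb() hands to every worker is the standard
# multiprocessing way to share a small flag or counter across processes,
# e.g. so one worker can tell the others to stop once a repair is found.
# A minimal self-contained version of that pattern; like the surrounding
# code it assumes the fork start method, so nested targets work.
def _demo_shared_flag():
    from multiprocessing import Process, Queue, Value

    def worker(wid, tasks, V, Q):
        rs = []
        for t in tasks:
            if V.value:             #another worker already signalled stop
                break
            if t == 7:              #pretend this task "finds a repair"
                with V.get_lock():
                    V.value = 1     #tell the remaining workers to stop early
            rs.append((wid, t))
        Q.put(rs)

    Q, V = Queue(), Value("i", 0)
    workers = [Process(target=worker, args=(i, range(i, 20, 4), V, Q))
               for i in range(4)]
    for w in workers:
        w.start()
    wrs = []
    for _ in workers:
        wrs.extend(Q.get())
    return wrs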
def prove_props(self, props, k, do_trans, do_base_case, do_induction,
                do_pcompress, do_term_check, do_abstraction,
                nreprove, do_parallel):
    """
    Prove the given properties; attempt to re-prove unproven ones
    using lemmas.

    do_soft_reprove: check whether the proved properties imply the
    unknown ones.  This does not add proved properties as lemmas and
    does not re-invoke the prover.

    nreprove: number of times we attempt to fully reprove props.

    do_parallel: perform the task in parallel if multiprocessing is
    available.
    """
    def wprocess(tasks, Q):
        rs = []
        for (idx, p) in tasks:
            logger.info("{}. Checking '{}'".format(idx, p))
            r, m, k_ = self.prove(p, k=k,
                                  do_base_case=do_base_case,
                                  do_abstraction=do_abstraction,
                                  do_pcompress=do_pcompress,
                                  do_term_check=do_term_check)
            #cannot explicitly store p and m
            #b/c they contain pointers and cannot be pickled
            rs.append((idx, r, m if m is None else model_str(m), k_))
        if Q is None:  #no multiprocessing
            return rs
        else:
            Q.put(rs)

    if do_parallel:
        from vu_common import get_workloads
        from multiprocessing import (Process, Queue, cpu_count)

    nreprove_ = 0
    unchecked_idxs = range(len(props))
    rs = [None] * len(unchecked_idxs)

    while True:
        new_invs = []
        unchecked_idxs_ = []
        tasks = zip(unchecked_idxs, [props[idx] for idx in unchecked_idxs])

        if do_parallel:
            Q = Queue()
            workloads = get_workloads(tasks,
                                      max_nprocesses=cpu_count(),
                                      chunksiz=2)
            logger.debug('workloads {}: {}'.format(
                len(workloads), map(len, workloads)))

            workers = [Process(target=wprocess, args=(wl, Q))
                       for wl in workloads]

            for w in workers:
                w.start()

            wrs = []
            for _ in workers:
                wrs.extend(Q.get())
        else:
            wrs = wprocess(tasks, Q=None)

        for idx, r, m, k_ in wrs:
            p = props[idx]
            #9/10: bug if we don't make a list copy since these things change!
            rs[idx] = (p, r, m, k_,
                       list(self.invs_state),
                       list(self.assumes_state + self.assumes_trans))
            if r == True:
                new_invs.append(p)
            if r is None:
                unchecked_idxs_.append(idx)

        if not (new_invs and unchecked_idxs_):
            break

        if nreprove_ >= nreprove:
            break

        nreprove_ = nreprove_ + 1
        logger.info("Re-prove {} prop(s) using {} new invs "
                    "(attempt {}/{})".format(len(unchecked_idxs_),
                                             len(new_invs),
                                             nreprove_, nreprove))
        for inv in new_invs:
            self.add_inv(inv)

        unchecked_idxs = sorted(unchecked_idxs_)

    return rs
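# Hedged illustration (not from the original sources) of the constraint the
# comment in prove_props() points at: anything sent back through a
# multiprocessing.Queue gets pickled, and solver models wrapping native
# pointers are not picklable, so wprocess ships model_str(m) -- a plain
# string -- instead of m.  The worker and names below are made up for the
# demo; like the code above, it assumes the fork start method.
def _demo_queue_needs_picklable():
    from multiprocessing import Process, Queue

    def worker(Q):
        m = {'x': 1, 'y': 2}        #stand-in for a model already rendered
        Q.put(str(m))               #plain strings pickle fine; a raw model
                                    #object holding C pointers would not

    Q = Queue()
    w = Process(target=worker, args=(Q,))
    w.start()
    return Q.get()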