def correct_KB(KB1, KB2):
    Diff = [list(x) for x in
            set(map(tuple, KB1.clauses)).difference(set(map(tuple, KB2.clauses)))]
    KB2_s = [list(x) for x in
             set(map(tuple, KB2.clauses)).difference(set(map(tuple, KB1.clauses)))]
    KB2_h = [list(x) for x in
             set(map(tuple, KB2.clauses)).intersection(set(map(tuple, KB1.clauses)))]

    wcnf = WCNF()
    for c in KB2_s:
        wcnf.append(c, weight=1)
    for c in KB2_h:
        wcnf.append(c)
    for c in Diff:
        wcnf.append(c)

    lbx = LBX(wcnf, use_cld=True, solver_name='g3')

    # Compute mcs and return the clauses indexes
    mcs = lbx.compute()
    temp_cl_lookup = create_clauses_lookup(wcnf.soft)
    clauses = get_clauses_from_index(mcs, temp_cl_lookup)
    return clauses

def __init__(self):
    self.map_atoms = {}
    self.atoms_counter = 1
    self.wcnf = WCNF()
    self.min_card = math.inf
    self.number_of_diagnoses = 0
    self.time = 0

def init(self, bootstrap_with):
    """
        This method serves for initializing the hitting set solver with a
        given list of sets to hit. Concretely, the hitting set problem is
        encoded into partial MaxSAT as outlined above, which is then fed
        either to a MaxSAT solver or an MCS enumerator.

        :param bootstrap_with: input set of sets to hit
        :type bootstrap_with: iterable(iterable(obj))
    """

    # formula encoding the sets to hit
    formula = WCNF()

    # hard clauses
    for to_hit in bootstrap_with:
        to_hit = list(map(lambda obj: self.idpool.id(obj), to_hit))
        formula.append(to_hit)

    # soft clauses
    for obj_id in six.iterkeys(self.idpool.id2obj):
        formula.append([-obj_id], weight=1)

    if self.htype == 'rc2':
        # using the RC2-A options from MaxSAT evaluation 2018
        self.oracle = RC2(formula, solver=self.solver, adapt=False,
                          exhaust=True, trim=5)
    elif self.htype == 'lbx':
        self.oracle = LBX(formula, solver_name=self.solver, use_cld=True)
    else:
        self.oracle = MCSls(formula, solver_name=self.solver, use_cld=True)

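# A minimal standalone sketch (not part of the class above) of the same
# hitting-set-to-partial-MaxSAT encoding: hard clauses require every set to be
# hit, and unit soft clauses of weight 1 penalise each selected object, so an
# optimal MaxSAT model is a minimum-size hitting set. The object ids 1..3 stand
# for three objects and are chosen by hand here instead of using an IDPool.
from pysat.formula import WCNF
from pysat.examples.rc2 import RC2

sets_to_hit = [[1, 2], [2, 3], [1, 3]]

wcnf = WCNF()
for s in sets_to_hit:
    wcnf.append(s)                    # hard: at least one element per set
for obj_id in (1, 2, 3):
    wcnf.append([-obj_id], weight=1)  # soft: prefer not to pick an object

with RC2(wcnf) as rc2:
    model = rc2.compute()
    print([v for v in model if v > 0])  # a minimum hitting set (two objects)
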
def init(self, encoding, target, solver):
    """
        Actual constructor.
    """

    # saving the target
    self.target = target

    # copying class values
    for clid in encoding:
        for lit, wght in encoding[clid].leaves:
            self.values[clid].append(tuple([lit, wght]))

    # creating the formulas and oracles
    for clid in encoding:
        if clid == self.target:
            continue

        # adding hard clauses
        self.formulas[clid] = WCNF()
        for cl in encoding[clid].formula:
            self.formulas[clid].append(cl)

        if len(encoding) > 2:
            for cl in encoding[self.target].formula:
                self.formulas[clid].append(cl)

        # adding soft clauses and recording all the leaf values
        self.init_soft(encoding, clid)

        if self.ortype == 'int':
            # a new MaxSAT solver
            self.oracles[clid] = ERC2(self.formulas[clid], solver=solver,
                                      adapt=self.am1, blo='cluster',
                                      exhaust=self.exhaust, minz=self.minz,
                                      verbose=0)

def sample_models(num_models, num_vars, clause_length, num_hard, num_soft,
                  rng) -> List[MaxSatModel]:
    clauses = _generate_all_clauses_up_to_length(num_vars, clause_length)

    if logger.isEnabledFor(logging.DEBUG):
        # Print the clauses and quit
        from pprint import pprint
        pprint(clauses)

    num_clauses = len(clauses)
    total = num_hard + num_soft
    assert total > 0
    logger.info(f"{num_clauses} clauses total - {num_hard} hard and {num_soft} soft")

    for m in range(num_models):
        logger.info(f"generating model {m + 1} of {num_models}")
        model = []
        wcnf = WCNF()
        indices = get_random_clauses(wcnf, rng, clauses, total)
        if len(indices) < total:
            print(len(clauses), total, len(indices))
        assert len(indices) == total
        hard_indices = list(sorted(rng.permutation(indices)[:num_hard]))
        soft_indices = list(sorted(set(indices) - set(hard_indices)))
        weights = rng.randint(_MIN_WEIGHT, _MAX_WEIGHT, size=num_soft)
        for i in hard_indices:
            model.append((None, set(clauses[i])))
        for i, weight in zip(soft_indices, weights):
            model.append((weight / 100, set(clauses[i])))
        yield model

def genMinGraph(graph, num_colours=4, approx=False, required_cl=[],
                required_nodes=[]):
    cl, n_clM, cl_nM = getSAT(graph, num_colours)

    f = WCNF()
    for c in required_cl:
        f.append(c)
    for c in cl:
        f.append(c, weight=1)
    print("Created formula")

    if approx:
        required_cl = genApproxMinClauses(f)
    else:
        # Calculate MUS
        mu = MUSX(f, verbosity=2)
        required_cl = mu.compute()

    # Map back to graph
    req_nodes = set(required_nodes)
    [req_nodes.update(cl_nM[i - 1]) for i in required_cl]
    print("New graph size:", len(req_nodes))

    # Create blacklist and delete
    bk_lst = [n for n in graph.G.nodes if n not in req_nodes]
    [graph.G.remove_node(i) for i in bk_lst]

def optimize(self, formula, model):
    """
        Try to optimize the solution with a MaxSAT solver.
    """

    MaxSAT = RC2Stratified if self.options.weighted else RC2

    formula_new = WCNF()
    formula_new.extend(formula.hard)

    # hardening the soft clauses based on the model
    for j in range(1, self.nof_terms + 1):
        formula_new.append([model[self.unused(j)]])

    for lb in self.labels:
        for q in self.samps[lb]:
            formula_new.append([model[self.miss(q + 1)]])

    for j in range(1, self.nof_terms + 1):
        for r in range(1, self.nof_feats + 1):
            formula_new.append([-self.dvar1(j, r)], weight=1)
            formula_new.append([-self.dvar0(j, r)], weight=1)

    with MaxSAT(formula_new, solver=self.options.solver,
                adapt=self.options.am1, exhaust=self.options.exhaust,
                minz=self.options.minz, trim=self.options.trim) as rc2:
        model = rc2.compute()

    return model

def __init__(self):
    self.nOfTaxi = 3
    self.newDemandSize = 2
    self.capacityOfEachTaxi = [3, 3, 3]
    self.noListOfPickDrop = [[1, -1], [1, -1, -2], [-3]]
    # the two lists below are assumed to start empty; their initialisation
    # was not present in the snippet as given
    self.noListOfCarried = []
    self.noListOfAcceptedPoint = []
    for i in range(len(self.noListOfPickDrop)):
        self.noListOfCarried.append(-1 * sum(self.noListOfPickDrop[i]))
    for i in range(len(self.noListOfPickDrop)):
        self.noListOfAcceptedPoint.append(len(self.noListOfPickDrop[i]))
    self.currenTime = 10
    self.deadlineList = [[self.currenTime, 40], [self.currenTime, 30, 60], [20]]
    self.deadlineOfNewDemand = [self.currenTime, 90]
    self.cosTimeMatrices = [[[0, 13, 54, 21, 46],
                             [13, 0, 29, 18, 64],
                             [54, 29, 0, 37, 25],
                             [21, 18, 37, 0, 34],
                             [46, 64, 25, 34, 0]],
                            [[0, 11, 38, 62, 19, 57],
                             [11, 0, 27, 45, 36, 49],
                             [38, 27, 0, 48, 65, 40],
                             [62, 45, 48, 0, 21, 31],
                             [19, 36, 65, 21, 0, 34],
                             [57, 49, 40, 31, 34, 0]],
                            [[0, 8, 28, 69],
                             [8, 0, 31, 52],
                             [28, 31, 0, 34],
                             [69, 52, 34, 0]]]
    self.maxWidthOfNet = 3 + max(self.noListOfAcceptedPoint)
    self.conNet = [[[0] * self.maxWidthOfNet for i in range(self.maxWidthOfNet)]
                   for j in range(self.nOfTaxi)]
    self.rchNet = [[[0] * self.maxWidthOfNet for i in range(self.maxWidthOfNet)]
                   for j in range(self.nOfTaxi)]
    self.wcnf = WCNF()

def get_MCS(KBa_s, KBa_h, q, seed, clauses_dict):
    # Compute minimal hitting set
    wcnf = WCNF()
    for c in KBa_s:
        if c not in seed:  # don't add to the soft clauses those in the seed
            wcnf.append(c, weight=1)
    for c in KBa_h:
        wcnf.append(c)
    for c in seed:  # add clauses in the seed as hard
        if any(isinstance(el, list) for el in c):
            for cs in c:
                wcnf.append(cs)
        else:
            wcnf.append(c)
    wcnf.extend(q.negate().clauses)

    lbx = LBX(wcnf, use_cld=True, solver_name='g3')

    # Compute mcs and return the clauses indexes
    mcs = lbx.compute()
    # Current mcs is computed w.r.t. the soft clauses excluding the seed.
    # Below we find the corresponding indexes of these clauses in KBa_s
    temp_cl_lookup = create_clauses_lookup(wcnf.soft)
    clauses = get_clauses_from_index(mcs, temp_cl_lookup)
    mcs = get_index_from_clauses(clauses, clauses_dict)
    return mcs

def MUSExtraction(self, C):
    wcnf = WCNF()
    wcnf.extend(self.cnf.clauses)
    wcnf.extend([[l] for l in C], [1] * len(C))
    with MUSX(wcnf, verbosity=0) as musx:
        mus = musx.compute()
        # gives back positions of the clauses !!
        return set(C[i - 1] for i in mus)

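# A minimal sketch (toy clauses assumed) of the index convention relied on
# above and in the other MUSX snippets: MUSX.compute() returns 1-based
# positions into wcnf.soft, which is why the callers index with [i - 1].
from pysat.formula import WCNF
from pysat.examples.musx import MUSX

wcnf = WCNF()
wcnf.append([1, 2])          # hard
wcnf.append([-1], weight=1)  # soft clause 1
wcnf.append([-2], weight=1)  # soft clause 2
wcnf.append([1], weight=1)   # soft clause 3

with MUSX(wcnf, verbosity=0) as musx:
    mus = musx.compute()                    # e.g. [1, 3]
    print([wcnf.soft[i - 1] for i in mus])  # the soft clauses forming the MUS
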
def optimize(self, enc):
    """
        Try to optimize the solution with a MaxSAT solver.
    """

    # a dummy model (everything is deselected)
    model = [-v for v in range(enc.nv)]
    all_vars = set()

    # MaxSAT formula to work with
    formula = WCNF()

    # hard clauses
    for cl in enc.clauses:
        formula.append(cl)

    for j in range(1, self.nof_terms + 1):
        for r in range(1, self.nof_feats + 1):
            formula.append([-self.dvar1(j, r)], 1)
            formula.append([-self.dvar0(j, r)], 1)
            all_vars.add(self.dvar1(j, r))
            all_vars.add(self.dvar0(j, r))

    if self.options.approx:
        hitman = LBX(formula, use_cld=self.options.use_cld,
                     solver_name=self.options.solver)

        hses = []
        for i, hs in enumerate(hitman.enumerate()):
            hitman.block(hs)
            hses.append(hs)

            if i + 1 == self.options.approx:
                break

        hs = list(map(lambda v: -formula.soft[v - 1][0],
                      min(hses, key=lambda x: len(x))))
        hitman.delete()
    else:
        hitman = RC2(formula, solver=self.options.solver, adapt=True,
                     exhaust=True, incr=False, minz=False,
                     trim=self.options.trim)

        hs = list(filter(lambda v: v > 0 and v in all_vars, hitman.compute()))
        hitman.delete()

    # filling the model with the right values
    for e in hs:
        model[e - 1] = e

    return model

def init(self, bootstrap_with, weights=None):
    """
        This method serves for initializing the hitting set solver with a
        given list of sets to hit. Concretely, the hitting set problem is
        encoded into partial MaxSAT as outlined above, which is then fed
        either to a MaxSAT solver or an MCS enumerator.

        An additional optional parameter is ``weights``, which can be used
        to specify non-unit weights for the target objects in the sets to
        hit. This only works if ``'sorted'`` enumeration of hitting sets
        is applied.

        :param bootstrap_with: input set of sets to hit
        :param weights: weights of the objects in case the problem is weighted
        :type bootstrap_with: iterable(iterable(obj))
        :type weights: dict(obj)
    """

    # formula encoding the sets to hit
    formula = WCNF()

    # hard clauses
    for to_hit in bootstrap_with:
        to_hit = list(map(lambda obj: self.idpool.id(obj), to_hit))
        formula.append(to_hit)

    # soft clauses
    for obj_id in six.iterkeys(self.idpool.id2obj):
        formula.append([-obj_id],
                       weight=1 if not weights else weights[self.idpool.obj(obj_id)])

    if self.htype == 'rc2':
        if not weights or min(weights.values()) == max(weights.values()):
            self.oracle = RC2(formula, solver=self.solver, adapt=self.adapt,
                              exhaust=self.exhaust, minz=self.minz,
                              trim=self.trim)
        else:
            self.oracle = RC2Stratified(formula, solver=self.solver,
                                        adapt=self.adapt, exhaust=self.exhaust,
                                        minz=self.minz, nohard=True,
                                        trim=self.trim)
    elif self.htype == 'lbx':
        self.oracle = LBX(formula, solver_name=self.solver,
                          use_cld=self.usecld)
    else:
        self.oracle = MCSls(formula, solver_name=self.solver,
                            use_cld=self.usecld)

def generate_musx_mus(contrastive_sat_rmp, J):
    wcnf = WCNF()
    for i in range(len(contrastive_sat_rmp.clauses)):
        if i in contrastive_sat_rmp.comparaison_to_clause[J]:
            wcnf.append(contrastive_sat_rmp.clauses[i])
        else:
            wcnf.append(contrastive_sat_rmp.clauses[i], weight=1)
    musx = MUSX(wcnf, verbosity=0)
    mus = musx.compute()
    return mus

def parse_formula(fml_file):
    """
        Parse and return MaxSAT formula.
    """

    if re.search(r'\.wcnf(\.(gz|bz2|lzma|xz))?$', fml_file):
        fml = WCNF(from_file=fml_file)
    else:  # expecting '*.cnf'
        fml = CNF(from_file=fml_file).weighted()

    return fml

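# Hypothetical usage of parse_formula above; the file name 'example.wcnf' is
# made up for illustration. The parsed formula can be handed directly to a
# MaxSAT solver such as RC2.
from pysat.examples.rc2 import RC2

formula = parse_formula('example.wcnf')
with RC2(formula) as rc2:
    model = rc2.compute()
    if model is not None:
        print('cost:', rc2.cost)
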
def __init__(self, data, options):
    """
        Constructor.
    """

    self.status = True if len(data.feats[-1]) > 1 else False
    if not self.status:
        return

    self.init_time = resource.getrusage(resource.RUSAGE_SELF).ru_utime
    self.data = data
    self.dbin = copy.deepcopy(data)  # samples to binarize
    self.options = options

    # binarizing the data properly
    for i in range(len(self.data.samps)):
        samp_bin = self.data.samps[i][:-1]

        for l in samp_bin:
            if l > 0:  # negative literal means that the feature is binary
                name, lit = self.data.fvmap.opp[l]
                j = self.data.nm2id[name]

                if len(self.data.feats[j]) > 2:
                    samp_bin += [-self.data.fvmap.dir[(name, l)]
                                 for l in sorted(self.data.feats[j].difference(set([lit])))]

        self.dbin.samps[i] = samp_bin

    # clusterizing samples
    self.clust = {self.data.fvmap.dir[(self.data.names[-1], v)]: []
                  for v in self.data.feats[-1]}
    for i, s in enumerate(self.data.samps):
        self.clust[s[-1]].append(i)

    # creating a formula
    self.formula = WCNF()
    self.formula.nv = len(data.samps)
    self.formula.topw = len(data.samps) + 1

    # soft clauses and their weights
    for i, sample in enumerate(data.samps):
        self.formula.soft.append([i + 1])
        self.formula.wght.append(data.wghts[i])

    # hard clauses (pairwise overlapping samples)
    for c1, c2 in itertools.combinations(self.clust.keys(), 2):
        for i in self.clust[c1]:
            samp = set([-l for l in self.dbin.samps[i]])

            for j in self.clust[c2]:
                if not samp.intersection(set(self.dbin.samps[j])):
                    # these two samples do not clash!
                    self.formula.hard.append([-i - 1, -j - 1])

def simplify(cnf_file, output_file, num_var_not_fixed, rng):
    sol = solve_weighted_max_sat_file(cnf_file, [], 1)
    int_sol = []
    for i, val in enumerate(sol[0]):
        if val:
            int_sol.append(i + 1)
        else:
            int_sol.append(-(i + 1))
    sol = rng.choice(int_sol, size=len(int_sol) - num_var_not_fixed, replace=False)
    output_file += ".wcnf"
    fix_var(cnf_file, output_file, sol)
    wcnf = WCNF(output_file)
    wcnf.to_file(output_file)
    return cnf_to_model(output_file, rng)

def __createWncf(self, initialisationFormulas, distanceFormula, artefactForMinimization):
    '''
    This method creates the WCNF formula with variables weighted according to
    the distance and the artefact.
    :param initialisationFormulas: @see __artefactsInitialisation
    :param distanceFormula: @see __compute_distance
    :param artefactForMinimization: MULTI_ALIGNMENT or ANTI_ALIGNMENT or EXACT_ALIGNMENT
    :return:
    '''
    formulas = (initialisationFormulas + distanceFormula +
                self.__sup_to_minimize(artefactForMinimization))
    full_formula = And([], [], formulas)
    cnf = full_formula.operatorToCnf(self.__vars.iterator)
    wcnf = WCNF()
    wcnf.extend(cnf)
    wcnf = self.__createWeights(wcnf, artefactForMinimization)
    self.__formula_time = time.time()
    return wcnf

def get_MUS(KB, e, q):
    # Compute minimal unsatisfiable set
    wcnf2 = WCNF()
    for k in e:
        if any(isinstance(el, list) for el in k):
            for ks in k:
                wcnf2.append(ks, weight=1)
        else:
            wcnf2.append(k, weight=1)
    if KB:
        for c in KB.clauses:
            wcnf2.append(c, weight=1)
    wcnf2.extend(q.negate().clauses)
    mmusx = MUSX(wcnf2, verbosity=0)
    mus = mmusx.compute()
    return [list(wcnf2.soft[m - 1]) for m in mus]

def optimize(self, enc):
    """
        Try to optimize the solution with a MaxSAT solver.
    """

    # all d0 and d1 variables (for computing the complement --- MSS)
    all_vars = set([])

    # MaxSAT formula to work with
    formula = WCNF()

    # hard clauses
    for cl in enc.clauses:
        formula.append(cl)

    for j in range(1, self.nof_terms + 1):
        for r in range(1, self.nof_feats + 1):
            formula.append([-self.dvar1(j, r)], 1)
            formula.append([-self.dvar0(j, r)], 1)
            all_vars.add(self.dvar1(j, r))
            all_vars.add(self.dvar0(j, r))

    if self.options.approx:
        hitman = LBX(formula, use_cld=self.options.use_cld,
                     solver_name=self.options.solver)

        hses = []
        for i, hs in enumerate(hitman.enumerate()):
            hitman.block(hs)
            hses.append(hs)

            if i + 1 == self.options.approx:
                break

        hs = list(map(lambda v: -formula.soft[v - 1][0],
                      min(hses, key=lambda x: len(x))))
        hitman.delete()
    else:
        hitman = RC2(formula, solver=self.options.solver, adapt=True,
                     exhaust=True, incr=False, minz=False,
                     trim=self.options.trim)

        hs = list(filter(lambda v: v > 0 and v in all_vars, hitman.compute()))
        hitman.delete()

    return sorted([-v for v in all_vars.difference(set(hs))])

def init(self, bootstrap_with, costs):
    """
        This method serves for initializing the hitting set solver with a
        given list of sets to hit. Concretely, the hitting set problem is
        encoded into partial MaxSAT as outlined above, which is then fed
        either to a MaxSAT solver or an MCS enumerator.

        :param bootstrap_with: input set of sets to hit
        :type bootstrap_with: iterable(iterable(obj))
    """

    # formula encoding the sets to hit
    formula = WCNF()

    # hard clauses
    for to_hit in bootstrap_with:
        to_hit = list(map(lambda obj: self.idpool.id(obj), to_hit))
        formula.append(to_hit)

    # soft clauses
    for obj_id in six.iterkeys(self.idpool.id2obj):
        # this is saying that not including a clause is given a weight of x.
        # MaxSAT is MAXIMISING the sum of satisfied soft clauses, so to minimise
        # the sum, we want to weight *not* including something (hence the -obj_id).
        # This means words such as <PAD> should be given a *higher* weight, so the
        # solver decides that NOT including <PAD> is more worth it than not
        # including a more "meaningful" word.
        cost = costs[obj_id - 1]
        formula.append([-obj_id], weight=cost)

    if self.htype == 'rc2':
        # using the RC2-A options from MaxSAT evaluation 2018
        self.oracle = RC2(formula, solver=self.solver, adapt=False,
                          exhaust=True, trim=5)
    elif self.htype == 'lbx':
        self.oracle = LBX(formula, solver_name=self.solver, use_cld=True)
    else:
        self.oracle = MCSls(formula, solver_name=self.solver, use_cld=True)

def gen_constraint_conflict_courses(idpool: IDPool, id2varmap,
                                    courses: tCourses) -> WCNF:
    """ Generate a constraint that two conflicting courses cannot share TAs. """
    wcnf = WCNF()
    conflict_courses = compute_conflict_courses(courses)
    for course in conflict_courses.keys():
        for ccourse in conflict_courses[course]:
            for t in courses[course].tas_available:
                if t in courses[ccourse].tas_available:
                    t1 = tuple((course, t))
                    t2 = tuple((ccourse, t))
                    id1 = idpool.id(t1)
                    id2 = idpool.id(t2)
                    if t1 not in id2varmap.keys():
                        id2varmap[t1] = id1
                    if t2 not in id2varmap.keys():
                        id2varmap[t2] = id2
                    wcnf.append([-id1, -id2])
    return wcnf

def generate_wcnfs(path, models_and_contexts):
    for i, elem in enumerate(models_and_contexts):
        model = elem["model"]
        contexts = elem["contexts"]
        wcnf = WCNF()
        for weight_clause in model:
            if weight_clause[0] is None:
                wcnf.append(tuple(weight_clause[1]))
            else:
                wcnf.append(tuple(weight_clause[1]), weight=weight_clause[0])
        wcnf.to_file(path + f"_{i}.wcnf")

        for j, context in enumerate(contexts):
            wcnf_context = wcnf.copy()
            for literals in context:
                wcnf_context.append((literals,))
            wcnf_context.to_file(path + f"_{i}_context_{j}.wcnf")

def grow_maxsat(self, f, A, HS):
    remaining, weights = None, None
    wcnf = WCNF()

    # HARD clauses
    wcnf.extend(self.cnf.clauses)
    wcnf.extend([[l] for l in HS])

    # SOFT clauses to grow
    if self.params.interpretation is Interpretation.INITIAL:
        remaining = list(self.I0 - HS)
    elif self.params.interpretation is Interpretation.ACTUAL:
        remaining = list(self.I - HS)
    elif self.params.interpretation is Interpretation.FULL:
        remaining = list(self.Iend - HS)
    elif self.params.interpretation is Interpretation.FINAL:
        remaining = list(A - HS)

    remaining_clauses = [[l] for l in remaining]

    if self.params.maxsat_weighing is Weighing.POSITIVE:
        weights = [f(l) for l in remaining]
    elif self.params.maxsat_weighing is Weighing.INVERSE:
        max_weight = max(f(l) for l in remaining) + 1
        weights = [max_weight - f(l) for l in remaining]
    elif self.params.maxsat_weighing is Weighing.UNIFORM:
        weights = [1] * len(remaining)

    # a cost is associated with assigning a truth value to a literal
    # contrary to A
    wcnf.extend(clauses=remaining_clauses, weights=weights)

    # solve the MaxSAT problem
    with RC2(wcnf) as s:
        if self.params.maxsat_polarity and hasattr(s, 'oracle'):
            s.oracle.set_phases(literals=list(self.Iend))

        t_model = s.compute()
        return set(t_model)

def solve_weighted_max_sat(n: int, model: MaxSatModel, context: Clause,
                           num_sol, prev_sol=[]):
    """
    Solves a MaxSatModel and tries to return num_sol optimal solutions
    """
    c = WCNF()
    c.nv = n
    for w, clause in model:
        if w != 0 and len(clause) > 0:
            c.append(list(map(int, list(clause))), weight=w)
    if context and len(context) > 0:
        # context literals are added as hard unit clauses
        c.hard.extend([[int(lit)] for lit in context])

    s = RC2(c)
    sol = []
    cst = -1

    for m in s.enumerate():
        if cst < 0:
            cst = s.cost
        if s.cost > cst or len(sol) >= num_sol:
            break
        m = [v > 0 for v in m]
        if m not in prev_sol:
            sol.append(m)
        if len(sol) >= num_sol:
            break

    if num_sol == 1 and sol:
        return sol[0], cst
    return sol, cst

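# A hypothetical call to solve_weighted_max_sat above. The MaxSatModel format
# is inferred from the surrounding snippets: a list of (weight, clause) pairs,
# where a weight of None marks a hard clause and each clause is a set of
# integer literals. The tiny model below is made up for illustration.
toy_model = [
    (None, {1, 2}),  # hard: x1 or x2
    (2.0, {-1}),     # soft, weight 2: prefer x1 false
    (1.0, {-2}),     # soft, weight 1: prefer x2 false
]
solution, cost = solve_weighted_max_sat(2, toy_model, context={2}, num_sol=1)
print(solution, cost)  # expected: [False, True] with cost 1
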
def solve_weighted_max_sat_file(wcnf_file, context: Clause, num_sol,
                                prev_sol=[]) -> Optional[Instance]:
    """
    Solves a MaxSatModel file and tries to return num_sol optimal solutions
    """
    wcnf = WCNF(wcnf_file)
    if len(context) > 0:
        # context literals are added as hard unit clauses
        wcnf.hard.extend([[int(lit)] for lit in context])
    s = RC2(wcnf)
    model = []
    cst = -1
    for m in s.enumerate():
        if cst < 0:
            cst = s.cost
        if s.cost > cst or len(model) >= num_sol:
            break
        m = [v > 0 for v in m]
        # element-wise comparison, since prev_sol holds numpy arrays
        if not any(np.array_equal(np.array(m), p) for p in prev_sol):
            model.append(np.array(m))
    return model

def generate_contexts(model: MaxSatModel, num_context, num_constraints,
                      num_vars, rng):
    wcnf = WCNF()
    for clauses in model:
        wcnf.append(tuple(clauses[1]))

    contexts = []
    for n in range(num_context):
        context = set()
        if num_constraints == 0:
            num_constraints = rng.randint(1, 2 * num_vars)
        literals = []
        for i in range(1, 1 + num_vars):
            literals.append({i})
            literals.append({-i})
        indices = get_random_clauses(wcnf, rng, literals, num_constraints)
        for j in indices:
            context |= literals[j]
        contexts.append(context)
    return contexts

def optimize(graph, num_colours=4, extract_MUS=False, required_cl=[],
             required_nodes=[], verbosity=0, shuffle=False):
    """
    Use clever SAT things to reduce the number of nodes in the graph, while
    still maintaining its UNSAT status.

    extract_MUS runs an additional optimization that will find a minimal graph,
    but it is more expensive.

    required_cl: provide additional clauses that won't be optimized away
    required_nodes: blacklist some nodes from removal
                    (typically those in required_cl)
    verbosity: set logging level
    """
    def maybeLog(s, th=1, nl=True):
        if verbosity >= th:
            print(s, end=('\n' if nl else ''))

    clauses, n_clM, cl_nM = getSAT(graph, num_colours)

    f = WCNF()
    for c in required_cl:
        f.append(c)
    for c in clauses:
        f.append(c, weight=1)
    maybeLog("Created formula")

    topv = f.nv  # number of vars in the basic formula

    # Selectors for each node, each getting a new variable.
    # Note: graph.G.nodes may not be a contiguous list after previous removal steps.
    sels = [i + topv for i in graph.G.nodes]

    s = MapleChrono(bootstrap_with=f.hard)

    # Possibly load the graph nodes in a random order.
    ixs = [i for i in range(0, len(clauses))]
    if shuffle:
        random.shuffle(ixs)

    # For each node, add the relevant clauses
    for i in ixs:
        clause = clauses[i]
        # Nodes involved in this clause
        nodes = cl_nM[i]
        # Assume node is enabled (as negative value).
        # Both nodes of an edge need to be enabled for the rest of the clause to matter.
        s.add_clause(clause + [-(n + topv) for n in nodes])

    if not s.solve(assumptions=sels):
        maybeLog("Core extraction\n")
        approx = s.get_core()

        if extract_MUS:
            # Perform additional refinement to get a MUC:
            # attempt to remove each node.
            i = 0
            while i < len(approx):
                # Try ignoring nodes (setting variable to positive),
                # and see what happens...
                to_test = approx[:i] + approx[(i + 1):]
                sel, node = approx[i], approx[i] - topv
                maybeLog('c testing node: {0}'.format(node), nl=False)

                if s.solve(assumptions=to_test):
                    maybeLog(' -> sat (keeping {0})'.format(node))
                    i += 1
                else:
                    maybeLog(' -> unsat (removing {0})'.format(node))
                    approx = to_test

        # Map back to node ixs, adding to passed required nodes.
        required_nodes = [x - topv for x in approx] + required_nodes

        # Create blacklist and delete from graph
        bk_lst = [n for n in graph.G.nodes if n not in required_nodes]
        print("Removing", len(bk_lst), "nodes.", len(required_nodes), "left.")
        [graph.G.remove_node(i) for i in bk_lst]

print('        Available values: [0 .. FLOAT_MAX], none (default: none)')
print('        -v, --verbose                Be verbose')


#
#==============================================================================
if __name__ == '__main__':
    print_model, solver, pb_enc, timeout, verbose, files = parse_options()

    if files:
        # reading standard CNF or WCNF
        if re.search(r'cnf(\.(gz|bz2|lzma|xz))?$', files[0]):
            if re.search(r'\.wcnf(\.(gz|bz2|lzma|xz))?$', files[0]):
                formula = WCNF(from_file=files[0])
            else:  # expecting '*.cnf'
                formula = CNF(from_file=files[0]).weighted()

            lsu = LSU(formula, solver=solver, pb_enc_type=pb_enc,
                      expect_interrupt=(timeout != None), verbose=verbose)

        # reading WCNF+
        elif re.search(r'\.wcnf[p,+](\.(gz|bz2|lzma|xz))?$', files[0]):
            formula = WCNFPlus(from_file=files[0])
            # closing arguments assumed, mirroring the LSU call above
            lsu = LSUPlus(formula, pb_enc_type=pb_enc,
                          expect_interrupt=(timeout != None),
                          verbose=verbose)

def get_instances():
    """returns array of instance names, array of corresponding n"""
    instance_data = np.genfromtxt('m2s_nqubits.csv', delimiter=',',
                                  skip_header=1, dtype=str)  # path of csv file
    return instance_data[:, 0], instance_data[:, 1]


if __name__ == '__main__':
    instance_names, instance_n_bits_str = get_instances()

    runtimes = np.zeros(10000)
    n = 20
    n_shifted = n - 5  # n_shifted runs from 0 to 15 instead of 5 to 20

    # 10000 instances per value of n
    for loop, i in enumerate(range(n_shifted * 10000, (n_shifted + 1) * 10000)):
        instance_name = instance_names[i]
        wcnf = WCNF(from_file='./../../instances_wcnf/' + instance_name + '.txt')
        with RC2(wcnf) as rc2:
            rc2.compute()
            runtime = rc2.oracle_time()
        runtimes[loop] = runtime

    # saves runtimes to .txt file
    with open("adam_runtimes_" + str(n) + ".txt", "ab") as f:
        f.write(b"\n")
        np.savetxt(f, runtimes)

def encode(self, label, nof_terms=1):
    """
        Encode the problem of computing a DS of size nof_terms.
    """

    self.nof_terms = nof_terms

    enc = WCNF()

    # all the hard clauses
    #
    # constraint 6 (relaxed with the unused variable)
    for j in range(1, self.nof_terms + 1):
        for r in range(1, self.nof_feats + 1):
            enc.append([-self.unused(j), self.svar(j, r)])
        enc.append([self.unused(j)] +
                   [-self.svar(j, r) for r in range(1, self.nof_feats + 1)])

    # sort unused rules
    for j in range(1, self.nof_terms):
        enc.append([-self.unused(j), self.unused(j + 1)])

    # constraint 7
    for j in range(1, self.nof_terms + 1):
        for r in range(1, self.nof_feats + 1):
            d0 = self.dvar0(j, r)
            p0 = [-self.svar(j, r), self.lvar(j, r)]
            enc.append([d0, -p0[0], -p0[1]])
            enc.append([-d0, p0[0]])
            enc.append([-d0, p0[1]])

            d1 = self.dvar1(j, r)
            p1 = [-self.svar(j, r), -self.lvar(j, r)]
            enc.append([d1, -p1[0], -p1[1]])
            enc.append([-d1, p1[0]])
            enc.append([-d1, p1[1]])

    # constraint 8
    if len(self.labels) == 1:  # distinguish one class from all the others
        other_labels = set(self.samps.keys())
    else:  # distinguish the classes under question only
        other_labels = set(self.labels)
    other_labels.remove(label)
    other_labels = sorted(other_labels)
    for j in range(1, self.nof_terms + 1):
        for lb in other_labels:
            for q in self.samps[lb]:
                cl = [self.unused(j), self.miss(q + 1)]  # the clause is relaxed

                shift = 0
                for r in range(1, self.nof_feats + 1):
                    if r - 1 in self.data.vmiss[q]:
                        # this feature is missing in q'th sample
                        cl.append(-self.svar(j, r))
                        shift += 1
                    elif self.data.samps[q][r - 1 - shift] > 0:
                        cl.append(self.dvar1(j, r))
                    else:
                        cl.append(self.dvar0(j, r))

                enc.append(cl)

    # constraint 9
    for j in range(1, self.nof_terms + 1):
        for q in self.samps[label]:
            cr = self.crvar(j, q + 1)
            cl = [self.unused(j)]

            shift = 0
            for r in range(1, self.nof_feats + 1):
                if r - 1 in self.data.vmiss[q]:
                    # this feature is missing in q'th sample
                    cl.append(-self.svar(j, r))
                    shift += 1
                elif self.data.samps[q][r - 1 - shift] > 0:
                    cl.append(self.dvar1(j, r))
                else:
                    cl.append(self.dvar0(j, r))

            enc.append([cr] + cl)
            for l in cl:
                enc.append([-cr, -l])

    # constraint 10
    for q in self.samps[label]:
        enc.append([self.miss(q + 1)] +
                   [self.crvar(j, q + 1) for j in range(1, self.nof_terms + 1)])

    # at most one value can be chosen for a feature
    for feats in six.itervalues(self.ffmap.dir):
        if len(feats) > 2:
            for j in range(1, self.nof_terms + 1):
                lits = [self.dvar0(j, r + 1) for r in feats]  # atmost1 can be true
                onev = CardEnc.atmost(lits, top_id=enc.nv,
                                      encoding=self.options.enc)
                enc.extend(onev.clauses)

    # soft clauses
    # minimizing the number of literals used
    for j in range(1, self.nof_terms + 1):
        enc.append([self.unused(j)], weight=self.lambda_)

    # minimizing the number of misclassifications
    for lb in self.labels:
        for q in self.samps[lb]:
            enc.append([-self.miss(q + 1)], weight=self.data.wghts[q])

    # there should be at least one rule for this class
    enc.append([-self.unused(1)])

    # saving comments
    for j in range(1, self.nof_terms + 1):
        for r in range(1, self.nof_feats + 1):
            enc.comments.append('c s({0}, {1}) => v{2}'.format(j, r, self.svar(j, r)))
            enc.comments.append('c l({0}, {1}) => v{2}'.format(j, r, self.lvar(j, r)))
            enc.comments.append('c d0({0}, {1}) => v{2}'.format(j, r, self.dvar0(j, r)))
            enc.comments.append('c d1({0}, {1}) => v{2}'.format(j, r, self.dvar1(j, r)))

        for q in range(len(self.data.samps)):
            enc.comments.append('c cr({0}, {1}) => v{2}'.format(j, q + 1, self.crvar(j, q + 1)))

    for j in range(1, self.nof_terms + 1):
        enc.comments.append('c u({0}) => v{1}'.format(j, self.unused(j)))

    for lb in self.labels:
        for q in self.samps[lb]:
            enc.comments.append('c m({0}) => v{1}'.format(q + 1, self.miss(q + 1)))

    for n, f in zip(self.data.names[:-1], self.data.feats[:-1]):
        for v in f:
            if self.data.fvmap.dir[(n, v)] > 0:
                enc.comments.append('c {0} = {1} => positive'.format(n, v))
            else:
                enc.comments.append('c {0} = {1} => negative'.format(n, v))

    return enc