def minimize_val(self, vector):
    '''get the minimum value of the zonotope projected onto the passed-in direction

    similar to zonotope.maximize but slightly faster
    '''

    Timers.tic('zonotope.minimize_val')

    rv = self.center.dot(vector)

    # project each generator onto the direction, to check if its contribution is positive or negative
    #res_vec = np.dot(self.mat_t.transpose(), vector) # slow? since we're taking transpose
    res_vec = np.dot(vector, self.mat_t)

    if self.init_bounds_nparray is None:
        self.init_bounds_nparray = np.array(self.init_bounds, dtype=self.dtype)

    ib = self.init_bounds_nparray

    # pick the upper bound where the projection is negative, the lower bound otherwise
    res = np.where(res_vec <= 0, ib[:, 1], ib[:, 0])
    rv += res.dot(res_vec)

    Timers.toc('zonotope.minimize_val')

    return rv
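# Hedged standalone sketch of the rule above (plain numpy; the names below are local to
# this example, not class members): minimizing <v, x> over a zonotope picks, for each
# generator factor, the bound whose sign makes its contribution smallest.
import numpy as np

c = np.array([0.0, 0.0])                      # center
g_mat_t = np.array([[1.0, 1.0],
                    [1.0, -1.0]])             # generators [1,1] and [1,-1] as columns
bounds = np.array([[-1.0, 1.0], [-1.0, 1.0]]) # factor bounds, one row per generator

v = np.array([1.0, 0.0]) # minimize the x coordinate

res_vec = v.dot(g_mat_t) # projection of each generator onto v: [1, 1]
factors = np.where(res_vec <= 0, bounds[:, 1], bounds[:, 0]) # both positive -> lower bounds

min_val = c.dot(v) + factors.dot(res_vec)
assert min_val == -2.0 # the x coordinate of this zonotope ranges over [-2, 2]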
def advance_star(self):
    '''advance current star (self.priv.ss)

    A precondition to this is that ss is already at the next split point. The logic for this is:

    1. do split, creating new_star
    2. propagate up to next split with ss
    3. propagate up to next split with new_star
    4. save new_star to remaining work
    '''

    Timers.tic('advance')

    ss = self.priv.ss
    network = self.shared.network
    spec = self.shared.spec

    if not ss.is_finished(network):
        new_star = ss.do_first_relu_split(network, spec, self.priv.start_time)
        ss.propagate_up_to_split(network, self.priv.start_time)

        if new_star: # new_star can be None if it wasn't really a split (copy prefilter)
            new_star.propagate_up_to_split(network, self.priv.start_time)

            # note: new_star may be done... but for expected branching order we still add it
            self.priv.stars_in_progress += 1
            self.priv.work_list.append(new_star)

    Timers.toc('advance')
def check_round(ss, sets, spec_arg, check_cancel_func=None):
    '''check the overapproximation result of one round against the spec

    this may modify ss.safe_spec_list if part of the spec is proven safe

    returns is_safe, violation_stars, violation_indices
    '''

    Timers.tic('overapprox_check_round')

    if check_cancel_func is None:
        check_cancel_func = lambda: False

    whole_safe = True
    unsafe_violation_stars = [] # list of violation stars for each part of the disjunctive spec
    unsafe_violation_indices = [] # index in spec_list

    # break apart disjunctive specs, as a quicker overapproximation may work for some parts and not others
    spec_list = spec_arg.spec_list if isinstance(spec_arg, DisjunctiveSpec) else [spec_arg]

    for i, single_spec in enumerate(spec_list):
        if ss.safe_spec_list is not None and ss.safe_spec_list[i]:
            continue

        single_safe = False
        violation_star = None

        for s in sets:
            single_safe = s.check_spec(single_spec, check_cancel_func)

            if isinstance(s, StarOverapprox) and not single_safe:
                violation_star = s.violation_star

            if single_safe:
                if ss.safe_spec_list is not None:
                    ss.safe_spec_list[i] = True

                break # done with this spec!

        if not single_safe:
            whole_safe = False

            if violation_star is not None:
                unsafe_violation_stars.append(violation_star)
                unsafe_violation_indices.append(i)

            if not Settings.CONTRACT_OVERAPPROX_VIOLATION:
                # if contracting the violation, we need all violation stars
                break

    Timers.toc('overapprox_check_round')

    return whole_safe, unsafe_violation_stars, unsafe_violation_indices
def minimize_output(self, output_index, maximize=False):
    '''get the output value when one of the outputs is minimized (or maximized)

    if you want the (input, output) pair that produces this output, use construct_last_io()
    '''

    Timers.tic('minimize_output')

    if self.a_mat.size == 0:
        value = self.bias
    else:
        row = self.a_mat[output_index]

        if maximize:
            row = -1 * row

        self.last_lp_result = lp_result = self.lpi.minimize(row)
        self.num_lps += 1

        num_init_vars = self.a_mat.shape[1]
        assert len(lp_result) == num_init_vars

        # single row
        value = self.a_mat[output_index].dot(lp_result) + self.bias[output_index]

    Timers.toc('minimize_output')

    return value
def try_single(self):
    '''try to generate an adversarial image for the single value of epsilon (quick)

    returns [adversarial image, epsilon] if found, else None
    '''

    Timers.tic('try_single')

    rv = None

    t = Settings.ADVERSARIAL_TARGET
    criterion = fb.criteria.Misclassification() if t is None else fb.criteria.TargetClass(t)

    with self.sess.as_default():
        attack = SingleEpsilonRPGD(self.fmodel, distance=fb.distances.Linfinity, criterion=criterion)

        # subtract a small amount since the attack was overshooting by numerical precision
        SingleEpsilonRPGD.set_epsilon(self.epsilon - 1e-6)

        Timers.tic('attack')
        a = attack(self.orig_image, self.labels, unpack=False)[0]
        Timers.toc('attack')

        dist = a.distance.value

        if dist != np.inf:
            rv = [a.perturbed, dist]
            rv[0].shape = self.orig_image.shape

    Timers.toc('try_single')

    return rv
def zono_might_violate_spec(self, zono):
    '''is it possible that the zonotope violates the spec?

    sometimes we can prove it's impossible. If this returns True, though, it doesn't mean
    there's an intersection (except in the case of single-row specifications)

    returns True or False
    '''

    # strategy: check if each row individually can have a violation... a necessary condition for intersection

    Timers.tic('zono_might_violate_spec')

    might_violate = True

    for i, row in enumerate(self.mat):
        min_dot = zono.minimize_val(row)

        if min_dot > self.rhs[i]:
            might_violate = False
            break

    Timers.toc('zono_might_violate_spec')

    return might_violate
def set_constraints_csr(self, data, glpk_indices, indptr, shape):
    '''set the constraints row by row to be equal to the passed-in csr matrix attributes

    glpk_indices is already offset by one
    '''

    Timers.tic('set_constraints_csr')

    assert shape[0] <= self.get_num_rows()
    assert shape[1] <= self.get_num_cols()
    assert isinstance(data, list), "data was not a list"

    # actually set the constraints row by row
    for row in range(shape[0]):
        # we must copy the indices since glpk is offset by 1 :(
        count = int(indptr[row + 1] - indptr[row])

        indices_vec = SwigArray.as_int_array(glpk_indices[indptr[row]:indptr[row + 1]], count)
        data_vec = SwigArray.as_double_array(data[indptr[row]:indptr[row + 1]], count)

        glpk.glp_set_mat_row(self.lp, 1 + row, count, indices_vec, data_vec)

    Timers.toc('set_constraints_csr')
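# Hedged sketch of the csr layout this method consumes (scipy is used only for
# illustration; the SwigArray wrapper and the 1-offset shift are mirrored from the code
# above, not reimplemented). In csr form, row r's values live in data[indptr[r]:indptr[r+1]]
# and its column indices in indices[indptr[r]:indptr[r+1]].
import numpy as np
from scipy.sparse import csr_matrix

mat = csr_matrix(np.array([[1.0, 0.0, 2.0],
                           [0.0, 3.0, 0.0]]))

data = [float(d) for d in mat.data]              # the method asserts data is a list
glpk_indices = [int(i) + 1 for i in mat.indices] # glpk columns are 1-indexed

for row in range(mat.shape[0]):
    lo, hi = mat.indptr[row], mat.indptr[row + 1]
    print(f"row {row}: count={hi - lo}, cols={glpk_indices[lo:hi]}, vals={data[lo:hi]}")
# row 0: count=2, cols=[1, 3], vals=[1.0, 2.0]
# row 1: count=1, cols=[2], vals=[3.0]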
def from_init_box(self, uncompressed_init_box):
    'initialize from an initial box'

    Timers.tic('make bm')

    if Settings.COMPRESS_INIT_BOX:
        init_bm, init_bias, init_box = compress_init_box(uncompressed_init_box)
    else:
        init_bm = None # implicit identity basis matrix (np.identity(dims))
        init_bias = None # implicit zero bias (np.zeros(dims))
        init_box = uncompressed_init_box

    Timers.toc('make bm')

    # for finding concrete counterexamples
    Timers.tic('star')
    self.star = LpStar(init_bm, init_bias, init_box)
    Timers.toc('star')

    self.prefilter = Prefilter()
    self.prefilter.init_from_uncompressed_box(uncompressed_init_box, self.star, init_box)
def found_unsafe(self, concrete_io_tuple):
    '''found a concrete counterexample, update shared variables

    concrete_io_tuple may be None, in the case of unconfirmed counterexamples
    '''

    if self.shared.result.found_confirmed_counterexample.value == 0:
        #########################
        Timers.tic('update_shared')
        self.shared.mutex.acquire()

        self.shared.result.found_counterexample.value = 1

        if concrete_io_tuple is not None:
            self.shared.result.found_confirmed_counterexample.value = 1
            self.shared.should_exit.value = True

            for i, val in enumerate(concrete_io_tuple[0]):
                self.shared.result.cinput[i] = val

            for i, val in enumerate(concrete_io_tuple[1]):
                self.shared.result.coutput[i] = val

        self.shared.mutex.release()
        Timers.toc('update_shared')
        #########################
def _v_h_rep_given_init_simplex(init_simplex, supp_point_func, epsilon=1e-7):
    '''get all the vertices and hyperplanes of (an epsilon approximation of) the set defined through supp_point_func

    This function is provided with an initial simplex which spans the space.

    this returns verts, equations, where equations comes from the convex hull (hull.equations)
    '''

    new_pts = init_simplex

    verts = []
    iteration = 0
    max_error = None

    while new_pts:
        iteration += 1

        first_new_index = len(verts)
        verts += new_pts
        new_pts = []

        max_error = 0

        Timers.tic('ConvexHull')
        hull = ConvexHull(verts)
        Timers.toc('ConvexHull')

        for i, simplex in enumerate(hull.simplices):
            is_new = False

            for index in simplex:
                if index >= first_new_index:
                    is_new = True
                    break

            if not is_new:
                continue # skip this facet, it has no new vertices

            # get the hyperplane for the facet
            normal = hull.equations[i, :-1]
            rhs = -1 * hull.equations[i, -1]

            Timers.tic('supp_point_func')
            supporting_pt = supp_point_func(normal)
            Timers.toc('supp_point_func')

            error = np.dot(supporting_pt, normal) - rhs
            max_error = max(max_error, error)

            if error >= epsilon:
                # add the point... points may be added twice; this doesn't seem to matter
                new_pts.append(supporting_pt)

    return np.array(verts, dtype=float), hull.equations
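# Hedged usage sketch for the refinement loop above: approximate the unit disk from its
# support function. In the real pipeline the initial simplex comes from _find_init_simplex;
# here a hand-picked triangle spanning 2-d is assumed instead.
import numpy as np

def disk_supp_point(direction):
    'supporting point of the unit disk in the given direction'
    norm = np.linalg.norm(direction)
    return direction / norm if norm > 0 else np.array([1.0, 0.0])

init_tri = [np.array([1.0, 0.0]), np.array([-1.0, 0.0]), np.array([0.0, 1.0])]
verts, equations = _v_h_rep_given_init_simplex(init_tri, disk_supp_point, epsilon=1e-3)

# every returned vertex lies on the unit circle; facets are refined until their
# support-function error drops below epsilon
assert all(abs(np.linalg.norm(v) - 1.0) < 1e-6 for v in verts)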
def make_split_indices(layer_bounds):
    'make split indices from layer bounds'

    Timers.tic('make_split_indices')

    split_indices = np.nonzero(np.logical_and(layer_bounds[:, 0] < -Settings.SPLIT_TOLERANCE, \
                                              layer_bounds[:, 1] > Settings.SPLIT_TOLERANCE))[0]

    Timers.toc('make_split_indices')

    return split_indices
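# Hedged illustration (assumes Settings.SPLIT_TOLERANCE is a small positive constant,
# e.g. 1e-8): only neurons whose pre-activation bounds straddle zero need a ReLU split.
import numpy as np

layer_bounds = np.array([[-2.0, 3.0],   # straddles zero -> split
                         [0.5, 4.0],    # always nonnegative -> no split
                         [-3.0, -1.0],  # always negative -> no split
                         [-1.0, 0.0]])  # upper bound not above tolerance -> no split

tol = 1e-8
split = np.nonzero((layer_bounds[:, 0] < -tol) & (layer_bounds[:, 1] > tol))[0]
assert list(split) == [0]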
def try_mixed_adversarial(self, iteration, random_only):
    '''try generating an adversarial example using a mixed strategy, depending on the iteration

    returns the adversarial image if found, else None
    '''

    rv = None

    classes = [fb.attacks.FGSM, # 0.057 in 41ms
               fb.attacks.ContrastReductionAttack, # 0.05 in 64ms
               fb.attacks.BlendedUniformNoiseAttack, # 0.09 in 93ms
               fb.attacks.DecoupledDirectionNormL2Attack, # 0.074 in 124ms
               fb.attacks.BIM, # 0.044 in 300ms
               fb.attacks.PGD, # 0.05 in 1302ms
               fb.attacks.MomentumIterativeAttack, # 0.04 in 300ms
               fb.attacks.AdamPGD, # 0.055 in 800ms
               fb.attacks.AdamRandomPGD, # 0.042 in 700ms
               fb.attacks.RandomPGD # best
               ]

    # pick the attack class...
    attack_class = None

    if not random_only and iteration < len(classes):
        attack_class = classes[iteration]

    t = Settings.ADVERSARIAL_TARGET
    criterion = fb.criteria.Misclassification() if t is None else fb.criteria.TargetClass(t)

    with self.sess.as_default():
        if attack_class is None:
            attack_class = SingleEpsilonRPGD
            attack = SingleEpsilonRPGD(self.fmodel, distance=fb.distances.Linfinity, criterion=criterion)

            # subtract a small amount since the attack was overshooting by numerical precision
            SingleEpsilonRPGD.set_epsilon(self.epsilon - 1e-6)
        else:
            attack = attack_class(self.fmodel, distance=fb.distances.Linfinity, criterion=criterion)

        Timers.tic('attack')
        a = attack(self.orig_image, self.labels, unpack=False)[0]
        Timers.toc('attack')

        dist = a.distance.value

        if dist <= Settings.ADVERSARIAL_EPSILON:
            rv = a.perturbed
            rv.shape = self.orig_image.shape

    if rv is not None:
        print(f"try_mixed_adversarial found violation image on iteration {iteration} with ep={dist} and " + \
              f"attack class: {attack_class}")

    return rv
def get_verts_nd(lpi, dims):
    '''get the n-dimensional vertices of the lpi, projected onto the passed-in list of dimension indices'''

    assert isinstance(dims, list), f"unsupported dims type: {type(dims)}"
    dim_list = dims

    for dim in dim_list:
        assert dim < lpi.dims, f"lpi has {lpi.dims} dims, but requested dim_list was {dim_list}"

    # algorithm: Kamenev's method in n-d

    def supp_point_nd(vec):
        'return a supporting point for the given direction (maximize)'

        assert len(vec) == len(dim_list)
        assert lpi.dims > 0

        Timers.tic('construct')
        d = np.zeros((lpi.dims, ), dtype=float)

        # negative here because we want to MAXIMIZE not minimize
        for i, dim_index in enumerate(dim_list):
            d[dim_index] = -vec[i]

        Timers.toc('construct')

        Timers.tic('set_minimize_dir')
        lpi.set_minimize_direction(d)
        Timers.toc('set_minimize_dir')

        Timers.tic('lpi.minimize')
        res = lpi.minimize(columns=[lpi.cur_vars_offset + n for n in range(lpi.dims)])
        Timers.toc('lpi.minimize')

        Timers.tic('make res')
        rv = []

        for dim in dim_list:
            rv.append(res[dim])

        rv = np.array(rv, dtype=float)
        Timers.toc('make res')

        return rv

    Timers.tic('kamenev.get_verts')
    verts = kamenev.get_verts(len(dim_list), supp_point_nd)
    Timers.toc('kamenev.get_verts')

    return verts
def put_queue(self, ss):
    'put a star state on the queue'

    Timers.tic('put_queue')

    if self.multithreaded:
        ss.star.lpi.serialize()

    self.more_work_queue.put(ss)

    Timers.toc('put_queue')
def _find_init_simplex(dims, supp_point_func):
    '''find an n-dimensional initial simplex'''

    Timers.tic('init_simplex')

    # first, construct the initial simplex and determine a basis for the convex set (it may be degenerate)
    init_simplex = _find_two_points(dims, supp_point_func)

    if len(init_simplex) == 2: # the two points differ, so S is not a degenerate single point
        init_vec = init_simplex[1] - init_simplex[0]

        spanning_dirs = [init_vec]
        degenerate_dirs = []
        vecs = [init_vec]

        for _ in range(dims - 1):
            new_dir, rank = _get_orthonormal_rank(vecs)

            # min/max in direction new_dir, checking if it increases the rank of vecs
            pt = supp_point_func(new_dir)
            vecs.append(pt - init_simplex[0])

            if _get_rank(vecs) > rank:
                init_simplex.append(pt)
                spanning_dirs.append(vecs[-1])
                continue

            # rank did not increase with maximize, try minimize
            vecs = vecs[0:-1] # pop vec
            pt = supp_point_func(-1 * new_dir)
            vecs.append(pt - init_simplex[0])

            if _get_rank(vecs) > rank:
                init_simplex.append(pt)
                spanning_dirs.append(vecs[-1])
                continue

            # rank still didn't increase, new_dir is orthogonal to shape S
            vecs = vecs[0:-1] # pop vec

            vecs.append(new_dir) # forces a new orthonormal direction during the next iteration
            degenerate_dirs.append(new_dir)

    Timers.toc('init_simplex')

    return init_simplex
def save_poly(self, ss):
    'save the polygon verts for the current, finished star into result.polys'

    Timers.tic('save_poly')

    xdim, ydim = Settings.RESULT_SAVE_POLYS_DIMS

    # save polygon
    verts = ss.star.verts(xdim, ydim, epsilon=Settings.RESULT_SAVE_POLYS_EPSILON)
    self.shared.result.polys.append(verts)

    Timers.toc('save_poly')
def split_overapprox(self, layer_num, new_generators_bm, i, lb, ub):
    '''helper for execute_relus_overapprox

    split a ReLU using a star overapproximation'''

    Timers.tic('split_overapprox')

    # make a new variable y for the output
    self.lpi.add_positive_cols([f'y{layer_num}_{i}'])
    num_cols = self.lpi.get_num_cols()
    num_zeros = num_cols - self.a_mat.shape[1] - 1

    # create 3 constraints for the new variable

    # (1) y >= 0 === -y <= 0
    # this constraint is automatically added in lp_star for all non-cur variables

    # (2) y >= x[i] === x[i] - y <= 0
    # x[i] equals row i in the basis matrix (also the bias on the rhs)
    row = np.zeros((num_cols, ), dtype=self.a_mat.dtype)
    a_mat_width = self.a_mat.shape[1]
    assert a_mat_width <= num_cols, f"a_mat_width: {a_mat_width}, num_cols: {num_cols}"

    row[:a_mat_width] = self.a_mat[i, :]
    row[-1] = -1
    self.lpi.add_dense_row(row, -self.bias[i])

    # (3) y <= ub * (x[i] - lb) / (ub - lb)
    # === y - ub/(ub - lb) * x[i] <= ub*(-lb) / (ub - lb)
    # x[i] equals row i in the basis matrix
    factor = ub / (ub - lb)

    row = np.zeros((num_cols, ), dtype=self.a_mat.dtype)
    row[:self.a_mat.shape[1]] = -1 * factor * self.a_mat[i]
    row[-1] = 1
    rhs = -lb * factor + self.bias[i] * factor
    self.lpi.add_dense_row(row, rhs)

    # reset the current bias
    # the rhs of the current variable is not referenced by other constraints (constraints never ref rhs)
    self.bias[i] = 0

    # ReLU case: introduce a new variable
    self.a_mat[i] = 0
    new_generators_bm[i, num_zeros] = 1

    Timers.toc('split_overapprox')
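# Hedged numeric check of the triangle relaxation encoded above (plain numpy, no lp):
# for a pre-activation x in [lb, ub] with lb < 0 < ub, the three constraints
#   y >= 0,   y >= x,   y <= ub * (x - lb) / (ub - lb)
# bound relu(x) = max(0, x) from below and above over the whole interval.
import numpy as np

lb, ub = -1.0, 2.0

for x in np.linspace(lb, ub, 7):
    relu = max(0.0, x)
    upper = ub * (x - lb) / (ub - lb) # secant line through (lb, 0) and (ub, ub)

    assert relu >= 0 and relu >= x # the two lower constraints hold at the true output
    assert relu <= upper + 1e-9    # the secant is a valid upper bound (relu is convex)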
def push_init(self, ss):
    'put the initial init box or star onto the work queue'

    Timers.tic('push_init')

    # without the mutex here, if the threads start quickly, they may exit before finding
    # the first piece of work, since the queue can be asynchronous

    ##############################
    self.mutex.acquire()

    self.put_queue(ss)
    self.stars_in_progress.value = 1

    self.mutex.release()
    ##############################

    Timers.toc('push_init')
def construct_last_io(self):
    '''construct the last concrete input/output pair from the optimization performed when minimize_output was called

    note that the input will be the compressed input if the input space is not full-dimensional
    '''

    Timers.tic('construct_last_io')

    i = self.last_lp_result
    o = np.dot(self.a_mat, i) + self.bias

    Timers.toc('construct_last_io')

    return [i, o]
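# Hedged sketch tying minimize_output and construct_last_io together (plain numpy;
# lp_result is a hypothetical stand-in for self.last_lp_result): the lp solver returns a
# point in the initial (possibly compressed) input space, and the star's affine map
# recovers the corresponding output.
import numpy as np

a_mat = np.array([[1.0, 2.0],
                  [0.0, 1.0]])
bias = np.array([0.5, -0.5])

lp_result = np.array([0.0, 1.0]) # hypothetical argmin returned by the lp solver

i = lp_result
o = np.dot(a_mat, i) + bias
assert np.allclose(o, [2.5, 0.5])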
def shuffle_work(self):
    'shuffle work'

    Timers.tic('shuffle')

    if self.priv.worker_index == 0:
        # print queues
        qsize = self.shared.more_work_queue.qsize()

    self.priv.next_shuffle_time += self.priv.next_shuffle_step

    global_work = []

    while True:
        ss = self.shared.get_global_queue(timeout=0.01)

        if ss is None:
            break

        global_work.append(ss)

    if self.priv.ss:
        i = self.priv.worker_index
        my = len(self.priv.work_list)
        print(f".{i}: my work size {my}", flush=True)

        self.shared.put_queue(self.priv.ss)
        self.priv.num_offloaded += 1
        self.priv.ss = None

    self.priv.work_list += global_work

    # shuffle remaining work and put it all into the queue
    random.shuffle(self.priv.work_list)

    Timers.toc('shuffle')
def execute(self, state, save_branching=False):
    '''execute the layer on a concrete state

    if save_branching is True, returns (output, branch_list), where branch_list is a list of
    booleans for each neuron in the layer: True if the nonnegative branch of the ReLU was
    taken, False if the negative branch was taken

    otherwise, just returns output
    '''

    Timers.tic('execute relu')

    if save_branching:
        branch_list = []

    assert state.shape == self.get_input_shape(), f"state shape to relu layer was {state.shape}, " + \
        f"expected {self.get_input_shape()}"

    state = nn_flatten(state)

    if save_branching:
        for i, val in enumerate(state):
            if self.filter_func is not None and not self.filter_func(i):
                continue

            branch_list.append(val >= 0)

    if self.filter_func is None:
        state = np.clip(state, 0, np.inf)
    else:
        res = []

        for i, val in enumerate(state):
            if not self.filter_func(i):
                res.append(val)
            else:
                res.append(max(0, val))

        state = np.array(res, dtype=float)

    rv = nn_unflatten(state, self.shape)

    rv = (rv, branch_list) if save_branching else rv

    Timers.toc('execute relu')

    return rv
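# Hedged illustration of the filter_func semantics above (standalone; the predicate here
# is a hypothetical example): neurons rejected by filter_func pass through unclipped and
# contribute no entry to branch_list.
import numpy as np

state = np.array([-2.0, -1.0, 3.0])
filter_func = lambda i: i != 1 # pretend neuron 1 is exempt from the relu

res = [val if not filter_func(i) else max(0.0, val) for i, val in enumerate(state)]
assert res == [0.0, -1.0, 3.0] # neuron 1 kept its negative value

branch_list = [val >= 0 for i, val in enumerate(state) if filter_func(i)]
assert branch_list == [False, True] # branches recorded for neurons 0 and 2 only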
def verts(self, xdim=0, ydim=1, epsilon=1e-7):
    'get a 2-d projection of this lp_star'

    dims = self.a_mat.shape[0]

    if isinstance(xdim, int):
        assert 0 <= xdim < dims, f"xdim {xdim} out of bounds for star with {dims} dims"
        vec = np.zeros(dims, dtype=float)
        vec[xdim] = 1
        xdim = vec
    else:
        assert xdim.size == dims

    if isinstance(ydim, int):
        assert 0 <= ydim < dims, f"ydim {ydim} out of bounds for star with {dims} dims"
        vec = np.zeros(dims, dtype=float)
        vec[ydim] = 1
        ydim = vec
    else:
        assert ydim.size == dims

    def supp_point_func(vec2d):
        'maximize a support function direction'

        Timers.tic('supp_point_func')

        # use negative to maximize
        lpdir = -vec2d[0] * xdim + -vec2d[1] * ydim

        res = self.minimize_vec(lpdir)

        Timers.toc('supp_point_func')

        # project onto x and y
        resx = np.dot(xdim, res)
        resy = np.dot(ydim, res)

        return np.array([resx, resy], dtype=float)

    Timers.tic('kamenev.get_verts')
    verts = kamenev.get_verts(2, supp_point_func, epsilon=epsilon)
    Timers.toc('kamenev.get_verts')

    #assert np.allclose(verts[0], verts[-1])

    return verts
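# Hedged usage sketch (assumes LpStar accepts an explicit identity basis matrix and zero
# bias over a 2-d box, mirroring the commented-out equivalent noted in from_init_box):
# projecting an axis-aligned box star onto dims 0 and 1 should trace its rectangle.
import numpy as np

star = LpStar(np.identity(2), np.zeros(2), [(0.0, 1.0), (0.0, 2.0)])
verts2d = star.verts(xdim=0, ydim=1)
# verts2d traces the rectangle [0,1] x [0,2]; the commented-out assert above suggests
# kamenev.get_verts may repeat the first vertex to close the loop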
def try_quick_overapprox(ss, network, spec, start_time, found_adv):
    'try a quick overapproximation, returns (is_safe, concrete_io_tuple)'

    Timers.tic('try_quick_overapprox')

    overapprox_types = Settings.QUICK_OVERAPPROX_TYPES

    def check_cancel_func():
        'worker cancel func. can raise OverapproxCanceledException'

        diff = time.perf_counter() - start_time

        if diff > Settings.TIMEOUT:
            raise OverapproxCanceledException('timeout exceeded')

        if found_adv is not None and found_adv.value != 0:
            raise OverapproxCanceledException('found_adv was set')

    try:
        check_cancel_func()

        prerelu_sims = make_prerelu_sims(ss, network)

        check_cancel_func()

        if Settings.PRINT_OUTPUT and Settings.PRINT_OVERAPPROX_OUTPUT:
            print(f"Doing quick overapprox with {len(overapprox_types)} rounds...")

        rr = do_overapprox_rounds(ss, network, spec, prerelu_sims,
                                  check_cancel_func=check_cancel_func,
                                  overapprox_types=overapprox_types)

        rv = rr.is_safe, rr.concrete_io_tuple
    except OverapproxCanceledException as e:
        if Settings.PRINT_OUTPUT:
            print(f"Overapprox canceled ({e})")

        rv = False, None

    Timers.toc('try_quick_overapprox')

    return rv
def make_init_ss(init, network, spec, start_time):
    'make the initial star state'

    network_inputs = network.get_num_inputs()
    network_outputs = network.get_num_outputs()

    if spec is not None:
        assert network_outputs == spec.get_num_expected_variables(), \
            f"spec expected {spec.get_num_expected_variables()} outputs; network had {network_outputs} outputs"

    if isinstance(init, (list, tuple, np.ndarray)):
        init_box = init
        assert len(init_box) == network_inputs, f"expected {network_inputs} dim init box, got {len(init_box)}"

        ss = LpStarState(init_box, spec=spec)
    elif isinstance(init, LpStar):
        assert len(init.bias) == network_inputs

        ss = LpStarState(spec=spec)
        ss.from_init_star(init)
    else:
        assert isinstance(init, LpStarState), 'init must be a box, LpStar, or LpStarState'
        ss = init

    ss.should_try_overapprox = False

    # propagate the initial star up to the first split
    timer_name = Timers.stack[-1].name if Timers.stack else None

    try:
        # catch lp timeout
        Timers.tic('propagate_up_to_split')
        ss.propagate_up_to_split(network, start_time)
        Timers.toc('propagate_up_to_split')
    except LpCanceledException:
        while Timers.stack and Timers.stack[-1].name != timer_name:
            Timers.toc(Timers.stack[-1].name)

        ss = None

    return ss
def contract_lp(self, star):
    '''do lp zonotope contraction

    returns the list of tightened (dim, lb, ub) bounds, which is truthy if the domain was tightened
    '''

    Timers.tic("contract_lp")

    cur_box = self.init_bounds
    new_bounds_list = star.input_box_bounds(cur_box, count_lps=True)

    for dim, lb, ub in new_bounds_list:
        self.update_init_bounds(dim, (lb, ub))

    Timers.toc("contract_lp")

    return new_bounds_list
def exists_idle_worker(self):
    'do idle workers (with no work) exist?'

    Timers.tic('exists_idle_worker')

    rv = False

    # checking qsize here slows things down

    for i, size in enumerate(self.shared.heap_sizes):
        if i != self.priv.worker_index and size == 0:
            rv = True
            break

    Timers.toc('exists_idle_worker')

    return rv
def contract_from_violation(self, violation_stars):
    '''contract the domain from a list of violation stars

    returns True if contracted
    '''

    Timers.tic('contract_from_violation')

    max_dim = self.star.a_mat.shape[1]
    zono_box = self.prefilter.zono.init_bounds

    print(f"\nnum violation stars: {len(violation_stars)}")

    vio_box = [[np.inf, -np.inf] for _ in range(max_dim)]

    for star in violation_stars:
        single_vio_box = star.input_box_bounds(None, max_dim=max_dim)

        for dim, lb, ub in single_vio_box:
            vio_box[dim][0] = min(vio_box[dim][0], lb)
            vio_box[dim][1] = max(vio_box[dim][1], ub)

    tol = 1e-7
    contracted = False

    for i, (lb, ub) in enumerate(vio_box):
        if lb > zono_box[i][0] + tol or ub < zono_box[i][1] - tol:
            print(f"contracting {i} from {zono_box[i]} to {lb, ub}")

            self.prefilter.zono.update_init_bounds(i, (lb, ub))
            self.star.lpi.set_col_bounds(i, lb, ub)
            contracted = True

    Timers.toc('contract_from_violation')

    if contracted:
        self.prefilter.domain_shrank(self.star)

    return contracted
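# Hedged sketch of the box-union step above (standalone; star_boxes is hypothetical
# example data): the contracted domain must still cover every violation star, so the
# per-dimension bounds are the min/max across all of their input boxes.
import numpy as np

max_dim = 2
vio_box = [[np.inf, -np.inf] for _ in range(max_dim)]

# hypothetical per-star input box bounds, as (dim, lb, ub) tuples
star_boxes = [[(0, 0.2, 0.4), (1, -0.1, 0.0)],
              [(0, 0.3, 0.6), (1, -0.2, -0.1)]]

for single_vio_box in star_boxes:
    for dim, lb, ub in single_vio_box:
        vio_box[dim][0] = min(vio_box[dim][0], lb)
        vio_box[dim][1] = max(vio_box[dim][1], ub)

assert vio_box == [[0.2, 0.6], [-0.2, 0.0]]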
def __init__(self, other_lpi=None):
    'initialize the lp instance'

    self.lp = glpk.glp_create_prob() # pylint: disable=invalid-name

    if other_lpi is None:
        # internal bookkeeping
        self.names = [] # column names

        # setup lp params
    else:
        # initialize from the other lpi
        self.names = other_lpi.names.copy()

        Timers.tic('glp_copy_prob')
        glpk.glp_copy_prob(self.lp, other_lpi.lp, glpk.GLP_OFF)
        Timers.toc('glp_copy_prob')

    self.freeze_attrs()