def next(self, state, **runopts):
    """Induce a subproblem by BFS over the constraint graph from a random seed.

    Starting at a random constraint node, constraints are accumulated in
    breadth-first order until adding another would exceed ``self.size``
    variables.

    Raises:
        NotImplementedError: if the seed constraint alone is larger than
            ``self.size`` (splitting a single constraint is unsupported).
    """
    CG = self.constraint_graph
    size = self.size
    constraints = self.constraints
    bqm = state.problem

    # get a random constraint to start with
    n = random.choice(list(CG.nodes))

    if len(constraints[n]) > size:
        raise NotImplementedError

    # starting from our constraint, do a breadth-first search adding
    # constraints until our max size is reached
    variables = set(constraints[n])
    for _, ci in nx.bfs_edges(CG, n):
        proposed = [v for v in constraints[ci] if v not in variables]
        if len(proposed) + len(variables) <= size:
            # NOTE: set.union() returns a new set and would discard the
            # result; update() mutates `variables` in place (bug fix)
            variables.update(proposed)
        if len(variables) == size:
            # can exit early
            break

    sample = state.samples.change_vartype(bqm.vartype).first.sample
    subbqm = bqm_induced_by(bqm, variables, sample)
    return state.updated(subproblem=subbqm)
def next(self, state):
    """Induce a subproblem by BFS over the (index-labeled) constraint graph.

    Same strategy as the runopts-aware variant: seed with a random
    constraint, then grow the variable set breadth-first until ``self.size``
    is reached.

    Raises:
        NotImplementedError: if the seed constraint alone exceeds ``self.size``.
    """
    CG = self.constraint_graph
    size = self.size
    constraints = self.constraints
    bqm = state.problem

    # get a random constraint to start with.
    # for some reason random.choice(CG.nodes) does not work, so we rely on
    # the fact that our graph is index-labeled
    n = random.choice(range(len(CG)))

    if len(constraints[n]) > size:
        raise NotImplementedError

    # starting from our constraint, do a breadth-first search adding
    # constraints until our max size is reached
    variables = set(constraints[n])
    for _, ci in nx.bfs_edges(CG, n):
        proposed = [v for v in constraints[ci] if v not in variables]
        if len(proposed) + len(variables) <= size:
            # NOTE: set.add(proposed) would raise TypeError (a list is
            # unhashable); update() extends the set element-wise (bug fix)
            variables.update(proposed)
        if len(variables) == size:
            # can exit early
            break

    sample = state.samples.change_vartype(bqm.vartype).first.sample
    subbqm = bqm_induced_by(bqm, variables, sample)
    return state.updated(subproblem=subbqm)
def next(self, state):
    """Pick up to `max_size` high energy-impact variables and induce a
    subproblem over them, guaranteeing the pick differs from the previous
    iteration's by at least `min_diff` variables."""
    bqm = state.problem

    if self.max_size > len(bqm):
        raise ValueError(
            "subproblem size cannot be greater than the problem size")

    # rank variables by their local-search energy-gain contribution
    sample = state.samples.change_vartype(bqm.vartype).first.sample
    variables = select_localsearch_adversaries(
        bqm, sample, min_gain=self.min_gain)

    # TODO: soft fail strategy? skip one iteration or relax vars selection?
    if len(variables) < self.min_diff:
        raise ValueError("less than min_diff variables identified as"
                         " contributors to min_gain energy increase")

    # slide a `max_size` window down the ranked list (by `stride`) until
    # the selection differs enough from the previous one
    start = 0
    picked = set(variables[start:start + self.max_size])
    while len(picked ^ self._prev_vars) < self.min_diff:
        start += self.stride
        picked = set(variables[start:start + self.max_size])

    logger.debug("Select variables: %r (diff from prev = %r)",
                 picked, picked ^ self._prev_vars)
    self._prev_vars = picked

    # induce sub-bqm based on selected variables and global sample
    subbqm = bqm_induced_by(bqm, picked, sample)

    return state.updated(subproblem=subbqm)
def next(self, state):
    """Each call returns a subsequent block of size `self.size` Chimera cells."""
    bqm = state.problem

    # advance to the next block; its position is unused here
    _, block_embedding = next(self.blocks)
    block_vars = block_embedding.keys()

    best = state.samples.change_vartype(bqm.vartype).first.sample
    return state.updated(
        subproblem=bqm_induced_by(bqm, block_vars, best),
        embedding=block_embedding)
def next(self, state, **runopts):
    """Induce a subproblem over a random connected subgraph of exactly
    `self.size` variables (strict: oversized requests raise)."""
    bqm = state.problem

    if len(bqm) < self.size:
        raise ValueError("subproblem size cannot be greater than the problem size")

    chosen = select_random_subgraph(bqm, self.size)
    best = state.samples.change_vartype(bqm.vartype).first.sample
    return state.updated(subproblem=bqm_induced_by(bqm, chosen, best))
def next(self, state, **runopts):
    """Select the next batch of high energy-impact variables (via
    ``self.traverse``) and induce a subproblem over them.

    The ``silent_rewind`` runtime option overrides the instance attribute.
    In rolling mode, previously selected variables are tracked in
    ``self._unrolled_vars``; once the rolling history is exhausted the
    selection is rewound (raising :exc:`EndOfStream` unless rewind is
    silent). Energy-impact priorities are cached per (bqm, sample) pair.
    """
    # run time options override
    silent_rewind = runopts.get('silent_rewind', self.silent_rewind)

    bqm = state.problem
    sample = state.samples.change_vartype(bqm.vartype).first.sample

    size = self.size
    if self.size > len(bqm):
        logger.debug("subproblem size greater than the problem size, adapting to problem size")
        size = len(bqm)

    bqm_changed = bqm != self._rolling_bqm
    sample_changed = sample != self._prev_sample

    if bqm_changed:
        self._rewind_rolling(state)

    if sample_changed:
        self._prev_sample = sample

    # cache energy impact calculation per (bqm, sample)
    if bqm_changed or sample_changed or not self._variable_priority:
        impact = flip_energy_gains(bqm, sample, min_gain=self.min_gain)
        self._variable_priority = collections.OrderedDict(
            (v, en) for en, v in impact)

    if self.rolling:
        if len(self._unrolled_vars) >= self.rolling_history * len(bqm):
            logger.debug("Rolling reset at unrolled history size = %d",
                         len(self._unrolled_vars))
            # reset before exception, to be ready on a subsequent call
            self._rewind_rolling(state)
            if not silent_rewind:
                raise EndOfStream

    # pick variables for the next subproblem
    # BUG FIX: pass the problem-size-adapted `size` (previously the raw
    # `self.size` was forwarded, making the adaptation above dead code)
    next_vars = self.traverse(bqm, sample,
                              ordered_priority=self._variable_priority,
                              visited=self._unrolled_vars,
                              size=size)
    logger.debug("Selected %d subproblem variables: %r",
                 len(next_vars), next_vars)

    if self.rolling:
        self._unrolled_vars.update(next_vars)

    # induce sub-bqm based on selected variables and global sample
    subbqm = bqm_induced_by(bqm, next_vars, sample)

    return state.updated(subproblem=subbqm)
def next(self, state, **runopts):
    """Induce a subproblem over a random connected subgraph, shrinking
    the requested size down to the problem size when necessary."""
    bqm = state.problem

    target = self.size
    if target > len(bqm):
        logger.debug("{} subproblem size greater than the problem size, "
                     "adapting to problem size".format(self.name))
        target = len(bqm)

    chosen = select_random_subgraph(bqm, target)
    best = state.samples.change_vartype(bqm.vartype).first.sample
    subbqm = bqm_induced_by(bqm, chosen, best)

    logger.debug("{} selected {} subproblem variables: {!r}".format(
        self.name, len(chosen), chosen))

    return state.updated(subproblem=subbqm)
def next(self, state):
    """Select the next batch of local-search adversary variables and
    induce a subproblem over them, tracking rolling history so repeated
    calls cover fresh variables until the history budget is spent."""
    bqm = state.problem
    sample = state.samples.change_vartype(bqm.vartype).first.sample

    cap = self.size
    if self.size > len(bqm):
        logger.debug(
            "subproblem size greater than the problem size, adapting to problem size"
        )
        cap = len(bqm)

    problem_changed = bqm != self._rolling_bqm
    sample_changed = sample != self._prev_sample

    if problem_changed:
        self._rewind_rolling(state)
    if sample_changed:
        self._prev_sample = sample

    # recompute the adversary ranking only when inputs (or cache) change
    if problem_changed or sample_changed or not self._variables:
        self._variables = select_localsearch_adversaries(
            bqm, sample, min_gain=self.min_gain)

    if self.rolling:
        if len(self._unrolled_vars) >= self.rolling_history * len(bqm):
            logger.debug("Rolling reset at unrolled history size = %d",
                         len(self._unrolled_vars))
            # reset before exception, to be ready on a subsequent call
            self._rewind_rolling(state)
            if not self.silent_rewind:
                raise EndOfStream

    # keep only variables not yet used in this rolling window
    fresh = [v for v in self._variables if v not in self._unrolled_vars]
    chosen = fresh[:cap]
    logger.debug("Selected %d subproblem variables: %r",
                 len(chosen), chosen)

    if self.rolling:
        self._unrolled_vars.update(chosen)

    # induce sub-bqm based on selected variables and global sample
    subbqm = bqm_induced_by(bqm, chosen, sample)

    return state.updated(subproblem=subbqm)
def next(self, state, **runopts):
    """Induce a subproblem over one connected component of the problem graph.

    In rolling mode, successive calls iterate over the components of the
    same problem; when all components are exhausted, either rewinds to the
    first component (silent rewind) or raises ``EndOfStream``. In
    non-rolling mode, picks a single component — the first one, or the
    min/max by ``self.key`` (direction controlled by ``self.reverse``).

    Args:
        state: flow state; ``state.problem`` is the bqm, ``state.samples``
            holds the current samples.
        **runopts: ``silent_rewind`` overrides the instance attribute.

    Returns:
        Updated state with ``subproblem`` set.

    Raises:
        EndOfStream: in rolling mode, when components are exhausted and
            rewind is not silent.
    """
    # run time option overrides the instance default
    silent_rewind = runopts.get('silent_rewind', self.silent_rewind)

    bqm = state.problem

    # trivial problem: a single variable (or none) is its own subproblem
    if bqm.num_variables <= 1:
        return state.updated(subproblem=bqm)

    if self.rolling:
        if bqm != self._rolling_bqm:
            # This is the first time this problem was called
            self._rolling_bqm = bqm
            self._iter_components = self._get_iter_components(bqm)

        try:
            component = next(self._iter_components)
        except StopIteration:
            # We've already used every component in this problem
            if not silent_rewind:
                self._rolling_bqm = None  # Reset to be ready for subsequent call
                raise EndOfStream

            # Rewind
            self._iter_components = self._get_iter_components(bqm)
            component = next(self._iter_components)
    else:
        self._iter_components = self._get_iter_components(bqm)
        if self.key is None:
            # no ordering requested: just take the first component
            component = next(self._iter_components)
        else:
            if self.reverse:
                component = max(self._iter_components, key=self.key)
            else:
                component = min(self._iter_components, key=self.key)

    # induce sub-bqm on the chosen component, conditioned on the best sample
    sample = state.samples.change_vartype(bqm.vartype).first.sample
    subbqm = bqm_induced_by(bqm, component, sample)
    return state.updated(subproblem=subbqm)
def next(self, state):
    """Select up to `max_size` local-search adversary variables not yet
    seen in the rolling window, and induce a subproblem over them.

    Raises ``ValueError`` when `max_size` exceeds the problem size, and
    ``EndOfStream`` when the rolling history budget is exhausted (unless
    `silent_reset` is set)."""
    bqm = state.problem

    if bqm != self._rolling_bqm:
        self._reset_rolling(state)

    if self.max_size > len(bqm):
        raise ValueError(
            "subproblem size cannot be greater than the problem size")

    sample = state.samples.change_vartype(bqm.vartype).first.sample
    ranked = select_localsearch_adversaries(
        bqm, sample, min_gain=self.min_gain)

    # has the rolling window's variable budget been spent?
    history_full = (self.rolling
                    and len(self._unrolled_vars) + self.max_size
                        > self.rolling_history * len(bqm))
    if history_full:
        logger.debug("rolling reset at unrolled history size = %d",
                     len(self._unrolled_vars))
        # reset before exception, to be ready on a subsequent call
        self._reset_rolling(state)
        if not self.silent_reset:
            raise EndOfStream

    unseen = [v for v in ranked if v not in self._unrolled_vars]
    chosen = unseen[:self.max_size]
    logger.debug("Selected %d subproblem variables: %r",
                 len(chosen), chosen)

    if self.rolling:
        self._unrolled_vars.update(chosen)

    # induce sub-bqm based on selected variables and global sample
    subbqm = bqm_induced_by(bqm, chosen, sample)

    return state.updated(subproblem=subbqm)
def next(self, state, **runopts):
    """Induce a subproblem from one origin embedding, shifted by a
    geometric offset with periodic (wrap-around) boundary conditions.

    The offset and the embedding index are either taken from ``state``
    (``geometric_offset``, ``origin_embedding_index``) — after validation —
    or drawn uniformly at random. The embedding's geometric keys are
    transformed by the offset, and the subproblem is induced over the
    embedded variables, conditioned on the best sample.

    Raises:
        ValueError: on invalid ``exclude_dims``, mismatched or
            out-of-range ``geometric_offset``, or an
            ``origin_embedding_index`` outside the valid index range.
    """
    bqm = state.problem

    if 'geometric_offset' not in state:
        # Select uniformly at random amongst available geometric offsets
        geometric_offset = [
            self.random.randint(dim) for dim in state.problem_dims
        ]
        # Do not offset excluded dimensions
        if 'exclude_dims' in state:
            for dim in state.exclude_dims:
                if dim < 0 or dim >= len(geometric_offset):
                    raise ValueError('exclude_dimension state variable '
                                     'indexes an invalid dimension')
                geometric_offset[dim] = 0
    else:
        if len(state.problem_dims) != len(state.geometric_offset):
            raise ValueError(
                'problem_dimension and geometric_offset state '
                'variables are of incompatible length')
        for idx, offset in enumerate(state.geometric_offset):
            if not (offset < state.problem_dims[idx] and 0 <= offset):
                raise ValueError(
                    'geometric_offset state variable values are outside the '
                    f'lattice allowed ranges [0, problem_dimension[idx]), idx={idx}'
                )
        geometric_offset = state.geometric_offset

    def key_transform(initial_coordinates):
        # The geometric keys are offset, with wrapping about periodic
        # boundary conditions.
        final_coordinates = list(initial_coordinates)
        if 'problem_dims' in state:
            for idx, val in enumerate(geometric_offset):
                final_coordinates[idx] += val
                final_coordinates[idx] %= state.problem_dims[idx]
        else:
            for idx, val in enumerate(geometric_offset):
                final_coordinates[idx] += val
        return tuple(final_coordinates)

    # For now we explicitly encode different automorphisms as different
    # origin_embeddings, but it would be natural to allow symmetry
    # operations (automorphisms) with respect to some fixed embedding
    # and lattice class.
    if 'origin_embedding_index' not in state:
        # Select uniformly at random amongst available embeddings:
        origin_embedding_index = self.random.randint(
            len(state.origin_embeddings))
    else:
        # BUG FIX: valid indices are -len .. len-1; the previous check
        # (`> len`) wrongly accepted index == len, deferring the failure
        # to an IndexError on the lookup below.
        if (state.origin_embedding_index >= len(state.origin_embeddings)
                or state.origin_embedding_index < -len(state.origin_embeddings)):
            raise ValueError(
                'embedding_index state variable specifies an '
                'origin_embeddings element beyond the list range')
        origin_embedding_index = state.origin_embedding_index

    # Create the embedding:
    embedding = {
        key_transform(key): value
        for key, value in
        state.origin_embeddings[origin_embedding_index].items()
    }

    # Create the associated subproblem, conditioned on best boundary sample
    # values:
    variables = embedding.keys()
    sample = state.samples.change_vartype(bqm.vartype).first.sample
    subbqm = bqm_induced_by(bqm, variables, sample)

    logger.debug("{} selected {} subproblem variables: {!r}".format(
        self.name, len(variables), variables))

    return state.updated(subproblem=subbqm, embedding=embedding)