def SimplifiedQbsolv(max_iter=10, max_time=None, convergence=3,
                     energy_threshold=None, max_subproblem_size=30):
    """Races a Tabu solver and a QPU-based sampler of flip-energy-impact
    induced subproblems.

    For argument descriptions, see :class:`~hybrid.reference.kerberos.Kerberos`.
    """

    energy_reached = None
    if energy_threshold is not None:
        energy_reached = lambda en: en <= energy_threshold

    workflow = hybrid.Loop(
        hybrid.Race(
            hybrid.InterruptableTabuSampler(),
            hybrid.EnergyImpactDecomposer(
                size=max_subproblem_size, rolling=True, rolling_history=0.15)
            | hybrid.QPUSubproblemAutoEmbeddingSampler()
            | hybrid.SplatComposer()
        ) | hybrid.ArgMin() | hybrid.TrackMin(output=True),
        max_iter=max_iter, max_time=max_time,
        convergence=convergence, terminate=energy_reached)

    return workflow
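# Usage sketch (added for illustration, not part of the original snippet): the
# generated workflow is a hybrid Runnable, so it can be run directly on a
# State built from a dimod BQM. The toy `bqm` below is hypothetical, and the
# QPU branch assumes a configured D-Wave solver is available.
if __name__ == "__main__":
    import dimod
    import hybrid

    bqm = dimod.BinaryQuadraticModel({}, {'ab': 1, 'bc': -1, 'ca': 1},
                                     0, dimod.SPIN)
    workflow = SimplifiedQbsolv(max_iter=5, convergence=2)
    final_state = workflow.run(hybrid.State.from_problem(bqm)).result()
    print(final_state.samples.first)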
def HybridizedPopulationAnnealing(num_reads=20, num_iter=20, num_sweeps=1000):
    """Workflow generator for population annealing initialized with QPU samples.

    Args:
        num_reads (int):
            Size of the population of samples.
        num_iter (int):
            Number of temperatures over which we iterate fixed-temperature
            sampling / resampling.
        num_sweeps (int):
            Number of sweeps in the fixed temperature sampling step.

    Returns:
        Workflow (:class:`~hybrid.core.Runnable` instance).
    """

    # QPU initial sampling: limits the PA workflow to QPU-sized problems
    qpu_init = (
        hybrid.IdentityDecomposer()
        | hybrid.QPUSubproblemAutoEmbeddingSampler(num_reads=num_reads)
        | hybrid.IdentityComposer()
    ) | hybrid.AggregatedSamples(False)

    # PA workflow: after initial QPU sampling and initial beta schedule
    # estimation, we do `num_iter` steps (one per beta/temperature) of
    # fixed-temperature sampling / weighted resampling
    workflow = qpu_init | CalculateAnnealingBetaSchedule(length=num_iter) | hybrid.Loop(
        ProgressBetaAlongSchedule()
        | hybrid.FixedTemperatureSampler(num_sweeps=num_sweeps)
        | EnergyWeightedResampler(),
        max_iter=num_iter)

    return workflow
def PopulationAnnealing(num_reads=20, num_iter=20, num_sweeps=1000):
    """Population annealing workflow generator.

    Args:
        num_reads (int):
            Size of the population of samples.
        num_iter (int):
            Number of temperatures over which we iterate fixed-temperature
            sampling / resampling.
        num_sweeps (int):
            Number of sweeps in the fixed temperature sampling step.

    Returns:
        Workflow (:class:`~hybrid.core.Runnable` instance).
    """

    # PA workflow: after initial beta schedule estimation, we do `num_iter` steps
    # (one per beta/temperature) of fixed-temperature sampling / weighted resampling
    workflow = CalculateAnnealingBetaSchedule(length=num_iter) | hybrid.Loop(
        ProgressBetaAlongSchedule()
        | hybrid.FixedTemperatureSampler(num_sweeps=num_sweeps, num_reads=num_reads)
        | EnergyWeightedResampler(),
        max_iter=num_iter)

    return workflow
def ParallelTempering(num_sweeps=10000, num_replicas=10,
                      max_iter=None, max_time=None, convergence=3):
    """Parallel tempering workflow generator.

    Args:
        num_sweeps (int, optional):
            Number of sweeps in the fixed temperature sampling.
        num_replicas (int, optional):
            Number of replicas (parallel states / workflow branches).
        max_iter (int/None, optional):
            Maximum number of iterations of the update/swaps loop.
        max_time (int/None, optional):
            Maximum wall clock runtime (in seconds) allowed in the update/swaps loop.
        convergence (int/None, optional):
            Number of times the best energy of the coldest replica has to repeat
            before we terminate.

    Returns:
        Workflow (:class:`~hybrid.core.Runnable` instance).
    """

    # expand single input state into `num_replicas` replica states
    preprocess = SpawnParallelTemperingReplicas(num_replicas=num_replicas)

    # fixed temperature sampling on all replicas in parallel
    update = hybrid.Map(FixedTemperatureSampler(num_sweeps=num_sweeps))

    # replica exchange step: do the top-down sweep over adjacent pairs
    # (good hot samples sink to bottom)
    swap = SwapReplicasDownsweep()

    # loop termination key function
    def key(states):
        if states is not None:
            return states[-1].samples.first.energy

    # replicas update/swap until Loop termination criteria reached
    loop = hybrid.Loop(update | swap, max_iter=max_iter, max_time=max_time,
                       convergence=convergence, key=key)

    # collapse all replicas (although the bottom one should be the best)
    postprocess = hybrid.MergeSamples(aggregate=True)

    workflow = preprocess | loop | postprocess

    return workflow
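# Usage sketch (illustration only, not part of the original generator): the PT
# workflow expects a single input state; replica spawning, sampling, swapping,
# and the final merge are handled inside the workflow. The toy `bqm` below is
# hypothetical, and the workflow runs entirely classically.
if __name__ == "__main__":
    import dimod
    import hybrid

    bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': 1, 'bc': -1, 'ca': 1})
    workflow = ParallelTempering(num_sweeps=1000, num_replicas=4, max_iter=5)
    solution = workflow.run(hybrid.State.from_problem(bqm)).result()
    print(solution.samples.first)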
def hybrid_solver():
    workflow = hybrid.Loop(
        hybrid.RacingBranches(
            hybrid.InterruptableTabuSampler(),
            hybrid.EnergyImpactDecomposer(size=30, rolling=True, rolling_history=0.75)
            | hybrid.QPUSubproblemAutoEmbeddingSampler()
            | hybrid.SplatComposer()
        ) | hybrid.ArgMin(), convergence=1)
    return hybrid.HybridSampler(workflow)
def test_hybrid(self):
    import dimod
    import hybrid

    bqm = dimod.BinaryQuadraticModel({}, {'ab': 1, 'bc': -1, 'ca': 1},
                                     0, dimod.SPIN)

    workflow = hybrid.Loop(
        hybrid.Race(
            hybrid.InterruptableTabuSampler(),
            hybrid.EnergyImpactDecomposer(size=2)
            | hybrid.SimulatedAnnealingSubproblemSampler()
            | hybrid.SplatComposer()
        ) | hybrid.ArgMin(), convergence=3)

    result = workflow.run(hybrid.State.from_problem(bqm)).result()

    self.assertEqual(result.samples.first.energy, -3.0)
def HybridizedPopulationAnnealing(num_reads=100, num_iter=100, num_sweeps=100,
                                  beta_range=None):
    """Workflow generator for population annealing initialized with QPU samples.

    Args:
        num_reads (int):
            Size of the population of samples.
        num_iter (int):
            Number of temperatures over which we iterate fixed-temperature
            sampling / resampling.
        num_sweeps (int):
            Number of sweeps in the fixed temperature sampling step.
        beta_range (tuple[float], optional):
            A 2-tuple defining the beginning and end of the beta schedule,
            where beta is the inverse temperature. Passed to
            :class:`.CalculateAnnealingBetaSchedule` for linear schedule
            generation.

    Returns:
        Workflow (:class:`~hybrid.core.Runnable` instance).
    """

    # QPU initial sampling: limits the PA workflow to QPU-sized problems
    qpu_init = (
        hybrid.IdentityDecomposer()
        | hybrid.QPUSubproblemAutoEmbeddingSampler(num_reads=num_reads)
        | hybrid.IdentityComposer()
    ) | hybrid.AggregatedSamples(False)

    # PA workflow: after initial QPU sampling and initial beta schedule
    # estimation, we do `num_iter` steps (one per beta/temperature) of
    # fixed-temperature sampling / weighted resampling
    schedule_init = CalculateAnnealingBetaSchedule(
        length=num_iter, beta_range=beta_range, interpolation='linear')

    workflow = qpu_init | schedule_init | hybrid.Loop(
        ProgressBetaAlongSchedule()
        | hybrid.FixedTemperatureSampler(num_sweeps=num_sweeps)
        | EnergyWeightedResampler(),
        max_iter=num_iter)

    return workflow
def PopulationAnnealing(num_reads=100, num_iter=100, num_sweeps=100, beta_range=None):
    """Population annealing workflow generator.

    Args:
        num_reads (int):
            Size of the population of samples.
        num_iter (int):
            Number of temperatures over which we iterate fixed-temperature
            sampling / resampling.
        num_sweeps (int):
            Number of sweeps in the fixed temperature sampling step.
        beta_range (tuple[float], optional):
            A 2-tuple defining the beginning and end of the beta schedule,
            where beta is the inverse temperature. Passed to
            :class:`.CalculateAnnealingBetaSchedule` for linear schedule
            generation.

    Returns:
        Workflow (:class:`~hybrid.core.Runnable` instance).
    """

    # PA workflow: after initial beta schedule estimation, we do `num_iter` steps
    # (one per beta/temperature) of fixed-temperature sampling / weighted resampling
    schedule_init = CalculateAnnealingBetaSchedule(
        length=num_iter, beta_range=beta_range, interpolation='linear')

    workflow = schedule_init | hybrid.Loop(
        ProgressBetaAlongSchedule()
        | hybrid.FixedTemperatureSampler(num_sweeps=num_sweeps, num_reads=num_reads)
        | EnergyWeightedResampler(),
        max_iter=num_iter)

    return workflow
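# Usage sketch (illustration only, not from the original): this variant accepts
# an explicit `beta_range`; passing one overrides the automatically estimated
# schedule endpoints. The toy problem and beta values below are hypothetical,
# and the workflow runs entirely classically.
if __name__ == "__main__":
    import dimod
    import hybrid

    bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': 1, 'bc': -1, 'ca': 1})
    workflow = PopulationAnnealing(num_reads=50, num_iter=20, num_sweeps=100,
                                   beta_range=(0.1, 10.0))
    solution = workflow.run(hybrid.State.from_problem(bqm)).result()
    print(solution.samples.first)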
problem = sys.argv[1]
with open(problem) as fp:
    bqm = dimod.BinaryQuadraticModel.from_coo(fp)

# construct a Dialectic Search workflow
generate_antithesis = (hybrid.IdentityDecomposer()
                       | hybrid.RandomSubproblemSampler()
                       | hybrid.SplatComposer()
                       | hybrid.TabuProblemSampler())

generate_synthesis = (hybrid.GreedyPathMerge()
                      | hybrid.TabuProblemSampler())

tracker = hybrid.TrackMin()

local_update = hybrid.LoopWhileNoImprovement(
    hybrid.Parallel(hybrid.Identity(), generate_antithesis)
    | generate_synthesis | tracker,
    max_tries=10)

global_update = hybrid.Loop(generate_antithesis | local_update, max_iter=10)

# run the workflow
init_state = hybrid.State.from_sample(hybrid.min_sample(bqm), bqm)
final_state = global_update.run(init_state).result()

# show execution profile
hybrid.profiling.print_counters(global_update)

# show results
print("Solution: sample={.samples.first}".format(tracker.best))
# hybrid.InterruptableTabuSampler(),
# hybrid.EnergyImpactDecomposer(size=2)
# | hybrid.QPUSubproblemAutoEmbeddingSampler()
# | hybrid.SplatComposer()
# ) | hybrid.ArgMin()

#iteration = hybrid.RacingBranches(
iteration = hybrid.Race(
    hybrid.InterruptableTabuSampler(),
    #hybrid.SimulatedAnnealingProblemSampler(),
    subproblem | subsampler
) | hybrid.ArgMin()

# iteration = hybrid.Race(
#     hybrid.SimulatedAnnealingProblemSampler(),
#     subproblem | subsampler
# ) | hybrid.ArgMin()

#workflow = hybrid.LoopUntilNoImprovement(iteration, convergence=3)
#workflow = hybrid.Loop(iteration, max_iter=5, convergence=3)
workflow = hybrid.Loop(iteration, max_iter=1)

start_t = perf_counter()

# Solve the problem
init_state = hybrid.State.from_problem(bqm)
final_state = workflow.run(init_state).result()

elapsed_t = perf_counter() - start_t

# Print results
print("Solution: sample={.samples.first}".format(final_state))
print("Elapsed time: {}".format(elapsed_t))

hybrid.profiling.print_counters(workflow)
n_replicas = 10
n_random_swaps = n_replicas - 1
n_iterations = 10

# states are randomly initialized
state = hybrid.State.from_problem(bqm)

# get a reasonable beta range
beta_hot, beta_cold = neal.default_beta_range(bqm)

# generate betas for all branches/replicas
betas = np.geomspace(beta_hot, beta_cold, n_replicas)

# create n_replicas with geometric distribution of betas (inverse temperature)
replicas = hybrid.States(*[state.updated(beta=b) for b in betas])

# run replicas update/swap for n_iterations
# (after each update/sampling step, do n_replicas-1 swap operations)
update = hybrid.Map(FixedTemperatureSampler(num_sweeps=n_sweeps))
swap = hybrid.Loop(SwapReplicas(), max_iter=n_random_swaps)
workflow = hybrid.Loop(update | swap, max_iter=n_iterations) \
         | hybrid.MergeSamples(aggregate=True)

solution = workflow.run(replicas).result()

# show execution profile
hybrid.profiling.print_counters(workflow)

# show results
print("Solution: sample={0.samples.first}, energy={0.samples.first.energy}".format(solution))
# `Spin` comes from pyqubo; `arr` is assumed to be a sequence of coefficients
# defined earlier in the original script.
from pyqubo import Spin

H = 0
for i in range(len(arr)):
    if (i % 10 == 0):  # high impact variables
        H += 1000 * arr[i] * Spin(f'x_{i}')
        continue
    H += arr[i] * Spin(f'x_{i}')
H *= H

print("Compiling...")
model = H.compile()
bqm = model.to_bqm()
print("OK")

import hybrid

hybrid.logger.setLevel(hybrid.logging.DEBUG)

workflow = hybrid.Loop(
    hybrid.RacingBranches(
        hybrid.InterruptableSimulatedAnnealingProblemSampler(),
        hybrid.EnergyImpactDecomposer(size=10, rolling=True, rolling_history=.3)
        | hybrid.QPUSubproblemAutoEmbeddingSampler(num_reads=2)
        | hybrid.SplatComposer()
    ) | hybrid.ArgMin(), convergence=3)

# not our workflow
result = hybrid.KerberosSampler().sample(bqm)
hybrid.Unwind(workflow)

print("Solution: sample={}".format(result.first))
#hybrid.print_structure(workflow)
print("-----------------------")
#hybrid.print_counters(workflow)
def sample(self, bqm, init_sample=None, max_iter=100, convergence=3,
           num_reads=1, sa_reads=1, sa_sweeps=10000, tabu_timeout=500,
           qpu_reads=100, qpu_sampler=None, qpu_params=None,
           max_subproblem_size=50, energy_threshold=None):
    """Run Tabu search, Simulated annealing and QPU subproblem sampling (for
    high energy impact problem variables) in parallel and return the best
    samples.

    Args:
        bqm (:obj:`~dimod.BinaryQuadraticModel`):
            Binary quadratic model to be sampled from.

        init_sample (:class:`~dimod.SampleSet`, callable, ``None``):
            Initial sample set (or sample generator) used for each "read".
            Use a random sample for each read by default.

        max_iter (int):
            Number of iterations in the hybrid algorithm.

        convergence (int):
            Number of iterations with no improvement that terminates sampling.

        num_reads (int):
            Number of reads. Each sample is the result of a single run of
            the hybrid algorithm.

        sa_reads (int):
            Number of reads in the simulated annealing branch.

        sa_sweeps (int):
            Number of sweeps in the simulated annealing branch.

        tabu_timeout (int):
            Timeout for non-interruptable operation of tabu search
            (time in milliseconds).

        qpu_reads (int):
            Number of reads in the QPU branch.

        qpu_sampler (:class:`dimod.Sampler`, optional, default=DWaveSampler()):
            Quantum sampler such as a D-Wave system.

        qpu_params (dict):
            Dictionary of keyword arguments with values that will be used
            on every call of the QPU sampler.

        max_subproblem_size (int):
            Maximum size of the subproblem selected in the QPU branch.

        energy_threshold (float, optional):
            Terminate when this energy threshold is surpassed. Check is
            performed at the end of each iteration.

    Returns:
        :obj:`~dimod.SampleSet`: A `dimod` :obj:`~dimod.SampleSet` object.
    """

    if callable(init_sample):
        init_state_gen = lambda: hybrid.State.from_sample(init_sample(), bqm)
    elif init_sample is None:
        init_state_gen = lambda: hybrid.State.from_sample(
            hybrid.random_sample(bqm), bqm)
    elif isinstance(init_sample, dimod.SampleSet):
        init_state_gen = lambda: hybrid.State.from_sample(init_sample, bqm)
    else:
        raise TypeError(
            "'init_sample' should be a SampleSet or a SampleSet generator")

    subproblem_size = min(len(bqm), max_subproblem_size)

    energy_reached = None
    if energy_threshold is not None:
        energy_reached = lambda en: en <= energy_threshold

    iteration = hybrid.Race(
        hybrid.Identity(),
        hybrid.InterruptableTabuSampler(timeout=tabu_timeout),
        hybrid.InterruptableSimulatedAnnealingProblemSampler(
            num_reads=sa_reads, num_sweeps=sa_sweeps),
        hybrid.EnergyImpactDecomposer(
            size=subproblem_size, rolling=True,
            rolling_history=0.3, traversal='bfs')
        | hybrid.QPUSubproblemAutoEmbeddingSampler(
            num_reads=qpu_reads, qpu_sampler=qpu_sampler, qpu_params=qpu_params)
        | hybrid.SplatComposer(),
    ) | hybrid.ArgMin()

    self.runnable = hybrid.Loop(
        iteration, max_iter=max_iter, convergence=convergence,
        terminate=energy_reached)

    samples = []
    energies = []
    for _ in range(num_reads):
        init_state = init_state_gen()
        final_state = self.runnable.run(init_state)
        # the best sample from each run is one "read"
        ss = final_state.result().samples
        ss.change_vartype(bqm.vartype, inplace=True)
        samples.append(ss.first.sample)
        energies.append(ss.first.energy)

    return dimod.SampleSet.from_samples(
        samples, vartype=bqm.vartype, energy=energies)
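# Usage sketch (illustration only, not from the original): the signature above
# matches hybrid.KerberosSampler.sample from dwave-hybrid, so a typical call
# might look like the following. The QPU branch assumes a configured D-Wave
# solver; the toy problem is hypothetical.
if __name__ == "__main__":
    import dimod
    import hybrid

    bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': 1, 'bc': -1, 'ca': 1})
    sampleset = hybrid.KerberosSampler().sample(
        bqm, max_iter=10, convergence=3, qpu_reads=50, max_subproblem_size=30)
    print(sampleset.first)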
n_random_swaps = n_replicas - 1
n_iterations = 10

# replicas are initialized with random samples
state = hybrid.State.from_problem(bqm)
replicas = hybrid.States(*[state.updated() for _ in range(n_replicas)])

# get a reasonable beta range
beta_hot, beta_cold = neal.default_beta_range(bqm)

# generate betas for all branches/replicas
betas = np.geomspace(beta_hot, beta_cold, n_replicas)

# run replicas update/swap for n_iterations
# (after each update/sampling step, do n_replicas-1 random adjacent pair swaps)
update = hybrid.Branches(*[
    FixedTemperatureSampler(beta=beta, num_sweeps=n_sweeps) for beta in betas
])
swap = hybrid.Loop(SwapReplicaPairRandom(betas=betas), max_iter=n_random_swaps)
workflow = hybrid.Loop(update | swap, max_iter=n_iterations) \
         | hybrid.MergeSamples(aggregate=True)

solution = workflow.run(replicas).result()

# show execution profile
hybrid.profiling.print_counters(workflow)

# show results
print("Solution: sample={0.samples.first}, energy={0.samples.first.energy}".format(solution))
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

import dimod
import hybrid

# load a problem
problem = sys.argv[1]
with open(problem) as fp:
    bqm = dimod.BinaryQuadraticModel.from_coo(fp)

# run Tabu in parallel with QPU, but post-process QPU samples with very short Tabu
iteration = hybrid.Race(
    hybrid.InterruptableTabuSampler(),
    hybrid.EnergyImpactDecomposer(size=50)
    | hybrid.QPUSubproblemAutoEmbeddingSampler(num_reads=100)
    | hybrid.SplatComposer()
    | hybrid.TabuProblemSampler(timeout=1)
) | hybrid.ArgMin()

main = hybrid.Loop(iteration, max_iter=10, convergence=3)

# run the workflow
init_state = hybrid.State.from_sample(hybrid.utils.min_sample(bqm), bqm)
solution = main.run(init_state).result()

# show results
print("Solution: sample={.samples.first}".format(solution))
# https://docs.ocean.dwavesys.com/en/latest/examples/hybrid1.html#hybrid1
import dimod
import networkx as nx
import random

graph = nx.barabasi_albert_graph(100, 3, seed=1)

h = {v: 0.0 for v in graph.nodes}
J = {edge: random.choice([-1, 1]) for edge in graph.edges}
bqm = dimod.BQM(h, J, offset=0, vartype=dimod.SPIN)

import hybrid

hybrid.logger.setLevel(hybrid.logging.DEBUG)

workflow = hybrid.Loop(
    hybrid.RacingBranches(
        hybrid.InterruptableTabuSampler(max_time=0),
        hybrid.EnergyImpactDecomposer(size=100, rolling=True, rolling_history=0.75)
        | hybrid.QPUSubproblemAutoEmbeddingSampler(num_reads=1)
        | hybrid.SplatComposer()
    ) | hybrid.ArgMin(), convergence=3)

result = hybrid.HybridSampler(workflow).sample(bqm)
hybrid.Unwind(workflow)

print("Solution: sample={}".format(result.first))
hybrid.print_structure(workflow)
print("-----------------------")
hybrid.print_counters(workflow)
def Kerberos(max_iter=100, max_time=None, convergence=3, energy_threshold=None,
             sa_reads=1, sa_sweeps=10000, tabu_timeout=500,
             qpu_reads=100, qpu_sampler=None, qpu_params=None,
             max_subproblem_size=50):
    """An opinionated hybrid asynchronous decomposition sampler for problems of
    arbitrary structure and size. Runs Tabu search, Simulated annealing and QPU
    subproblem sampling (for high energy impact problem variables) in parallel
    and returns the best samples.

    Kerberos workflow is used by :class:`KerberosSampler`.

    Termination Criteria Args:

        max_iter (int):
            Number of iterations in the hybrid algorithm.

        max_time (float/None, optional, default=None):
            Wall clock runtime termination criterion. Unlimited by default.

        convergence (int):
            Number of iterations with no improvement that terminates sampling.

        energy_threshold (float, optional):
            Terminate when this energy threshold is surpassed. Check is
            performed at the end of each iteration.

    Simulated Annealing Parameters:

        sa_reads (int):
            Number of reads in the simulated annealing branch.

        sa_sweeps (int):
            Number of sweeps in the simulated annealing branch.

    Tabu Search Parameters:

        tabu_timeout (int):
            Timeout for non-interruptable operation of tabu search
            (time in milliseconds).

    QPU Sampling Parameters:

        qpu_reads (int):
            Number of reads in the QPU branch.

        qpu_sampler (:class:`dimod.Sampler`, optional, default=DWaveSampler()):
            Quantum sampler such as a D-Wave system.

        qpu_params (dict):
            Dictionary of keyword arguments with values that will be used
            on every call of the QPU sampler.

        max_subproblem_size (int):
            Maximum size of the subproblem selected in the QPU branch.

    Returns:
        Workflow (:class:`~hybrid.core.Runnable` instance).
    """

    energy_reached = None
    if energy_threshold is not None:
        energy_reached = lambda en: en <= energy_threshold

    iteration = hybrid.Race(
        hybrid.Identity(),
        hybrid.InterruptableTabuSampler(timeout=tabu_timeout),
        hybrid.InterruptableSimulatedAnnealingProblemSampler(
            num_reads=sa_reads, num_sweeps=sa_sweeps),
        hybrid.EnergyImpactDecomposer(
            size=max_subproblem_size, rolling=True,
            rolling_history=0.3, traversal='bfs')
        | hybrid.QPUSubproblemAutoEmbeddingSampler(
            num_reads=qpu_reads, qpu_sampler=qpu_sampler, qpu_params=qpu_params)
        | hybrid.SplatComposer()
    ) | hybrid.ArgMin()

    workflow = hybrid.Loop(iteration, max_iter=max_iter, max_time=max_time,
                           convergence=convergence, terminate=energy_reached)

    return workflow
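# Usage sketch (illustration only, not part of the original): the Kerberos
# workflow can be wrapped in hybrid.HybridSampler to expose the standard dimod
# sampler API. The QPU branch assumes a configured D-Wave solver; the `bqm`
# below is a hypothetical toy problem.
if __name__ == "__main__":
    import dimod
    import hybrid

    bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': 1, 'bc': -1, 'ca': 1})
    sampler = hybrid.HybridSampler(Kerberos(max_iter=10, convergence=3))
    sampleset = sampler.sample(bqm)
    print(sampleset.first)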
print("BQM: {} nodes, {} edges, {:.2f} density".format( len(bqm), len(bqm.quadratic), hybrid.bqm_density(bqm))) # sweeps per fixed-temperature sampling step num_sweeps = 1000 # number of generations, or temperatures to progress through num_iter = 20 # population size num_samples = 20 # PA workflow: after initial beta schedule estimation, we do `num_iter` steps # (one per beta/temperature) of fixed-temperature sampling / weighted resampling workflow = CalculateAnnealingBetaSchedule(length=num_iter) | hybrid.Loop( ProgressBetaAlongSchedule() | FixedTemperatureSampler(num_sweeps=num_sweeps, num_reads=num_samples) | EnergyWeightedResampler(), max_iter=num_iter) # run the workflow state = hybrid.State.from_problem(bqm) solution = workflow.run(state).result() # show execution profile hybrid.profiling.print_counters(workflow) # show results print("Solution: sample={0.samples.first}, energy={0.samples.first.energy}". format(solution))
    sorted(glob('../problems/ac3/*'))[:problems_per_group],
))

workflows = [
    ("10s-tabu", lambda **kw: hybrid.TabuProblemSampler(timeout=10000)),

    ("10k-sa", lambda **kw: (
        hybrid.IdentityDecomposer()
        | hybrid.SimulatedAnnealingSubproblemSampler(sweeps=10000)
        | hybrid.SplatComposer())),

    ("qbsolv-like", lambda qpu, energy_threshold, **kw: hybrid.Loop(
        hybrid.Race(
            hybrid.InterruptableTabuSampler(timeout=200),
            hybrid.EnergyImpactDecomposer(size=50, rolling=True, rolling_history=0.15)
            | hybrid.QPUSubproblemAutoEmbeddingSampler(qpu_sampler=qpu)
            | hybrid.SplatComposer()
        ) | hybrid.ArgMin(),
        max_iter=100, convergence=10,
        terminate=None if energy_threshold is None else lambda en: en <= energy_threshold)),

    ("tiling-chimera", lambda qpu, energy_threshold, **kw: hybrid.Loop(
        hybrid.Race(
            hybrid.InterruptableTabuSampler(timeout=200),
            hybrid.TilingChimeraDecomposer(size=(16, 16, 4))
            | hybrid.QPUSubproblemExternalEmbeddingSampler(qpu_sampler=qpu)
            | hybrid.SplatComposer(),
        ) | hybrid.ArgMin(),
        max_iter=100, convergence=10,
# number of generations, or temperatures to progress through
num_iter = 20

# population size
num_samples = 20

# QPU initial sampling: limits the PA workflow to QPU-sized problems
qpu_init = (
    hybrid.IdentityDecomposer()
    | hybrid.QPUSubproblemAutoEmbeddingSampler(num_reads=num_samples)
    | hybrid.IdentityComposer()
) | hybrid.AggregatedSamples(False)

# PA workflow: after initial beta schedule estimation, we do `num_iter` steps
# (one per beta/temperature) of fixed-temperature sampling / weighted resampling
workflow = qpu_init | CalculateAnnealingBetaSchedule(length=num_iter) | hybrid.Loop(
    ProgressBetaAlongSchedule()
    | FixedTemperatureSampler(num_sweeps=num_sweeps)
    | EnergyWeightedResampler(),
    max_iter=num_iter)

# run the workflow
state = hybrid.State.from_problem(bqm)
solution = workflow.run(state).result()

# show execution profile
hybrid.profiling.print_counters(workflow)

# show results
print("Solution: sample={0.samples.first}, energy={0.samples.first.energy}".format(solution))
n_replicas = 10
n_iterations = 10

# states are randomly initialized
state = hybrid.State.from_problem(bqm)

# get a reasonable beta range
beta_hot, beta_cold = neal.default_beta_range(bqm)

# generate betas for all branches/replicas
betas = np.geomspace(beta_hot, beta_cold, n_replicas)

# create n_replicas with geometric distribution of betas (inverse temperature)
replicas = hybrid.States(*[state.updated(beta=b) for b in betas])

# run replicas update/swap for n_iterations
# (after each update/sampling step, do n_replicas-1 swap operations)
update = hybrid.Map(FixedTemperatureSampler(num_sweeps=n_sweeps))
swap = SwapReplicasDownsweep()
workflow = hybrid.Loop(update | swap, max_iter=n_iterations) \
         | hybrid.MergeSamples(aggregate=True)

solution = workflow.run(replicas).result()

# show execution profile
hybrid.profiling.print_counters(workflow)

# show results
print("Solution: sample={0.samples.first}, energy={0.samples.first.energy}".format(solution))
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import sys

import dimod
import hybrid

# load a problem
problem = sys.argv[1]
with open(problem) as fp:
    bqm = dimod.BinaryQuadraticModel.from_coo(fp)

# define the workflow
workflow = hybrid.Loop(
    hybrid.RacingBranches(
        hybrid.Identity(),
        hybrid.InterruptableTabuSampler(),
        hybrid.EnergyImpactDecomposer(size=50, rolling=True, traversal='bfs')
        | hybrid.QPUSubproblemAutoEmbeddingSampler()
        | hybrid.SplatComposer()
    ) | hybrid.ArgMin(), convergence=3)

# create a dimod sampler that runs the workflow and sample
result = hybrid.HybridSampler(workflow).sample(bqm)

# show results
print("Solution: sample={.first}".format(result))
def solve(self):
    self.n_bins_truth = self._data.x.shape[0]
    self.n_bins_reco = self._data.d.shape[0]

    if not self._data.R.shape[1] == self.n_bins_truth:
        raise Exception(
            "Number of bins at truth level do not match between 1D spectrum (%i) and response matrix (%i)"
            % (self.n_bins_truth, self._data.R.shape[1]))
    if not self._data.R.shape[0] == self.n_bins_reco:
        raise Exception(
            "Number of bins at reco level do not match between 1D spectrum (%i) and response matrix (%i)"
            % (self.n_bins_reco, self._data.R.shape[0]))

    self.convert_to_binary()

    print("INFO: N bins:", self._data.x.shape[0])
    print("INFO: n-bits encoding:", self.rho)

    print("INFO: Signal truth-level x:")
    print(self._data.x)
    print("INFO: pseudo-data b:")
    print(self._data.d)
    print("INFO: Response matrix:")
    print(self._data.R)

    self.Q = self.make_qubo_matrix()
    self._bqm = dimod.BinaryQuadraticModel.from_numpy_matrix(self.Q)

    print("INFO: solving the QUBO model (size=%i)..." % len(self._bqm))

    if self.backend in [Backends.cpu]:
        print("INFO: running on CPU...")
        self._results = dimod.ExactSolver().sample(self._bqm)
        self._status = StatusCode.success

    elif self.backend in [Backends.sim]:
        num_reads = self.solver_parameters['num_reads']
        print("INFO: running on simulated annealer (neal), num_reads=", num_reads)
        sampler = neal.SimulatedAnnealingSampler()
        self._results = sampler.sample(self._bqm, num_reads=num_reads).aggregate()
        self._status = StatusCode.success

    elif self.backend in [
            Backends.qpu, Backends.qpu_hinoise, Backends.qpu_lonoise,
            Backends.hyb, Backends.qsolv
    ]:
        print("INFO: running on QPU")

        config_file = self.get_config_file()
        self._hardware_sampler = DWaveSampler(config_file=config_file)
        print("INFO: QPU configuration file:", config_file)

        print("INFO: finding optimal minor embedding...")

        n_bits_avg = np.mean(self._encoder.rho)
        thr = 4. / float(self.n_bins_truth)
        n_tries = 5 if n_bits_avg < thr else 10

        J = qubo_quadratic_terms_from_np_array(self.Q)
        embedding = self.find_embedding(J, n_tries)

        print("INFO: creating DWave sampler...")
        sampler = FixedEmbeddingComposite(self._hardware_sampler, embedding)

        if self.backend in [
                Backends.qpu, Backends.qpu_hinoise, Backends.qpu_lonoise
        ]:
            print("INFO: Running on QPU")
            params = self.solver_parameters
            self._results = sampler.sample(self._bqm, **params).aggregate()
            self._status = StatusCode.success

        elif self.backend in [Backends.hyb]:
            print("INFO: hybrid execution")
            import hybrid

            num_reads = self.solver_parameters['num_reads']

            # Define the workflow
            # hybrid.EnergyImpactDecomposer(size=len(bqm), rolling_history=0.15)
            iteration = hybrid.RacingBranches(
                hybrid.InterruptableTabuSampler(),
                hybrid.EnergyImpactDecomposer(size=len(self._bqm) // 2, rolling=True)
                | hybrid.QPUSubproblemAutoEmbeddingSampler(num_reads=num_reads)
                | hybrid.SplatComposer()
            ) | hybrid.ArgMin()
            #workflow = hybrid.LoopUntilNoImprovement(iteration, convergence=3)
            workflow = hybrid.Loop(iteration, max_iter=20, convergence=3)

            init_state = hybrid.State.from_problem(self._bqm)
            self._results = workflow.run(init_state).result().samples
            self._status = StatusCode.success

            # show execution profile
            print("INFO: timing:")
            workflow.timers
            hybrid.print_structure(workflow)
            hybrid.profiling.print_counters(workflow)

        elif self.backend in [Backends.qsolv]:
            print("INFO: using QBsolv with FixedEmbeddingComposite")
            self._results = QBSolv().sample_qubo(S, solver=sampler, solver_limit=5)
            self._status = StatusCode.success

    else:
        raise Exception("ERROR: unknown backend", self.backend)

    print("DEBUG: status =", self._status)

    return self._status