def test_concurrent_tabu_samples(self):
    """Two tabu samplers with different timeouts run concurrently.

    Serially the branches would need ~3000ms (1000 + 2000); a concurrent
    run should finish in roughly the slower branch's 2000ms.
    """
    fast = hybrid.TabuProblemSampler(timeout=1000)
    slow = hybrid.TabuProblemSampler(timeout=2000)
    branches = hybrid.Parallel(fast, slow)

    bqm = dimod.BinaryQuadraticModel({'a': 1}, {}, 0, 'BINARY')
    state = hybrid.State.from_problem(bqm)

    # wall-clock window brackets the slower branch, well below the serial sum
    with self.assertRuntimeWithin(1900, 2500):
        branches.run(state).result()
def test_sa_concurrency(self):
    """Parallel SA branches should show a measurable speed-up over the
    same pair of branches chained serially."""
    params = dict(num_reads=1, num_sweeps=1000000)

    # identical pair of SA runs: once chained (serial), once branched (parallel)
    serial = (hybrid.SimulatedAnnealingProblemSampler(**params)
              | hybrid.SimulatedAnnealingProblemSampler(**params))
    parallel = hybrid.Parallel(
        hybrid.SimulatedAnnealingProblemSampler(**params),
        hybrid.SimulatedAnnealingProblemSampler(**params))

    bqm = dimod.generators.uniform(graph=1, vartype=dimod.SPIN)
    state = hybrid.State.from_problem(bqm)

    def mean_runtime(workflow, state, repeat=10):
        # average wall-clock workflow runtime over `repeat` runs
        with hybrid.tictoc() as timer:
            for _ in range(repeat):
                workflow.run(state).result()
        return timer.dt / repeat

    # NOTE: relatively weak lower bound on speedup was chosen so we don't
    # fail on the unreliable/inconsistent CI VMs, but to verify some level
    # of concurrency does exist (Windows CI is the least reliable)
    minimally_acceptable_speedup = 1.0 if os.name == 'nt' else 1.5

    # NOTE: on average, the observed speed-up is between 1.5x and 2x, but
    # it's highly dependant on the system load and availability of threads.
    # That's why we do multiple runs, and bail out on the first good speedup
    speedups = []
    best_speedup = 0
    for run in range(250):   # alternatively, run for up to X sec
        speedup = mean_runtime(serial, state) / mean_runtime(parallel, state)
        speedups.append(speedup)
        best_speedup = max(best_speedup, speedup)
        if best_speedup > minimally_acceptable_speedup:
            break

    info = "best speed-up of {} achieved within {} runs: {!r}".format(
        best_speedup, run + 1, speedups)
    self.assertGreaterEqual(best_speedup, minimally_acceptable_speedup, info)
def test_concurrent_sa_samples(self):
    """Running two SA samplers in Parallel must not take much longer than
    the slower of the two branches run individually.

    Compares per-runnable dispatch timers rather than total wall clock.
    """
    # NOTE(review): the sibling test (test_sa_concurrency) passes this
    # sampler's sweep count as `num_sweeps`; the original here used
    # `sweeps=10000`, which does not match that keyword — normalized to
    # `num_sweeps` for consistency. Confirm against the sampler signature.
    s1 = hybrid.SimulatedAnnealingProblemSampler(num_reads=1000, num_sweeps=10000)
    s2 = hybrid.SimulatedAnnealingProblemSampler(num_reads=1000, num_sweeps=10000)
    p = hybrid.Parallel(s1, s2)

    bqm = dimod.BinaryQuadraticModel({'a': 1}, {}, 0, 'BINARY')
    state = hybrid.State.from_problem(bqm)

    def time_runnable(runnable, init):
        # run to completion, then read total time spent in dispatch
        runnable.run(init).result()
        return sum(runnable.timers['dispatch.next'])

    t_s1 = time_runnable(s1, state)
    t_s2 = time_runnable(s2, state)
    t_p = time_runnable(p, state)

    # parallel execution must not be slower than the longest running branch + 75%
    # NOTE: the extremely weak upper bound was chosen so we don't fail on the
    # unreliable/inconsistent CI VMs, and yet to show some concurrency does exist
    t_expected_max = max(t_s1, t_s2) * 1.75
    self.assertLess(t_p, t_expected_max)
# Load the problem BQM from a COO-format file given on the command line.
problem = sys.argv[1]
with open(problem) as fp:
    bqm = dimod.BinaryQuadraticModel.from_coo(fp)

# construct a Dialectic Search workflow:
# antithesis: randomly perturb a subproblem and locally optimize with tabu
generate_antithesis = (hybrid.IdentityDecomposer()
                       | hybrid.RandomSubproblemSampler()
                       | hybrid.SplatComposer()
                       | hybrid.TabuProblemSampler())
# synthesis: greedily merge thesis/antithesis, then refine with tabu
generate_synthesis = hybrid.GreedyPathMerge() | hybrid.TabuProblemSampler()
# keep the best state seen so far
tracker = hybrid.TrackMin()

# inner loop: thesis (identity) vs antithesis, synthesized and tracked,
# repeated until no improvement for max_tries attempts
local_update = hybrid.LoopWhileNoImprovement(
    hybrid.Parallel(hybrid.Identity(), generate_antithesis)
    | generate_synthesis
    | tracker,
    max_tries=10)
# outer loop: fresh antithesis followed by the inner local search
global_update = hybrid.Loop(generate_antithesis | local_update, max_iter=10)

# run the workflow
init_state = hybrid.State.from_sample(hybrid.min_sample(bqm), bqm)
final_state = global_update.run(init_state).result()

# show execution profile
hybrid.profiling.print_counters(global_update)

# show results
print("Solution: sample={.samples.first}".format(tracker.best))
def merge_substates(_, substates):
    """Fold two child states into one by stacking their subsamples."""
    first, second = substates
    merged = hybrid.hstack_samplesets(first.subsamples, second.subsamples)
    return first.updated(subsamples=merged)

# decompose the problem into a sequence of subproblem states
subproblems = hybrid.Unwind(
    hybrid.EnergyImpactDecomposer(size=50, rolling_history=0.15))

# branch 1: sample each subproblem on the QPU, merge, and compose back
qpu = (hybrid.Map(hybrid.QPUSubproblemAutoEmbeddingSampler())
       | hybrid.Reduce(hybrid.Lambda(merge_substates))
       | hybrid.SplatComposer())

# branch 2: same pipeline, but with random subproblem sampling
random = (hybrid.Map(hybrid.RandomSubproblemSampler())
          | hybrid.Reduce(hybrid.Lambda(merge_substates))
          | hybrid.SplatComposer())

# run both branches and keep the lower-energy result
subsampler = hybrid.Parallel(qpu, random, endomorphic=False) | hybrid.ArgMin()

# race the subproblem pipeline against an interruptable tabu search
iteration = hybrid.Race(
    hybrid.InterruptableTabuSampler(),
    subproblems | subsampler) | hybrid.ArgMin()
main = hybrid.Loop(iteration, max_iter=10, convergence=3)

# run the workflow
init_state = hybrid.State.from_sample(hybrid.min_sample(bqm), bqm)
solution = main.run(init_state).result()

# show execution profile
hybrid.profiling.print_counters(main)

# show results
print("Solution: sample={.samples.first}".format(solution))
def merge_substates(_, substates):
    """Merge the subsamples of two branch states into the first state."""
    a, b = substates
    return a.updated(
        subsamples=hybrid.hstack_samplesets(a.subsamples, b.subsamples))

# decompose the problem into a sequence of subproblem states
subproblems = hybrid.Unwind(
    hybrid.EnergyImpactDecomposer(size=50, rolling_history=0.15))

# branch 1: QPU-sample every subproblem, fold results, compose back
qpu = hybrid.Map(hybrid.QPUSubproblemAutoEmbeddingSampler()) | hybrid.Reduce(
    hybrid.Lambda(merge_substates)) | hybrid.SplatComposer()

# branch 2: same pipeline with random subproblem sampling
random = hybrid.Map(hybrid.RandomSubproblemSampler()) | hybrid.Reduce(
    hybrid.Lambda(merge_substates)) | hybrid.SplatComposer()

# run both branches in parallel and keep the lower-energy result
subsampler = hybrid.Parallel(qpu, random) | hybrid.ArgMin()

# race the subproblem pipeline against an interruptable tabu search,
# tracking the best state seen across iterations
iteration = hybrid.Race(
    hybrid.InterruptableTabuSampler(),
    subproblems | subsampler) | hybrid.ArgMin() | hybrid.TrackMin(output=True)
main = hybrid.Loop(iteration, max_iter=10, convergence=3)

# run the workflow
# NOTE(review): `bqm` is not defined in this visible chunk — presumably
# loaded earlier in the file; confirm when viewing the full source.
init_state = hybrid.State.from_sample(hybrid.min_sample(bqm), bqm)
solution = main.run(init_state).result()

# show execution profile
hybrid.profiling.print_counters(main)

# show results