Ejemplo n.º 1
0
    def test_get_jss_bqm(self):
        """Smoke test: a small two-job instance must yield a BinaryQuadraticModel."""
        max_time = 3
        jobs = {
            "sandwich": [("bread", 1), ("roast_beef", 1)],
            "french_toast": [("egg", 1), ("bread", 1)],
        }

        result = get_jss_bqm(jobs, max_time)
        self.assertIsInstance(result, BinaryQuadraticModel)
Ejemplo n.º 2
0
def num_of_errors_in_chain_strengths(qpu=False):
    """Measure how the number of erroneous solutions depends on chain strength.

    For each strength in a fixed sweep, the 3-job instance below is solved
    12 times (on the QPU when ``qpu`` is True, otherwise with simulated
    annealing) and each run's error count is collected.  A trimmed median
    with min/max margins is then plotted and saved to ``chain_strength.png``.

    Args:
        qpu (bool): if True, sample on D-Wave hardware via
            EmbeddingComposite; otherwise use neal's software sampler.
    """
    jobs = {
        "1": [(0, 2), (1, 1), (2, 1)],
        "2": [(1, 1), (0, 1), (3, 2)],
        "3": [(2, 1), (3, 1), (1, 1)]
    }

    strengths = (0.5, 1, 1.5, 1.8, 2.0, 2.1, 2.3, 2.5, 3.0, 3.5, 4.0)
    errors = defaultdict(list)
    for strength in strengths:
        for i in range(12):
            print("tick " + str(strength) + " " + str(i))
            try:
                bqm = get_jss_bqm(jobs,
                                  8,
                                  stitch_kwargs={'min_classical_gap': 2.0})
                if qpu:
                    sampler = EmbeddingComposite(
                        DWaveSampler(solver={'qpu': True}))
                    sampleset = sampler.sample(bqm,
                                               chain_strength=strength,
                                               num_reads=1000)
                else:
                    # NOTE: chain_strength has no effect on the software
                    # sampler, so all strengths produce equivalent runs here.
                    sampler = neal.SimulatedAnnealingSampler()
                    sampleset = sampler.sample(bqm, num_reads=1000)
                sol_dict = printResults(sampleset, jobs)
                errors[strength].append(sol_dict['error'])
            except Exception as e:
                print(f"error: {strength}")
                print(e)
                continue

    medians = []
    margins = []
    for _, values in errors.items():
        values.sort()
        # Drop the single best and worst run to damp outliers — but only
        # when enough samples remain.  The previous code unconditionally
        # trimmed and then indexed values[0], which raised IndexError
        # whenever a strength collected fewer than 3 successful runs.
        trimmed = values[1:-1] if len(values) > 2 else values
        med = median(trimmed)  # hoisted: was recomputed three times
        medians.append(med)
        margins.append([
            abs(trimmed[0] - med),
            abs(trimmed[-1] - med)
        ])
    plt.errorbar(errors.keys(),
                 medians,
                 yerr=np.array(margins).T,
                 fmt='o-',
                 color='blue')
    plt.xlabel('chain strength')
    plt.ylabel('number of error solutions provided (out of 1000)')
    # plt.show()
    plt.savefig('chain_strength.png')
    print(errors)
Ejemplo n.º 3
0
def num_of_errors_in_min_gap(qpu=False, start=1.0):
    """Sweep min_classical_gap values and append result rows to a CSV file.

    Gaps in [start, start + 0.5) are tested in 0.1 steps, 10 runs each,
    appending one row per run to ``wyniki_min_gap.csv``.

    Args:
        qpu (bool): if True, sample on D-Wave hardware; otherwise use neal.
        start (float): first min_classical_gap value of the sweep.
    """
    # Imports hoisted here: they were previously re-executed inside the
    # loops on every iteration / exception.
    import csv
    from numpy import arange
    from time import sleep

    jobs = {"1": [(0, 2), (1, 1), (2, 1)],
            "2": [(1, 1), (2, 2), (0, 1)],
            "3": [(2, 2), (0, 1), (1, 2)]}

    # best_solution = { "1": [0,2,4],
    #                   "2": [0,2,4],
    #                   "3": [0,2,3]}
    #  result: 5

    # wyniki.csv structure:
    # min_classical_gap, not found, incorrect, num_of_reads, 5, 6, 7, 8, 9, more

    # newline='' is required by the csv module; without it each row gains
    # a spurious blank line on Windows.
    with open("wyniki_min_gap.csv", mode='a', newline='') as csvfile:
        filewriter = csv.writer(csvfile, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)

        gaps = list(arange(start, start + .5, 0.1))
        num_reads = 1000
        for gap in gaps:
            for _ in range(10):
                try:
                    bqm = get_jss_bqm(jobs, 8, stitch_kwargs={
                        'min_classical_gap': gap})
                    if qpu:
                        sampler = EmbeddingComposite(
                            DWaveSampler(solver={'qpu': True}))
                        sampleset = sampler.sample(
                            bqm, chain_strength=10.0, num_reads=num_reads)
                    else:
                        sampler = neal.SimulatedAnnealingSampler()
                        sampleset = sampler.sample(bqm, num_reads=num_reads)
                    sol_dict = printResults(sampleset, jobs)
                except Exception as e:
                    print(f"error: {gap}")
                    print(e)
                    sleep(60)  # back off before retrying (e.g. QPU rate limits)
                    continue
                # NOTE(review): the header comment above lists a trailing
                # "more" column, but only counts 5..9 are written — confirm.
                result_row = [gap, sol_dict['error'], sol_dict['incorrect'],
                              num_reads] + [sol_dict[i] for i in range(5, 10)]
                filewriter.writerow(result_row)
                print('zapisane', gap)

        sleep(30)
Ejemplo n.º 4
0
def num_of_errors_in_times(qpu=False):
    """Measure erroneous-solution counts as a function of the max_time bound.

    For each max_time in range(4, 12) the 3-job instance below is solved
    12 times; per-run error counts are collected and a trimmed median with
    min/max margins is plotted and saved to ``times.png``.

    Args:
        qpu (bool): if True, sample on D-Wave hardware via
            EmbeddingComposite; otherwise use neal's software sampler.
    """
    jobs = {
        "1": [(0, 2), (1, 1), (0, 1)],
        "2": [(1, 1), (0, 1), (2, 2)],
        "3": [(2, 1), (2, 1), (1, 1)]
    }

    times = range(4, 12)
    errors = defaultdict(list)
    for time in times:
        for i in range(12):
            try:
                bqm = get_jss_bqm(jobs,
                                  time,
                                  stitch_kwargs={'min_classical_gap': 2.0})
                if qpu:
                    sampler = EmbeddingComposite(
                        DWaveSampler(solver={'qpu': True}))
                    sampleset = sampler.sample(bqm,
                                               chain_strength=2,
                                               num_reads=1000)
                else:
                    sampler = neal.SimulatedAnnealingSampler()
                    sampleset = sampler.sample(bqm, num_reads=1000)
                sol_dict = printResults(sampleset, jobs)
                errors[time].append(sol_dict['error'])
            except Exception as e:
                # Was a bare `except:` that also swallowed KeyboardInterrupt
                # and hid the failure cause; now narrowed and logged,
                # matching num_of_errors_in_chain_strengths.
                print(f"error: {time}")
                print(e)
                continue

    medians = []
    margins = []
    for _, values in errors.items():
        values.sort()
        # Trim the single best and worst run, but only when enough samples
        # remain (the old code raised IndexError with < 3 samples).
        trimmed = values[1:-1] if len(values) > 2 else values
        med = median(trimmed)  # hoisted: was recomputed three times
        medians.append(med)
        margins.append([
            abs(trimmed[0] - med),
            abs(trimmed[-1] - med)
        ])
    plt.errorbar(errors.keys(),
                 medians,
                 yerr=np.array(margins).T,
                 fmt='o',
                 color='blue')
    plt.xlabel('max_time value')
    plt.ylabel('number of error solutions provided (out of 1000)')
    # plt.show()
    plt.savefig('times.png')
    print(errors)
Ejemplo n.º 5
0
    def test_stitch_kwargs(self):
        """Verify get_jss_bqm forwards stitch_kwargs to dwavebinarycsp.stitch."""
        max_time = 3
        jobs = {
            "sandwich": [("bread", 1), ("roast_beef", 1)],
            "french_toast": [("egg", 1), ("bread", 1)],
        }

        # Reasonable stitch kwargs must still produce a valid BQM.
        workable_kwargs = {"max_graph_size": 6, "min_classical_gap": 1.5}
        self.assertIsInstance(
            get_jss_bqm(jobs, max_time, workable_kwargs),
            BinaryQuadraticModel)

        # A zero-sized graph cannot host any constraint, so stitching
        # must fail with ImpossibleBQM.
        unworkable_kwargs = {"max_graph_size": 0}
        with self.assertRaises(ImpossibleBQM):
            get_jss_bqm(jobs, max_time, unworkable_kwargs)
Ejemplo n.º 6
0
# Please enter the S3 bucket you created during onboarding in the code below
my_bucket = "yourbacket"  # the name of the bucket (was a pointless f-string with no placeholders)
my_prefix = "yourfolder"  # the name of the folder in the bucket
s3_folder = (my_bucket, my_prefix)

# Target a D-Wave 2000Q device through Amazon Braket.
device = AwsDevice("arn:aws:braket:::device/qpu/d-wave/DW_2000Q_6")
print('Device:', device)

from job_shop_scheduler import get_jss_bqm

# Construct a BQM for the jobs
jobs = {"cupcakes": [("mixer", 2), ("oven", 1)],
        "smoothie": [("mixer", 1)],
        "lasagna": [("oven", 2)]}
max_time = 4  # Upperbound on how long the schedule can be; 4 is arbitrary
bqm = get_jss_bqm(jobs, max_time)
print(bqm)

# Submit BQM
# Note: may need to tweak the chain strength and the number of reads

sampler = BraketDWaveSampler(s3_folder, 'arn:aws:braket:::device/qpu/d-wave/DW_2000Q_6')
sampler = EmbeddingComposite(sampler)

sampleset = sampler.sample(bqm, chain_strength=2, num_reads=100)

# Grab solution (lowest-energy sample)
solution = sampleset.first.sample

# Visualize solution
# Note0: we are making the solution simpler to interpret by restructuring it
def solve_with_pbruteforce(jobs,
                           solution,
                           qpu=False,
                           num_reads=2000,
                           max_time=None,
                           window_size=5,
                           chain_strength=2,
                           times=10):
    """Iteratively improve `solution` by re-solving sliding time windows.

    Generator: for each of `times` passes it slides a window of width
    `window_size` over the schedule, cuts the corresponding sub-instance
    out of `jobs`, builds a BQM for it, samples it (QPU or simulated
    annealing), splices the sampled start times back into the full
    solution, and yields (solution, window_position).

    NOTE(review): validity checking of the spliced solution is disabled
    (`if True` below, see FIXME), so invalid schedules may be yielded.
    On any unexpected error the sentinel pair ('ex', 'ex') is yielded.
    """
    # Default upper bound: current schedule length plus slack to improve.
    if max_time is None:
        max_time = get_result(jobs, solution) + 3
    for iteration_number in range(times):
        print(iteration_number)
        try:
            if qpu:
                sampler = EmbeddingComposite(
                    DWaveSampler(solver={'qpu': True}))
            else:
                sampler = neal.SimulatedAnnealingSampler()

            # Slide the window left-to-right over the schedule.
            for i in range(max_time - window_size):
                info = find_time_window(jobs, solution, i, i + window_size)
                new_jobs, indexes, disable_till, disable_since, disabled_variables = info

                if not bool(new_jobs):  # if new_jobs dict is empty
                    continue

                try:
                    bqm = get_jss_bqm(new_jobs,
                                      window_size + 1,
                                      disable_till,
                                      disable_since,
                                      disabled_variables,
                                      stitch_kwargs={'min_classical_gap': 2})
                except ImpossibleBQM:
                    print('*' * 25 + " It's impossible to construct a BQM " +
                          '*' * 25)
                    continue

                if qpu:
                    sampleset = sampler.sample(bqm,
                                               chain_strength=chain_strength,
                                               num_reads=num_reads)
                else:
                    sampleset = sampler.sample(bqm, num_reads=num_reads)

                # Lowest-energy sample; keep variables set to 1, skipping
                # auxiliary ('aux...') variables added by stitching.
                solution1 = sampleset.first.sample
                selected_nodes = [
                    k for k, v in solution1.items()
                    if v == 1 and not k.startswith('aux')
                ]
                # Parse node information
                # Variable names look like "<job>_<task_index>,<start_time>".
                task_times = {k: [-1] * len(v) for k, v in new_jobs.items()}
                for node in selected_nodes:
                    job_name, task_time = node.rsplit("_", 1)
                    task_index, start_time = map(int, task_time.split(","))

                    # NOTE(review): assumes new_jobs is keyed by ints —
                    # confirm against find_time_window (keys look like
                    # strings in other snippets of this project).
                    task_times[int(job_name)][task_index] = start_time

                # improving original solution
                # NOTE(review): the loop variable `times` below shadows the
                # `times` parameter (harmless here: range(times) was already
                # evaluated), but worth renaming.
                sol_found = deepcopy(solution)
                for job, times in task_times.items():
                    for j in range(len(times)):
                        sol_found[job][indexes[job]
                                       [j]] = task_times[job][j] + i
                if True:  # FIXME: checkValidity(jobs, sol_found):
                    solution = sol_found
                    yield solution, i  # solution and window position
        except Exception as e:
            # Best-effort: signal the failure to the consumer and move on.
            yield 'ex', 'ex'
            print(e)
            continue
Ejemplo n.º 8
0
def solve_with_pbruteforce(jobs,
                           solution,
                           qpu=False,
                           num_reads=2000,
                           max_time=None,
                           window_size=5,
                           chain_strength=2,
                           num_of_iterations=10,
                           min_classical_gap=2):
    """Iteratively improve `solution` by re-solving randomly ordered windows.

    Generator: each iteration visits every window position (in random
    order), cuts the corresponding sub-instance out of `jobs`, builds a
    BQM, samples it, splices the result back into the full solution and —
    if the result passes checkValidity — yields (solution, window_position).
    """

    # default, safe value of max_time to give some room for improvement
    if max_time is None:
        max_time = get_result(jobs, solution) + 3

    # main loop, iterates over whole instance
    for iteration_number in range(num_of_iterations):
        print('-' * 10, f"iteration {iteration_number+1}/{num_of_iterations}",
              '-' * 10)
        try:
            if qpu:
                sampler = EmbeddingComposite(DWaveSampler())
            else:
                sampler = neal.SimulatedAnnealingSampler()

            # looping over parts of the instance, solving small sub-instances
            # of size window_size, visited in random order
            from random import sample
            for i in sample(range(max_time - window_size),
                            len(range(max_time - window_size))):

                # cutting out the sub-instance
                info = find_time_window(jobs, solution, i, i + window_size)

                # new_jobs - tasks present in the sub-instance
                # indexes - old (full-instance) indexes of tasks in new_jobs
                # disable_till, disable_since and disabled_variables are all
                # explained in instance_parser.py
                new_jobs, indexes, disable_till, disable_since, disabled_variables = info

                if not bool(new_jobs):  # if sub-instance is empty
                    continue

                # constructing Binary Quadratic Model
                try:
                    bqm = get_jss_bqm(
                        new_jobs,
                        window_size + 1,
                        disable_till,
                        disable_since,
                        disabled_variables,
                        stitch_kwargs={'min_classical_gap': min_classical_gap})
                except ImpossibleBQM:
                    print('*' * 25 + " It's impossible to construct a BQM " +
                          '*' * 25)
                    continue

                # reading num_reads responses from the sampler
                sampleset = sampler.sample(bqm,
                                           chain_strength=chain_strength,
                                           num_reads=num_reads)

                # using the best (lowest energy) sample
                solution1 = sampleset.first.sample

                # variables that were selected by the sampler
                # (apart from the auxiliary variables)
                selected_nodes = [
                    k for k, v in solution1.items()
                    if v == 1 and not k.startswith('aux')
                ]

                # parsing acquired information
                # variable names look like "<job>_<task_index>,<start_time>"
                task_times = {k: [-1] * len(v) for k, v in new_jobs.items()}
                for node in selected_nodes:
                    job_name, task_time = node.rsplit("_", 1)
                    task_index, start_time = map(int, task_time.split(","))
                    # NOTE(review): assumes new_jobs is keyed by ints —
                    # confirm against find_time_window.
                    task_times[int(job_name)][task_index] = start_time

                # constructing a new solution, improved by the acquired info
                # newly scheduled tasks are injected into a full instance
                sol_found = deepcopy(solution)
                for job, times in task_times.items():
                    for j in range(len(times)):
                        sol_found[job][indexes[job]
                                       [j]] = task_times[job][j] + i

                # checking if the new, improved solution is valid
                if checkValidity(jobs, sol_found):
                    solution = deepcopy(sol_found)
                    # solution = sol_found
                    yield solution, i  # solution and current position of window

        except Exception as e:
            # uncomment this if you want to apply some behaviour
            # in demo.py when exception occurs:
            # yield 'ex', 'ex'
            print(e)
            continue

if __name__ == '__main__':

    num_reads = 1000
    with open("wyniki_rozmiar_instancji.csv", mode='a') as csvfile:
        filewriter = csv.writer(csvfile,
                                delimiter=',',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
        for size in range(2, 5):
            for i in range(20):
                jobs = get_instance(size)
                try:
                    bqm = get_jss_bqm(jobs,
                                      size + 2,
                                      stitch_kwargs={'min_classical_gap': 2.0})
                    sampler = EmbeddingComposite(
                        DWaveSampler(solver={'qpu': True}))
                    sampleset = sampler.sample(bqm,
                                               chain_strength=10,
                                               num_reads=num_reads)
                    sol_dict = printResults(sampleset, jobs)
                except Exception as e:
                    print(f"error: {size}")
                    print(e)
                    from time import sleep
                    sleep(60)
                    continue
                result_row = [
                    size, sol_dict['error'], sol_dict['incorrect'], num_reads,