Example #1
import json

from dwave.system.samplers import DWaveSampler
from dwave.system.composites import EmbeddingComposite

'''
  Combine the objective and constraints into a single dictionary (a QUBO, effectively a
  graph of linear and quadratic coefficients) to send to the QPU.
'''
qubo = {**objective, **constraints}
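# Hypothetical illustration (the real `objective` and `constraints` are defined elsewhere):
# each dict maps (variable, variable) pairs to QUBO coefficients, e.g.
#   objective   = {('x0', 'x0'): -1.0, ('x0', 'x1'): 2.0}
#   constraints = {('x1', 'x1'): -1.0}
# The merge above simply collects all coefficients into one dictionary; if a key appears in
# both dicts, the value from `constraints` wins.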

'''
  Create a sampler; this is what actually solves the problem on the D-Wave hardware.
'''
sampler = DWaveSampler(solver={"qpu":True})


'''
  Automatically embed the problem on the QPU via the sampler created above.
  EmbeddingComposite minor-embeds the problem onto the QPU's working graph.
'''
response = EmbeddingComposite(sampler).sample_qubo(qubo, chain_strength=5, num_reads=100)

'''
  Print the problem ID from the response metadata.
'''
print(response.info["problem_id"])

'''
  save as json
'''
with open("test.json", "w") as t_r:
    json.dump(response.to_serializable(), t_r) 


'''
  Show the data embedded on the QPU.
'''
Example #2
from job_shop_scheduler import get_jss_bqm

# Construct a BQM for the jobs
jobs = {
    "cupcakes": [("mixer", 2), ("oven", 1)],
    "smoothie": [("mixer", 1)],
    "lasagna": [("oven", 2)]
}
max_time = 4  # Upper bound on how long the schedule can be; 4 is arbitrary
bqm = get_jss_bqm(jobs, max_time)
print(bqm)

# Submit BQM
# Note: may need to tweak the chain strength and the number of reads
from dwave.system.samplers import DWaveSampler
from dwave.system.composites import EmbeddingComposite
sampler = EmbeddingComposite(DWaveSampler(solver={'qpu': True}))
sampleset = sampler.sample(bqm, chain_strength=2, num_reads=1000)

# Grab solution
solution = sampleset.first.sample

# Visualize solution
# Note0: we are making the solution simpler to interpret by restructuring it
#  into the following format:
#   task_times = {"job": [start_time_for_task0, start_time_for_task1, ..],
#                 "other_job": [start_time_for_task0, ..]
#                 ..}
#
# Note1: each node in our BQM is labelled as "<job>_<task_index>,<time>".
#  For example, the node "cupcakes_1,2" refers to job 'cupcakes', its 1st task
#  (where we are using zero-indexing, so task '("oven", 1)'), starting at time 2.
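# A minimal sketch of the parsing described in the notes above. It assumes the `jobs` dict
# and `solution = sampleset.first.sample` from this example; auxiliary 'aux*' variables
# created during BQM construction are skipped.
task_times = {job: [-1] * len(tasks) for job, tasks in jobs.items()}
for node, value in solution.items():
    if value == 1 and not node.startswith('aux'):
        job_name, task_info = node.rsplit("_", 1)             # e.g. "cupcakes_1,2"
        task_index, start_time = map(int, task_info.split(","))
        task_times[job_name][task_index] = start_time
print(task_times)   # e.g. {'cupcakes': [0, 2], 'smoothie': [0], 'lasagna': [0]}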
Example #3
    def __init__(self,
                 objective_function=None,
                 dwave_sampler=None,
                 dwave_sampler_kwargs=None,
                 num_activation_vectors=None,
                 activation_vec_hamming_dist=1,
                 max_hd=None,
                 parse_samples=True,
                 experiment_type=None,
                 num_reads=None,
                 num_iters=None,
                 network_type='minimum'):
        super().__init__(objective_function=objective_function)

        # Initialize switch network:
        # The default behavior here is to choose the smaller of either permutation or
        # sorting networks for the given input size.
        self.n_obj = self.objective_function.n
        if network_type == 'sorting':
            self.network = SortingNetwork(self.n_obj)
        elif network_type == 'permutation':
            self.network = PermutationNetwork(self.n_obj)
        elif network_type == 'minimum':
            s = SortingNetwork(self.n_obj)
            p = PermutationNetwork(self.n_obj)
            if s.depth <= p.depth:
                self.network = s
            else:
                self.network = p
        else:
            raise TypeError('Network type {} not recognized'.format(str(network_type)))
        self.n_qubo = self.network.depth
        self.dwave_solver = None
        self.sampler_kwargs = None
        self.qpu = False

        # Initialize dwave sampler:
        if dwave_sampler == 'QPU':
            self.dwave_solver = EmbeddingComposite(DWaveSampler())
            self.qpu = True
            if dwave_sampler_kwargs:
                self.sampler_kwargs = dwave_sampler_kwargs
            else:
                self.sampler_kwargs = dict()
        elif dwave_sampler == 'SA':
            self.dwave_solver = SimulatedAnnealingSampler()
            if num_reads:
                self.sampler_kwargs = {
                    'num_reads': num_reads
                }
            else:
                self.sampler_kwargs = {
                    'num_reads': 25
                }
        elif dwave_sampler == 'Tabu':
            self.dwave_solver = TabuSampler()
            if num_reads:
                self.sampler_kwargs = {
                    'num_reads': num_reads
                }
            else:
                self.sampler_kwargs = {
                    'num_reads': 250
                }

        self.stopwatch = 0

        # Initialize the type of experiment:
        # a timed experiment uses a high iteration count and a 30 s wall-clock limit,
        # while an iteration-limited experiment uses a fixed iteration count and no wall clock.
        if experiment_type == 'time_lim':
            self.n_iters = 1000
            self.time_limit = 30
        elif experiment_type == 'iter_lim' and num_iters:
            self.n_iters = num_iters
            self.time_limit = False
        else:
            self.n_iters = 50
            self.time_limit = False

        if max_hd:
            self.max_hd = max_hd
        else:
            self.max_hd = 0

        if num_activation_vectors:
            self.num_activation_vec = num_activation_vectors
        else:
            self.num_activation_vec = self.n_qubo

        self.form_qubo = LQUBO(objective_function=self.objective_function,
                               switch_network=self.network,
                               max_hamming_dist=self.max_hd,
                               num_activation_vectors=self.num_activation_vec,
                               activation_vec_hamming_dist=activation_vec_hamming_dist)

        self.solution = self.objective_function.min_v

        if parse_samples:
            self.selection = CheckAndSelect
        else:
            self.selection = Select
Example #4
import networkx as nx
w5 = nx.wheel_graph(5)

from dwave.system.samplers import DWaveSampler
from dwave.system.composites import EmbeddingComposite
sampler = EmbeddingComposite(DWaveSampler())

import dwave_networkx as dnx
print(dnx.min_vertex_cover(w5, sampler))

print(dnx.min_vertex_cover(w5, sampler))
Example #5
    def test_intermediate_composites(self):
        child = dimod.StructureComposite(dimod.NullSampler(), [0, 1], [(0, 1)])
        intermediate = dimod.TrackingComposite(child)
        sampler = EmbeddingComposite(intermediate)
        self.assertEqual(sampler.target_structure.nodelist, [0, 1])

    @classmethod
    def default(cls):
        with open('dwave_credentials.txt') as token_file:
            sapi_token = token_file.read()
        sampler = EmbeddingComposite(DWaveSampler(token=sapi_token, endpoint=cls.DEFAULT_SAPI_URL))
        return cls(sampler)
Example #7
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt

# Import the problem inspector
import dwave.inspector

# Select the sampler to use. We want a quantum method, so we use the DWaveSampler
from dwave.system.samplers import DWaveSampler

# Use a composite to handle the embedding
from dwave.system.composites import EmbeddingComposite

import networkx as nx

# Prepare the sampler using the chosen embedding
sampler = EmbeddingComposite(DWaveSampler(solver='Advantage_system1.1'))

# Create an empty graph
G = nx.Graph()

# Load a graph from a file
grafo = open(
    "/workspace/Tesi/Esempi-commentati/Pipelines-Antennas/JOHNSON8-2-4.txt",
    "rb")
#grafo = open("/workspace/Tesi/Esempi-commentati/Pipelines-Antennas/chesapeake.txt", "rb")
G = nx.read_edgelist(grafo)


# Create the function that formulates the QUBO
def massimo_set_indipendente_qubo(G, weight=None, lagrange=2.0):
    # an empty QUBO for an empty graph
Example #8
from collections import defaultdict
from dwave.system.samplers import DWaveSampler
from dwave.system.composites import EmbeddingComposite
import networkx as nx

G = nx.Graph()

G.add_edges_from([(0, 4), (0, 5), (1, 2), (1, 6), (2, 4), (3, 7), (5, 6),
                  (6, 7)])

Q = defaultdict(int)

# Constraint

for i in range(8):
    Q[(i, i)] += -7
    for j in range(i + 1, 8):
        Q[(i, j)] += 2

# Objective

for i, j in G.edges:
    Q[(i, i)] += 1
    Q[(j, j)] += 1
    Q[(i, j)] += -2

sampler = EmbeddingComposite(DWaveSampler(solver='DW_2000Q_6'))

sampleset = sampler.sample_qubo(Q, num_reads=10)

print(sampleset)
"""
Follow the four scenarios listed in task 1 by changing the variables in this file

qubit_1   qubit_2

  O -------- O 
        
h_1, J_val, h_2
 
"""

qubit_1 = 0
qubit_2 = 1

h_1 = 0
h_2 = 0

J_val = -1
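# With J_val = -1 the coupler is ferromagnetic: the two qubits lower their energy by taking
# the same value; a positive J_val would instead favour opposite values.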

h = {qubit_1: h_1, qubit_2: h_2}
J = {(qubit_1, qubit_2): J_val}

sampler = EmbeddingComposite(DWaveSampler(solver=dict(qpu=True)))

response = sampler.sample_ising(h,
                                J,
                                num_reads=1000,
                                num_spin_reversal_transforms=0)
print(response.aggregate())
import dwave.inspector as dwi  # assumes the D-Wave problem inspector package is installed
dwi.show(response)
Q_not = {('x', 'x'): -1, ('x', 'z'): 2, ('z', 'x'): 0, ('z', 'z'): -1}

from dwave.system.samplers import DWaveSampler
from dwave.system.composites import EmbeddingComposite
sampler = DWaveSampler()
sampler_embedded = EmbeddingComposite(sampler)

print(sampler.adjacency[sampler.nodelist[0]])

from dwave.system.composites import FixedEmbeddingComposite
sampler_embedded = FixedEmbeddingComposite(sampler, {'x': [0], 'z': [4]})
print(sampler_embedded.adjacency)

response = sampler_embedded.sample_qubo(Q_not, num_reads=10)
for datum in response.data(['sample', 'energy', 'num_occurrences']):
    print(datum.sample, "Energy: ", datum.energy, "Occurrences: ",
          datum.num_occurrences)
Example #11
s5 = nx.star_graph(4) # create a star graph where node 0 is hub to four other nodes.

# Solving Classically on a CPU

from dimod.reference.samplers import ExactSolver
sampler = ExactSolver() # returns the BQM's value for every possible assignment of variable values

import dwave_networkx as dnx
print(dnx.min_vertex_cover(s5, sampler)) # produce a BQM for our s5 graph and solve it on our selected sampler

print('################################################################################')

# Solving on a D-Wave System

from dwave.system.samplers import DWaveSampler
from dwave.system.composites import EmbeddingComposite # EmbeddingComposite(), maps unstructured problems to the graph structure of the selected sampler, a process known as minor-embedding
sampler = EmbeddingComposite(DWaveSampler()) # endpoint='https://URL_to_my_D-Wave_system/', token='ABC-123456789012345678901234567890', solver='My_D-Wave_Solver'
print(dnx.min_vertex_cover(s5, sampler))

print('################################################################################')

w5 = nx.wheel_graph(5) # creates a new graph
print(dnx.min_vertex_cover(w5, sampler)) # solves on a D-Wave system
print(dnx.min_vertex_cover(w5, sampler))

print('################################################################################')

c5 = nx.circular_ladder_graph(5) # replaces the problem graph
print(dnx.min_vertex_cover(c5, sampler)) # submits twice to the D-Wave system for solution
print(dnx.min_vertex_cover(c5, sampler)) # producing two of the possible valid solutions.
from dwave.system.samplers import DWaveSampler
from dwave.system.composites import EmbeddingComposite
import pandas as pd
import numpy as np
import time
import sys
import sympy
from sympy import *
sampler = EmbeddingComposite(
    DWaveSampler(endpoint='https://cloud.dwavesys.com/sapi',
                 token='',
                 solver='DW_2000Q_2_1'))
orig_stdout = sys.stdout
f = open('mapColoring4ColorsDwaveResults.txt', 'w')
sys.stdout = f
print("\n# MAP COLOURING PROBLEM WITH 4 COLOURS ON D-WAVE #\n")

print("\nSymbolic Computing\n")

#### Symbolic Computing

h = 0.0000005  #small number

n = 4  #four colors


def alpha(n, h):
    return (h + h)


def beta(n, alfa, h):
    return (((n ** 3 + n ** 2 + 1) * alfa) + h)
def solve_with_pbruteforce(jobs,
                           solution,
                           qpu=False,
                           num_reads=2000,
                           max_time=None,
                           window_size=5,
                           chain_strength=2,
                           times=10):
    if max_time is None:
        max_time = get_result(jobs, solution) + 3
    for iteration_number in range(times):
        print(iteration_number)
        try:
            if qpu:
                sampler = EmbeddingComposite(
                    DWaveSampler(solver={'qpu': True}))
            else:
                sampler = neal.SimulatedAnnealingSampler()

            for i in range(max_time - window_size):
                info = find_time_window(jobs, solution, i, i + window_size)
                new_jobs, indexes, disable_till, disable_since, disabled_variables = info

                if not bool(new_jobs):  # if new_jobs dict is empty
                    continue

                try:
                    bqm = get_jss_bqm(new_jobs,
                                      window_size + 1,
                                      disable_till,
                                      disable_since,
                                      disabled_variables,
                                      stitch_kwargs={'min_classical_gap': 2})
                except ImpossibleBQM:
                    print('*' * 25 + " It's impossible to construct a BQM " +
                          '*' * 25)
                    continue

                if qpu:
                    sampleset = sampler.sample(bqm,
                                               chain_strength=chain_strength,
                                               num_reads=num_reads)
                else:
                    sampleset = sampler.sample(bqm, num_reads=num_reads)

                solution1 = sampleset.first.sample
                selected_nodes = [
                    k for k, v in solution1.items()
                    if v == 1 and not k.startswith('aux')
                ]
                # Parse node information
                task_times = {k: [-1] * len(v) for k, v in new_jobs.items()}
                for node in selected_nodes:
                    job_name, task_time = node.rsplit("_", 1)
                    task_index, start_time = map(int, task_time.split(","))

                    task_times[int(job_name)][task_index] = start_time

                # improving original solution
                sol_found = deepcopy(solution)
                for job, times in task_times.items():
                    for j in range(len(times)):
                        sol_found[job][indexes[job]
                                       [j]] = task_times[job][j] + i
                if checkValidity(jobs, sol_found):
                    solution = sol_found
                    yield solution, i  # solution and place in frame
        except Exception as e:
            # uncomment this if you want to apply some behaviour when an exception occurs
            # yield 'ex', 'ex'
            print(e)
            continue
Example #14
    def test_instantiation_smoketest(self):
        sampler = EmbeddingComposite(MockSampler())

        dtest.assert_sampler_api(sampler)
Example #15
def train_model(X_train, y_train, X_test, y_test, lmd):
    """
    Train qboost model

    :param X_train: train input
    :param y_train: train label
    :param X_test: test input
    :param y_test: test label
    :param lmd: lambda to control the regularization term
    :return:
    """
    NUM_READS = 3000
    NUM_WEAK_CLASSIFIERS = 35
    # lmd = 0.5
    TREE_DEPTH = 3

    # define sampler
    dwave_sampler = DWaveSampler()
    # sa_sampler = micro.dimod.SimulatedAnnealingSampler()
    emb_sampler = EmbeddingComposite(dwave_sampler)

    N_train = len(X_train)
    N_test = len(X_test)

    print("\n======================================")
    print("Train#: %d, Test: %d" %(N_train, N_test))
    print('Num weak classifiers:', NUM_WEAK_CLASSIFIERS)
    print('Tree depth:', TREE_DEPTH)


    # input: dataset X and labels y (in {+1, -1})

    # Preprocessing data
    imputer = preprocessing.Imputer()
    # scaler = preprocessing.MinMaxScaler()
    scaler = preprocessing.StandardScaler()
    normalizer = preprocessing.Normalizer()
    centerer = preprocessing.KernelCenterer()


    # X = imputer.fit_transform(X)
    X_train = scaler.fit_transform(X_train)
    X_train = normalizer.fit_transform(X_train)
    X_train = centerer.fit_transform(X_train)

    # X_test = imputer.fit_transform(X_test)
    X_test = scaler.fit_transform(X_test)
    X_test = normalizer.fit_transform(X_test)
    X_test = centerer.fit_transform(X_test)


    ## Adaboost
    print('\nAdaboost')

    clf = AdaBoostClassifier(n_estimators=NUM_WEAK_CLASSIFIERS)

    # scores = cross_val_score(clf, X, y, cv=5, scoring='accuracy')
    print('fitting...')
    clf.fit(X_train, y_train)

    hypotheses_ada = clf.estimators_
    # clf.estimator_weights_ = np.random.uniform(0,1,size=NUM_WEAK_CLASSIFIERS)
    print('testing...')
    y_train_pred = clf.predict(X_train)
    y_test_pred = clf.predict(X_test)

    print('accu (train): %5.2f'%(metric(y_train, y_train_pred)))
    print('accu (test): %5.2f'%(metric(y_test, y_test_pred)))

    # Ensembles of Decision Tree
    print('\nDecision tree')

    clf2 = WeakClassifiers(n_estimators=NUM_WEAK_CLASSIFIERS, max_depth=TREE_DEPTH)
    clf2.fit(X_train, y_train)

    y_train_pred2 = clf2.predict(X_train)
    y_test_pred2 = clf2.predict(X_test)
    print(clf2.estimator_weights)

    print('accu (train): %5.2f' % (metric(y_train, y_train_pred2)))
    print('accu (test): %5.2f' % (metric(y_test, y_test_pred2)))

    # Ensembles of Decision Tree
    print('\nQBoost')

    DW_PARAMS = {'num_reads': NUM_READS,
                 'auto_scale': True,
                 # "answer_mode": "histogram",
                 'num_spin_reversal_transforms': 10,
                 # 'annealing_time': 10,
                 'postprocess': 'optimization',
                 }

    clf3 = QBoostClassifier(n_estimators=NUM_WEAK_CLASSIFIERS, max_depth=TREE_DEPTH)
    clf3.fit(X_train, y_train, emb_sampler, lmd=lmd, **DW_PARAMS)

    y_train_dw = clf3.predict(X_train)
    y_test_dw = clf3.predict(X_test)

    print(clf3.estimator_weights)

    print('accu (train): %5.2f' % (metric(y_train, y_train_dw)))
    print('accu (test): %5.2f' % (metric(y_test, y_test_dw)))


    # Ensembles of Decision Tree
    print('\nQBoostPlus')
    clf4 = QboostPlus([clf, clf2, clf3])
    clf4.fit(X_train, y_train, emb_sampler, lmd=lmd, **DW_PARAMS)
    y_train4 = clf4.predict(X_train)
    y_test4 = clf4.predict(X_test)
    print(clf4.estimator_weights)

    print('accu (train): %5.2f' % (metric(y_train, y_train4)))
    print('accu (test): %5.2f' % (metric(y_test, y_test4)))


    print("=============================================")
    print("Method \t Adaboost \t DecisionTree \t Qboost \t QboostIt")
    print("Train\t %5.2f \t\t %5.2f \t\t\t %5.2f \t\t %5.2f"% (metric(y_train, y_train_pred),
                                                               metric(y_train, y_train_pred2),
                                                               metric(y_train, y_train_dw),
                                                               metric(y_train, y_train4)))
    print("Test\t %5.2f \t\t %5.2f \t\t\t %5.2f \t\t %5.2f"% (metric(y_test, y_test_pred),
                                                              metric(y_test,y_test_pred2),
                                                              metric(y_test, y_test_dw),
                                                              metric(y_test, y_test4)))
    print("=============================================")

    # plt.subplot(211)
    # plt.bar(range(len(y_test)), y_test)
    # plt.subplot(212)
    # plt.bar(range(len(y_test)), y_test_dw)
    # plt.show()

    return
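# Hypothetical usage sketch (the data split and lmd value below are assumptions, not part of
# this listing):
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# train_model(X_train, y_train, X_test, y_test, lmd=0.5)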
Example #16
# Add constraint that each pair of nodes with a shared edge not both select one color
for neighbor in neighbors:
    v, u = neighbor
    for i in range(colors):
        variables = [v + str(i), u + str(i)]
        csp.add_constraint(not_both_1, variables)

# Convert the binary constraint satisfaction problem to a binary quadratic model
bqm = dwavebinarycsp.stitch(csp)

# Set up a solver using the local system’s default D-Wave Cloud Client configuration file
# and sample 2000 times

sampler = EmbeddingComposite(
    DWaveSampler(endpoint='https://cloud.dwavesys.com/sapi',
                 token=''))  #Paste your token here

#sampler = neal.SimulatedAnnealingSampler()
response = sampler.sample(bqm, num_reads=2000)  # doctest: +SKIP

# Plot the lowest-energy sample if it meets the constraints

# print(response)
#print(response.samples())

sample = next(response.samples())  # doctest: +SKIP
if not csp.check(sample):  # doctest: +SKIP
    print("Failed to color map")
else:
    plot_map(sample)
    scaled_quadratic = {
        key: quadratic[key] / scaling_factor
        for key in quadratic
    }

    return scaled_linear, scaled_quadratic


scaled_linear, scaled_quadratic = scale_bias_couplings(linear, quadratic)

bqm = dimod.BinaryQuadraticModel(scaled_linear, scaled_quadratic, 0.0,
                                 dimod.BINARY)

reads = 1000
sol_limit = 64
system = EmbeddingComposite(DWaveSampler(solver='DW_2000Q_2_1'))
sampler = dwave_qbsolv.QBSolv()
Tref = time()
response = sampler.sample(bqm,
                          num_reads=reads,
                          solver='tabu',
                          solver_limit=sol_limit,
                          verbosity=0)
#response = system.sample(bqm, num_reads=reads)
Tfin = time()

# Best solution available right now
best_solution = response.first
solution_data = list(dict(best_solution[0]).values())

# Matrix form of solution
Example #18
def try_k_coloring(k_colors, graph_name, is_simulated=False):
    print("processing ", graph_name, "... ")
    num_vertices, num_edges, list_edges = read_graph(graph_name)
    vertices = [str(i + 1) for i in range(num_vertices)]
    csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
    one_color_configurations = set()

    def not_same_color(v1, v2):
        # constraint: no two adjacent nodes may share the same color
        return not (v1 and v2)

    for i in range(k_colors):
        one_color_configurations.add(
            tuple(1 if i == j else 0 for j in range(k_colors)))

    # constraint: exactly one color per vertex
    for vertex in vertices:
        variables = [vertex + "c" + str(i) for i in range(k_colors)]
        csp.add_constraint(one_color_configurations, variables)

    for edge in list_edges:
        v1, v2 = edge
        for i in range(k_colors):
            variables = [str(v1) + "c" + str(i), str(v2) + "c" + str(i)]
            csp.add_constraint(not_same_color, variables)

    def plot_map(self):
        G = nx.Graph()
        G.add_nodes_from(vertices)
        G.add_edges_from(list_edges)
        # Translate from binary to integer color representation
        color_map = {}
        for province in vertices:
            for i in range(k_colors):
                if sample[province + "c" + str(i)]:
                    color_map[province] = i
        # Plot the sample with color-coded nodes
        node_colors = [color_map.get(node) for node in G.nodes()]
        nx.draw_circular(G,
                         with_labels=True,
                         node_color=node_colors,
                         node_size=3000,
                         cmap=plt.cm.rainbow)
        plt.show()

    bqm = dwavebinarycsp.stitch(csp)
    counts = []
    idSample = 0
    if not is_simulated:
        client_qpu = Client.from_config()
        client_cpu = Client.from_config(profile='prod')

        # Set up a solver using the local system’s default D-Wave Cloud Client configuration file
        # and sample 1024 times
        sampler = EmbeddingComposite(DWaveSampler())  # doctest: +SKIP
        start_time = time.time()
        response = sampler.sample(bqm, num_reads=1024)  # doctest: +SKIP
        elapsed_time = time.time() - start_time
        for s in response.data():
            if not isSampleInSamples(s, counts):
                counts.append({
                    'id': idSample,
                    'sample': s.sample,
                    'num_occurrences': s.num_occurrences,
                    'energy': s.energy
                })
                idSample += 1

        # Plot the lowest-energy sample if it meets the constraints

        sample = next(response.samples())  # doctest: +SKIP
        if not csp.check(sample):  # doctest: +SKIP
            print("Failed to color map")
        print("execution time: ", elapsed_time)
        return counts, response
    else:
        sampler = neal.SimulatedAnnealingSampler()
        start_time = time.time()
        response = sampler.sample(bqm, num_reads=1024)
        elapsed_time = time.time() - start_time
        sample = next(response.samples())  # doctest: +SKIP

        for s in response.data():

            if not isSampleInSamples(s, counts):
                counts.append({
                    'id': idSample,
                    'sample': s.sample,
                    'num_occurrences': s.num_occurrences,
                    'energy': s.energy
                })
                idSample += 1

        if not csp.check(sample):  # doctest: +SKIP
            print("Failed to color map")
        print("execution time: ", elapsed_time)
        return counts, response
Example #19
print('=============================')
print('Given virtual qubits a0, a1, b0, b1, c0, c1, c2, c3, c4;')
print('list possible solutions where:')
print('  C = A * B, and C = 9 (c3=1, c2=0, c1=0, c0=1)')
print('')

# At the top of this file, set useQpu to True to use a live QPU.
#
# For this tutorial we need a triangular configuration, but the physical
# topology of the QPU does not have triangles. There is a technique
# called "embedding" that allows us to map our virtual problem onto a
# physical platform. The concept of embedding is described more
# thoroughly in the embedding tutorials.
if (useQpu):
    sampler = DWaveSampler()  # live QPU
    sampler_embedded = EmbeddingComposite(sampler)  # we will need to embed
    # See these pages for information on embedding:
    # https://docs.dwavesys.com/docs/latest/c_gs_4.html
    # https://docs.dwavesys.com/docs/latest/c_handbook_5.html
else:
    sampler = SimulatedAnnealingSampler()  # simulated quantum annealer
    sampler_embedded = sampler  # we do not need to embed for a simulation
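# A small illustrative sketch (an assumption, not part of the original tutorial): a
# triangular 3-spin Ising problem, the kind of structure that needs minor-embedding on
# QPU topologies without triangles.
h_tri = {0: 0.0, 1: 0.0, 2: 0.0}
J_tri = {(0, 1): 1.0, (1, 2): 1.0, (0, 2): 1.0}   # a 3-cycle of couplers
print(sampler_embedded.sample_ising(h_tri, J_tri, num_reads=10).first)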

csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)

"""
Now we tie together logic gates and operators in the form of
constraints. Listing constraints is an easy way to get a problem onto
the D-Wave, but it is not necessarily an optimal way to do it.

Gates that are allowed include:
"""
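# A minimal sketch (an assumption; the original gate list is cut off here) of adding one
# such gate constraint with the standard dwavebinarycsp gate factories. The variables
# a0, b0, c0 come from the problem statement printed above.
# from dwavebinarycsp.factories.constraint.gates import and_gate
# csp.add_constraint(and_gate(['a0', 'b0', 'c0']))   # enforces c0 == a0 AND b0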
### H = -q1 - q2 - q3 - q4 + 2*(q1*q2 + q2*q3 + q3*q4 + q1*q3 + q1*q4 + q2*q4)

linear = {
    key: -1
    for key in [('q' + str(color), 'q' + str(color)) for color in range(1, 5)]
}
quadratic = {
    key: 2
    for key in [('q' + str(color1), 'q' + str(color2))
                for color1 in range(1, 5) for color2 in range(2, 5)
                if color1 < color2]
}
Q = dict(linear)
Q.update(quadratic)

response = EmbeddingComposite(DWaveSamplerInstance).sample_qubo(Q,
                                                                num_reads=1000)
for sample in response.data():
    print(sample[0], "Energy: ", sample[1], "Occurrences: ", sample[2])

###############################################################################################################################################################################################
### Two regions: A,B; 2 colours: 1,2
### 1: only 1 colour per region can be selected, 2: two regions cannot have the same colour
### H1 = -qA1 - qA2 + 2*qA1*qA2 - qB1 - qB2 + 2*qB1*qB2       #qA1 = region A has colour 1
### H2 = -qA1 - qB1 + 2*qA1*qB1 - qA2 - qB2 + 2*qA2*qB2
### H = H1 + H2 = -2*qA1 -2*qA2 -2*qB1 - 2*qB2 + 2*qA1*qA2 + 2*qB1*qB2 + 2*qA1*qB1 + 2*qA2*qB2

c_linear = {
    key: -2
    for key in [('q' + region + str(color), 'q' + region + str(color))
                for region in ['A', 'B'] for color in range(1, 3)]
}
Example #21
print('================')
print('     ??????     ')
print('    ??    ??    ')
print('   ??  ??  ??   ')
print('   ??  ??  ??   ')
print('    ??    ??    ')
print('     ??????     ')
print('Flip a bunch of coins and show the distribution.')
print('')

# At the top of this file, set useQpu to True to use a live QPU.
if (useQpu):
    sampler = DWaveSampler()
    # We need an embedding composite sampler because not all qubits are
    # working. A trivial embedding lets us avoid dead qubits.
    sampler = EmbeddingComposite(sampler)
else:
    sampler = SimulatedAnnealingSampler()

# Initialize a binary quadratic model.
# It will use 2000 qubits. All biases are 0 and all couplings are 0.
bqm = {}       # binary quadratic model
distrib = {}   # distribution
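# A minimal sketch (assuming dimod is available; not part of the original script) of the
# zero-bias model described above: with all biases and couplings equal to 0, both states of
# each variable have the same energy, so every read behaves like a coin flip.
import dimod
coin_bqm = dimod.BinaryQuadraticModel({i: 0.0 for i in range(10)}, {}, 0.0, dimod.SPIN)
print(sampler.sample(coin_bqm, num_reads=100).aggregate())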

msg = 'How many coins do you want to flip at the same time?'
try:
    coins = raw_input(msg)
except:
    try:
        coins = input(msg)
    except:
hJ_lsq = optimize_lsq(p_true, NODES)

# Define the solver
NUM_READS = 5000
NUM_EPOCHS = 100
lrate = 0.2
DW_PARAMS = {'auto_scale': True, 'num_spin_reversal_transforms': 5}

# define sampler
dwave_sampler = DWaveSampler(solver={'qpu': True})

# Some accounts need to replace this line with the next:
# dwave_sampler = DWaveSampler(token = 'My API Token', solver='Solver Name')
sa_sampler = dimod.SimulatedAnnealingSampler()
emb_sampler = EmbeddingComposite(dwave_sampler)

emb_sampler.parameters

T = 1. / 20
Q_lsq = dict(((key, key), T * value)
             for (key, value) in zip(NODES, hJ_lsq[:len(NODES)]))
Q_lsq.update(
    dict(((NODES[key[0]], NODES[key[1]]), T * value)
         for (key, value) in zip(MORAL_EDGES, hJ_lsq[len(NODES):])))

print('Solving Q_LSQ on QPU...')
response_lsq = emb_sampler.sample_qubo(Q_lsq, num_reads=NUM_READS, **DW_PARAMS)

samples_lsq = np.asarray([[datum.sample[v] for v in NODES]
                          for datum in response_lsq.data()
Example #23
import dwave.embedding

from dwave.system.composites import (
    EmbeddingComposite,
    FixedEmbeddingComposite,
    LazyFixedEmbeddingComposite,
    LazyEmbeddingComposite,
    AutoEmbeddingComposite,
)

from dwave.system.testing import MockDWaveSampler, mock
from dwave.embedding import chain_breaks
from dwave.system.warnings import ChainStrengthWarning


@dimod.testing.load_sampler_bqm_tests(EmbeddingComposite(MockDWaveSampler()))
class TestEmbeddingComposite(unittest.TestCase):
    def test_instantiation_smoketest(self):
        sampler = EmbeddingComposite(MockDWaveSampler())

        dimod.testing.assert_sampler_api(sampler)

    def test_sample_ising(self):
        sampler = EmbeddingComposite(MockDWaveSampler())

        h = {0: -1., 4: 2}
        J = {(0, 4): 1.5}

        response = sampler.sample_ising(h, J)

        # nothing failed and we got at least one response back
Example #24
colors = len(one_color_configurations)

# Create a binary constraint satisfaction problem
csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)

# Add constraint that each node (province) select a single color
for province in provinces:
    variables = [province+str(i) for i in range(colors)]
    csp.add_constraint(one_color_configurations, variables)

# Add constraint that each pair of nodes with a shared edge not both select one color
for neighbor in neighbors:
    v, u = neighbor
    for i in range(colors):
        variables = [v+str(i), u+str(i)]
        csp.add_constraint(not_both_1, variables)

# Convert the binary constraint satisfaction problem to a binary quadratic model
bqm = dwavebinarycsp.stitch(csp)

# Set up a solver using the local system’s default D-Wave Cloud Client configuration file
# and sample 10 times
sampler = EmbeddingComposite(DWaveSampler())         # doctest: +SKIP
response = sampler.sample(bqm, num_reads=10)         # doctest: +SKIP

# Plot the lowest-energy sample if it meets the constraints
sample = next(response.samples())      # doctest: +SKIP
if not csp.check(sample):              # doctest: +SKIP
    print("Failed to color map")
else:
    plot_map(sample)
Example #25
    def test_instantiation_smoketest(self):
        sampler = EmbeddingComposite(MockDWaveSampler())

        dimod.testing.assert_sampler_api(sampler)
def solve_with_pbruteforce(jobs,
                           solution,
                           qpu=False,
                           num_reads=2000,
                           max_time=None,
                           window_size=5,
                           chain_strength=2,
                           num_of_iterations=10,
                           min_classical_gap=2):

    # default, safe value of max_time to give some room for improvement
    if max_time is None:
        max_time = get_result(jobs, solution) + 3

    # main loop, iterates over whole instance
    for iteration_number in range(num_of_iterations):
        print('-' * 10, f"iteration {iteration_number+1}/{num_of_iterations}",
              '-' * 10)
        try:
            if qpu:
                sampler = EmbeddingComposite(DWaveSampler())
            else:
                sampler = neal.SimulatedAnnealingSampler()

            # looping over parts of the instance, solving small sub-instances
            # of size window_size
            from random import sample
            for i in sample(range(max_time - window_size),
                            len(range(max_time - window_size))):

                # cutting out the sub-instance
                info = find_time_window(jobs, solution, i, i + window_size)

                # new_jobs - tasks present in the sub-instance
                # indexes - old (full-instance) indexes of tasks in new_jobs
                # disable_till, disable_since and disabled_variables are all
                # explained in instance_parser.py
                new_jobs, indexes, disable_till, disable_since, disabled_variables = info

                if not bool(new_jobs):  # if sub-instance is empty
                    continue

                # constructing Binary Quadratic Model
                try:
                    bqm = get_jss_bqm(
                        new_jobs,
                        window_size + 1,
                        disable_till,
                        disable_since,
                        disabled_variables,
                        stitch_kwargs={'min_classical_gap': min_classical_gap})
                except ImpossibleBQM:
                    print('*' * 25 + " It's impossible to construct a BQM " +
                          '*' * 25)
                    continue

                # reading num_reads responses from the sampler
                sampleset = sampler.sample(bqm,
                                           chain_strength=chain_strength,
                                           num_reads=num_reads)

                # using the best (lowest energy) sample
                solution1 = sampleset.first.sample

                # variables that were selected by the sampler
                # (apart from the auxiliary variables)
                selected_nodes = [
                    k for k, v in solution1.items()
                    if v == 1 and not k.startswith('aux')
                ]

                # parsing the acquired information
                task_times = {k: [-1] * len(v) for k, v in new_jobs.items()}
                for node in selected_nodes:
                    job_name, task_time = node.rsplit("_", 1)
                    task_index, start_time = map(int, task_time.split(","))
                    task_times[int(job_name)][task_index] = start_time

                # constructing a new solution, improved by the acquired info
                # newly scheduled tasks are injected into the full instance
                sol_found = deepcopy(solution)
                for job, times in task_times.items():
                    for j in range(len(times)):
                        sol_found[job][indexes[job]
                                       [j]] = task_times[job][j] + i

                # checking if the new, improved solution is valid
                if checkValidity(jobs, sol_found):
                    solution = deepcopy(sol_found)
                    # solution = sol_found
                    yield solution, i  # solution and current position of window

        except Exception as e:
            # uncomment this if you want to apply some behaviour
            # in demo.py when an exception occurs:
            # yield 'ex', 'ex'
            print(e)
            continue
Example #27
#CONSTRAINTS

## ------- Run our QUBO on the QPU -------
# Set up QPU parameters
chainstrength = 1500
numruns = 40
gam = 100

Q = {}
for i in range(len(S)):
    Q[(i, i)] = (S[i] * S[i] - 30 * S[i]) * gam - C[i]

for i in range(len(S)):
    for j in range((i + 1), len(S)):
        Q[(i, j)] = (2 * S[i] * S[j] + 2) * gam

# Run the QUBO on the solver from your config file
from dwave.system.samplers import DWaveSampler
from dwave.system.composites import EmbeddingComposite
response = EmbeddingComposite(DWaveSampler()).sample_qubo(
    Q, chain_strength=chainstrength, num_reads=numruns)

## ------- Return results to user -------
R = iter(response)
E = iter(response.data())
for line in response:
    sample = next(R)
    S1 = [S[i] for i in sample if sample[i] > 0]
    S0 = [S[i] for i in sample if sample[i] < 1]
    print("S1 Sum: ", sum(S1), "\t", S1)
Example #28
import networkx as nx
import matplotlib.pyplot as plt
pos = nx.circular_layout(G)
#nx.draw(G,pos,with_labels=True)
nlabels=dict((n,(n,d['weight'])) for n,d in G.nodes(data=True))
#nx.draw(G,pos,labels=nlabels, node_size=2000)
elabels = nx.get_edge_attributes(G,'weight')
#nx.draw_networkx_edge_labels(G,pos,edge_labels=elabels,ax=plt.gca())
#plt.show()

# Parker's direct-to-DWave code
from dwave.system.samplers import DWaveSampler
from dwave.system.composites import EmbeddingComposite
# Set Q for the problem QUBO
linear = {('x0', 'x0'): -1, ('x1', 'x1'): -1, ('x2', 'x2'): -1}
quadratic = {('x0', 'x1'): 2, ('x0', 'x2'): 2, ('x1', 'x2'): 2}
Q = dict(linear)
Q.update(quadratic)

# Try converting G to this format
linear_G = {(n,n):d['weight'] for n,d in G.nodes(data=True)}
quadratic_G = nx.get_edge_attributes(G,'weight') # Note this is the same as elabels
QG = dict(linear_G)
QG.update(quadratic_G)


# Minor-embed and sample 1000 times on a default D-Wave system
#response = EmbeddingComposite(DWaveSampler()).sample_qubo(Q, num_reads=1000)
response = EmbeddingComposite(DWaveSampler()).sample_qubo(QG, num_reads=1000)
for datum in response.data(['sample', 'energy', 'num_occurrences']):
    print(datum.sample, "Energy: ", datum.energy, "Occurrences: ", datum.num_occurrences)
def anneal(C_i, C_ij, mu, sigma, l, strength_scale, energy_fraction, ngauges,
           max_excited_states):
    # Initialising h and J as dictionaries
    h = {}
    J = {}

    for i in range(len(C_i)):
        h_i = -2 * sigma[i] * C_i[i]
        for j in range(len(C_ij[0])):
            if j > i:
                J[(i, j)] = float(2 * C_ij[i][j] * sigma[i] * sigma[j])
            h_i += 2 * (sigma[i] * C_ij[i][j] * mu[j])
        h[i] = h_i

    #applying cutoff
    print("Number of J before : " + str(len(J)))  #J before cutoff
    float_vals = []
    for i in J.values():
        float_vals.append(i)
    cutoff = np.percentile(float_vals, AUGMENT_CUTOFF_PERCENTILE)
    to_delete = []
    for k, v in J.items():
        if v < cutoff:
            to_delete.append(k)
    for k in to_delete:
        del J[k]
    print("Number of J after : " + str(len(J)))  # J after cutof
    new_Q = {}
    isingpartial = {}

    if FIXING_VARIABLES:
        #Optimising heuristically the number of coupling terms
        Q, _ = dimod.ising_to_qubo(h, J, offset=0.0)
        bqm = dimod.BinaryQuadraticModel.from_qubo(Q, offset=0.0)
        simple = dimod.fix_variables(bqm, sampling_mode=False)
        if simple == {}:
            new_Q = Q
        else:
            Q_indices = []
            for i in Q:
                if i in simple.keys():
                    continue
                else:
                    Q_indices.append(i)
            new_Q = {key: Q[key] for key in Q_indices}
        print('new length', len(new_Q))
        isingpartial = simple

    if (not FIXING_VARIABLES) or len(new_Q) > 0:
        mapping = []
        offset = 0
        for i in range(len(C_i)):
            if i in isingpartial:
                mapping.append(None)
                offset += 1
            else:
                mapping.append(i - offset)
        if FIXING_VARIABLES:
            new_Q_mapped = {}
            for (first, second), val in new_Q.items():
                new_Q_mapped[(mapping[first], mapping[second])] = val
            h, J, _ = dimod.qubo_to_ising(new_Q_mapped)

        #Run gauges
        qaresults = []
        print("Number of variables to anneal :" + str(len(h)))
        for g in range(ngauges):
            #Finding embedding
            qaresult = []
            embedded = False
            for attempt in range(5):
                a = np.sign(np.random.rand(len(h)) - 0.5)
                float_h = []
                for i in h.values():
                    float_h.append(i)
                h_gauge = float_h * a
                J_gauge = {}
                for i in range(len(h)):
                    for j in range(len(h)):
                        if (i, j) in J:
                            J_gauge[(i, j)] = J[(i, j)] * a[i] * a[j]
                try:
                    print("Trying to find embeding")
                    sampler = EmbeddingComposite(
                        DWaveSampler(token='secret_token'))
                    embedded = True
                    break
                except ValueError:  # no embedding found
                    print('no embedding found')
                    embedded = False
                    continue

            if not embedded:
                continue
            print("emebeding found")

            print("Quantum annealing")
            try_again = True
            while try_again:
                try:
                    #Annealing, saving energy and sample list
                    sampleset = sampler.sample_ising(
                        h_gauge,
                        J_gauge,
                        chain_strength=strength_scale,
                        num_reads=200,
                        annealing_time=20)
                    try_again = False
                except:
                    print('runtime or ioerror, trying again')
                    time.sleep(10)
                    try_again = True
            print("Quantum done")

            qaresult.append(sampleset.record[0][0].tolist())
            qaresult = np.asarray(qaresult)
            qaresult = qaresult * a
            qaresults[g * nreads:(g + 1) * nreads] = qaresult

        full_strings = np.zeros((len(qaresults), len(C_i)))
        full_strings = np.asarray(full_strings)
        qaresults = np.asarray(qaresults)
        if FIXING_VARIABLES:
            j = 0
            for i in range(len(C_i)):
                if i in isingpartial:
                    full_strings[:, i] = 2 * isingpartial[i] - 1
                else:
                    full_strings[:, i] = qaresults[:, j]
                    j += 1
        else:
            full_strings = qaresults

        s = np.asarray(full_strings)
        energies = np.zeros(len(qaresults))
        s[np.where(s > 1)] = 1.0
        s[np.where(s < -1)] = -1.0
        bits = len(s[0])
        for i in range(bits):
            energies += 2 * s[:, i] * (-sigma[i] * C_i[i])
            for j in range(bits):
                if j > i:
                    energies += 2 * s[:, i] * s[:, j] * sigma[i] * sigma[
                        j] * C_ij[i][j]
                energies += 2 * s[:, i] * sigma[i] * C_ij[i][j] * mu[j]

        unique_energies, unique_indices = np.unique(energies,
                                                    return_index=True)
        ground_energy = np.amin(unique_energies)
        if ground_energy < 0:
            threshold_energy = (1 - energy_fraction) * ground_energy
        else:
            threshold_energy = (1 + energy_fraction) * ground_energy
        lowest = np.where(unique_energies < threshold_energy)
        unique_indices = unique_indices[lowest]
        if len(unique_indices) > max_excited_states:
            sorted_indices = np.argsort(
                energies[unique_indices])[-max_excited_states:]
            unique_indices = unique_indices[sorted_indices]
        print("unique indices : ", unique_indices)
        print(type(unique_indices[0]))
        print(type(full_strings))
        final_answers = full_strings[unique_indices]
        print('number of selected excited states', len(final_answers))

        return final_answers

    else:
        final_answer = []
        print("Evrything resolved by FIXING_VARIABLES")
        for i in range(len(C_i)):
            if i in isingpartial:
                final_answer.append(2 * isingpartial[i] - 1)
        final_answer = np.array(final_answer)
        return np.array([final_answer])
Example #30
from dwave.system.samplers import DWaveSampler
from dwave.system.composites import EmbeddingComposite
import setting

linear = {('x0', 'x0'): -1, ('x1', 'x1'): -1, ('x2', 'x2'): -1}
quadratic = {('x0', 'x1'): 2, ('x0', 'x2'): 2, ('x1', 'x2'): 2}

Q = dict(linear)
Q.update(quadratic)

response = EmbeddingComposite(
    DWaveSampler(token=setting.tokencode)).sample_qubo(Q, num_reads=1000)

for sample, energy, num_occurrences, chain_break_fraction in list(
        response.data()):
    print(sample, "Energy: ", energy, "Occurrences: ", num_occurrences)

print("Total_real_time ", response.info["timing"]["total_real_time"], "us")