Example #1
    def solve(self):

        if (self.useQPU):
            sampler = EmbeddingComposite(DWaveSampler(solver={'qpu': True}))
            sampleset = sampler.sample_qubo(self.Q,
                                            num_reads=self.n_reads,
                                            chain_strength=self.chain)
        elif (self.useNeal):
            bqm = BinaryQuadraticModel.from_qubo(self.Q, offset=self.offset)
            sampler = neal.SimulatedAnnealingSampler()
            # chain_strength only applies to problems embedded on the QPU,
            # so it is not passed to the software samplers.
            sampleset = sampler.sample(bqm, num_reads=self.n_reads)
        elif (self.useHyb):
            bqm = BinaryQuadraticModel.from_qubo(self.Q, offset=self.offset)
            sampler = LeapHybridSampler()
            # LeapHybridSampler does not accept num_reads; it returns a single best sample.
            sampleset = sampler.sample(bqm)
        else:
            bqm = BinaryQuadraticModel.from_qubo(self.Q, offset=self.offset)
            sampler = TabuSampler()
            sampleset = sampler.sample(bqm, num_reads=self.n_reads)

        self.sampleset = sampleset
Example #2
def get_bqm(Q):
    """Returns a bqm representation of the problem.
    
    Args:
        Q(qubo): dictionary representing a QUBO
    """
    # Convert to bqm
    bqm = BinaryQuadraticModel.from_qubo(Q)

    return bqm
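A minimal usage sketch; the two-variable QUBO and the dimod import below are illustrative additions, not part of the original snippet:

from dimod import BinaryQuadraticModel

# Hypothetical QUBO: minimize -x0 - x1 + 2*x0*x1
Q = {(0, 0): -1, (1, 1): -1, (0, 1): 2}
bqm = get_bqm(Q)
print(bqm.num_variables)  # 2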
Example #3
File: test.py  Project: kevinab107/q-algos
def solve_tsp_hybrid(Q, G):
    bqm = BinaryQuadraticModel.from_qubo(Q)
    response = LeapHybridSampler().sample(bqm, time_limit=40)
    sample = response.first.sample
    route = [None] * len(G)
    for (city, time), val in sample.items():
        if val:
            route[time] = city
    cost = calculate_cost(nx.to_numpy_array(G), route)
    return (route, cost)
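calculate_cost is not defined in this snippet; a minimal sketch of what it presumably computes (the total weight of the closed tour given by route over the adjacency matrix), assuming cities are integer indices matching the node order of nx.to_numpy_array and every time slot received exactly one city:

def calculate_cost(adj_matrix, route):
    # Total weight of the tour, including the edge back to the starting city.
    cost = 0
    for k in range(len(route)):
        cost += adj_matrix[route[k]][route[(k + 1) % len(route)]]
    return cost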
Example #4
def get_bqm(nodes, edges):
    """Returns a bqm representation of the problem.

    Args:
        nodes(list of integers): nodes for the graph
        edges(list of tuples): each tuple represents an edge in the graph
    """
    # Create QUBO based on min(-1 * sum(x_i) + gamma * sum over edges of x_i * x_j)
    gamma = 3
    Q = {}

    for i in nodes:
        Q[(i, i)] = -1
    for i, j in edges:
        Q[(i, j)] = gamma

    # Convert to bqm
    bqm = BinaryQuadraticModel.from_qubo(Q)

    return bqm
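A usage sketch with a toy path graph, solved exactly with dimod's ExactSolver; the example graph and solver choice are illustrative, not part of the original project:

import dimod

nodes = [0, 1, 2]
edges = [(0, 1), (1, 2)]  # path graph; the largest independent set is {0, 2}

bqm = get_bqm(nodes, edges)
print(dimod.ExactSolver().sample(bqm).first.sample)  # expected: {0: 1, 1: 0, 2: 1}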
Example #5
    def solve(self, 
              useQPU=False, 
              useNeal=False, 
              useHyb=True,
              time_limit = 10,
              num_reads = 100,
              chain_strength = 10000):
        
        Q = self.Q
        BQM_offset = 0 # TODO: Use the accumulated quadratic constants from the constraints

        bqm = BinaryQuadraticModel.from_qubo(Q, offset=BQM_offset)

        self.sampleset = None
        
        # Call the requested solver
        
        if ( useQPU ):
            print("Solving using the DWaveSampler on the QPU...")
            sampler = EmbeddingComposite(DWaveSampler(solver={'qpu': True}))
            sampleset = sampler.sample_qubo(Q, num_reads=num_reads,chain_strength = chain_strength)
        elif ( useHyb ): 
            print("Solving using the LeapHybridSolver...")
            sampler = LeapHybridSampler()
            sampleset = sampler.sample(bqm, time_limit = time_limit)
        elif ( useNeal ): 
            print("Solving using the SimulatedAnnealing...")
            sampler = neal.SimulatedAnnealingSampler()
            sampleset = sampler.sample(bqm, num_reads = num_reads)
        else:
            print("Solving using the TabuSampler...")
            sampler = TabuSampler()
            sampleset = sampler.sample(bqm, num_reads = num_reads)

        self.sampleset = sampleset
        
        # Number of samples returned by the chosen sampler
        return len(self.sampleset)
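A hedged usage sketch; problem stands in for an instance of the surrounding class whose Q attribute has already been built:

# Solve locally with simulated annealing; num_reads applies to the QPU/Neal/Tabu
# branches, while time_limit is only used by the hybrid branch.
n_samples = problem.solve(useQPU=False, useHyb=False, useNeal=True, num_reads=200)
print(n_samples)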
Example #6
File: sixx.py  Project: sploiber/inspector
from dijkstra_solver import dijkstra_solver
import networkx as nx
from dimod import BinaryQuadraticModel
import dimod
import dwave.inspector
from dwave.system import DWaveSampler, EmbeddingComposite

G = nx.Graph()
G.add_weighted_edges_from([('S', 'a', 4.0), ('a', 'b', 7.0), ('b', 'G', 2.0),
                           ('G', 'd', 3.0), ('c', 'd', 8.0), ('c', 'S', 6.0),
                           ('c', 'b', 9.0)])
lagrange = 20
chainstrength = 20
numruns = 1000

model = dijkstra_solver(G, 'S', 'G', lagrange)
Q = model.to_qubo()
bqm = BinaryQuadraticModel.from_qubo(Q[0], offset=Q[1])
sampler = EmbeddingComposite(DWaveSampler())
sampleset = sampler.sample(bqm,
                           chain_strength=chainstrength,
                           num_reads=numruns)
dwave.inspector.show(bqm, sampleset, sampler)
Example #7
def doMaxCut(G,
             numMaxCutUBNodes=1,
             numLBs=2,
             numMinCutUBNodes=1,
             maxLmtNodes=1024,
             numIterMaxCut=1,
             numIterMinCut=1,
             mode=SA):
    '''
        Do max cut.
        @param G: Graph model instance of networkx.
        @param numMaxCutUBNodes: Number of nodes in the max cut upper bound.
        @param numLBs: Number of lower bound sets.
        @param numMinCutUBNodes: Number of nodes in the min cut upper bound.
        @param maxLmtNodes: Maximum number of nodes of a lower bound.
        @param numIterMaxCut: Number of iterations for max cut.
        @param numIterMinCut: Number of iterations for min cut.
        @param mode: SA or QA.
    '''

    biVarVals = np.zeros((G.number_of_nodes()),
                         dtype=np.int64)  # Zero index based.

    # Make a node index map.
    i2n = list(G.nodes)  # Key order of dictionary. Nodes not sorted.
    n2i = dict()

    for i, n in enumerate(i2n):
        n2i[n] = i

    # Get low bounds via minimum cut.
    LBs, UBNodes = getLowBoundsViaSAQABasedMinCut(
        G,
        numMaxCutUBNodes=numMaxCutUBNodes,
        numLBs=numLBs,
        numMinCutUBNodes=numMinCutUBNodes,
        maxLmtNodes=maxLmtNodes,
        numIter=numIterMinCut,
        mode=mode)

    # Get indexes of the upper bound's values for each graph.
    ubIdxes = [[] for _ in range(len(LBs))]

    for ub in UBNodes:
        for i, g in enumerate(LBs):
            ubIdxes[i].append((np.asarray(list(
                g.nodes)) == ub).nonzero()[0][0])  # Nodes not sorted.

    biVarVals[[n2i[n] for n in UBNodes]] = 1

    # Conduct max cut.
    for i in range(numIterMaxCut):
        if mode == SA:
            for k, tG in enumerate(LBs):

                # Make a node index map.
                pi2n = list(
                    tG.nodes)  # Key order of dictionary. Nodes not sorted.
                pn2i = dict()

                for i, n in enumerate(pi2n):
                    pn2i[n] = i

                # Calculate Q for a vertex set.
                vSet = set(list(tG.nodes))
                Q = QUBO(len(vSet))

                # Apply objective and constraint conditions.
                # Objective
                vEdgeSet = set(list(G.getEdgesForNodes(list(vSet))))

                for i in range(len(vSet)):
                    for j in range(i + 1, len(vSet)):
                        q_ij = 1 if vEdgeSet.issuperset(set(
                            [(i, j)])) else 0  # i, j order?

                        # q_ij(x_i + x_j - 2x_ix_j).
                        Q.addCoeff(i, i, -1 * q_ij)
                        Q.addCoeff(j, j, -1 * q_ij)
                        Q.addCoeff(i, j, 2 * q_ij)

                # Constraint.
                if k > 0:
                    fixedVarsBeingOne \
                        = searchForFixedVarsAsOne(biVarVals, n2i, tG, LBs[k-1], G, ubIdxes[k])

                    for n in fixedVarsBeingOne:
                        Q.addCoeff(pn2i[n], pn2i[n], -1)
                        Q.addConstant(1)

                for ub in ubIdxes[k]:
                    Q.addCoeff(ub, ub, -1)
                    Q.addConstant(1)

                bqm = BinaryQuadraticModel.from_qubo(Q.getQDict(),
                                                     offset=Q.getOffset())

                print('Sample solutions via SA...')
                res = SimulatedAnnealingSampler().sample(bqm)
                res = res.record
                res = res.sample[res.energy == res.energy.min()]

                freq = {}
                for v in res:
                    freq[tuple(v)] = freq.get(tuple(v), 0) + 1

                maxFreqSol = list(freq)[np.argmax(
                    np.asarray(list(freq.values())))]

                # Update binary variable values.
                biVarVals[[
                    n2i[pi2n[idx - 1]] for idx, v in enumerate(maxFreqSol)
                    if v == 1
                ]] = 1  #?
        else:
            # Iterate over the lower bound subgraphs (mirrors the SA branch above);
            # without this loop, tG and k would be undefined in the QA branch.
            for k, tG in enumerate(LBs):

                # Make a node index map.
                pi2n = list(
                    tG.nodes)  # Key order of dictionary. Nodes not sorted.
                pn2i = dict()

                for i, n in enumerate(pi2n):
                    pn2i[n] = i

                # Calculate Q for a vertex set.
                vSet = set(list(tG.nodes))
                Q = QUBO(len(vSet))

                # Apply objective and constraint conditions.
                # Objective
                vEdgeSet = set(list(G.getEdgesForNodes(list(vSet))))

                for i in range(len(vSet)):
                    for j in range(i + 1, len(vSet)):
                        q_ij = 1 if vEdgeSet.issuperset(set(
                            [(i, j)])) else 0  # i, j order?

                        # q_ij(x_i + x_j - 2x_ix_j).
                        Q.addCoeff(i, i, -1 * q_ij)
                        Q.addCoeff(j, j, -1 * q_ij)
                        Q.addCoeff(i, j, 2 * q_ij)

                # Constraint.
                if k > 0:
                    fixedVarsBeingOne \
                        = searchForFixedVarsAsOne(biVarVals, n2i, tG, LBs[k-1], G, ubIdxes[k])

                    for n in fixedVarsBeingOne:
                        Q.addCoeff(pn2i[n], pn2i[n], -1)
                        Q.addConstant(1)

                for ub in ubIdxes[k]:
                    Q.addCoeff(ub, ub, -1)
                    Q.addConstant(1)

                bqm = BinaryQuadraticModel.from_qubo(Q.getQDict(),
                                                     offset=Q.getOffset())

                print('Sample solutions via QA...')
                sampler = EmbeddingComposite(
                    DWaveSampler(endpoint='https://cloud.dwavesys.com/sapi',
                                 token='xxx',
                                 solver='DW_2000Q_2_1'))
                res = sampler.sample(bqm, num_reads=10)
                res = res.record
                res = res.sample[res.energy == res.energy.min()]

                freq = {}
                for v in res:
                    freq[tuple(v)] = freq.get(tuple(v), 0) + 1

                maxFreqSol = list(freq)[np.argmax(
                    np.asarray(list(freq.values())))]

                # Update binary variable values.
                biVarVals[[
                    n2i[pi2n[idx - 1]] for idx, v in enumerate(maxFreqSol)
                    if v == 1
                ]] = 1  #?

        #print(range(len(biVarVals)))
        #print(biVarVals)

        # Rotate LBs and ubIdxes right.
        LBs = [LBs[-1]] + LBs[:-1]
        ubIdxes = [ubIdxes[-1]] + ubIdxes[:-1]

    # Get group1, group2.
    group_1 = []
    group_2 = []

    for i, bit in enumerate(biVarVals):
        if bit == 0:
            group_1.append(i + 1)
        else:
            group_2.append(i + 1)

    # Calculate a max cut value.
    GG1 = G.copy()
    GG2 = G.copy()
    GG1.remove_nodes_from(np.asarray(group_2) - 1)
    GG2.remove_nodes_from(np.asarray(group_1) - 1)

    maxCutVal = calMaxCutVal([GG1, GG2], G)
    #print('Max cut value: ', maxCutVal)

    return group_1, group_2, maxCutVal
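The per-edge term q_ij * (x_i + x_j - 2*x_i*x_j) used above is the standard max-cut QUBO contribution, negated because samplers minimize energy. A self-contained sketch of that formulation on a small networkx graph, using dimod's exact solver instead of the project's QUBO helper class (all names below are illustrative):

from collections import defaultdict

import dimod
import networkx as nx

G_demo = nx.cycle_graph(4)  # 4-cycle; the maximum cut has 4 edges

Q_demo = defaultdict(float)
for i, j in G_demo.edges:
    # Minimize -(x_i + x_j - 2*x_i*x_j) so that cut edges lower the energy.
    Q_demo[(i, i)] += -1
    Q_demo[(j, j)] += -1
    Q_demo[(i, j)] += 2

best = dimod.ExactSolver().sample_qubo(Q_demo).first
print(best.sample, -best.energy)  # partition bits and the cut size (4)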
Example #8
def getLowBoundsViaSAQABasedMinCut(G,
                                   numMaxCutUBNodes=1,
                                   numLBs=2,
                                   numMinCutUBNodes=1,
                                   maxLmtNodes=1024,
                                   numIter=1,
                                   mode=SA):
    '''
        Get lower bounds via SA or QA based minimum cut.
        @param G: Graph model instance of networkx.
        @param numMaxCutUBNodes: Number of nodes in the max cut upper bound.
        @param numLBs: Number of lower bound sets.
        @param numMinCutUBNodes: Number of nodes in the min cut upper bound.
        @param maxLmtNodes: Maximum number of nodes of a lower bound.
        @param numIter: Number of iterations.
        @param mode: SA or QA.
    '''

    # Get an upper bound. Exception?
    maxCutUBNodes = []
    degrees = np.asarray(list(G.degree.values()))
    numNodes = len(degrees)

    for i in range(numMaxCutUBNodes):
        ubNode = degrees[np.argmax(degrees[:, 1]), 0]
        maxCutUBNodes.append(ubNode)
        degrees = degrees[degrees[:, 0] != ubNode]

    # Check exception. Exception?
    if numLBs == 1:
        maxCutLBs = [G]
        return maxCutLBs, maxCutUBNodes

    if (numNodes // numLBs + numNodes % numLBs \
        + numMaxCutUBNodes * numLBs) > maxLmtNodes:
        raise ValueError(
            'numNodes // numLBs + numNodes % numLBs + numMaxCutUBNodes * numLBs <= maxLmtNodes'
        )

    tG = G.copy()  # Graph without an upper bound's nodes.
    tG.remove_nodes_from(maxCutUBNodes)

    # Separate a graph into low bound graphs via digital annealing based minimum cut.
    # Get low bound graphs for minimum cut.
    minCutLBs = []
    numAssignedNodes = numNodes // numLBs - numMaxCutUBNodes  #?

    # Separate a graph randomly.
    for i in range(numLBs - 1):
        ttG = tG.copy()
        nodes1 = np.random.choice(np.asarray(list(tG.nodes)),
                                  numAssignedNodes,
                                  replace=False)
        tG.remove_nodes_from(nodes1)
        nodes2 = list(tG.nodes)
        ttG.remove_nodes_from(nodes2)
        minCutLBs.append(ttG)

    minCutLBs.append(tG)

    # Separate a graph according to the number of iteration
    # And get graphs having a minimum cut value.
    optMinCutLBs = []
    minCutValR = 1.0

    for i in range(numIter):
        for k in range(len(minCutLBs) - 1):

            # Get a pair of graphs.
            partMinCutLBs = [minCutLBs[k], minCutLBs[k + 1]]
            uG = Graph.union(partMinCutLBs[0], partMinCutLBs[1])

            # Get an upper bound. Exception?
            minCutUBNodes = []
            degrees = np.asarray(list(uG.degree.values()))
            numNodes = len(degrees)

            for _ in range(numMinCutUBNodes):
                ubNode = degrees[np.argmax(degrees[:, 1]), 0]
                minCutUBNodes.append(ubNode)
                degrees = degrees[degrees[:, 0] != ubNode]

            # Create a new pair of graphs.
            uuG = uG.copy()
            uuG.remove_nodes_from(minCutUBNodes)
            nodes1 = np.random.choice(np.asarray(list(uuG.nodes)),
                                      len(uuG.nodes) // 2,
                                      replace=False)
            uuG.remove_nodes_from(nodes1)
            nodes2 = np.asarray(list(uuG.nodes))

            partMinCutLBs[0] = uG.copy()
            partMinCutLBs[0].remove_nodes_from(nodes2)
            partMinCutLBs[1] = uG.copy()
            partMinCutLBs[1].remove_nodes_from(nodes1)

            # Conduct min cut.
            biVarVals = np.zeros((uG.number_of_nodes()),
                                 dtype=np.int64)  # Zero index based.

            # Make a node index map.
            i2n = list(uG.nodes)  # Key order of dictionary. Nodes not sorted.
            n2i = dict()

            for idx, n in enumerate(i2n):
                n2i[n] = idx

            # Get indexes of the upper bound's values for each graph.
            ubIdxes = [[], []]

            for ub in minCutUBNodes:
                ubIdxes[0].append((np.asarray(list(
                    partMinCutLBs[0].nodes)) == ub).nonzero()[0][0])
                ubIdxes[1].append((np.asarray(list(
                    partMinCutLBs[1].nodes)) == ub).nonzero()[0][0])

            biVarVals[[n2i[n] for n in minCutUBNodes]] = 1

            if mode == SA:
                for l, pG in enumerate(partMinCutLBs):

                    # Make a node index map.
                    pi2n = list(
                        pG.nodes)  # Key order of dictionary. Nodes not sorted.

                    vSet = set(list(pG.nodes))

                    # Calculate Q.
                    Q = QUBO(pG.number_of_nodes())

                    # Apply objective and constraint conditions.
                    # Objective
                    vEdgeSet = set(list(G.getEdgesForNodes(list(vSet))))

                    for i in range(len(vSet)):
                        for j in range(i + 1, len(vSet)):
                            q_ij = 1 if vEdgeSet.issuperset(set(
                                [(i, j)])) else 0  # i, j order?

                            # q_ij(x_i + x_j - 2x_ix_j).
                            Q.addCoeff(i, i, 1 * q_ij)
                            Q.addCoeff(j, j, 1 * q_ij)
                            Q.addCoeff(i, j, -2 * q_ij)

                    # Constraint.
                    '''
                    v1Vals = biVarVals[np.asarray(v1Set)] #?
                    v1ValsNZIdxs = (v1Vals == 1).nonzero()[0]
                    
                    for i in v1ValsNZIdxs:
                        Q1.addCoeff(i, i, 1)
                    '''
                    for i in range(len(vSet)):
                        Q.addCoeff(i, i, -1 * (2 * int(len(vSet) / 2) - 1))

                    for i in range(len(vSet)):
                        for j in range(i + 1, len(vSet)):
                            Q.addCoeff(i, j, 2)

                    Q.addConstant(int(np.power(len(vSet) / 2, 2)))

                    for ub in ubIdxes[l]:
                        Q.addCoeff(ub, ub, -1)
                        Q.addConstant(1)

                    bqm = BinaryQuadraticModel.from_qubo(Q.getQDict(),
                                                         offset=Q.getOffset())

                    print('Sample solutions via SA...')
                    res = SimulatedAnnealingSampler().sample(bqm)
                    res = res.record
                    res = res.sample[res.energy == res.energy.min()]

                    freq = {}
                    for v in res:
                        freq[tuple(v)] = freq.get(tuple(v), 0) + 1

                    maxFreqSol = list(freq)[np.argmax(
                        np.asarray(list(freq.values())))]

                    # Update binary variable values.
                    biVarVals[[
                        n2i[pi2n[idx]] for idx, v in enumerate(maxFreqSol)
                        if v == 1
                    ]] = 1  #?
            else:
                for l, pG in enumerate(partMinCutLBs):

                    # Make a node index map.
                    pi2n = list(
                        pG.nodes)  # Key order of dictionary. Nodes not sorted.

                    vSet = set(list(pG.nodes))

                    # Calculate Q.
                    Q = QUBO(pG.number_of_nodes())

                    # Apply objective and constraint conditions.
                    # Objective
                    vEdgeSet = set(list(G.getEdgesForNodes(list(vSet))))

                    for i in range(len(vSet)):
                        for j in range(i + 1, len(vSet)):
                            q_ij = 1 if vEdgeSet.issuperset(set(
                                [(i, j)])) else 0  # i, j order?

                            # q_ij(x_i + x_j - 2x_ix_j).
                            Q.addCoeff(i, i, 1 * q_ij)
                            Q.addCoeff(j, j, 1 * q_ij)
                            Q.addCoeff(i, j, -2 * q_ij)

                    # Constraint.
                    '''
                    v1Vals = biVarVals[np.asarray(v1Set)] #?
                    v1ValsNZIdxs = (v1Vals == 1).nonzero()[0]
                    
                    for i in v1ValsNZIdxs:
                        Q1.addCoeff(i, i, 1)
                    '''
                    for i in range(len(vSet)):
                        Q.addCoeff(i, i, -1 * (2 * int(len(vSet) / 2) - 1))

                    for i in range(len(vSet)):
                        for j in range(i + 1, len(vSet)):
                            Q.addCoeff(i, j, 2)

                    Q.addConstant(int(np.power(len(vSet) / 2, 2)))

                    for ub in ubIdxes[l]:
                        Q.addCoeff(ub, ub, -1)
                        Q.addConstant(1)

                    bqm = BinaryQuadraticModel.from_qubo(Q.getQDict(),
                                                         offset=Q.getOffset())

                    print('Sample solutions via QA...')
                    sampler = EmbeddingComposite(
                        DWaveSampler(
                            endpoint='https://cloud.dwavesys.com/sapi',
                            token='xxx',
                            solver='DW_2000Q_2_1'))
                    res = sampler.sample(bqm, num_reads=10)
                    res = res.record
                    res = res.sample[res.energy == res.energy.min()]

                    freq = {}
                    for v in res:
                        freq[tuple(v)] = freq.get(tuple(v), 0) + 1

                    maxFreqSol = list(freq)[np.argmax(
                        np.asarray(list(freq.values())))]

                    # Update binary variable values.
                    biVarVals[[
                        n2i[pi2n[idx]] for idx, v in enumerate(maxFreqSol)
                        if v == 1
                    ]] = 1  #?

            #print(range(len(biVarVals)))
            #print(biVarVals)

            # Get group1, group2.
            group_1 = []
            group_2 = []

            for idx, bit in enumerate(biVarVals):
                if bit == 0:
                    group_1.append(i2n[idx])
                else:
                    group_2.append(i2n[idx])

            minCutLBs[k] = uG.copy()
            minCutLBs[k].remove_nodes_from(group_2)
            minCutLBs[k + 1] = uG.copy()
            minCutLBs[k + 1].remove_nodes_from(group_1)

        # Calculate a min cut value.
        tG = G.copy()  # Graph without an upper bound's nodes.
        tG.remove_nodes_from(maxCutUBNodes)

        minCutVal = calMinCutVal(minCutLBs, tG)
        #print(minCutVal)

        # Select LBs with a less minimum cut value.
        if minCutVal[1] < minCutValR:
            optMinCutLBs = minCutLBs
            minCutValR = minCutVal[1]

        # Rotate minCutLBs right.
        minCutLBs = [minCutLBs[-1]] + minCutLBs[:-1]

    minCutLBs = optMinCutLBs

    # Adjust the number of a low bound's nodes into <= maxLmtNodes - numMaxCutUBNodes.
    for k, g in enumerate(minCutLBs):

        # Check the number of a low bound's nodes.
        if len(list(g.nodes)) + numMaxCutUBNodes > maxLmtNodes:
            numRemoveNodes = maxLmtNodes - (len(list(g.nodes)) +
                                            numMaxCutUBNodes)
        else:
            continue

        # Remove nodes randomly.
        rNodes = np.random.choice(np.asarray(list(g.nodes)),
                                  numRemoveNodes,
                                  replace=False)
        g.remove_nodes_from(rNodes)

    # Add maxCutUBNodes to each minCutLB.
    LBs = []
    for g in minCutLBs:
        tG = G.copy()
        tG.remove_nodes_from(list(g.nodes) + maxCutUBNodes)
        ttG = G.copy()
        ttG.remove_nodes_from(list(tG.nodes))
        LBs.append(ttG)  # Exception?

    for k, g in enumerate(LBs):
        print(k, len(g.nodes))  #?

    return LBs, maxCutUBNodes
Example #9
        ind = get_index(nurse, day)
        Q[ind, ind] += lagrange_soft_nurse * (preference ** 2 - (2 * min_duty_days * preference))

# Off-diagonal terms in soft nurse constraint
# Include only the same nurse, across days
for nurse in range(n_nurses):
    for day1 in range(n_days):
        for day2 in range(day1 + 1, n_days):

            ind1 = get_index(nurse, day1)
            ind2 = get_index(nurse, day2)
            Q[ind1, ind2] += 2 * lagrange_soft_nurse * preference ** 2

# Solve the problem, and use the offset to scale the energy
e_offset = (lagrange_hard_shift * n_days * workforce ** 2) + (lagrange_soft_nurse * n_nurses * min_duty_days ** 2)
bqm = BinaryQuadraticModel.from_qubo(Q, offset=e_offset)
sampler = LeapHybridSampler()
results = sampler.sample(bqm)

# Get the results
smpl = results.first.sample
energy = results.first.energy
print("Size ", size)
print("Energy ", energy)


# Check the results by doing the sums directly
# J sum
sum_j = 0
for i in range(size):
    for j in range(size):
        sum_j += J[i, j] * smpl[i] * smpl[j]
print("Checking Hard nurse constraint ", sum_j)
Example #10
def main(token=None, n_nurses=3, n_days=11, nurses_per_day=1):
    """
    Takes a number of nurses, a number of days, and a wanted capacity
    of nurses per day.
    Also takes a D-Wave authentication token.
    Returns its best attempt at assigning nurses to the required schedule.
    One can think of the output as being a grid of size row x col,
    with a desired number of Xs in any column, while avoiding neighbouring
    Xs in any row.

    This runs on the D-Wave hybrid sampler.
    The solution will be fast and good, but relatively expensive in terms of
    the D-Wave minutes allowed on an account.

    This code includes an implementation of the algorithm described in Ikeda,
    K., Nakamura, Y. & Humble, T.S. Application of Quantum Annealing to Nurse
    Scheduling Problem. Sci Rep 9, 12837 (2019).
    `DOI: 10.1038/s41598-019-49172-3 <https://doi.org/10.1038/s41598-019-49172-3>`_,
    © The Author(s) 2019, use of
    which is licensed under a Creative Commons Attribution 4.0 International
    License

    :param n_nurses: The number of nurses as an integer (number of rows in solution)

    :param n_days: The number of days as an integer (number of columns in solution)

    :param nurses_per_day: The number of desired nurses per day, an integer
        (desired number of Xs in any given column)

    :param token: The Dwave token to be used.
        This should be a string, in the format used on the dwave leap website.

    :return: Returns a dictionary. Keys supported:

        * "Size" a string describing the problem size
        * "Energy" a string describing the energy of the solution
        * "HardNurseConstraint" a string describing the hard nurse constraint energy
        * "HardShiftConstraint" a string describing the hard shift constraint energy
        * "n_days" an integer - the number of days (columns) of the solution
        * "n_nurses" an integer - the number of nurses (rows) of the solution
        * "schedule" - A 2-dimensional array of integers. Lists the exact days each nurse works (The marked columns for each row).
    
    """
    # Overall model variables: problem size
    # binary variable q_nd is the assignment of nurse n to day d
    n_nurses = n_nurses  # count nurses n = 1 ... n_nurses
    n_days = n_days  # count scheduling days as d = 1 ... n_days
    size = n_days * n_nurses

    print("solving with " + str(n_nurses) + " and " + str(n_days) +
          " nurses and days")

    # Parameters for hard nurse constraint
    # a is a positive correlation coefficient for implementing the hard nurse
    # constraint - value provided by Ikeda, Nakamura, Humble
    a = 3.5

    # Parameters for hard shift constraint
    # Hard shift constraint: at least one nurse working every day
    # Lagrange parameter, for hard shift constraint, on workforce and effort
    lagrange_hard_shift = 1.3
    # workforce = 1     # Workforce function W(d) - set to a constant for now
    workforce = nurses_per_day  # Workforce function W(d)
    effort = 1  # Effort function E(n) - set to a constant for now

    # Parameters for soft nurse constraint
    # Soft nurse constraint: all nurses should have approximately even work
    #                        schedules
    # Lagrange parameter, for shift constraints, on work days is called gamma
    # in the paper
    # Minimum duty days 'min_duty_days' - the number of work days that each
    # nurse wants
    # to be scheduled. At present, each will do the minimum on average.
    # The parameter gamma's value suggested by Ikeda, Nakamura, Humble
    lagrange_soft_nurse = 0.3  # Lagrange parameter for soft nurse, gamma
    preference = 1  # preference function - constant for now
    # min_duty_days = int(n_days/n_nurses)
    min_duty_days = int(n_days * workforce / n_nurses)

    # Find composite index into 1D list for (nurse_index, day_index)
    def get_index(nurse_index, day_index):
        return nurse_index * n_days + day_index

    # Inverse of get_index - given a composite index in a 1D list, return the
    # nurse_index and day_index
    def get_nurse_and_day(index):
        nurse_index, day_index = divmod(index, n_days)
        return nurse_index, day_index

    # Hard nurse constraint: no nurse works two consecutive days
    # It does not have Lagrange parameter - instead, J matrix
    # symmetric, real-valued interaction matrix J, whereas all terms are
    # a or zero.
    # composite indices i(n, d) and j(n, d) as functions of n and d
    # J_i(n,d)j(n,d+1) = a and 0 otherwise.
    J = defaultdict(int)
    for nurse in range(n_nurses):
        for day in range(n_days - 1):
            nurse_day_1 = get_index(nurse, day)
            nurse_day_2 = get_index(nurse, day + 1)
            J[nurse_day_1, nurse_day_2] = a

    # Q matrix assign the cost term, the J matrix
    Q = deepcopy(J)

    # Hard shift constraint: at least one nurse working every day
    # The sum is over each day.
    # This constraint tries to make (effort * sum(q_i)) equal to workforce,
    # which is set to a constant in this implementation, so that one nurse
    # is working each day.
    # Overall hard shift constraint:
    # lagrange_hard_shift * sum_d ((sum_n(effort * q_i(n,d)) - workforce) ** 2)
    #
    # with constant effort and constant workforce:
    # = lagrange_hard_shift * sum_d ( effort * sum_n q_i(n,d) - workforce ) ** 2
    # = lagrange_hard_shift * sum_d [ effort ** 2 * (sum_n q_i(n,d) ** 2)
    #                              - 2 effort * workforce * sum_n q_i(n,d)
    #                              + workforce ** 2 ]
    # The constant term is moved to the offset, below, right before we solve
    # the QUBO
    #
    # Expanding and merging the terms ( m is another sum over n ):
    # lagrange_hard_shift * (effort ** 2 - 2 effort * workforce) *
    # sum_d sum_n q_i(n,d)
    # + lagrange_hard_shift * effort ** 2 * sum_d sum_m sum_n q_i(n,d) q_j(m, d) #

    # Diagonal terms in hard shift constraint, without the workforce**2 term
    for nurse in range(n_nurses):
        for day in range(n_days):
            ind = get_index(nurse, day)
            Q[ind, ind] += lagrange_hard_shift * (effort**2 -
                                                  (2 * workforce * effort))

    # Off-diagonal terms in hard shift constraint
    # Include only the same day, across nurses
    for day in range(n_days):
        for nurse1 in range(n_nurses):
            for nurse2 in range(nurse1 + 1, n_nurses):

                ind1 = get_index(nurse1, day)
                ind2 = get_index(nurse2, day)
                Q[ind1, ind2] += 2 * lagrange_hard_shift * effort**2

    # Soft nurse constraint: all nurses should have approximately even work
    #                        schedules
    # This constraint tries to make preference * sum(q_i) equal to min_duty_days,
    # so that the nurses have the same number of days. The sum of the q_i,
    # over the number of days, is each nurse's number of days worked in the
    # schedule.
    # Overall soft nurse constraint:
    # lagrange_soft_nurse * sum_n ((sum_d(preference * q_i(n,d)) - min_duty_days) ** 2)
    # with constant preference and constant min_duty_days:
    # = lagrange_soft_nurse * sum_n ( preference * sum_d q_i(n,d) - min_duty_days ) ** 2
    # = lagrange_soft_nurse * sum_n [ preference ** 2 * (sum_d q_i(n,d) ** 2)
    #                              - 2 preference * min_duty_days * sum_d q_i(n,d)
    #                              + min_duty_days ** 2 ]
    # The constant term is moved to the offset, below, right before we solve
    # the QUBO
    #
    # The square of the the sum_d term becomes:
    # Expanding and merging the terms (d1 and d2 are sums over d):
    # = lagrange_soft_nurse * (preference ** 2 - 2 preference * min_duty_days) * sum_n sum_d q_i(n,d)
    # + lagrange_soft_nurse * preference ** 2 * sum_n sum_d1 sum_d2 q_i(n,d1)
    #                      * q_j(n, d2)

    # Diagonal terms in soft nurse constraint, without the min_duty_days**2 term
    for nurse in range(n_nurses):
        for day in range(n_days):
            ind = get_index(nurse, day)
            Q[ind,
              ind] += lagrange_soft_nurse * (preference**2 -
                                             (2 * min_duty_days * preference))

    # Off-diagonal terms in soft nurse constraint
    # Include only the same nurse, across days
    for nurse in range(n_nurses):
        for day1 in range(n_days):
            for day2 in range(day1 + 1, n_days):

                ind1 = get_index(nurse, day1)
                ind2 = get_index(nurse, day2)
                Q[ind1, ind2] += 2 * lagrange_soft_nurse * preference**2

    # Solve the problem, and use the offset to scale the energy
    e_offset = (lagrange_hard_shift * n_days * workforce**2) + (
        lagrange_soft_nurse * n_nurses * min_duty_days**2)
    bqm = BinaryQuadraticModel.from_qubo(Q, offset=e_offset)

    endpoint = 'https://cloud.dwavesys.com/sapi/'
    client = 'qpu'
    #solver = 'DW_2000Q_5' # Use this to specify a solver, but leave commented out to let D-Wave's system autochoose a solver
    try:
        qpu_sampler = DWaveSampler(client=client,
                                   endpoint=endpoint,
                                   token=token)
        #solver=solver)
    except Exception:
        return {'error': 'Token not accepted'}
    sampler = LeapHybridSampler(token=token)
    results = sampler.sample(bqm, qpu_sampler=qpu_sampler)

    # Get the results
    ret_value = dict()
    smpl = results.first.sample
    energy = results.first.energy
    print("Size ", size)
    print("Energy ", energy)

    ret_value["Size"] = "Size " + str(size)
    ret_value["Energy"] = "Energy " + str(energy)

    # Check the results by doing the sums directly
    # J sum
    # sum_j = 0
    # for i in range(size):
    #     for j in range(size):
    #         sum_j += J[i, j] * smpl[i] * smpl[j]
    # print("Checking Hard nurse constraint ", sum_j)

    # Removed that method, as it created too many empty elements in J
    # This one only iterates over non-zero values in J

    sum_j = 0
    for (i, j), val in J.items():
        sum_j += val * smpl[i] * smpl[j]
    print("Checking Hard nurse constraint ", sum_j)

    ret_value["HardNurseConstraint"] = "Checking Hard nurse constraint " + str(
        sum_j)

    # workforce sum
    sum_w = 0
    for d in range(n_days):
        sum_n = 0
        for n in range(n_nurses):
            sum_n += effort * smpl[get_index(n, d)]
        sum_w += lagrange_hard_shift * (sum_n - workforce) * (sum_n -
                                                              workforce)
    print("Checking Hard shift constraint ", sum_w)

    ret_value["HardShiftConstraint"] = "Checking Hard shift constraint " + str(
        sum_w)

    # min_duty_days sum
    sum_f = 0
    for n in range(n_nurses):
        sum_d = 0
        for d in range(n_days):
            sum_d += preference * smpl[get_index(n, d)]
        sum_f += lagrange_soft_nurse * (sum_d - min_duty_days) * (
            sum_d - min_duty_days)
    print("Checking Soft nurse constraint ", sum_f)

    # Graphics
    sched = [get_nurse_and_day(j) for j in range(size) if smpl[j] == 1]
    str_header_for_output = " " * 11
    str_header_for_output += "  ".join(map(str, range(n_days)))
    print(str_header_for_output)

    schedule_mat = np.zeros((n_nurses, n_days))
    for n in range(n_nurses):
        str_row = ""
        for d in range(n_days):
            if (n, d) in sched:
                outcome = "X"
                schedule_mat[n, d] = 1
            else:
                outcome = " "

            if d > 9:
                outcome += " "
            str_row += "  " + outcome
        print("Nurse ", n, str_row)

    ret_value["n_days"] = n_days
    ret_value["n_nurses"] = n_nurses
    ret_value["schedule"] = [
        np.where(row == 1)[0].tolist() for row in schedule_mat
    ]
    # print(ret_value)
    return ret_value
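A hedged call sketch; the token string is a placeholder, and a valid Leap account is required for the hybrid sampler to run:

result = main(token="YOUR-LEAP-API-TOKEN", n_nurses=3, n_days=7, nurses_per_day=1)
print(result["Energy"])
print(result["schedule"])  # one list of working days per nurse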
Example #11
import dwave.inspector

# Choose gamma so that the intended solution x1 x2 has the lowest energy
gamma = 21

# Define the problem as a matrix of coefficients:
# min[15x1 + 20x2 + 25x3 + gamma(x1+x2+x3-2)^2] -->
# (15-3gamma)x1 + (20-3gamma)x2 + (25-3gamma)x3 + 2gamma*x1x2 + 2gamma*x1x3 + 2gamma*x2x3 + 4gamma
Q = {
    ('x1', 'x1'): 15 - 3 * gamma,
    ('x2', 'x2'): 20 - 3 * gamma,
    ('x3', 'x3'): 25 - 3 * gamma,
    ('x1', 'x2'): 2 * gamma,
    ('x1', 'x3'): 2 * gamma,
    ('x2', 'x3'): 2 * gamma
}

# NOTE: the offset, i.e. the constant term 4*gamma, is included here
# Convert the problem to a BQM
bqm = BinaryQuadraticModel.from_qubo(Q, offset=gamma * 4.0)

# Define the sampler that will be used to run the problem
sampler = EmbeddingComposite(DWaveSampler(solver={'qpu': True}))

# 100 reads are used because the energies are close together; with fewer
# reads the sampler can fall into the wrong minimum
# Run the problem on the sampler and print the results
sampleset = sampler.sample(bqm, num_reads=100)
print(sampleset)

dwave.inspector.show(sampleset)
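A quick brute-force check of the expansion in the comment above; it is purely illustrative and reuses the Q dictionary and gamma already defined in this script:

from itertools import product

for x1, x2, x3 in product((0, 1), repeat=3):
    x = {'x1': x1, 'x2': x2, 'x3': x3}
    qubo_energy = sum(coeff * x[u] * x[v] for (u, v), coeff in Q.items()) + 4 * gamma
    original = 15 * x1 + 20 * x2 + 25 * x3 + gamma * (x1 + x2 + x3 - 2) ** 2
    assert qubo_energy == original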
Example #12
# --------------------------------------------------------------------------#

# This program demonstrates a basic Ocean program that runs a QUBO problem on
# the D-Wave QPU as a binary quadratic model (BQM).

# --------------------------------------------------------------------------#

# Import the functions and packages that are used
from dwave.system import EmbeddingComposite, DWaveSampler
from dimod import BinaryQuadraticModel

# Define the problem as a Python dictionary and convert it to a BQM
Q = {('B','B'): 1,
    ('K','K'): 1,
    ('A','C'): 2,
    ('A','K'): -2,
    ('B','C'): -2}

# Convert the problem to a BQM
bqm = BinaryQuadraticModel.from_qubo(Q)

# Define the sampler that will be used to run the problem
sampler = EmbeddingComposite(DWaveSampler())

# Run the problem on the sampler and print the results
sampleset = sampler.sample(bqm,
                           num_reads = 10,
                           label='Example - Simple Ocean Programs: BQM')
print(sampleset)
Example #13
Q[(1, 3)] = 72
Q[(2, 2)] = -87
Q[(2, 3)] = 72
Q[(3, 3)] = -89

print("\nQUBO:\n")
for i in range(1, 4):
    row = ''
    for j in range(1, 4):
        if (i, j) in Q:
            row += str(Q[(i, j)]) + '\t'
        else:
            row += str(0) + '\t'
    print(row)

qubo_model = BQM.from_qubo(Q)
ising_model = qubo_model.to_ising()

# Pause for the user to hit <enter> to continue
input()
print("\nConverting QUBO to Ising ...")

print("\nIsing:\n")

for i in range(1, 4):
    row = ''
    for j in range(1, 4):
        if j < i:
            row += str(0) + '\t'
        elif j == i:
            row += str(ising_model[0][i]) + '\t'
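The ising_model[0][i] indexing above relies on to_ising() returning a (linear, quadratic, offset) triple; a tiny self-contained illustration of that return value (the QUBO below is made up for the demo):

from dimod import BinaryQuadraticModel as BQM

h, J, offset = BQM.from_qubo({(0, 0): -1, (0, 1): 2, (1, 1): -1}).to_ising()
print(h)       # linear biases:      {0: 0.0, 1: 0.0}
print(J)       # quadratic couplers: {(0, 1): 0.5}
print(offset)  # energy offset:      -0.5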
Example #14
        topology = 'pegasus'  # 'chimera' or 'pegasus'
        sampler = DWaveSampler(solver={
            'topology__type': topology,
            'qpu': True
        })

        embedding = find_embedding(Q.keys(), sampler.edgelist)
        embeddedQ = embed_qubo(Q, embedding, sampler.adjacency)

        ### Energy offset
        e_offset = lagrange_hard_shift * days * workforce(1)**2
        e_offset += lagrange_soft_nurse * nurses * duty_days**2

        ### BQM
        bqm = BinaryQuadraticModel.from_qubo(embeddedQ, offset=e_offset)
        sbqm = BinaryQuadraticModel.from_qubo(Q, offset=e_offset)

        # Sample solution
        print("Connected to {}. N = {}, D = {}".format(sampler.solver.id,
                                                       nurses, days))
        results = sampler.sample(bqm, num_reads=numSampling)
        samples = unembed_sampleset(results,
                                    embedding,
                                    sbqm,
                                    chain_break_fraction=True)

        ### Save data with pickle for analysis and reverse annealing
        fout = "results_%s_N%d_D%d_s%d.p" % (topology, nurses, days,
Example #15
# Construct an automatic embedding over the machine architecture.
_, edgelist, adjacency = sampler.structure
from minorminer import find_embedding
embedding = find_embedding(qubo, edgelist, random_seed=0)  # random_seed=0 ensures the same embedding is generated on every run.


if manual_embed:
	# Pick the method for fixing broken chains.
	from dwave.embedding.chain_breaks import majority_vote # weighted_random
	method = majority_vote
	# Submit the job via an embedded BinaryQuadraticModel.
	from dimod import BinaryQuadraticModel as BQM
	from dwave.embedding import embed_bqm, unembed_sampleset
	# Generate a BQM from the QUBO.
	q = BQM.from_qubo(qubo)
	# Embed the BQM onto the target structure.
	embedded_q = embed_bqm(q, embedding, adjacency) # chain_strength=chain_strength, smear_vartype=dimod.SPIN
	# Collect the sample output.
	response = unembed_sampleset(
	   sampler.sample(embedded_q, num_reads=num_samples),
	   embedding, q, chain_break_method=method,
	   chain_break_fraction=True)
else:
	# Use a FixedEmbeddingComposite if we don't care about chains.
	from dwave.system.composites import FixedEmbeddingComposite
	system_composite = FixedEmbeddingComposite(sampler, embedding)
	response = system_composite.sample_qubo(qubo, num_reads=num_samples)


constant = 0
    def solve(self, R, qubo, samples, exact, verbose, useQPU, useHyb, useNeal,
              useTabu):

        use_QUBO = qubo

        # We obtain the calculations performed at load time
        c_coeffs = self.get_qubo_coeffs(R)

        c_a = c_coeffs['c_a']
        c_b = c_coeffs['c_b']
        c_c = c_coeffs['c_c']
        a1 = c_coeffs['a1']
        a2 = c_coeffs['a2']
        a3 = c_coeffs['a3']

        g_values = self.get_g_values(R)
        g0 = g_values['g0']
        g1 = g_values['g1']
        g2 = g_values['g2']
        g3 = g_values['g3']
        g4 = g_values['g4']
        e = g_values['e']
        g3_addition = g_values['g3add']

        # Solve the equation. First solution is lowest energy
        if use_QUBO:

            #Using QUBO
            Q = defaultdict(float)
            Q[0, 0] = c_a
            Q[0, 1] = c_b
            Q[1, 0] = c_b
            Q[1, 1] = c_a

            #Q = [(2 * a2 - 2 * a3, 2 * a3),(2 * a3,2 * a2 - 2 * a3 )]
            offset = 0

            if (useQPU):
                chain_strength = 4
                if (verbose == True):
                    print("Solving using the DWaveSampler on the QPU...")
                sampler = EmbeddingComposite(
                    DWaveSampler(solver={'qpu': True}))
                sampleset = sampler.sample_qubo(Q,
                                                num_reads=samples,
                                                chain_strength=chain_strength)
            elif (useHyb):
                if (verbose == True):
                    print("Solving using the LeapHybridSolver...")
                time_limit = 3
                bqm = BinaryQuadraticModel.from_qubo(Q, offset=offset)
                sampler = LeapHybridSampler()
                sampleset = sampler.sample(bqm, time_limit=time_limit)
            elif (useNeal):
                if (verbose == True):
                    print("Solving using the Leap SimulatedAnnealing...")
                bqm = BinaryQuadraticModel.from_qubo(Q, offset=offset)
                sampler = neal.SimulatedAnnealingSampler()
                sampleset = sampler.sample(bqm, num_reads=samples)
            else:
                if (verbose == True): print("Solving using the TabuSampler...")
                sampler = TabuSampler()
                bqm = BinaryQuadraticModel.from_qubo(Q, offset=offset)
                sampleset = sampler.sample(bqm, num_reads=samples)

            if (verbose == True): print(sampleset.first.sample)
            if (verbose == True): print(sampleset)

            # Step 3: Get x0 and x1 for first energy result
            for set in sampleset.data():
                x0 = set.sample[0]
                x1 = set.sample[1]
                energy = set.energy
                if (verbose == True): print("x0,x1,ener : ", x0, x1, energy)
                break

            H_b = self.get_energy_from_binary_spins(R, x0, x1)

            Y = 4 * x0 * x1 + (2 * a2 - 2 * a3) * x0 + (
                2 * a2 - 2 * a3) * x1 + a3 - 2 * a2 + a1

            # convert x0,x1 to ising spins
            sz0 = (2 * x0) - 1
            sz1 = (2 * x1) - 1

        else:

            # Using SPIN (ising): H = h_1 * s_1 + h_2 * s_2 + J_{1,2} * s_1 *s_2
            sampler = TabuSampler()
            response = sampler.sample_ising({
                'a': c_a,
                'b': c_a
            }, {('a', 'b'): c_b},
                                            num_reads=samples)

            if (verbose == True): print(response)

            for set in response.data():
                sz0 = set.sample['a']
                sz1 = set.sample['b']
                energy = set.energy
                if (verbose == True):
                    print("sz0,sz1,ener : ", sz0, sz1, energy)
                break

            H_b = self.get_energy_from_ising_spins(R, sz0, sz1)

            # Step 4: Calculate Y = ( a1 + a2( sz0 + sz1 ) + a3 (sz0*sz1))
            Y = (a1 + a2 * (sz0 + sz1) + a3 * (sz0 * sz1))

            # Convert to get x0,x1
            x0 = (sz0 + 1) / 2
            x1 = (sz1 + 1) / 2

        # Get hx1 and hx2 in :
        # hx**2 + 2*g3*hx = Y
        # a = 1, b = 2g3, c = -Y
        a = 1
        b = 2 * g3
        c = -Y

        #print("a,b,c,b**2-4*a*c : ", a,b,c,b**2-4*a*c)

        # Solve H1 for x (minimum of the two possibilities)
        hx1 = (-b + np.sqrt(b**2 - 4 * a * c)) / (2 * a)
        hx2 = (-b - np.sqrt(b**2 - 4 * a * c)) / (2 * a)

        if (hx2 < hx1):
            swp = hx2
            hx2 = hx1
            hx1 = swp

        # Add g3_addition to hx1
        hx1 += g3_addition
        H0_ver = (g1 * sz0) + (g2 * sz1) + (g3 * sz0 * sz1)
        H0 = hx1
        H = H0 + g0

        assert (H_b == H)

        return (H)