def connect_to_dwave():
    """
    Establish a connection to the D-Wave, and use this to talk to a solver.
    We rely on the qOp infrastructure to set the environment variables properly.
    """
    try:
        url = os.environ["DW_INTERNAL__HTTPLINK"]
        token = os.environ["DW_INTERNAL__TOKEN"]
        try:
            proxy = os.environ["DW_INTERNAL__HTTPPROXY"]
        except KeyError:
            proxy = ""
        conn = RemoteConnection(url, token, proxy)
    except KeyError:
        url = "<local>"
        token = "<N/A>"
        conn = local_connection
    except IOError as e:
        qmasm.abend("Failed to establish a remote connection (%s)" % e)
    try:
        qmasm.solver_name = os.environ["DW_INTERNAL__SOLVER"]
    except:
        # Solver was not specified: Use the first available solver.
        qmasm.solver_name = conn.solver_names()[0]
    try:
        qmasm.solver = conn.get_solver(qmasm.solver_name)
    except KeyError:
        qmasm.abend("Failed to find solver %s on connection %s" % (qmasm.solver_name, url))
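A minimal sketch of driving connect_to_dwave() by hand when qOp has not set up the environment; the variable names come from the function above, and the values shown are placeholders.

import os

# Placeholder values; the qOp infrastructure normally sets these for you
os.environ["DW_INTERNAL__HTTPLINK"] = "https://cloud.dwavesys.com/sapi"
os.environ["DW_INTERNAL__TOKEN"] = "YOUR-SAPI-TOKEN"
os.environ["DW_INTERNAL__SOLVER"] = "DW_2000Q_2_1"

connect_to_dwave()
print(qmasm.solver_name)  # solver chosen from the environment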
def sign_in():
    global solver
    global adj

    print('Connecting to DWave')
    remote_connection = RemoteConnection(url, token)
    solver = remote_connection.get_solver(solver_name)
    adj = list(get_hardware_adjacency(solver))
    print('Connected to DWave')
def connectSolver(solver_name):
    '''
    Connects to a solver.

    Parameters
    ----------
    solver_name : str
        Name of solver. Can be 'NASA', 'ISI', or 'DW'.

    Returns
    -------
    solver : DW SAPI solver object
    '''
    # connect to solver
    if solver_name == 'NASA':
        url = 'https://qfe.nas.nasa.gov/sapi'
        token = ''
        remote_connection = RemoteConnection(url, token)
        solver = remote_connection.get_solver('C16')
    elif solver_name == 'ISI':
        url = 'https://usci.qcc.isi.edu/sapi'
        token = ''
        remote_connection = RemoteConnection(url, token)
        solver = remote_connection.get_solver('DW2X')
    elif solver_name == 'DW':
        url = 'https://cloud.dwavesys.com/sapi'
        token = ''
        remote_connection = RemoteConnection(url, token)
        solver = remote_connection.get_solver('DW_2000Q_2_1')
    else:
        raise NameError('Unrecognized solver name')
    return solver
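A brief usage sketch of connectSolver, assuming a valid SAPI token has been filled into the branch you select.

# Hypothetical call; 'ISI' picks the DW2X solver configured above
solver = connectSolver('ISI')
print(solver.properties['chip_id'])  # confirm which chip answered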
def __init__(self, solver_name, url, token, proxy_url=None):
    dimod.TemplateSampler.__init__(self)

    if proxy_url is None:
        self.connection = connection = RemoteConnection(url, token)
    else:
        self.connection = connection = RemoteConnection(url, token, proxy_url)

    self.solver = solver = connection.get_solver(solver_name)
    edges = get_hardware_adjacency(solver)
    self.structure = (set().union(*edges), edges)
def test_async_bad_retry(self):
    from dwave_sapi2.remote import RemoteConnection
    from dwave_sapi2.core import async_solve_qubo, await_completion

    # get a solver
    solver = RemoteConnection().get_solver(solver_name)

    Q = {(0, 5): -float('inf')}
    submitted_problem = async_solve_qubo(solver, Q, num_reads=10)
    # await_completion([submitted_problem], 1, float('inf'))

    self.assertEqual(submitted_problem.status()['remote_status'],
                     RemoteConnection.STATUS_FAILED)
    self.assertEqual(submitted_problem.status()['state'], 'DONE')
    self.assertEqual(submitted_problem.status()['error_type'], 'SOLVE')

    # submitted_problem.retry()
    await_completion([submitted_problem], 1, float('inf'))

    self.assertEqual(submitted_problem.status()['remote_status'],
                     RemoteConnection.STATUS_FAILED)
    self.assertEqual(submitted_problem.status()['state'], 'DONE')
    self.assertEqual(submitted_problem.status()['error_type'], 'SOLVE')
def test_remote_connection_example2(self):
    from dwave_sapi2.remote import RemoteConnection

    # define the url and a valid token
    # url = "http://myURL"
    # token = "myToken001"
    # solver_name = "solver_name"

    # create a remote connection using url and token
    remote_connection = RemoteConnection(url, token)

    # get a solver
    solver = remote_connection.get_solver(solver_name)

    # get solver's properties
    self.assertIsInstance(solver.properties, dict)
def test_async_retry(self):
    from dwave_sapi2.remote import RemoteConnection
    from dwave_sapi2.core import async_solve_qubo, await_completion

    # get a solver
    solver = RemoteConnection().get_solver(solver_name)

    Q = {(0, 5): -10}
    submitted_problem = async_solve_qubo(solver, Q, num_reads=10)
    self.assertEqual(submitted_problem.status()['remote_status'], None)
    self.assertEqual(submitted_problem.status()['state'], 'SUBMITTING')

    # Wait until solved
    await_completion([submitted_problem], 1, float('inf'))

    # display result
    self.is_answer(submitted_problem.result())
    self.assertEqual(submitted_problem.status()['remote_status'],
                     RemoteConnection.STATUS_COMPLETE)
    self.assertEqual(submitted_problem.status()['state'], 'DONE')

    submitted_problem.retry()
    self.assertEqual(submitted_problem.status()['remote_status'], None)
    self.assertEqual(submitted_problem.status()['state'], 'SUBMITTING')

    # Wait until solved
    await_completion([submitted_problem], 1, float('inf'))

    # display result
    self.is_answer(submitted_problem.result())
    self.assertEqual(submitted_problem.status()['remote_status'],
                     RemoteConnection.STATUS_COMPLETE)
    self.assertEqual(submitted_problem.status()['state'], 'DONE')
def test_solve_qubo_example(self):
    from dwave_sapi2.remote import RemoteConnection
    from dwave_sapi2.core import solve_qubo

    # get a solver
    solver = RemoteConnection().get_solver(solver_name)

    # solve qubo problem
    Q = {(0, 5): -10}
    params = {"num_reads": 10}

    answer_1 = solve_qubo(solver, Q, **params)
    self.is_answer(answer_1)

    answer_2 = solve_qubo(solver, Q, num_reads=10)
    self.is_answer(answer_2)
def test_async_solve_qubo_example(self):
    from dwave_sapi2.remote import RemoteConnection
    from dwave_sapi2.core import async_solve_qubo, await_completion

    # get a solver
    solver = RemoteConnection().get_solver(solver_name)

    Q = {(0, 5): -10}
    submitted_problem = async_solve_qubo(solver, Q, num_reads=10)

    # Wait until solved
    await_completion([submitted_problem], 1, float('inf'))

    # display result
    self.is_answer(submitted_problem.result())
def test_solve_ising_example(self):
    from dwave_sapi2.remote import RemoteConnection
    from dwave_sapi2.core import solve_ising

    # get a solver
    solver = RemoteConnection().get_solver(solver_name)

    # solve ising problem
    h = [1, -1, 1, 1, -1, 1, 1]
    J = {(0, 6): -10}
    params = {"num_reads": 10, "num_spin_reversal_transforms": 2}

    answer_1 = solve_ising(solver, h, J, **params)
    self.is_answer(answer_1)

    answer_2 = solve_ising(solver, h, J, num_reads=10)
    self.is_answer(answer_2)
def test_await_completion_example(self):
    from dwave_sapi2.remote import RemoteConnection
    from dwave_sapi2.core import async_solve_ising, await_completion

    # get a solver
    solver = RemoteConnection().get_solver(solver_name)

    h = [1, -1, 1, 1, -1, 1, 1]
    J = {(0, 6): -10}

    p1 = async_solve_ising(solver, h, J, num_reads=10)
    p2 = async_solve_ising(solver, h, J, num_reads=20)

    min_done = 2
    timeout = 1.0
    done = await_completion([p1, p2], min_done, timeout)
    if done:
        self.is_answer(p1.result())
        self.is_answer(p2.result())
def runDW_batch(h, J, embedding, stop_point=0.25, num_reads=1000,
                coupling_init=1.0, coupling_increment=0.1,
                min_solver_calls=1, max_solver_calls=1000, method='vote',
                last=True, num_gauges=1, solver_name='NASA',
                returnProblems=True):
    '''
    Submits a batch of instances to DW. Note that batch submission sometimes
    returns solutions that are markedly different from runDW (no batch);
    generally runDW() seems to be the better choice.

    Parameters
    ----------
    h : list of lists
        Each inner list is a list of fields.
    J : list of dicts
        Keys are tuples corresponding to couplings. Must be the same length as h.
    embedding : list of lists
        Embedding to use; can be generated with the DW SAPI.
    stop_point : float, default: 0.25
        Stop increasing the coupling strength once at least this fraction of
        returned solutions is unbroken.
    num_reads : int, default: 1000
        The number of reads.
    coupling_init : float, default: 1.0
        Initial magnitude of the ferromagnetic coupling between physical qubits
        in a chain. If the fraction of unbroken solutions is below stop_point,
        the magnitude is increased by coupling_increment. Note that although
        coupling_init is specified as positive, the applied coupling is
        negative. For example, with coupling_init=1.0, coupling_increment=0.1,
        and stop_point=0.25, the initial physical chain coupling is -1.0; if
        stop_point is not reached, the next chain strength is -1.1, and so on
        until stop_point is reached.
    coupling_increment : float, default: 0.1
        Increment of the coupling strength.
    min_solver_calls : int, default: 1
        Minimum number of solver calls.
    max_solver_calls : int, default: 1000
        Maximum number of solver calls.
    method : str, 'minimize_energy', 'vote', or 'discard', default: 'vote'
        How to deal with broken chains: 'minimize_energy' uses energy
        minimization decoding, 'vote' uses majority-vote decoding, and
        'discard' drops broken chains.
    last : bool, default: True
        If True, return the last num_reads solutions; if False, return the
        first num_reads solutions.
    num_gauges : int, default: 1
        Number of gauge transformations.
    solver_name : str, 'NASA', 'ISI', or 'DW', default: 'NASA'
        Which solver to use. 'NASA' uses NASA's DW2000Q, 'ISI' uses ISI's DW2X,
        and 'DW' uses D-Wave's DW2000Q.
    returnProblems : bool
        Determines what is returned. If True, return problems and new_emb;
        if False, return solutions only.

    Returns
    -------
    If returnProblems is True, returns problems, new_emb (to be used with
    get_async_sols):
        problems : list of problems from async_solve_ising
        new_emb : list of embeddings returned from embed_problem
    If returnProblems is False, returns the solutions:
        sols : np.array of solutions
    '''
    meths = ['discard', 'vote', 'minimize_energy']
    assert method in meths

    if solver_name == 'NASA':
        url = 'https://qfe.nas.nasa.gov/sapi'
        token = 'NASA-870f7ee194d029923ad8f9cd063de357ba53b838'
        remote_connection = RemoteConnection(url, token)
        solver = remote_connection.get_solver('C16')
    elif solver_name == 'ISI':
        url = 'https://usci.qcc.isi.edu/sapi'
        token = 'QUCB-089028555cb44b4f3da34cd4c6dd4a73ec859bc8'
        remote_connection = RemoteConnection(url, token)
        solver = remote_connection.get_solver('DW2X')
    elif solver_name == 'DW':
        url = 'https://cloud.dwavesys.com/sapi'
        token = 'usc-171bafd63a1b07635fd696db283ad4c28b820d14'
        remote_connection = RemoteConnection(url, token)
        solver = remote_connection.get_solver('DW_2000Q_2_1')
    else:
        raise NameError('Unrecognized solver name')

    A = get_hardware_adjacency(solver)
    h0 = []
    j0 = []
    jc = []
    new_emb = []
    for n in range(len(h)):
        (h0t, j0t, jct, new_embt) = embed_problem(h[n], J[n], embedding, A)
        maxjh = max(max(np.abs(h0t)), max(np.abs(j0t.values())))
        h0t = [el / maxjh for el in h0t]
        j0t = {ij: v / maxjh for ij, v in zip(j0t.keys(), j0t.values())}
        h0.append(h0t)
        j0.append(j0t)
        jc.append(jct)
        new_emb.append(new_embt)

    ncalls = 0
    sols = np.empty(len(h0), dtype=object)
    if isinstance(coupling_init, list):
        l = coupling_init
    else:
        l = [coupling_init] * len(h)
    print(np.unique(l))

    kwargs = {
        'num_reads': num_reads,
        'num_spin_reversal_transforms': num_gauges,
        'answer_mode': 'raw'
    }
    problem = []
    for n in range(len(h0)):
        jct = dict.fromkeys(jc[n], -l[n])
        emb_j = j0[n].copy()
        emb_j.update(jct)
        if solver_name == 'ISI':
            _check_wait()
        problem.append(async_solve_ising(solver, h0[n], emb_j, **kwargs))
    await_completion(problem, len(h), 50000)

    if returnProblems:
        return problem, new_emb

    for n in range(len(h0)):
        answer = problem[n].result()
        sols[n] = np.array(unembed_answer(answer['solutions'], new_emb[n],
                                          broken_chains=method,
                                          h=h[n], j=J[n]), dtype=np.int8)

    # return problem, new_emb
    return np.array(sols)
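A hedged usage sketch of runDW_batch under assumed inputs: two small logical problems sharing one precomputed embedding (for instance from the SAPI find_embedding utility); the problem values here are hypothetical.

# Hypothetical two-instance batch reusing a single embedding computed earlier
h_batch = [[0.5, -0.5], [1.0, -1.0]]
J_batch = [{(0, 1): -1.0}, {(0, 1): 1.0}]
problems, new_emb = runDW_batch(h_batch, J_batch, embedding,
                                num_reads=100, solver_name='DW',
                                returnProblems=True)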
def list_remote_solvers():
    remote_connection = RemoteConnection(sys.argv[1], sys.argv[2])
    solver_names = remote_connection.solver_names()
    print "solvers' names: ", solver_names
def test_remote_connection(self):
    from dwave_sapi2.remote import RemoteConnection

    remote_connection = RemoteConnection(url, token)
    remote_connection = RemoteConnection(url, token, proxy_url)
def connect_to_remote():
    remote_connection = RemoteConnection(sys.argv[1], sys.argv[2])
    solver = remote_connection.get_solver(sys.argv[3])
    return solver
def dwave(pot, states):
    if pot['num vars'] > 0:
        solved = False
        const = 0
        h_ = []
        J_ = {}
        state = []
        free_state = []
        embedding = []
        while not solved:
            try:
                #global solver
                #global adj
                #if solver == 0: sign_in()
                # should try to make it so there is a pool of pool_size
                # connections that the various threads can use
                remote_connection = RemoteConnection(url, token)
                solver = remote_connection.get_solver(solver_name)
                adj = list(get_hardware_adjacency(solver))
                if 'embedding' in pot:
                    const, h_, j, prob_adj = dwave_prepare(pot)
                    embedding = pot['embedding']
                else:
                    # if we're doing a new embedding for each f -> v in state i
                    # message, then we'll have frozen a variable, so we need to
                    # remap the variables; otherwise h will have a 0 for this
                    # variable, but the embedding won't consider it
                    map_vars(pot)
                    const, h_, j, prob_adj = dwave_prepare(pot)
                    while len(embedding) == 0:
                        embedding = find_embedding(prob_adj, adj).values()
                [h, J, chains, embedding] = embed_problem(h_, j, embedding, adj)
                s = 0.50
                h = [a * s for a in h]
                for k in J:
                    J[k] = J[k] * s
                for k in chains:
                    if k in J:
                        J[k] += chains[k]
                    else:
                        J[k] = chains[k]
                # Submit problem
                #print('submitting problem')
                submitted_problems = [
                    async_solve_ising(solver, h, J, num_reads=10000,
                                      num_spin_reversal_transforms=5,
                                      answer_mode='histogram', auto_scale=True)
                ]
                await_completion(submitted_problems, len(submitted_problems), float('180'))
                res = unembed_answer(submitted_problems[0].result()['solutions'],
                                     embedding, 'discard')
                if len(res) > 0:
                    state = array(res[0])
                    solved = True
            except Exception as err:
                print(err)
                solved = False
                #sleep(30)  # wait 30 seconds and retry
        if len(h_) != len(state):
            print(h_, len(h_))
            print(state, len(state))
            print(pot)
        J_, _ = dict_2_mat(j, len(h_))
        energy = h_.dot(state) + state.dot(J_.dot(state.transpose())) + const
        #for v in sorted(free_state):
        #    energy += pot[v]*free_state[v]
        #    state = append(state, free_state[v])
        return energy, state
    else:
        if 'const' in pot:
            return pot['const'], states[0]
        else:
            return 0, states[0]
    return token


# Decodes results
def decodeResults(qubits, mapping):
    result_map = dict()
    result_map['node0'] = qubits[0][mapping['node0']]
    result_map['node1'] = qubits[0][mapping['node1']]
    result_map['node2'] = qubits[0][mapping['node2']]
    return result_map


url = 'https://cloud.dwavesys.com/sapi'
token = getToken()
conn = RemoteConnection(url, token)
solver_name = "DW_2000Q_2_1"

# Couplers for linked qubits along with coupler strength (dictionary)
J = {(0, 4): 1, (0, 5): 1}

# Maps nodes to qubits
nodeQubitMap = dict()
nodeQubitMap['node0'] = 0
nodeQubitMap['node1'] = 4
nodeQubitMap['node2'] = 5

# Bias values; we only want one solution, and only use 6 qubits (list)
# Zero-indexed q0 - q5
#h = [-1,0,0,0,1,1]
# This will get -5.0 energies for num_occurrences
class DWaveSampler(IsingSampler):
    """
    Samples a PGM using D-Wave's quantum annealer.
    """

    def __init__(self):
        self.connection = RemoteConnection(config.DWAVE_SAPI_URL,
                                           config.DWAVE_TOKEN,
                                           config.DWAVE_PROXY)
        self.solver = self.connection.get_solver(config.DWAVE_SOLVER)
        self.adjacency_matrix = get_hardware_adjacency(self.solver)

    def find_best_embedding(self, J, improvements=100, runs=4):
        """
        Since find_embedding is randomized, attempt to find an embedding
        several times and pick the best result.
        """
        # Generate multiple embeddings in parallel
        raw_embeddings = joblib.Parallel(n_jobs=-1)(
            joblib.delayed(find_embedding)(J.keys(), self.adjacency_matrix,
                                           max_no_improvement=improvements)
            for i in range(runs))
        embeddings = [Embedding(e) for e in raw_embeddings]

        # Pick the best embedding
        best_embedding = None
        for embedding in embeddings:
            embedding_is_superior = best_embedding is None or any([
                best_embedding.max_chain_length > embedding.max_chain_length,
                best_embedding.avg_chain_length > embedding.avg_chain_length
                and best_embedding.max_chain_length == embedding.max_chain_length,
                best_embedding.no_qubits > embedding.no_qubits
                and best_embedding.max_chain_length == embedding.max_chain_length
                and best_embedding.avg_chain_length == embedding.avg_chain_length
            ])
            if embedding_is_superior:
                best_embedding = embedding
                self.info(
                    "New embedding: {embedding.no_qubits}, max chain: {embedding.max_chain_length}, avg chain: {embedding.avg_chain_length}"
                    .format(embedding=embedding))
        return best_embedding

    def query_dwave(self, h, J, embedding, samples, temperature, batch_size):
        """
        Queries D-Wave multiple times for solutions of the given Ising model,
        aggregating the unembedded results.
        """
        results = {
            'energies': [],
            'solutions': [],
            'num_occurrences': [],
            'timing': []
        }
        num_batches = samples // batch_size
        for i in range(num_batches):
            batch_solved = False
            while not batch_solved:
                try:
                    self.info("Sampling batch {i}".format(i=i))
                    batch = solve_ising(self.solver, h, J,
                                        answer_mode='histogram',
                                        auto_scale=True,
                                        num_reads=batch_size,
                                        num_spin_reversal_transforms=5,
                                        beta=1.0 / float(temperature),
                                        postprocess='sampling',
                                        chains=embedding)
                    batch_solved = True
                    self.info("Done")
                except Exception as e:
                    self.verbose(str(e))
                    self.info("Exception occurred, retrying...")

            # Collect the batch results
            batch_solutions = unembed_answer(batch['solutions'], embedding,
                                             broken_chains='vote')
            results['solutions'].extend(batch_solutions)
            results['num_occurrences'].extend(batch['num_occurrences'])

        # Aggregate the same unembedded answers
        aggregated = defaultdict(dict)
        data = zip(results['solutions'], results['num_occurrences'])
        for result, count in data:
            key = tuple(result)
            result_data = aggregated.get(key, {})
            # Recompute the average solution energy
            prior_count = result_data.get('count', 0)
            result_data['count'] = prior_count + count
            aggregated[key] = result_data

        # Return as a sorted list
        aggregated_list = [(key, value['count'])
                           for key, value in aggregated.items()]
        return list(sorted(aggregated_list, key=lambda x: x[1]))

    def sample(self, model, num_samples, temperature=1, batch_size=None,
               embedding=None):
        # Determine the batch size
        batch_size = batch_size or min(10000, num_samples)

        # Extract the model and get h and J formatted for D-Wave API
        h_dwave, J_dwave = model.as_dwave()

        # Find the embedding
        if embedding is None:
            embedding = self.find_best_embedding(J_dwave).data
        #self.info(embedding)

        # Transform J and h using the found graph embedding
        # embed_problem can still make some changes to the embedding
        h_embedded, J_embedded, J_couplings, final_embedding = embed_problem(
            h_dwave, J_dwave, embedding, adj=self.adjacency_matrix,
            h_range=(-2, 2), j_range=(-1, 1))

        # Compute max coefficient
        max_coefficient = max([
            abs(max(h_embedded)),
            abs(min(h_embedded)),
            abs(max(J_embedded.values())),
            abs(min(J_embedded.values()))
        ])

        # Update J matrix
        J_embedded.update(
            {key: -1.0 * max_coefficient for key in J_couplings.keys()})

        results = self.query_dwave(h_embedded, J_embedded, final_embedding,
                                   num_samples, temperature, batch_size)
        samples = [
            IsingSample(model, assignment, occurences)
            for (assignment, occurences) in results
        ]

        # Return as a sorted list
        sorted_solutions = SamplePool(samples)
        return sorted_solutions
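A short usage sketch for DWaveSampler; the config module, the IsingSampler base class, and a model object exposing as_dwave() are assumed to be provided by the surrounding project.

# Hypothetical driver: draw 1000 samples from an already-constructed model
sampler = DWaveSampler()
pool = sampler.sample(model, num_samples=1000, temperature=1)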
def main(args):
    if args.input_file == None:
        data = json.load(sys.stdin)
    else:
        with open(args.input_file) as file:
            data = json.load(file)

    bqpjson.validate(data)

    if data['variable_domain'] != 'spin':
        print_err('only spin domains are supported. Given %s' % data['variable_domain'])
        quit()

    if data['scale'] != 1.0:
        print_err('A non-one scaling value is not yet supported. Given %s' % data['scale'])
        quit()

    if data['offset'] != 0.0:
        print_err('A non-zero offset value is not yet supported. Given %s' % data['offset'])
        quit()

    # A core assumption of this solver is that the given bqpjson data will
    # magically be compatible with the given D-Wave QPU
    dw_url = args.dw_url
    dw_tokens = [args.dw_token]
    dw_solver_name = args.dw_solver_name
    dw_chip_id = None

    if 'dw_url' in data['metadata']:
        dw_url = data['metadata']['dw_url'].encode('ascii', 'ignore')
        print_err('using d-wave url provided in data file: %s' % dw_url)

    if 'dw_solver_name' in data['metadata']:
        dw_solver_name = data['metadata']['dw_solver_name'].encode('ascii', 'ignore')
        print_err('using d-wave solver name provided in data file: %s' % dw_solver_name)

    if 'dw_chip_id' in data['metadata']:
        dw_chip_id = data['metadata']['dw_chip_id'].encode('ascii', 'ignore')
        print_err('found d-wave chip id in data file: %s' % dw_chip_id)

    if hasattr(args, 'dw_tokens') and args.dw_tokens != None:
        dw_tokens = args.dw_tokens

    if dw_url is None or dw_tokens[0] is None or dw_solver_name is None:
        print_err('d-wave solver parameters not found')
        quit()

    remote_connections = []
    for dw_token in dw_tokens:
        if args.dw_proxy is None:
            remote_connections.append(RemoteConnection(dw_url, dw_token))
        else:
            remote_connections.append(RemoteConnection(dw_url, dw_token, args.dw_proxy))

    solvers = [rc.get_solver(dw_solver_name) for rc in remote_connections]

    if not dw_chip_id is None:
        if solvers[0].properties['chip_id'] != dw_chip_id:
            print_err('WARNING: chip ids do not match.  data: %s  hardware: %s' %
                      (dw_chip_id, solvers[0].properties['chip_id']))

    solution_metadata = {
        'dw_url': dw_url,
        'dw_solver_name': dw_solver_name,
        'dw_chip_id': solvers[0].properties['chip_id'],
    }

    h = [0] * (max(data['variable_ids']) + 1)
    for lt in data['linear_terms']:
        i = lt['id']
        assert i < len(h)
        h[i] = lt['coeff']

    J = {}
    for qt in data['quadratic_terms']:
        i = qt['id_tail']
        j = qt['id_head']
        assert (i, j) not in J
        J[(i, j)] = qt['coeff']

    params = {
        'auto_scale': False,
        'annealing_time': args.annealing_time,
        'num_reads': args.solve_num_reads
    }

    if args.spin_reversal_transform_rate != None:
        params['num_spin_reversal_transforms'] = args.solve_num_reads / args.spin_reversal_transform_rate

    print_err('')
    print_err('total num reads: {}'.format(args.num_reads))
    print_err('d-wave parameters:')
    for k, v in params.items():
        print_err('  {} - {}'.format(k, v))

    print_err('')
    print_err('starting collection:')
    submitted_problems = []
    num_reads_remaining = args.num_reads
    problem_index = 0
    while num_reads_remaining > 0:
        num_reads = min(args.solve_num_reads, num_reads_remaining)
        params['num_reads'] = num_reads

        print_err('  submit {} of {} remaining'.format(num_reads, num_reads_remaining))

        solver_index = problem_index % len(solvers)
        submitted_problems.append({
            'problem': async_solve_ising(solvers[solver_index], h, J, **params),
            'start_time': datetime.datetime.utcnow(),
            'params': {k: v for k, v in params.items()}
        })
        num_reads_remaining -= num_reads
        problem_index += 1

    #answers = solve_ising(solver, h, J, **params)

    print_err('  waiting...')
    solutions_all = None
    for i, submitted_problem in enumerate(submitted_problems):
        problem = submitted_problem['problem']
        await_completion([problem], 1, float('inf'))
        print_err('  collect {} of {} solves'.format(i + 1, len(submitted_problems)))
        answers = problem.result()
        solutions = answers_to_solutions(answers, data['variable_ids'],
                                         submitted_problem['start_time'],
                                         datetime.datetime.utcnow(),
                                         submitted_problem['params'],
                                         solution_metadata)
        if solutions_all != None:
            combis.combine_solution_data(solutions_all, solutions)
        else:
            solutions_all = solutions

    combis.merge_solution_counts(solutions_all)

    print_err('')
    total_collected = sum(solution['num_occurrences'] for solution in solutions_all['solutions'])
    print_err('total collected: {}'.format(total_collected))
    for i, solution in enumerate(solutions_all['solutions']):
        print_err('  %f - %d' % (solution['energy'], solution['num_occurrences']))
        if i >= 50:
            print_err('  first 50 of {} solutions'.format(len(solutions_all['solutions'])))
            break
    assert total_collected == args.num_reads
    print_err('')

    solutions_all['collection_start'] = solutions_all['collection_start'].strftime(combis.TIME_FORMAT)
    solutions_all['collection_end'] = solutions_all['collection_end'].strftime(combis.TIME_FORMAT)

    if args.pretty_print:
        print(json.dumps(solutions_all, **json_dumps_kwargs))
    else:
        print(json.dumps(solutions_all))
from dwave_sapi2.core import solve_ising
from dwave_sapi2.embedding import find_embedding, embed_problem, unembed_answer
from dwave_sapi2.util import get_hardware_adjacency
from dwave_sapi2.remote import RemoteConnection

# In order to connect to the D-Wave Solver API you will need a valid API token
# for their SAPI solver, the SAPI URL, and you need to decide which quantum
# processor you want to use:
DWAVE_SAPI_URL = 'https://cloud.dwavesys.com/sapi'
DWAVE_TOKEN = 'your D-Wave API token'
DWAVE_SOLVER = 'DW_2000Q_VFYC_1'

# define h as a list and J as a dictionary:
J = {(0, 4): 1, (4, 3): 1, (3, 7): 1, (7, 0): 1}
h = [-1, 0, 0, 0, 0, 0, 0, 0]  # h has 8 entries since we use qubits 0 to 7.

# We now establish a connection to the Solver API and request the
# D-Wave 2000Q VFYC solver:
connection = RemoteConnection(DWAVE_SAPI_URL, DWAVE_TOKEN)
solver = connection.get_solver(DWAVE_SOLVER)

# define the number of readouts and choose answer_mode to be "histogram",
# which already sorts the results by the number of occurrences:
params = {"answer_mode": 'histogram', "num_reads": 10000}
results = solve_ising(solver, h, J, **params)
print results

'''
following result
{
    'timing': {
        'total_real_time': 1655206,
        'anneal_time_per_run': 20,
        'post_processing_overhead_time': 13588,
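For illustration, a hedged sketch of inspecting the returned dictionary; the 'solutions', 'energies', and 'num_occurrences' keys follow the SAPI answer format used in the other snippets here.

# Look at the first histogram entry and the spins of the qubits used in J
first = results['solutions'][0]
print(results['energies'][0])
print(results['num_occurrences'][0])
print([first[q] for q in (0, 3, 4, 7)])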
def dwave_embed(pot, overkill=True):
    #global solver
    #global adj
    #if solver == 0: sign_in()
    remote_connection = RemoteConnection(url, token)
    solver = remote_connection.get_solver(solver_name)
    adj = list(get_hardware_adjacency(solver))

    #print('Connecting to DWave')
    #remote_connection = RemoteConnection('https://qfe.nas.nasa.gov/sapi', 'NASA-f73f6a756b922f9ebfcb6127740bec11bf986527')
    #solver = remote_connection.get_solver('C16')
    #adj = list(get_hardware_adjacency(solver))

    const, h_, j, prob_adj = dwave_prepare(pot)

    embedding = []
    if overkill:
        emb = {}
        beta = 2
        max_length = 10e9
        try:
            while emb == {} or max_length > 4:
                for i in range(3):
                    emb_ = find_embedding(prob_adj, adj, max_beta=beta)
                    # only take this embedding if it has a shorter max length
                    if emb_ != {} and max([len(c) for c in emb_.values()]) < max_length:
                        emb = emb_.copy()
                        max_length = max([len(c) for c in emb.values()])
                    if max_length < 4:
                        break
                if beta > 64:
                    emb_ = find_embedding(prob_adj, adj, tries=100)
                    if emb_ != {} and max([len(c) for c in emb_.values()]) < max_length:
                        emb = emb_.copy()
                        max_length = max([len(c) for c in emb.values()])
                    break
                beta = beta * 2
        except RuntimeError as err:
            print(err)
            emb = find_embedding(prob_adj, adj)

        if emb == {}:
            print('Unable to find embedding for problem')
            return [False] * 4
        else:
            print('Found an embedding')
            embedding = emb.values()
    else:
        while len(embedding) == 0:
            embedding = find_embedding(prob_adj, adj).values()

    remote_connection = 0
    solver = 0
    adj = 0
    return embedding
def get_qpu(url, token, proxy, solver_name, hardware_chimera_degree):
    chip_id = None
    cell_size = 8

    if not url is None and not token is None and not solver_name is None:
        print_err('QPU connection details found, accessing "{}" at "{}"'.format(solver_name, url))
        if proxy is None:
            remote_connection = RemoteConnection(url, token)
        else:
            remote_connection = RemoteConnection(url, token, proxy)
        solver = remote_connection.get_solver(solver_name)

        couplers = solver.properties['couplers']
        couplers = set([tuple(coupler) for coupler in couplers])
        sites = solver.properties['qubits']

        solver_chimera_degree = int(math.ceil(math.sqrt(len(sites) / cell_size)))
        if hardware_chimera_degree != solver_chimera_degree:
            print_err('Warning: the hardware chimera degree was specified as {}, while the solver {} has a degree of {}'.format(
                hardware_chimera_degree, solver_name, solver_chimera_degree))
            hardware_chimera_degree = solver_chimera_degree

        site_range = Range(*solver.properties['h_range'])
        coupler_range = Range(*solver.properties['j_range'])
        chip_id = solver.properties['chip_id']
    else:
        print_err('QPU connection details not found, assuming full yield square chimera of degree {}'.format(hardware_chimera_degree))

        site_range = Range(-2.0, 2.0)
        coupler_range = Range(-1.0, 1.0)

        # the hard-coded 4 here assumes a 4x2 unit cell
        arcs = get_chimera_adjacency(hardware_chimera_degree, hardware_chimera_degree, cell_size / 2)

        # turn arcs into couplers
        # this step is necessary to be consistent with the solver.properties['couplers'] data
        couplers = []
        for i, j in arcs:
            assert i != j
            if i < j:
                couplers.append((i, j))
            else:
                couplers.append((j, i))
        couplers = set(couplers)

        sites = set([coupler[0] for coupler in couplers] +
                    [coupler[1] for coupler in couplers])

    # sanity check on coupler consistency across both branches
    for i, j in couplers:
        assert i < j

    return ChimeraQPU(sites, couplers, cell_size, hardware_chimera_degree,
                      site_range, coupler_range, chip_id=chip_id)
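As a quick sanity check of the chimera-degree arithmetic in get_qpu, a tiny sketch with hypothetical numbers (a fully yielded 2048-site chip with 8-qubit cells):

import math

cell_size = 8
num_sites = 2048  # hypothetical fully yielded chip
degree = int(math.ceil(math.sqrt(num_sites / float(cell_size))))
print(degree)  # 16, i.e. a C16 chimera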
class DWSolveQUBO:
    def __init__(self, qubo, qubo_dict):
        # INITIALIZATION:
        # get qubo and qubo_dict from QUBO_linear.py
        self.qubo = qubo
        self.qubo_dict = qubo_dict

        # D-Wave remote connection
        self.url = 'https://cloud.dwavesys.com/sapi'
        with open('./apikey.txt') as apikeyfile:
            apikey = apikeyfile.readline()
        self.token = apikey

        # create a remote connection
        self.conn = RemoteConnection(self.url, self.token)

        # NB auto_scale is set TRUE so you SHOULD NOT have to rescale the h and J
        # (manual rescaling is optional and included in this program.)
        # answer_mode: raw, histogram
        self.params = {
            "annealing_time": 1,
            "answer_mode": "raw",
            "auto_scale": True,
            "postprocess": "",
            "num_reads": 2000,
            "num_spin_reversal_transforms": 10
        }
        print(self.params)

        # get the solver
        self.solver = self.conn.get_solver('DW_2000Q_2_1')

        # EMBEDDING CONTROLS:
        # this logical value indicates whether to clean up the embedding, i.e.
        # remove physical variables that are adjacent to a single variable in
        # the same chain or not adjacent to any variables in other chains.
        self.clean = False
        # this logical value indicates whether to smear an embedding to increase
        # the chain size so that the h values do not exceed the scale of J values
        # relative to h_range and J_range respectively.
        self.smear = False
        # a list representing the range of h values; only used when smear = TRUE
        self.h_range = [-1, 1]
        self.J_range = [-1, 1]

        # SOLVE_ISING VARIABLES:
        # the hardware adjacency matrix
        self.Adjacency = None
        # the embedding
        self.Embedding = None

        # D-WAVE VARIABLES:
        # h is the vector containing the linear ising coefficients
        self.h = None
        self.h_max = None
        # J is the matrix containing the quadratic ising coefficients in
        # dictionary form, where each qubit and coupler value is assigned to
        # qubits on the physical hardware
        self.J = None
        self.J1 = None
        # ising_offset is a constant which shifts all ising energies
        self.ising_offset = None
        # embedded h values
        self.h0 = None
        self.h1 = None
        # embedded J values
        self.j0 = None
        # strong output variable couplings
        self.jc = None
        # what the d-wave returns from the solve_ising method
        self.dwave_return = None
        # the unembedded version of what the d-wave returns
        self.unembed = None
        # ising answer
        self.ising_ans = None
        self.ising_energies = None
        self.h_energy = None
        self.J_energy = None

        # QUBO RESULT VARIABLES:
        # qubo answer
        self.qubo_ans = None
        self.qubo_energy = None
        self.dwave_energies = None

    def solvequbo(self):
        # EMBEDDING:
        # gets the hardware adjacency for the solver in use.
        self.Adjacency = get_hardware_adjacency(self.solver)
        # gets the embedding for the D-Wave hardware
        self.Embedding = find_embedding(self.qubo_dict, self.Adjacency)

        # CONVERSIONS AND RESCALING:
        # convert qubo to ising
        (self.h, self.J, self.ising_offset) = qubo_to_ising(self.qubo_dict)

        # Even though auto_scale = TRUE, we are rescaling values
        # Normalize h and J to be between +/-1
        self.h_max = max(map(abs, self.h))
        if len(self.J.values()) > 0:
            j_max = max([abs(x) for x in self.J.values()])
        else:
            j_max = 1
        # In [0,1], this scales down J values to be less than jc
        j_scale = 0.8

        # Use the largest value
        if self.h_max > j_max:
            j_max = self.h_max

        # This is the actual scaling
        rescale = j_scale / j_max
        self.h1 = map(lambda x: rescale * x, self.h)
        if len(self.J.values()) > 0:
            self.J1 = {key: rescale * val for key, val in self.J.items()}
        else:
            self.J1 = self.J

        # EMBEDDING:
        # gets the hardware adjacency for the solver in use.
        self.Adjacency = get_hardware_adjacency(self.solver)
        # gets the embedding for the D-Wave hardware
        self.Embedding = find_embedding(self.qubo_dict, self.Adjacency)
        # Embed the rescaled values into the hardware graph
        [self.h0, self.j0, self.jc, self.Embedding] = embed_problem(
            self.h1, self.J1, self.Embedding, self.Adjacency,
            self.clean, self.smear, self.h_range, self.J_range)
        # embed_problem returns two J's: one for the biases from your problem,
        # one for the chains.
        self.j0.update(self.jc)

        # SOLVE PROBLEM ON D-WAVE:
        # generate the embedded solution to the ising problem.
        self.dwave_return = solve_ising(self.solver, self.h0, self.j0, **self.params)
        #print("dwave_return")
        #print(self.dwave_return['solutions'])
        # the unembedded answer to the ising problem.
        unembed = np.array(unembed_answer(self.dwave_return['solutions'],
                                          self.Embedding,
                                          broken_chains="minimize_energy",
                                          h=self.h, j=self.J))  #[0]
        # convert ising string to qubo string
        ising_ans = [list(filter(lambda a: a != 3, unembed[i]))
                     for i in range(len(unembed))]

        # Because the problem is unembedded, the energy will differ between
        # the embedded and unembedded problem.
        # ising_energies = dwave_return['energies']
        self.h_energy = [sum(self.h1[v] * val for v, val in enumerate(unembed[i]))
                         for i in range(len(unembed))]
        self.J_energy = [sum(self.J1[(u, v)] * unembed[i, u] * unembed[i, v]
                             for u, v in self.J1)
                         for i in range(len(unembed))]
        self.ising_energies = np.array(self.h_energy) + np.array(self.J_energy)

        # CONVERT ANSWER WITH ENERGY TO QUBO FORM:
        # Rescale and add back in the ising_offset and another constant
        self.dwave_energies = self.ising_energies / rescale + self.ising_offset
        #[map(lambda x: (x / rescale + self.ising_offset), self.ising_energies[i]) for i in range(len(self.ising_energies))]

        # QUBO RESULTS:
        self.qubo_ans = (np.array(ising_ans) + 1) / 2
        #[map(lambda x: (x + 1) / 2, ising_ans[i]) for i in range(len(ising_ans))]
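A hedged driver sketch for DWSolveQUBO; qubo and qubo_dict are assumed to come from QUBO_linear.py as the comments above indicate, and apikey.txt must hold a valid SAPI token.

# Hypothetical usage of the class above
dw = DWSolveQUBO(qubo, qubo_dict)  # inputs built elsewhere (QUBO_linear.py)
dw.solvequbo()
print(dw.qubo_ans[0])          # first returned QUBO bit string
print(dw.dwave_energies[0])    # its energy, shifted back to the original scale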