# Example #1
# 0
def solve_kcoloring_qaoa(graph, solve_type, k):
    """Solve a graph k-coloring problem with a depth-1 QAOA run.

    Builds the cost operator for *graph* with *k* colors, selects the
    compute backend named by *solve_type*, and runs QAOA with a
    Nelder-Mead classical optimizer, returning the raw algorithm result.
    """
    compute_backend = get_compute_backend(solve_type)
    cost_operator, _offset = get_operator(graph, k)
    # NOTE(review): the mixing operator is built but never passed to QAOA;
    # the call is kept to preserve any side effects it may have — confirm
    # whether it should be wired into the QAOA constructor.
    _mixer = get_mixing_operator(graph, k)
    algorithm = QAOA(cost_operator, p=1, optimizer=NELDER_MEAD(maxfev=1000))
    return algorithm.run(QuantumInstance(compute_backend))
# Example #2
# 0
def getOptimiser(name="SPSA", params=None):
    """Build a classical optimiser instance selected by (sub)name.

    Args:
        name: Optimiser identifier, matched by substring — one of
            "SPSA", "COBYLA", "L_BFGS_B", "P_BFGS", "NELDER_MEAD", "SLSQP".
        params: Dict of optimiser-specific settings; see the per-branch
            comments below for the keys each optimiser reads.

    Returns:
        The configured optimiser object, or None if *name* matches none
        of the known optimisers.

    Raises:
        KeyError: If *params* is missing a key the chosen optimiser needs.
    """
    # Fix: a mutable default argument (params={}) is shared across calls;
    # use the None sentinel and create a fresh dict per invocation.
    if params is None:
        params = {}
    optimiser = None
    if 'SPSA' in name:
        #max_trials (int) – Maximum number of iterations to perform.
        #save_steps (int) – Save intermeditate info every save_steps step.
        #last_avg (int) – Averged parameters over the last_avg iterations. If last_avg = 1, only the last iteration is considered.
        #c0 (float) – The initial a. Step size to update paramters.
        #c1 (float) – The initial c. The step size used to approximate gradient.
        #c2 (float) – The alpha in the paper, and it is used to adjust a (c0) at each iteration.
        #c3 (float) – The gamma in the paper, and it is used to adjust c (c1) at each iteration.
        #c4 (float) – The parameter used to control a as well.
        #skip_calibration (bool) – skip calibration and use provided c(s) as is.
        optimiser = SPSA(
            max_trials=params["max_trials"],
            save_steps=params["save_steps"],
        )
    elif 'COBYLA' in name:
        #maxiter (int) – Maximum number of function evaluations.
        #disp (bool) – Set to True to print convergence messages.
        #rhobeg (float) – Reasonable initial changes to the variables.
        #tol (float) – Final accuracy in the optimization (not precisely guaranteed). This is a lower bound on the size of the trust region.
        optimiser = COBYLA(maxiter=params["maxiter"], disp=True)
    elif 'L_BFGS_B' in name:
        #maxfun (int) – Maximum number of function evaluations.
        #maxiter (int) – Maximum number of iterations.
        #factr (float) – The iteration stops when (f^k - f^{k+1})/max{|f^k|, |f^{k+1}|,1} <= factr * eps, where eps is the machine precision, which is automatically generated by the code. Typical values for factr are: 1e12 for low accuracy; 1e7 for moderate accuracy; 10.0 for extremely high accuracy. See Notes for relationship to ftol, which is exposed (instead of factr) by the scipy.optimize.minimize interface to L-BFGS-B.
        #iprint (int) – Controls the frequency of output. iprint < 0 means no output; iprint = 0 print only one line at the last iteration; 0 < iprint < 99 print also f and |proj g| every iprint iterations; iprint = 99 print details of every iteration except n-vectors; iprint = 100 print also the changes of active set and final x; iprint > 100 print details of every iteration including x and g.
        #epsilon (float) – Step size used when approx_grad is True, for numerically calculating the gradient
        optimiser = L_BFGS_B(
            #maxfun=params["maxfun"],
            maxiter=params["maxiter"])

    elif 'P_BFGS' in name:
        optimiser = P_BFGS(maxfun=params["maxfun"])
    elif 'NELDER_MEAD' in name:
        #maxiter (int) – Maximum allowed number of iterations. If both maxiter and maxfev are set, minimization will stop at the first reached.
        #maxfev (int) – Maximum allowed number of function evaluations. If both maxiter and maxfev are set, minimization will stop at the first reached.
        #disp (bool) – Set to True to print convergence messages.
        #xatol (float) – Absolute error in xopt between iterations that is acceptable for convergence.
        #tol (float or None) – Tolerance for termination.
        #adaptive (bool) – Adapt algorithm parameters to dimensionality of problem.
        optimiser = NELDER_MEAD(maxiter=params["maxiter"], disp=True)
    elif 'SLSQP' in name:
        #maxiter (int) – Maximum number of iterations.
        #disp (bool) – Set to True to print convergence messages.
        #ftol (float) – Precision goal for the value of f in the stopping criterion.
        #tol (float or None) – Tolerance for termination.
        #eps (float) – Step size used for numerical approximation of the Jacobian.
        optimiser = SLSQP(maxiter=params["maxiter"])

    print("Optimising with {0} - {1}".format(name, optimiser))
    return optimiser
# Example #3
# 0
    def test_qaoa_random_initial_point(self):
        """ QAOA random initial point """
        aqua_globals.random_seed = 10598
        seed = aqua_globals.random_seed
        # Random 5-node graph, reproducible via the fixed seed.
        graph = nx.fast_gnp_random_graph(5, 0.5, seed=seed)
        weights = nx.adjacency_matrix(graph).toarray()
        qubit_op, _ = max_cut.get_operator(weights)
        algorithm = QAOA(qubit_op, NELDER_MEAD(disp=True), 1)

        instance = QuantumInstance(BasicAer.get_backend('qasm_simulator'),
                                   seed_simulator=seed,
                                   seed_transpiler=seed,
                                   shots=4096)
        _ = algorithm.run(instance)

        # Optimal angles are pinned for the fixed seeds chosen above.
        np.testing.assert_almost_equal([2.5179, 0.3528],
                                       algorithm.optimal_params,
                                       decimal=4)
# Example #4
# 0
 def test_nelder_mead(self):
     """ nelder mead test """
     # Run the shared optimisation fixture with Nelder-Mead capped at
     # 10000 function evaluations, then check the evaluation count
     # reported in slot 2 of the result stayed within that cap.
     opt = NELDER_MEAD(maxfev=10000, tol=1e-06)
     outcome = self._optimize(opt)
     self.assertLessEqual(outcome[2], 10000)
    # NOTE(review): fragment of a larger function — `adapt_op_df`, `number`,
    # `num_qubits`, `rev`, and `reverse` come from the enclosing scope,
    # which is not visible in this chunk.
    adapt_op_dict = adapt_op_df.to_dict()
    # Column 'Ham_<number>' maps row labels to Pauli-string names.
    op_label_list = list(adapt_op_dict['Ham_{}'.format(number)])
    op_list = []
    for label in op_label_list:
        op_name = adapt_op_dict['Ham_{}'.format(number)][label]
        print(op_name)
        # Truncate the label to the qubit count before building the Pauli,
        # then wrap it as a WeightedPauliOperator.
        op = Pauli.from_label(op_name[:num_qubits])
        op = WeightedPauliOperator.from_list([op])
        op_list.append(op)
    if rev:
        # presumably `reverse` returns the operators in reversed order —
        # TODO confirm against its definition elsewhere in the file.
        return reverse(op_list)
    return op_list

# Exact (noise-free) statevector simulation backend for the runs below.
backend = Aer.get_backend("statevector_simulator")
qi = QuantumInstance(backend)
# Very tight tolerance so Nelder-Mead converges near machine precision.
optimizer = NELDER_MEAD(tol = 1e-10)

ansatz_length = 5
op_list = []
# NOTE(review): stray empty triple-quoted string below — looks like a
# leftover placeholder from scraping; it is a harmless no-op expression.
"""
"""

def be_in_range(a, b, n):
    """Shift *n* by multiples of (b - a) until it lies in [a, b].

    Args:
        a: Lower bound of the target interval.
        b: Upper bound of the target interval; must satisfy b > a.
        n: The number to wrap into the interval.

    Returns:
        n shifted by an integer multiple of (b - a) so that
        a <= result <= b.

    Raises:
        ValueError: If b <= a — the original code would loop forever
            (or hit an undefined name `error`) in that case.
    """
    # Fixes vs. the original: the truncated `else:` branch is completed
    # with `return`, the dead `return error` (undefined name) is removed,
    # and an invalid interval now fails fast instead of spinning.
    if b <= a:
        raise ValueError(
            "be_in_range requires b > a, got a={}, b={}".format(a, b))
    span = b - a
    result = n
    while result > b:
        result -= span
    while result < a:
        result += span
    return result
# Example #6
# 0
# Quantum instance over backend/shots settings defined elsewhere in the file.
qi = QuantumInstance(backend, shots)

# Output / bookkeeping toggles (1 = enabled).
output_to_file = 1
output_to_cmd = 1
store_in_df = 1
output_to_csv = 1
enable_adapt = 1
enable_roto_2 = 1

max_iterations = 1

# Classical optimiser configuration: Nelder-Mead with an extremely
# tight stopping tolerance.
num_optimizer_runs = 100000
optimizer_stopping_energy = 1e-16
optimizer_name = "NM"
optimizer = NELDER_MEAD(tol=optimizer_stopping_energy)

# `up` is defined elsewhere — presumably a run identifier; TODO confirm.
out_file = open("ADAPT_ROTO_RUN_INFO_{}.txt".format(up), "w+")

# Per-run records accumulated during the ADAPT sweep.
adapt_data_dict = {
    'hamiltonian': [],
    'eval time': [],
    'num op choice evals': [],
    'num optimizer evals': [],
    'ansz length': [],
    'final energy': []
}
adapt_param_dict = dict()
adapt_op_dict = dict()
adapt_E_dict = dict()
adapt_grad_dict = dict()
# Example #7
# 0
# Graph layout for plotting; `G` is defined earlier in the file.
pos = nx.spring_layout(G)

w = my_graphs.adjacency_matrix(G)
print("\nAdjacency matrix\n", w, "\n")

# setting p
p = 1

# ... QAOA ...
# Create an Ising Hamiltonian with docplex.
mdl = Model(name='max_cut')
mdl.node_vars = mdl.binary_var_list(list(range(n)), name='node')
# Max-cut objective: total weight of edges cut by the 0/1 node assignment.
maxcut_func = mdl.sum(w[i, j] * mdl.node_vars[i] * (1 - mdl.node_vars[j])
                      for i in range(n) for j in range(n))
mdl.maximize(maxcut_func)
qubit_op, offset = docplex.get_operator(mdl)

# Run quantum algorithm QAOA on qasm simulator
optimizer = NELDER_MEAD()
qaoa = QAOA(qubit_op, optimizer, p=p)
backend = Aer.get_backend('qasm_simulator')
quantum_instance = QuantumInstance(backend, shots=1000)
result = qaoa.run(quantum_instance)

# Decode the most likely bitstring and report run statistics.
x = sample_most_likely(result.eigenstate)
print('energy:', result.eigenvalue.real)
print('time:', result.optimizer_time, 's')
print('max-cut objective:', result.eigenvalue.real + offset)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
print('angles:', result.optimal_point)
print('num physical cpus', aqua_globals.num_processes)
print('num available cpus', len(psutil.Process().cpu_affinity()))
print('num logical cpus', psutil.cpu_count(logical=True))
# `starttime` is defined elsewhere in the file.
print(starttime)
number_runs = 1
max_iterations = 0
ADAPT_stopping_gradient = 0  #not used
ADAPTROTO_stopping_energy = 0  #not used
ROTOSOLVE_stopping_energy = 1e-12
ADAPT_optimizer_stopping_energy = 1e-12
ROTOSOLVE_max_iterations = 100000

# `up` is defined elsewhere — presumably a run identifier; TODO confirm.
out_file = open("ADAPT_ROTO_RUN_INFO_{}.txt".format(up), "w+")

# Nelder-Mead with the Rotosolve stopping threshold as its tolerance.
optimizer_name = "NM"
optimizer = NELDER_MEAD(tol=ROTOSOLVE_stopping_energy)

#optimizer_2 = Rotosolve(ROTOSOLVE_stopping_energy,ROTOSOLVE_max_iterations, param_per_step = 2)

# Per-run records accumulated during the ADAPT sweep.
adapt_data_dict = {
    'hamiltonian': [],
    'eval time': [],
    'num op choice evals': [],
    'num optimizer evals': [],
    'ansz length': [],
    'final energy': []
}
adapt_param_dict = dict()
adapt_op_dict = dict()
adapt_E_dict = dict()
adapt_grad_dict = dict()