def run_angles(n_nodes,
               B,
               angles,
               C=None,
               backend='IBMQX',
               backend_params=None):
    """Run the variational form at fixed angles; return the best sampled cut.

    Parameters
    ----------
    n_nodes : int
        Number of nodes (qubits) in the problem.
    B : array-like
        Modularity matrix of the graph.
    angles : numpy array or list
        Variational parameters for the circuit.
    C : optional
        Extra term forwarded to ``gm.compute_modularity``.
    backend : str
        Only ``'IBMQX'`` is supported.
    backend_params : dict, optional
        ``{'backend_device': str or None, 'depth': int}``; defaults to
        ``{'backend_device': None, 'depth': 3}``.

    Returns
    -------
    tuple
        ``(modularity, bitstring)`` for the best bitstring sampled.

    Raises
    ------
    ValueError
        If ``angles`` has an unexpected type or ``backend`` is unsupported.
    """
    # BUGFIX: the default was a mutable dict literal shared across all calls
    # (classic Python pitfall); build a fresh dict per call instead.
    if backend_params is None:
        backend_params = {'backend_device': None, 'depth': 3}
    if backend == 'IBMQX':
        if not isinstance(angles, (np.ndarray, np.generic, list)):
            raise ValueError(
                "Incorrect angles received: {} for backend {}".format(
                    angles, backend))
        problem_description = {
            'name': 'modularity',
            'B': B,
            'n_nodes': n_nodes,
            'objective_function': gm.compute_modularity_dict,
            'C': C
        }
        var_form = IBMQXVarForm(problem_description,
                                depth=backend_params['depth'])
        resstrs = var_form.run(angles,
                               backend_name=backend_params['backend_device'])
    else:
        raise ValueError("Unsupported backend: {}".format(backend))
    # Score every sampled bitstring and keep the best one.
    modularities = [(gm.compute_modularity(n_nodes, B, x, C=C), x)
                    for x in resstrs]
    return max(modularities, key=itemgetter(0))
def obj_val(x):
    """Objective for the classical optimizer: signed mean modularity.

    NOTE(review): this is a closure — ``var_form``, ``n_nodes``, ``B``,
    ``C``, ``sign``, ``return_x``, ``all_x`` and ``all_vals`` come from
    the enclosing scope.
    """
    sampled = var_form.run(x)
    modularities = [
        gm.compute_modularity(n_nodes, B, bitstr, C=C) for bitstr in sampled
    ]
    mean_mod = np.mean(modularities)
    if return_x:
        # keep a snapshot of the angles tried at this step
        all_x.append(copy.deepcopy(x))
    all_vals.append({'max': max(modularities), 'mean': mean_mod})
    print("Actual modularity (to be maximized): {}".format(mean_mod))
    # sign flips the value when the outer optimizer minimizes
    return sign * mean_mod
def run_angles(n_nodes,
               B,
               angles,
               C=None,
               backend='IBMQX',
               backend_params=None):
    """Run the variational form at fixed angles; return the best sampled cut.

    Parameters
    ----------
    n_nodes : int
        Number of nodes (qubits) in the problem.
    B : array-like
        Modularity matrix of the graph.
    angles : numpy array or list
        Variational parameters for the circuit.
    C : optional
        Extra term forwarded to ``gm.compute_modularity``.
    backend : str
        Only ``'IBMQX'`` is supported.
    backend_params : dict, optional
        ``{'backend_device': str or None, 'depth': int}``; defaults to
        ``{'backend_device': None, 'depth': 3}``.

    Returns
    -------
    tuple
        ``(modularity, bitstring)`` for the best bitstring sampled.

    Raises
    ------
    ValueError
        If ``angles`` has an unexpected type or ``backend`` is unsupported.
    """
    # BUGFIX: the default was a mutable dict literal shared across all calls
    # (classic Python pitfall); build a fresh dict per call instead.
    if backend_params is None:
        backend_params = {'backend_device': None, 'depth': 3}
    if backend == 'IBMQX':
        if not isinstance(angles, (np.ndarray, np.generic, list)):
            raise ValueError(
                "Incorrect angles received: {} for backend {}".format(
                    angles, backend))
        var_form = IBMQXVarForm(num_qubits=n_nodes,
                                depth=backend_params['depth'])
        resstrs = var_form.run(angles,
                               backend_name=backend_params['backend_device'])
    else:
        raise ValueError("Unsupported backend: {}".format(backend))
    # Score every sampled bitstring and keep the best one.
    modularities = [(gm.compute_modularity(n_nodes, B, x, C=C), x)
                    for x in resstrs]
    return max(modularities, key=itemgetter(0))
def test_angles(graph_generator_name,
                left,
                right,
                angles,
                seed=None,
                verbose=0,
                compute_optimal=False,
                backend='IBMQX',
                backend_params=None,
                solution_bitstring=None):
    """Evaluate fixed QAOA angles on a generated graph.

    note that compute optimal uses brute force! Not recommended for medium
    and large problems.

    angles should be a dictionary with fields 'beta' and 'gamma', e.g.
    {'beta': 2.0541782343349086, 'gamma': 0.34703642333837853}

    Parameters
    ----------
    graph_generator_name, left, right :
        Forwarded to ``generate_graph``.
    angles : numpy array or list
        Variational parameters for the circuit.
    seed : int, optional
        Random seed for graph generation.
    verbose : int
        ``>0`` prints summary stats; ``>1`` also prints the full
        bitstring distribution.
    compute_optimal : bool
        If True, compare against the modularity of ``solution_bitstring``.
    backend : str
        Only ``'IBMQX'`` is supported.
    backend_params : dict, optional
        ``{'backend_device': str or None, 'depth': int}``; defaults to
        ``{'backend_device': None, 'depth': 3}``.
    solution_bitstring : optional
        Known optimal partition; required when ``compute_optimal`` is
        True. (New backward-compatible keyword — the original referenced
        an undefined name here.)

    Returns
    -------
    dict
        ``{'max', 'mean', 'pmax', 'optimal', 'x'}``.

    Raises
    ------
    ValueError
        For bad ``angles``, an unsupported ``backend``, an unexpected
        sampled bitstring, or a missing ``solution_bitstring``.
    """
    # BUGFIX: the default was a mutable dict literal shared across all calls;
    # build a fresh dict per call instead.
    if backend_params is None:
        backend_params = {'backend_device': None, 'depth': 3}
    rand_seed = seed
    # Generate the graph
    G, _ = generate_graph(graph_generator_name, left, right, seed=seed)
    # Use angles
    # Using NetworkX modularity matrix
    B = nx.modularity_matrix(G).A
    # Compute ideal cost
    if compute_optimal:
        # BUGFIX: `solution_bitstring` was previously an undefined name
        # (NameError on this path); it is now an explicit keyword argument.
        if solution_bitstring is None:
            raise ValueError(
                "compute_optimal=True requires solution_bitstring")
        optimal_modularity = gm.compute_modularity(G, B, solution_bitstring)
        print("Optimal solution energy: ", optimal_modularity)
    else:
        optimal_modularity = None
    if backend == 'IBMQX':
        if not isinstance(angles, (np.ndarray, np.generic, list)):
            raise ValueError(
                "Incorrect angles received: {} for backend {}".format(
                    angles, backend))
        var_form = IBMQXVarForm(num_qubits=G.number_of_nodes(),
                                depth=backend_params['depth'])
        resstrs = var_form.run(angles)
    else:
        raise ValueError("Unsupported backend: {}".format(backend))
    if verbose > 1:
        # print distribution
        # BUGFIX: `qubits` was an undefined name here (NameError); the
        # intended repeat count is the number of graph nodes.
        allstrs = list(product([0, 1], repeat=G.number_of_nodes()))
        freq = {}
        for bitstr in allstrs:
            freq[str(list(bitstr))] = 0
        for resstr in resstrs:
            resstr = str(list(resstr))  # for it to be hashable
            if resstr in freq.keys():
                freq[resstr] += 1
            else:
                raise ValueError(
                    "received incorrect string: {}".format(resstr))
        for k, v in freq.items():
            print("{} : {}".format(k, v))
    # Raw results
    modularities = [gm.compute_modularity(G, B, x) for x in resstrs]
    mod_max = max(modularities)
    # Probability of getting best modularity
    if compute_optimal:
        mod_pmax = float(
            np.sum(np.isclose(modularities, optimal_modularity))) / float(
                len(modularities))
    else:
        mod_pmax = None
    mod_mean = np.mean(modularities)
    if verbose:
        print("Best modularity found:", mod_max)
        print("pmax: ", mod_pmax)
        print("mean: ", mod_mean)
    return {
        'max': mod_max,
        'mean': mod_mean,
        'pmax': mod_pmax,
        'optimal': optimal_modularity,
        'x': angles
    }
def single_level_optimize_modularity(G,
                                     solution_bitstring=None,
                                     random_seed=42,
                                     size_of_iteration=12,
                                     method='brute',
                                     subset_selection='spectral',
                                     stopping_criteria=3,
                                     method_params=None,
                                     qaoa_method='bayes',
                                     backend='IBMQX',
                                     backend_params=None):
    """Greedy local-search maximization of modularity, one subset at a time.

    Repeatedly selects a subset of vertices (per ``subset_selection``),
    re-optimizes the partition restricted to that subset with
    ``iteration_step``, and accepts the candidate only if it improves
    modularity. Stops when every vertex has been visited, after
    ``stopping_criteria`` consecutive non-improving iterations, or when
    the best solution reaches 95% of the known optimum.

    Parameters
    ----------
    G : networkx graph
        Graph to partition.
    solution_bitstring : optional
        Known optimal partition; used only to compute a reference
        modularity for early stopping.
    random_seed : int
        Seed for both numpy and random (initial random partition).
    size_of_iteration : int
        Target subset size per iteration.
    method, method_params, qaoa_method, backend, backend_params :
        Forwarded to ``iteration_step``; ``backend_params`` defaults to
        ``{'backend_device': None, 'depth': 3}``.
    subset_selection : str
        One of ``'spectral'``, ``'bfs'``, ``'top_gain'``.
    stopping_criteria : int
        Max consecutive non-improving iterations before exiting.

    Returns
    -------
    tuple
        ``(best_modularity, best_solution, iterations, all_modularities)``.

    Raises
    ------
    ValueError
        For an unknown ``subset_selection``.
    """
    # BUGFIX: the default was a mutable dict literal shared across all calls
    # (classic Python pitfall); build a fresh dict per call instead.
    if backend_params is None:
        backend_params = {'backend_device': None, 'depth': 3}
    np.random.seed(random_seed)
    random.seed(random_seed)
    B = nx.modularity_matrix(G, nodelist=sorted(G.nodes()), weight='weight')
    if solution_bitstring is not None:
        logging.info("Solution: {}".format(solution_bitstring))
    # random initial guess: map {0,1} draws to {+1,-1} spins
    curr_solution = [
        1 - 2 * x
        for x in list(np.random.randint(2, size=(G.number_of_nodes(), )))
    ]
    curr_modularity = gm.compute_modularity(G, B, curr_solution)
    if solution_bitstring is not None:
        optimal_modularity = gm.compute_modularity(G, B, solution_bitstring)
    else:
        # NOTE(review): with inf here the 0.95*optimal early exit below can
        # never trigger — early stopping only applies with a known solution.
        optimal_modularity = float('inf')
    print("Initial guess: {}, optimal: {}".format(curr_modularity,
                                                  optimal_modularity))
    # We will keep track of all time guess so when we reset after getting
    # stuck in local optima, we don't lose it
    all_time_best_solution = curr_solution
    all_time_best_modularity = curr_modularity
    visited = set()
    it = 0
    it_stuck = 0
    all_modularities = []
    num_cores = min(multiprocessing.cpu_count(), 16)
    logging.info(
        "Using {} for subset selection, {} for subset optimization".format(
            subset_selection, method))
    while set(G.nodes()) - visited:
        # if using mpi, sync the best solution at the start of each iteration
        if qaoa_method == 'libensemble':
            curr_modularity, curr_solution = MPI.COMM_WORLD.allreduce(
                (all_time_best_modularity, all_time_best_solution),
                op=opTupleMax)
        it += 1
        if it_stuck > stopping_criteria:
            logging.info("Exiting at iteration {}".format(it))
            break
        # per-vertex flip gains; compute_gain returns (gain, vertex) pairs
        gains_list = []
        for v in progressbar.progressbar(G.nodes()):
            gains_list.append(gm.compute_gain(G, B, curr_solution, v, True))
        gains = {v: gain for gain, v in gains_list}
        if subset_selection == 'spectral':
            notvisited_gains = {
                v: gain
                for v, gain in gains.items() if v not in visited
            }
            # if stuck, try selecting random vertex to climb out of local optima
            n, curr_gain = max(notvisited_gains.items(), key=itemgetter(1))
            threshold = np.percentile(
                list(gains.values()), 25
            ) if 0.75 * G.number_of_nodes() >= size_of_iteration else min(
                list(gains.values()))
            visited.add(n)
            subset = spectral_populate_subset(G, n, size_of_iteration, gains,
                                              threshold)
            logging.info(
                "Iter {}, looking at vertex {} and its neighbors {}, potential gain {}"
                .format(it, n, subset, curr_gain))
        elif subset_selection == 'bfs':
            n, curr_gain = max(gains.items(), key=itemgetter(1))
            visited.add(n)
            subset = bfs_populate_subset(G, n, size_of_iteration)
            logging.info(
                "Iter {}, looking at vertex {} and its neighbors {}, potential gain {}"
                .format(it, n, subset, curr_gain))
        elif subset_selection == 'top_gain':
            subset = top_gains_populate_subset(G, size_of_iteration, gains)
            logging.info("Iter {}, looking at {}".format(it, subset))
        else:
            raise ValueError(
                "Invalid subset selection method: {}".format(subset_selection))
        logging.info("curr_solution:\t{}".format(curr_solution))
        if logging.getLogger().getEffectiveLevel() >= logging.INFO:
            # visualize the free variables of the subproblem as "*"
            subproblem = copy.deepcopy(curr_solution)
            for v in subset:
                subproblem[v] = "*"
            logging.info("Subproblem:\t{}\tcurr_modularity\t{}".format(
                subproblem, curr_modularity))
        cand_solution = iteration_step(G,
                                       B,
                                       copy.deepcopy(curr_solution),
                                       list(subset),
                                       method=method,
                                       method_params=method_params,
                                       qaoa_method=qaoa_method,
                                       backend=backend,
                                       backend_params=backend_params)
        cand_modularity = gm.compute_modularity(G, B, cand_solution)
        logging.info("Solution:\t{}\tcand_modularity\t{}".format(
            cand_solution, cand_modularity))
        print('it', it, 'cand_modularity', cand_modularity, 'curr_best',
              all_time_best_modularity)
        all_modularities.append({
            'it': it,
            'cand_modularity': cand_modularity,
            'curr_best': all_time_best_modularity
        })
        if cand_modularity > curr_modularity:
            # strict improvement: accept and reset the stuck counter
            curr_solution = cand_solution
            curr_modularity = cand_modularity
            it_stuck = 0
            # logging.info("New modularity found: {}".format(curr_modularity))
        else:
            it_stuck += 1
            # logging.info("Ignoring modularity: {}".format(cand_modularity))
        if curr_modularity > all_time_best_modularity:
            all_time_best_solution = curr_solution
            all_time_best_modularity = curr_modularity
        if all_time_best_modularity >= 0.95 * optimal_modularity:
            logging.info(
                "Found really good solution at iter {}, exiting".format(it))
            break
    return (all_time_best_modularity, all_time_best_solution, it,
            all_modularities)