Example #1
def run_cmaes_p_concat(v, k, r, seed, generations, sig=None):

    suzuki = approx.suzuki_vals(k)

    if sig is None:
        sig = 1e-5 / len(suzuki)

    chain = hchain.HeisenbergChain(len(v), v)

    random.seed(seed)
    np.random.seed(seed)

    # Error of an individual's p-values relative to the target evolution
    def target_error(ind):
        if NORMALISE:
            norm_ind = norm_f(ind)
        else:
            norm_ind = ind

        final_ind = approx.r_copies(approx.expand_vals(norm_ind), r)

        # DEAP expects a fitness tuple, hence the trailing comma
        return approx.error(chain, final_ind, t=2 * chain.n),

    toolbox = base.Toolbox()
    toolbox.register("evaluate", target_error)

    # CMA-ES strategy centred on the Suzuki coefficients with initial step size sig
    strategy = cma.Strategy(centroid=suzuki, sigma=sig)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    pop, log = algorithms.eaGenerateUpdate(toolbox,
                                           ngen=generations,
                                           stats=stats,
                                           halloffame=hof,
                                           verbose=True)

    return pop, log, hof
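
# run_cmaes_p_concat assumes that DEAP's creator types, the NORMALISE flag and the
# norm_f helper exist at module level, and that approx and hchain are project modules
# imported elsewhere. A minimal sketch of that assumed setup; the FitnessMin weights,
# the placeholder norm_f and the example call values are illustrative only:
import random

import numpy as np
from deap import algorithms, base, cma, creator, tools

creator.create("FitnessMin", base.Fitness, weights=(-1.0,))    # minimise the error
creator.create("Individual", list, fitness=creator.FitnessMin)

NORMALISE = False            # assumed default: no normalisation
norm_f = lambda ind: ind     # placeholder normalisation function

# Example call (illustrative values):
# pop, log, hof = run_cmaes_p_concat(v=[0.5, -0.3, 0.8], k=2, r=10,
#                                    seed=1234, generations=100)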
Example #2
def evaluate_rows(input_df, start_row, end_row, output_folder_path):
    df = input_df.copy()
    if not os.path.isdir(output_folder_path):
        os.makedirs(output_folder_path)

    logs_path = os.path.join(output_folder_path, 'logs')
    if not os.path.isdir(logs_path):
        os.makedirs(logs_path)

    output_data_path = os.path.join(output_folder_path, 'output_{}_{}.csv'.format(start_row, end_row))

    for row_id in range(start_row - 1, end_row):
        row = df.loc[row_id]

        n = int(row['n'])
        v = row['v_1':'v_%d' % n]
        k = int(row['k'])
        r = int(row['r'])
        seed = int(row['seed'])
        gens = int(row['gens'])

        chain = hchain.HeisenbergChain(n, v)

        t = time.time()
        pop, log, hof = run_cmaes_p(v=v, k=k, r=r, seed=seed, generations=gens)
        t = time.time() - t

        best = hof[0]

        # Compare the optimised solution against the Suzuki baseline at t = 2n
        suz_err = approx.error(chain, approx.suzuki_solution(k, r), t=2 * n)
        op_err = approx.error(chain, approx.r_copies(best, r), t=2 * n)
        perc = 100 * op_err / suz_err

        for j in range(1, 5**(k - 1) + 1):
            df.loc[row_id, 'p_%d' % j] = best[j - 1]
        df.loc[row_id,'optimised error'] = op_err
        df.loc[row_id, 'suzuki error'] = suz_err
        df.loc[row_id, '% error'] = perc
        df.loc[row_id, 'time'] = t

        pd.DataFrame(log).to_csv(os.path.join(logs_path, 'row_{}.csv'.format(row_id + 1)), index=False)

    # Keep only the rows evaluated in this slice
    df.dropna(inplace=True)
    df.to_csv(output_data_path, index=False)
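
# Hypothetical driver for evaluate_rows: the CSV name and row range are illustrative,
# but the expected columns (n, v_1..v_n, k, r, seed, gens) follow from the usage above;
# pandas and os are assumed to be imported as in the snippet.
if __name__ == '__main__':
    input_df = pd.read_csv('experiments.csv')
    evaluate_rows(input_df, start_row=1, end_row=10,
                  output_folder_path=os.path.join('results', 'exp01'))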
Example #3
def run(input_file, start, end):

    if not os.path.isdir(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)

    input_file_prefix = os.path.splitext(os.path.basename(input_file))[0]

    output_file = os.path.join(OUTPUT_DIR, input_file_prefix + '_output_{}_{}.csv'.format(start, end))

    header, all_rows = read_all(input_file)

    # newline='' so the csv writer does not insert blank lines on Windows
    with open(output_file, 'w', newline='') as outcsv:

        output_header = header + ['suzuki error', 'optimised error', '%']

        writer = csv.DictWriter(outcsv, output_header)
        writer.writeheader()

        for row in all_rows[start-1:end]:

            n = int(row['n'])
            k = int(row['k'])
            r = int(row['r'])
            t = int(row['t'])

            p_vect = {key: val for key, val in row.items() if key.startswith('p_')}
            v_vect = {key: val for key, val in row.items() if key.startswith('v_') and val != ''}

            v_values = [float(v) for v in v_vect.values()]
            p_values = [float(p) for p in p_vect.values()]

            chain = hchain.HeisenbergChain(n, v_values)

            suz_err = approx.error(chain, approx.suzuki_solution(k, r), t)
            op_err = approx.error(chain, approx.r_copies(p_values, r), t)

            percentage = 100 * op_err / suz_err

            row['suzuki error'] = suz_err
            row['optimised error'] = op_err
            row['%'] = percentage

            writer.writerow(row)
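
# read_all is not defined in this snippet. A minimal sketch consistent with the usage
# above (returns the CSV header and all rows as dicts); the original helper may differ:
def read_all(path):
    with open(path, newline='') as incsv:
        reader = csv.DictReader(incsv)
        rows = list(reader)
    return list(reader.fieldnames), rows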
Example #4
def plot(folder_path):
    df = pd.read_csv(os.path.join(folder_path, 'exp16_results.csv'))

    v = list(df.loc[0, 'v_1':'v_5'])
    chain = hchain.HeisenbergChain(5, v)
    r_counts = range(25, 401, 25)
    suz_errs = [
        approx.error(chain, approx.suzuki_solution(2, r), 10)
        for r in r_counts
    ]

    # One curve per block of 16 rows; redder curves correspond to larger original_r
    for i in range(0, len(df), 16):
        df_r = df.loc[i:i + 15]
        orig_r = int(df_r.loc[i, 'original_r'])
        plt.plot(r_counts,
                 df_r['error'],
                 color=(1.0, 1 - orig_r / 400, 1 - orig_r / 400, 0.8),
                 marker='.',
                 linewidth=1.5,
                 markersize=4,
                 label='_nolegend_')

    plt.plot(r_counts,
             suz_errs,
             color='blue',
             marker='.',
             linewidth=2,
             markersize=7,
             label='Suzuki')
    plt.yscale('log')

    plt.legend(fontsize=16)

    plt.xlabel('r', fontsize=18)
    plt.ylabel('Absolute Error', fontsize=18)

    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)

    plt.tight_layout()

    plt.show()
Example #5
 def test_unitary(self):
     small_chain = hchain.HeisenbergChain(2, [1] * 2)
     actual = small_chain.exponential(1j)
     expected = np.array([
         [-0.6536 - 0.7568j, 0.0000 + 0.0000j, 0.0000 + 0.0000j, 0.0000 + 0.0000j],
         [0.0000 + 0.0000j, 0.2720 + 0.5944j, -0.6882 + 0.3149j, 0.0000 + 0.0000j],
         [0.0000 + 0.0000j, -0.6882 + 0.3149j, 0.2720 + 0.5944j, 0.0000 + 0.0000j],
         [0.0000 + 0.0000j, 0.0000 + 0.0000j, 0.0000 + 0.0000j, 1.0000 + 0.0000j],
     ])
     self.assertTrue(np.allclose(actual, expected, atol=0.001))
Example #6
def greedy():
    """ Run stochastic hillclimber. Perturb a random pair of p-values and keep the move if it lowers the error. """

    random.seed(1234)

    chain = hchain.HeisenbergChain(num_qubits, v)

    suzuki = np.array(approx.suzuki(k))
    suzuki_error = approx.error(chain, approx.r_copies(suzuki, r), t)
    print('Suzuki error: {}'.format(suzuki_error))

    # Start at Suzuki
    current = suzuki
    current_error = suzuki_error

    for gen in range(1, gens + 1):

        current_error_percent = 100 * (current_error / suzuki_error)
        print('Gen: {} Best: {} Percent: {} '.format(gen, current_error,
                                                     current_error_percent))

        first = random.randint(1, num_p_values)
        second = random.randint(1, num_p_values)

        # Shift weight between two randomly chosen p-values, keeping their sum fixed
        neighbour = current.copy()
        neighbour[first - 1] += step_size
        neighbour[second - 1] -= step_size

        neighbour_err = approx.error(chain, approx.r_copies(neighbour, r), t)

        neighbour_err_percent = 100 * (neighbour_err / suzuki_error)

        print('Step {}  error: {}, error %: {}'.format(gen, neighbour_err,
                                                       neighbour_err_percent))

        if neighbour_err < current_error:
            current = neighbour
            current_error = neighbour_err
            current_error_percent = 100 * (current_error / suzuki_error)
            print('New best: error: {} percent: {} solution: {}'.format(
                current_error, current_error_percent, current))
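
# The hillclimber above reads its configuration from module-level globals that are
# not shown in this snippet. A minimal sketch of the assumed setup (all values are
# illustrative, not taken from the original experiments):
num_qubits = 5                     # chain length n
v = [0.5, -0.3, 0.8, 0.1, -0.7]    # one coupling strength per qubit
k = 2                              # Suzuki order parameter
r = 10                             # number of repeated copies of the p-vector
t = 2 * num_qubits                 # simulation time, t = 2n as in the other examples
gens = 1000                        # number of hillclimber iterations
num_p_values = 5                   # length of the p-vector (5**(k - 1) for k = 2)
step_size = 1e-4                   # size of a single coordinate move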
Example #7
def compare_lambda_over_n(input_filename, input_df, start_row, end_row,
                          output_folder_path):
    df = input_df.copy()
    df.insert(1, 'optimised error', None)
    df.insert(1, 'suzuki error', None)
    df.insert(1, '%', None)
    if not os.path.isdir(output_folder_path):
        os.makedirs(output_folder_path)

    input_filename_prefix = os.path.splitext(input_filename)[0]
    output_data_path = os.path.join(
        output_folder_path,
        input_filename_prefix + '_output_{}_{}.csv'.format(start_row, end_row))

    for row_id in range(start_row - 1, end_row):
        row = df.loc[row_id]

        n = int(row['n'])
        v = list(map(float, list(row['v_1':'v_%d' % n])))
        k = int(row['k'])
        r = int(row['r'])

        lam = list(row.loc['lambda_1':'lambda_%d' % (r * 5**(k - 1))])
        chain = hchain.HeisenbergChain(n, v)

        # Hardcode time
        suz_err = approx.error(chain, approx.suzuki_solution(k, r), t=SIM_TIME)
        op_err = approx.error(chain, lam, t=SIM_TIME)

        perc = 100 * op_err / suz_err

        df.loc[row_id, 'suzuki error'] = suz_err
        df.loc[row_id, 'optimised error'] = op_err
        df.loc[row_id, '%'] = perc

    # Keep only the rows evaluated in this slice
    df.dropna(inplace=True)
    df.to_csv(output_data_path, index=False)
Example #8
 def setUp(self):
     self.chain = hchain.HeisenbergChain(self.n, self.v)
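
# setUp assumes the enclosing unittest.TestCase subclass defines n and v as class
# attributes, and that hchain is imported at module level as in the other examples.
# A minimal sketch of that assumed context; the class name and values are illustrative:
import unittest

class TestHeisenbergChain(unittest.TestCase):
    n = 2
    v = [1, 1]

    def setUp(self):
        self.chain = hchain.HeisenbergChain(self.n, self.v)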
Example #9
def calc_values():

    # Walk up parent directories until an existing 'results' folder is found (up to four levels)
    results_path = os.path.join('..', 'results')
    for _ in range(3):
        if os.path.isdir(results_path):
            break
        results_path = os.path.join('..', results_path)

    path = os.path.join(results_path, 'perm_analysis')
    if not os.path.isdir(path):
        os.makedirs(path)

    np.random.seed(1294635)

    n = 5
    chain = hchain.HeisenbergChain(n, np.random.uniform(-1, 1, n))

    # Grouped, canonical and random orderings of the 4 * n indices
    group_perm = [4 * i for i in range(chain.n)] + [4 * i + 1 for i in range(chain.n)] \
                 + [4 * i + 2 for i in range(chain.n)] + [4 * i + 3 for i in range(chain.n)]

    can_perm = list(range(4 * chain.n))

    rand_perms = [np.random.permutation(4 * chain.n) for _ in range(20)]

    cols = ['r', 'gate count', 'error']
    group_k2 = pd.DataFrame(columns=cols)
    group_k3 = pd.DataFrame(columns=cols)

    can_k2 = pd.DataFrame(columns=cols)
    can_k3 = pd.DataFrame(columns=cols)

    rand_k2 = pd.DataFrame(columns=cols)
    rand_k3 = pd.DataFrame(columns=cols)


    k = 2
    for r in range(25, 401, 25):
        print(r)
        suz = approx.suzuki_solution(k, r)
        group_k2.loc[len(group_k2)] = [r, approx.gate_count(chain, len(suz), permutation=group_perm),
                                       approx.error(chain, suz, t=2 * chain.n, permutation=group_perm)]

        can_k2.loc[len(can_k2)] = [r, approx.gate_count(chain, len(suz), permutation=can_perm),
                                   approx.error(chain, suz, t=2 * chain.n, permutation=can_perm)]

        mean_gate_count, mean_error = 0, 0
        for perm in rand_perms:
            mean_gate_count += approx.gate_count(chain, len(suz), permutation=perm)
            mean_error += approx.error(chain, suz,  t=2*chain.n, permutation=perm)

        mean_gate_count = mean_gate_count / len(rand_perms)
        mean_error = mean_error / len(rand_perms)
        rand_k2.loc[len(rand_k2)] = [r, mean_gate_count, mean_error]

    group_k2.to_csv(os.path.join(path, 'Grouped k=2.csv'))
    can_k2.to_csv(os.path.join(path, 'Canonical k=2.csv'))
    rand_k2.to_csv(os.path.join(path, 'Random k=2.csv'))



    k = 3
    for r in range(5, 61, 5):
        print(r)
        suz = approx.suzuki_solution(k, r)
        group_k3.loc[len(group_k3)] = [r, approx.gate_count(chain, len(suz), permutation=group_perm),
                                       approx.error(chain, suz, t=2 * chain.n, permutation=group_perm)]

        can_k3.loc[len(can_k3)] = [r, approx.gate_count(chain, len(suz), permutation=can_perm),
                                   approx.error(chain, suz, t=2 * chain.n, permutation=can_perm)]

        mean_gate_count, mean_error = 0, 0
        for perm in rand_perms:
            mean_gate_count += approx.gate_count(chain, len(suz), permutation=perm)
            mean_error += approx.error(chain, suz, t=2 * chain.n, permutation=perm)

        mean_gate_count = mean_gate_count / len(rand_perms)
        mean_error = mean_error / len(rand_perms)
        rand_k3.loc[len(rand_k3)] = [r, mean_gate_count, mean_error]

    group_k3.to_csv(os.path.join(path, 'Grouped k=3.csv'))
    can_k3.to_csv(os.path.join(path, 'Canonical k=3.csv'))
    rand_k3.to_csv(os.path.join(path, 'Random k=3.csv'))

    df = pd.DataFrame(columns=['n'] + ['v_%d' % i for i in range(1, chain.n+1)])
    df.loc[len(df)] = [chain.n] + list(chain.v)

    df.to_csv(os.path.join(path, 'chain.csv'))
Example #10
def greedy():
    """ Run greedy hillclimber. Enumerate neighbourhood and choose best. """
    chain = hchain.HeisenbergChain(num_qubits, v)

    steps = [-step_size, 0, step_size]

    neighbourhood_size = len(steps)**num_p_values

    suzuki = np.array(approx.suzuki(k))
    suzuki_error = approx.error(chain, approx.r_copies(suzuki, r), t)
    print('Suzuki error: {}'.format(suzuki_error))

    # Start at Suzuki
    # current = suzuki
    # current_error = suzuki_error

    # Start at 1/p_length
    # current = np.array([0.2] * 5)
    # current_error = approx.error(chain, approx.r_copies(current, r), t)

    # Start at a random shuffle of the Suzuki coefficients
    import random
    random.seed(1234)
    suzuki_coeff = approx.suzuki(k)
    random.shuffle(suzuki_coeff)
    current = np.array(suzuki_coeff)
    current_error = approx.error(chain, approx.r_copies(current, r), t)

    for gen in range(1, gens + 1):

        current_error_percent = 100 * (current_error / suzuki_error)
        print('Gen: {} Best: {} Percent: {} '.format(gen, current_error,
                                                     current_error_percent))

        # Best neighbour found in this generation
        best_neighbour = None
        best_neighbour_error = float('inf')

        for neighbour_index in range(1, neighbourhood_size):

            move = [
                step_size * (coeff - 1)
                for coeff in ternary(neighbour_index, num_p_values)
            ]

            neighbour = current + move

            neighbour_err = approx.error(chain, approx.r_copies(neighbour, r),
                                         t)

            neighbour_err_percent = 100 * (neighbour_err / suzuki_error)

            print('Step {} Neighbour {}/{} error: {}, error %: {}'.format(
                gen, neighbour_index, neighbourhood_size, neighbour_err,
                neighbour_err_percent))

            if neighbour_err <= best_neighbour_error:
                best_neighbour = neighbour
                best_neighbour_error = neighbour_err

        if best_neighbour_error < current_error:
            current = best_neighbour
            current_error = best_neighbour_error
            current_error_percent = 100 * (current_error / suzuki_error)
            print('New best: error: {} percent: {} solution: {}'.format(
                current_error, current_error_percent, current))
        else:
            best_neighbour_percent = 100 * (best_neighbour_error /
                                            suzuki_error)
            print('Best result: error: {} percent: {} solution: {}'.format(
                best_neighbour_error, best_neighbour_percent, best_neighbour))
            return
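
# ternary() is called above but not defined in this snippet. A minimal sketch
# consistent with its use (the base-3 digits of an index, least significant first,
# padded to `length`, so each digit maps to a move of -step_size, 0 or +step_size);
# the helper in the original code may differ:
def ternary(index, length):
    digits = []
    for _ in range(length):
        digits.append(index % 3)
        index //= 3
    return digits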