def evaluate_rows(input_df, start_row, end_row, output_folder_path):
    """Run the evolutionary search on each row and save results and logs.

    For every row in the 1-indexed range [start_row, end_row] this runs
    run_es, writes the best individual's lambda vector, its error, the
    Suzuki baseline error, the percentage ratio and the wall-clock time
    back into the dataframe, dumps the per-generation log to
    <output>/logs/row_<i>.csv, and finally writes the aggregated rows to
    <output>/output_<start>_<end>.csv.
    """
    df = input_df.copy()

    # Ensure the output folder and its logs/ subfolder exist.
    if not os.path.isdir(output_folder_path):
        os.makedirs(output_folder_path)
    logs_path = os.path.join(output_folder_path, 'logs')
    if not os.path.isdir(logs_path):
        os.makedirs(logs_path)

    output_data_path = os.path.join(
        output_folder_path, 'output_{}_{}.csv'.format(start_row, end_row))

    for idx in range(start_row - 1, end_row):
        record = df.loc[idx]

        n = int(record['n'])
        couplings = record['v_1':'v_%d' % n]
        k = int(record['k'])
        r = int(record['r'])
        seed = int(record['seed'])
        gens = int(record['gens'])

        chain = hchain.HeisenbergChain(n, couplings)
        suz = approx.suzuki_solution(k, r)

        # Time the evolutionary search.
        started = time.time()
        pop, log, hof = run_es(couplings, k, r, suz, seed, gens)
        elapsed = time.time() - started

        best = hof[0]

        # Compare the optimised solution against the Suzuki baseline at t=2n.
        suz_err = approx.error(chain, approx.suzuki_solution(k, r), t=2 * n)
        op_err = approx.error(chain, best, t=2 * n)
        perc = 100 * op_err / suz_err

        # Write the optimised lambda vector back into the dataframe.
        for j in range(1, r * 5**(k - 1) + 1):
            df.loc[idx, 'lambda_%d' % j] = best[j - 1]
        df.loc[idx, 'optimised error'] = op_err
        df.loc[idx, 'suzuki error'] = suz_err
        df.loc[idx, '% error'] = perc
        df.loc[idx, 'time'] = elapsed

        # Per-row evolution log (1-indexed filename to match the input rows).
        log_file = os.path.join(logs_path, 'row_{}.csv'.format(idx + 1))
        pd.DataFrame(log).to_csv(log_file, index=False)

    df.dropna(inplace=True)
    df.to_csv(output_data_path, index=False)
# --- Example #2 ---
def compare_lambda_over_n(input_df, start_row, end_row, output_folder_path):
    """Score each row's stored p-vector against the Suzuki baseline.

    Adds 'suzuki error', 'optimised error' and '%' columns, fills them for
    the 1-indexed rows [start_row, end_row] at simulation time t=2n, and
    writes the surviving rows to <output>/output_<start>_<end>.csv.
    """
    df = input_df.copy()
    # Insert in the same order as the original so the final column order
    # ('%', 'suzuki error', 'optimised error') is preserved.
    for column in ('optimised error', 'suzuki error', '%'):
        df.insert(1, column, None)

    if not os.path.isdir(output_folder_path):
        os.makedirs(output_folder_path)

    output_data_path = os.path.join(
        output_folder_path, 'output_{}_{}.csv'.format(start_row, end_row))

    for idx in range(start_row - 1, end_row):
        record = df.loc[idx]

        n = int(record['n'])
        couplings = [float(x) for x in record['v_1':'v_%d' % n]]
        k = int(record['k'])
        r = int(record['r'])

        # One Trotter slice's p-vector; r copies of it are evaluated.
        p_vec = list(record.loc['p_1':'p_%d' % (5**(k - 1))])
        chain = hchain.HeisenbergChain(n, couplings)

        baseline = approx.error(chain, approx.suzuki_solution(k, r), t=2 * n)
        optimised = approx.error(chain, approx.r_copies(p_vec, r), t=2 * n)

        df.loc[idx, 'suzuki error'] = baseline
        df.loc[idx, 'optimised error'] = optimised
        df.loc[idx, '%'] = 100 * optimised / baseline

    df.dropna(inplace=True)
    df.to_csv(output_data_path, index=False)
def suzuki_sampling(input_df, start_row, end_row, output_folder_path):
    """Sample noisy perturbations of the Suzuki vector and record the errors.

    For each 1-indexed row in [start_row, end_row], draws NUM_SAMPLES
    Gaussian perturbations of the order-k Suzuki vector (noise scale
    scale/5^(k-1)), evaluates the error of r copies of each perturbed
    vector at t=2n, and writes one row_<i>.csv per input row to
    output_folder_path, alongside the unperturbed Suzuki baseline error.
    """
    df = input_df.copy()

    NUM_SAMPLES = 100

    # Consistent with the other experiment drivers: create the output
    # folder if it is missing (to_csv below would otherwise fail when the
    # directory does not exist).
    if not os.path.isdir(output_folder_path):
        os.makedirs(output_folder_path)

    for row_id in range(start_row - 1, end_row):
        row = df.loc[row_id]

        n = int(row['n'])
        v = row['v_1':'v_%d' % n]
        k = int(row['k'])
        r = int(row['r'])
        scale = float(row['scale'])

        cols = (['n'] + ['v_%d' % i for i in range(1, n + 1)]
                + ['k', 'r', 'scale', 'sampled error', 'suzuki error'])
        row_df = pd.DataFrame(columns=cols)

        chain = hchain.HeisenbergChain(n, v)
        sample_length = 5**(k - 1)
        suzuki_vector = approx.suzuki(k)

        # The baseline error does not depend on the noise draw, so compute
        # it once per row (the original recomputed it NUM_SAMPLES times).
        suz_err = approx.error(chain, approx.suzuki_solution(k, r), t=2 * n)

        for _ in range(NUM_SAMPLES):
            noise = np.random.normal(loc=0, scale=scale / sample_length,
                                     size=sample_length)
            sample_vector = np.array(suzuki_vector) + noise
            sampled_err = approx.error(chain,
                                       approx.r_copies(sample_vector, r),
                                       t=2 * n)

            # len(row_df)+1 keeps the 1-based index of the original output.
            row_df.loc[len(row_df) + 1] = \
                [n] + list(v) + [k, r, scale, sampled_err, suz_err]

        row_df.to_csv(os.path.join(output_folder_path,
                                   'row_%d.csv' % (row_id + 1)))
# --- Example #4 ---
def run(input_file, start, end):
    """Re-score rows [start, end] (1-indexed) of input_file.

    Reads every row via read_all, computes the Suzuki baseline error and
    the error of the row's stored lambda vector at the row's own time t,
    and streams each row — with 'suzuki_error', 'error' and '%' columns
    appended — to OUTPUT_DIR/<input-prefix>_output_<start>_<end>.csv.
    """
    if not os.path.isdir(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)

    input_file_prefix = os.path.splitext(os.path.basename(input_file))[0]

    output_file = os.path.join(
        OUTPUT_DIR, input_file_prefix + '_output_{}_{}.csv'.format(start, end))

    header, all_rows = read_all(input_file)

    # newline='' is required by the csv module when writing; without it,
    # \r\n platforms emit a blank line after every record.
    with open(output_file, 'w', newline='') as outcsv:

        output_header = header + ['suzuki_error', 'error', '%']

        writer = csv.DictWriter(outcsv, output_header)
        writer.writeheader()

        for row in all_rows[start - 1:end]:

            print("Processing: " + row['row'])

            n = int(row['n'])
            k = int(row['k'])
            r = int(row['r'])
            t = int(row['t'])

            # Extract the non-empty lambda_* / v_* cells directly; the
            # original built throwaway dicts whose loop variables shadowed
            # k and v inside the comprehensions.
            lambda_values = [float(val) for key, val in row.items()
                             if key.startswith('lambda_') and val != '']
            v_values = [float(val) for key, val in row.items()
                        if key.startswith('v_') and val != '']

            chain = hchain.HeisenbergChain(n, v_values)

            suz_err = approx.error(chain, approx.suzuki_solution(k, r), t)
            op_err = approx.error(chain, lambda_values, t)

            row['suzuki_error'] = suz_err
            row['error'] = op_err
            row['%'] = 100 * op_err / suz_err

            writer.writerow(row)
# --- Example #5 ---
def add_column(folder_path):
    """Add a 'v_suz_error' column to exp15_results.csv.

    For each row of <folder_path>/exp15_results.csv, builds the 5-site
    chain from columns v_1..v_5 and records the Suzuki-solution error at
    the row's own (k, r, t), then writes the augmented table to
    <folder_path>/exp15_results_vcol.csv.
    """
    df = pd.read_csv(os.path.join(folder_path, 'exp15_results.csv'))

    for i in range(len(df)):
        print(i)  # progress indicator for a long-running loop

        row = df.loc[i]
        v = list(row['v_1':'v_5'])
        r = int(row['r'])
        k = int(row['k'])
        t = int(row['t'])
        chain = hchain.HeisenbergChain(5, v)

        df.loc[i, 'v_suz_error'] = approx.error(
            chain, approx.suzuki_solution(k, r), t)

    # Write once after the loop; the original rewrote the whole CSV on
    # every iteration, doing O(rows^2) I/O for the same final file.
    df.to_csv(os.path.join(folder_path, 'exp15_results_vcol.csv'))
# --- Example #6 ---
def plot(folder_path):
    """Plot per-group errors from exp16_results.csv against r, with the
    k=2 Suzuki baseline at t=10 drawn in blue for comparison."""
    df = pd.read_csv(os.path.join(folder_path, 'exp16_results.csv'))

    couplings = list(df.loc[0, 'v_1':'v_5'])
    chain = hchain.HeisenbergChain(5, couplings)

    r_counts = range(25, 401, 25)
    suz_errs = [approx.error(chain, approx.suzuki_solution(2, r), 10)
                for r in r_counts]

    # Each 16-row group shares one 'original_r'; larger original_r values
    # plot redder (smaller green/blue components).
    for start in range(0, len(df), 16):
        group = df.loc[start:start + 15]
        orig_r = int(group.loc[start, 'original_r'])
        shade = 1 - orig_r / 400
        plt.plot(r_counts, group['error'],
                 color=(1.0, shade, shade, 0.8),
                 marker='.', linewidth=1.5, markersize=4,
                 label='_nolegend_')

    plt.plot(r_counts, suz_errs,
             color='blue', marker='.', linewidth=2, markersize=7,
             label='Suzuki')
    plt.yscale(value='log')

    plt.legend(fontsize=16)
    plt.xlabel('r', fontsize=18)
    plt.ylabel('Absolute Error', fontsize=18)
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)

    plt.tight_layout()
    plt.show()
# --- Example #7 ---
def run_cmaes_lambda(v, k, r, seed, generations, sig=None):
    """Optimise a lambda vector with CMA-ES, seeded at the Suzuki solution.

    Args:
        v: chain couplings; the chain has len(v) sites.
        k: Suzuki order.
        r: number of Trotter repetitions.
        seed: RNG seed applied to both `random` and `numpy.random`.
        generations: number of CMA-ES generations.
        sig: initial CMA step size; defaults to 1e-5 / len(suzuki).

    Returns:
        (pop, log, hof) from deap's eaGenerateUpdate; hof[0] is the best
        individual found.
    """
    suzuki = approx.suzuki_solution(k, r)

    # Identity comparison for the None sentinel (PEP 8), not '=='.
    if sig is None:
        sig = 1e-5 / len(suzuki)

    chain = hchain.HeisenbergChain(len(v), v)

    random.seed(seed)
    np.random.seed(seed)

    # Error from target
    def target_error(ind):
        """Fitness: error of the (optionally normalised) individual."""
        norm_ind = norm_f(ind) if NORMALISE else ind
        # Bug fix: the original computed norm_ind and then evaluated the
        # raw individual, leaving the NORMALISE branch as dead code.
        return approx.error(chain, norm_ind, t=2 * chain.n),

    toolbox = base.Toolbox()
    toolbox.register("evaluate", target_error)

    # CMA-ES centred on the Suzuki solution with a small initial step.
    strategy = cma.Strategy(centroid=suzuki, sigma=sig)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    pop, log = algorithms.eaGenerateUpdate(toolbox,
                                           ngen=generations,
                                           stats=stats,
                                           halloffame=hof,
                                           verbose=True)

    return pop, log, hof
# --- Example #8 ---
def compare_lambda_over_n(input_filename, input_df, start_row, end_row,
                          output_folder_path):
    """Score each row's stored lambda vector against the Suzuki baseline.

    Adds 'suzuki error', 'optimised error' and '%' columns, fills them for
    the 1-indexed rows [start_row, end_row] at the hardcoded time SIM_TIME,
    and writes the surviving rows to
    <output>/<input-prefix>_output_<start>_<end>.csv.
    """
    df = input_df.copy()
    # Insert in the same order as the original so the final column order
    # ('%', 'suzuki error', 'optimised error') is preserved.
    for column in ('optimised error', 'suzuki error', '%'):
        df.insert(1, column, None)

    if not os.path.isdir(output_folder_path):
        os.makedirs(output_folder_path)

    prefix = os.path.splitext(input_filename)[0]
    output_data_path = os.path.join(
        output_folder_path,
        prefix + '_output_{}_{}.csv'.format(start_row, end_row))

    for idx in range(start_row - 1, end_row):
        record = df.loc[idx]

        n = int(record['n'])
        couplings = [float(x) for x in record['v_1':'v_%d' % n]]
        k = int(record['k'])
        r = int(record['r'])

        # Full lambda vector: r * 5^(k-1) entries.
        lam = list(record.loc['lambda_1':'lambda_%d' % (r * 5**(k - 1))])
        chain = hchain.HeisenbergChain(n, couplings)

        # Hardcode time
        baseline = approx.error(chain, approx.suzuki_solution(k, r),
                                t=SIM_TIME)
        optimised = approx.error(chain, lam, t=SIM_TIME)

        df.loc[idx, 'suzuki error'] = baseline
        df.loc[idx, 'optimised error'] = optimised
        df.loc[idx, '%'] = 100 * optimised / baseline

    df.dropna(inplace=True)
    df.to_csv(output_data_path, index=False)
# --- Example #9 ---
def calc_values():
    """Compare grouped, canonical and random operator permutations.

    Builds a random 5-site Heisenberg chain and, for Suzuki orders k=2
    (r = 25..400 step 25) and k=3 (r = 5..60 step 5), records the gate
    count and error at t=2n under three permutation families. Writes one
    CSV per (family, k) into results/perm_analysis, plus chain.csv with
    the sampled couplings so the run is reproducible.
    """

    def _find_results_dir():
        # Walk up to four directory levels looking for an existing
        # 'results' folder; if none exists, the deepest candidate is
        # returned unchecked (matching the original behaviour).
        candidate = os.path.join('..', 'results')
        for depth in range(2, 5):
            if os.path.isdir(candidate):
                break
            candidate = os.path.join(*(['..'] * depth + ['results']))
        return candidate

    def _perm_stats(chain, k, r_values, group_perm, can_perm, rand_perms):
        # Return (grouped, canonical, random-mean) dataframes of
        # [r, gate count, error] over r_values for Suzuki order k.
        cols = ['r', 'gate count', 'error']
        grouped = pd.DataFrame(columns=cols)
        canonical = pd.DataFrame(columns=cols)
        randomised = pd.DataFrame(columns=cols)
        t = 2 * chain.n

        for r in r_values:
            print(r)  # progress indicator
            suz = approx.suzuki_solution(k, r)

            for frame, perm in ((grouped, group_perm), (canonical, can_perm)):
                frame.loc[len(frame)] = [
                    r,
                    approx.gate_count(chain, len(suz), permutation=perm),
                    approx.error(chain, suz, t=t, permutation=perm),
                ]

            # Average gate count and error over the 20 random permutations.
            gc_sum = sum(approx.gate_count(chain, len(suz), permutation=p)
                         for p in rand_perms)
            err_sum = sum(approx.error(chain, suz, t=t, permutation=p)
                          for p in rand_perms)
            randomised.loc[len(randomised)] = [
                r, gc_sum / len(rand_perms), err_sum / len(rand_perms)]

        return grouped, canonical, randomised

    results_path = _find_results_dir()
    path = os.path.join(results_path, 'perm_analysis')
    if not os.path.isdir(path):
        os.makedirs(path)

    np.random.seed(1294635)

    n = 5
    chain = hchain.HeisenbergChain(n, np.random.uniform(-1, 1, n))

    # Indices grouped by residue class mod 4 (all the 4i's, then 4i+1's, ...).
    group_perm = ([4 * i for i in range(chain.n)]
                  + [4 * i + 1 for i in range(chain.n)]
                  + [4 * i + 2 for i in range(chain.n)]
                  + [4 * i + 3 for i in range(chain.n)])
    # Canonical ordering is the identity permutation.
    can_perm = list(range(4 * chain.n))
    rand_perms = [np.random.permutation(4 * chain.n) for _ in range(20)]

    group_k2, can_k2, rand_k2 = _perm_stats(
        chain, 2, range(25, 401, 25), group_perm, can_perm, rand_perms)
    group_k2.to_csv(os.path.join(path, 'Grouped k=2.csv'))
    can_k2.to_csv(os.path.join(path, 'Canonical k=2.csv'))
    rand_k2.to_csv(os.path.join(path, 'Random k=2.csv'))

    group_k3, can_k3, rand_k3 = _perm_stats(
        chain, 3, range(5, 61, 5), group_perm, can_perm, rand_perms)
    group_k3.to_csv(os.path.join(path, 'Grouped k=3.csv'))
    can_k3.to_csv(os.path.join(path, 'Canonical k=3.csv'))
    rand_k3.to_csv(os.path.join(path, 'Random k=3.csv'))

    # Record the chain itself so the experiment can be reproduced.
    df = pd.DataFrame(columns=['n'] + ['v_%d' % i
                                       for i in range(1, chain.n + 1)])
    df.loc[len(df)] = [chain.n] + list(chain.v)
    df.to_csv(os.path.join(path, 'chain.csv'))