Example #1
def main():
    stds = []
    gov, insts = generate_agents(parameters.num_hei)
    gov, insts, stds = evolve(gov, insts, stds)
    output.produce_output(gov, insts, stds)
    files = [f for f in os.listdir('results') if f.endswith('.csv')]  # CSV result files only
    for f in files:
        plotter.plotting(os.path.join('results', f))
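
Note: a minimal sketch of the module-level scaffolding this snippet assumes. The module names are taken from the calls above; the import path for generate_agents and evolve is hypothetical.

import os

import parameters   # provides parameters.num_hei (project module, assumed)
import output       # provides output.produce_output (project module, assumed)
import plotter      # provides plotter.plotting (project module, assumed)
#from simulation import generate_agents, evolve  # hypothetical import path

if __name__ == '__main__':
    main()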
Example #2
def perform(theta_sphere, phi_sphere, F):
    experimentData = ExperimentData(F)
    F = experimentData.F
    sigma = {}
    N = 200
    t_sum = 0
    epsilon = 10**(-3)
    prev_sigma = experimentData.F_max - experimentData.F_min
    flag = False
    prev_step = 0
    #

    print(experimentData.probability_distribution)  # initial
    print(experimentData.fields_number)
    for step in range(N):

        bayesians_learning.renew_probalities(
            qubit.randbin3(experimentData, F, theta_sphere, phi_sphere),
            experimentData, theta_sphere, phi_sphere)
        #bayesians_learning.renew_probalities(qubit.randbin(experimentData, F), experimentData)
        #bayesians_learning.renew_probalities(ramsey_qubit.output(experimentData.t), experimentData)
        t_sum += experimentData.t

        x_peak, y_peak = find_peak(experimentData)
        current_sigma = find_sigma(x_peak, y_peak, experimentData)
        if current_sigma != 0:
            sigma[t_sum] = current_sigma

        if step <= 50 and prev_sigma == experimentData.F_max - experimentData.F_min and current_sigma != 0:
            flag = True

        if flag and \
                step - prev_step >= 2 and \
                prev_sigma + experimentData.delta_F > 2 * current_sigma and\
                experimentData.const * F * experimentData.F_degree * experimentData.t <= math.pi:
            prev_sigma = current_sigma
            prev_step = step
            experimentData.t *= experimentData.time_const
            print(step)

        if flag and prev_sigma < current_sigma:
            prev_sigma = current_sigma

        if step % 3 == 0:
            plt.plot([
                experimentData.F_min + i * experimentData.delta_F
                for i in range(experimentData.fields_number)
            ], experimentData.probability_distribution)  # plot distribution every 3 steps

        if (step + 1) % 1 == 0:  # always true: log every step
            print(sum(experimentData.probability_distribution), x_peak, y_peak,
                  step, current_sigma, prev_sigma, t_sum,
                  experimentData.const * F * experimentData.t *
                  experimentData.F_degree, flag)  # probabilities should sum to ~ 1

        if y_peak >= 1.0 - epsilon:
            break

    x_peak, y_peak = find_peak(experimentData)
    optimal_angles.F_model.append(x_peak)
    optimal_angles.y_model = y_peak
    optimal_angles.F_real.append(F)
    optimal_angles.theta_angles.append(theta_sphere)
    plt.plot([
        experimentData.F_min + i * experimentData.delta_F
        for i in range(experimentData.fields_number)
    ], experimentData.probability_distribution)  # final distr
    plt.show()
    #fig.savefig('distr_' + '.png', dpi=500)
    plt.close()
    print(list(sigma.keys())[-1], list(sigma.values())[-1])
    print(x_peak)
    #plotter.plotting(sigma)[1]

    return (x_peak, plotter.plotting(sigma)[1][1],
            list(sigma.keys())[-1], list(sigma.values())[-1])
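
Note: the trailing list(sigma.keys())[-1] / list(sigma.values())[-1] pattern builds two throwaway lists just to read the newest entry. On Python 3.8+, where dict views support reversed(), the last inserted pair can be read directly; a small sketch with dummy values:

sigma = {1e-6: 0.50, 2e-6: 0.30, 4e-6: 0.10}            # dummy t_sum -> sigma data
last_t_sum, last_sigma = next(reversed(sigma.items()))  # Python 3.8+
print(last_t_sum, last_sigma)                           # 4e-06 0.1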
Example #3
def perform():
    experimentData = ExperimentData()
    sigma = {}
    a_from_t_sum = {} #sensitivity
    a_from_step = {} #sensitivity
    N = 45
    t_sum = 0
    epsilon = 10 ** (-3)
    prev_sigma = experimentData.F_max - experimentData.F_min
    flag = False
    prev_step = 0
    prev_entropy_step = -1
    #
    fig, ax = plt.subplots()
    ax.minorticks_on()

    print(experimentData.probability_distribution) # initial
    print(experimentData.fields_number)
    plt.plot([experimentData.F_min + i * experimentData.delta_F for i in range(experimentData.fields_number)],
             experimentData.probability_distribution)

    for step in range(N):

        bayesians_learning.renew_probalities(experimentData)
        #bayesians_learning.renew_probalities(qubit.randbin3(experimentData, F), experimentData)
        #bayesians_learning.renew_probalities(qubit.randbin2(experimentData, F), experimentData)
        #bayesians_learning.renew_probalities(ramsey_qubit.output(experimentData.t), experimentData)
        t_sum += experimentData.t * experimentData.num_of_repetitions



        x_peak, y_peak = find_peak(experimentData)
        num_of_peaks = find_num_of_peaks(experimentData, y_peak)
        pseudo_entropy = pseudo_entropy_count(experimentData, y_peak)
        current_sigma = find_sigma(x_peak, y_peak, experimentData) / experimentData.gained_degree

        #a_from_t_sum[t_sum] = current_sigma * (t_sum) ** 0.5
        #a_from_step[step] = current_sigma * (t_sum) ** 0.5
        a_from_t_sum[experimentData.t] = abs(experimentData.F - x_peak) * (t_sum) ** 0.5

        if current_sigma != 0:
            sigma[t_sum] = current_sigma

        if step <= 50 and prev_sigma == experimentData.F_max - experimentData.F_min and current_sigma != 0:
            flag = True

        if flag and \
                step - prev_step >= 1: # and \
                #prev_sigma + experimentData.delta_F/experimentData.gained_degree > 2 * current_sigma:# and \
                #experimentData.const * F * experimentData.F_degree * experimentData.t <= 3.14:
            prev_sigma = current_sigma
            prev_step = step
            experimentData.t *= experimentData.time_const
            print(step)

        if flag and prev_sigma < current_sigma:
            prev_sigma = current_sigma

        if step % 1 == 0:  # always true: plot every step
            plt.plot([experimentData.F_min + i * experimentData.delta_F for i in range(experimentData.fields_number)],
                     experimentData.probability_distribution)

        if (step + 1) % 1 == 0:  # always true: log every step
            print(bayesians_learning.integrate_distribution(experimentData),
                  num_of_peaks, pseudo_entropy, x_peak, y_peak, step,
                  current_sigma, prev_sigma, experimentData.t,
                  experimentData.const * experimentData.F * experimentData.t * experimentData.F_degree,
                  flag)  # the integral should stay ~ 1

        if pseudo_entropy == 1 or num_of_peaks == 1:
            experimentData.num_of_repetitions = 51

        if pseudo_entropy > 1 and step - prev_entropy_step > 1 and num_of_peaks == 1:
            experimentData.num_of_repetitions = 101
            experimentData.t /= experimentData.time_const ** 0  # exponent 0: currently a no-op, kept as a tuning knob
            prev_entropy_step = step


        if num_of_peaks > 1:
            experimentData.t /= experimentData.time_const ** 0  # no-op with exponent 0
            experimentData.num_of_repetitions = 101

        if pseudo_entropy > 2 or num_of_peaks > 2:
            experimentData.t /= experimentData.time_const ** 1

        if pseudo_entropy > 3 or num_of_peaks > 3:
            experimentData.t /= experimentData.time_const ** 1  # stacks with the branch above, so t shrinks twice when > 3

        if (flag and current_sigma * experimentData.gained_degree <= 10 * experimentData.delta_F) or num_of_peaks > 1:
            plt.plot([experimentData.F_min + i * experimentData.delta_F for i in range(experimentData.fields_number)],
                     experimentData.probability_distribution)
            plt.show()
            plt.close()

            fig, ax = plt.subplots()
            ax.minorticks_on()

            expand_2(x_peak, y_peak, current_sigma, experimentData)
            plt.plot([experimentData.F_min + i * experimentData.delta_F for i in range(experimentData.fields_number)],
                     experimentData.probability_distribution)
            print(experimentData.probability_distribution)

        if experimentData.t >= 200 * 10**(-6):  # stop once t exceeds 200 µs
            break

    #plt.plot([experimentData.F_min + i*experimentData.delta_F for i in range(experimentData.fields_number)], experimentData.probability_distribution) # final distr
    plt.show()
    #fig.savefig('distr_' + '.png', dpi=500)
    plt.close()
    print(list(sigma.keys())[-1], list(sigma.values())[-1])

    '''try:
        plotter.plotting_sensitivity(a_from_step, r'$N$')
    except Exception:
        pass
    try:
        plotter.plotting_sensitivity(a_from_t_sum, r'$t_{sum}$')
    except Exception:
        pass'''
    #try:
    #    plotter.plotting_sensitivity(a_from_t_sum, r'$t_{coherense\_max}, \, \mu s$')
    #except Exception:
    #    pass

    #print("final sensitivity: ", a_from_t_sum[t_sum]*10**(-9))

    x_peak, y_peak = find_peak(experimentData)

    plotter.plotting(sigma)

    return a_from_t_sum
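
Note: every example rebuilds the x-axis grid with the same comprehension over fields_number. A small helper like the one below (hypothetical, not part of the original module) would shorten the plotting calls; it only assumes the F_min, delta_F, and fields_number attributes used throughout:

def field_grid(experiment_data):
    # Discrete field values F_k = F_min + k * delta_F, matching the
    # comprehension repeated in the plt.plot calls above.
    return [experiment_data.F_min + i * experiment_data.delta_F
            for i in range(experiment_data.fields_number)]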
Example #4
def perform():
    experimentData = ExperimentData()
    F = experimentData.F
    sigma = {}
    a_from_t_sum = {}  #sensitivity
    a_from_step = {}  #sensitivity
    N = 90
    t_sum = 0
    epsilon = 10**(-3)
    prev_sigma = experimentData.F_max - experimentData.F_min
    flag = False
    prev_step = 0
    #

    print(experimentData.probability_distribution)  # initial
    print(experimentData.fields_number)
    for step in range(N):

        bayesians_learning.renew_probalities(qubit.randbin(experimentData, F),
                                             experimentData)
        #bayesians_learning.renew_probalities(qubit.randbin3(experimentData, F), experimentData)
        #bayesians_learning.renew_probalities(qubit.randbin2(experimentData, F), experimentData)
        #bayesians_learning.renew_probalities(ramsey_qubit.output(experimentData.t), experimentData)
        t_sum += experimentData.t

        x_peak, y_peak = find_peak(experimentData)
        current_sigma = find_sigma_2(x_peak, y_peak, experimentData)

        a_from_t_sum[t_sum] = current_sigma * (t_sum)**0.5
        a_from_step[step] = current_sigma * (t_sum)**0.5

        if current_sigma != 0:
            sigma[t_sum] = current_sigma

        if step <= 50 and prev_sigma == experimentData.F_max - experimentData.F_min and current_sigma != 0:
            flag = True

        if flag and \
                step - prev_step >= 2 and \
                prev_sigma + experimentData.delta_F > 2 * current_sigma:# and \
            #experimentData.const * F * experimentData.F_degree * experimentData.t <= 3.14:
            prev_sigma = current_sigma
            prev_step = step
            experimentData.t *= experimentData.time_const
            print(step)

        if flag and prev_sigma < current_sigma:
            prev_sigma = current_sigma

        if step % 5 == 0:
            plt.plot([
                experimentData.F_min + i * experimentData.delta_F
                for i in range(experimentData.fields_number)
            ], experimentData.probability_distribution)  # plot distribution every 5 steps

        if (step + 1) % 1 == 0:  # always true: log every step
            print(sum(experimentData.probability_distribution), x_peak, y_peak,
                  step, current_sigma, prev_sigma, t_sum,
                  experimentData.const * F * experimentData.t *
                  experimentData.F_degree, flag)  # probabilities should sum to ~ 1

        if y_peak >= 1.0 - epsilon or t_sum >= 8 * 10**(-6):
            break

    plt.plot([
        experimentData.F_min + i * experimentData.delta_F
        for i in range(experimentData.fields_number)
    ], experimentData.probability_distribution)  # final distr
    plt.show()
    #fig.savefig('distr_' + '.png', dpi=500)
    plt.close()
    print(list(sigma.keys())[-1], list(sigma.values())[-1])

    plotter.plotting_sensitivity(a_from_step, r'$N$')
    plotter.plotting_sensitivity(a_from_t_sum, r'$t_{sum}$')

    x_peak, y_peak = find_peak(experimentData)

    plotter.plotting(sigma)
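
Note: both sensitivity dictionaries store sigma * sqrt(t_sum). If sigma followed the idealized standard-quantum-limit scaling 1/sqrt(t_sum), that product would stay flat, which is presumably what plotting_sensitivity is meant to reveal; a synthetic check, independent of the module:

import math

for t_sum in (1e-6, 4e-6, 16e-6):
    sigma = 1.0 / math.sqrt(t_sum)          # idealized SQL scaling (assumption)
    print(t_sum, sigma * math.sqrt(t_sum))  # constant sensitivity: 1.0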
Example #5
def perform():
    experimentData = ExperimentData()
    sigma = {}
    a_from_t_sum = {}  #sensitivity
    a_from_step = {}  #sensitivity
    N = 40
    t_sum = 0
    epsilon = 10**(-5)

    for step in range(N):
        outcome = int(
            round(
                sum([
                    qubit.randbin(experimentData, experimentData.F)
                    for i in range(experimentData.num_of_repetitions)
                ]) / experimentData.num_of_repetitions))

        if bayesians_learning.P_qubit_state_on_F_i(outcome, experimentData.F_min, experimentData)\
                > bayesians_learning.P_qubit_state_on_F_i(outcome, experimentData.F_max, experimentData):
            experimentData.F_max = (experimentData.F_max +
                                    experimentData.F_min) / 2
        else:
            experimentData.F_min = (experimentData.F_max +
                                    experimentData.F_min) / 2

        t_sum += experimentData.t * experimentData.num_of_repetitions

        current_sigma = (experimentData.F_max - experimentData.F_min) / 2

        sigma[t_sum] = current_sigma

        center = (experimentData.F_max + experimentData.F_min) / 2
        a_from_t_sum[experimentData.t] = max(abs(center - experimentData.F),
                                             current_sigma) * (t_sum)**0.5
        #a_from_step[step] = current_sigma * (t_sum) ** 0.5

        experimentData.t *= experimentData.time_const

        print(step, center, current_sigma, experimentData.t * 10**6, t_sum)
        #x = np.arange(experimentData.F_min, experimentData.F_max, 0.01)

        #plt.plot(x, 1 / (current_sigma * np.sqrt(2 * np.pi)) *
        #         np.exp(- (x - center) ** 2 / (2 * current_sigma ** 2)),
        #         linewidth=2, color='r')

        if current_sigma <= epsilon or experimentData.t >= 200 * 10**(-6):
            break

    #plt.show()
    #plt.close()

    print(list(sigma.keys())[-1], list(sigma.values())[-1])

    #try:
    #    plotter.plotting_sensitivity(a_from_step, r'$N$')
    #except Exception:
    #    pass
    try:
        plotter.plotting_sensitivity(a_from_t_sum,
                                     r'$t_{coherence\_max}, \, \mu s$')
    except Exception:
        pass

    plotter.plotting(sigma)

    return a_from_t_sum
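
Note: unlike the grid-based examples, this one halves the interval [F_min, F_max] on every step, so sigma shrinks geometrically. A stripped-down sketch of that interval-halving core, with a hypothetical oracle() standing in for the Bayesian likelihood comparison:

def bisect_field(f_min, f_max, oracle, eps=1e-5):
    # oracle(mid) -> True when the measurements favor the lower half.
    while (f_max - f_min) / 2 > eps:
        mid = (f_min + f_max) / 2
        if oracle(mid):
            f_max = mid
        else:
            f_min = mid
    return (f_min + f_max) / 2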
Example #6
def perform(p_err, a_err, F, n_rep):

    experimentData = ExperimentData()

    experimentData.F = F
    experimentData.F_min = 0  # min field Tesla
    experimentData.F_max = 50  # max field Tesla
    experimentData.F_degree = 10**(-9)

    experimentData.phase_err = p_err
    experimentData.amp_err = a_err

    experimentData.gained_degree = 1
    experimentData.delta_F = 1  # accuracy of F defining
    experimentData.fields_number = round(
        (experimentData.F_max - experimentData.F_min + experimentData.delta_F)
        / experimentData.delta_F)  # amount of discrete F meanings
    experimentData.time_const = 2
    experimentData.mu = 10**5 * 927 * 10**(-26)  # magnetic moment of the qubit
    experimentData.h = 6.62 * 10**(-34)  # Planck's constant
    experimentData.const = experimentData.mu / experimentData.h  # mu/h
    experimentData.t = math.pi / (experimentData.const *
                                  experimentData.F_degree *
                                  experimentData.F_max / 2) * 2**(-1)
    experimentData.t_init = experimentData.t  # time of interaction in seconds
    experimentData.num_of_repetitions = n_rep  # repetitions for one experiment

    experimentData.probability_distribution = [
        gaussian(15, 0, 1, 25, i) for i in range(experimentData.fields_number)
    ]

    sigma = {}
    a_from_t_sum = {}  #sensitivity
    a_from_step = {}  #sensitivity
    N = 13
    t_sum = 0
    epsilon = 20 * 10**(-3)  # 20 pT
    prev_sigma = experimentData.F_max - experimentData.F_min
    flag = False
    prev_step = 0
    prev_entropy_step = -1

    fig, ax = plt.subplots()
    font = {'fontname': 'Times New Roman'}
    ax.set_title(r'b)')
    ax.minorticks_on()
    ax.grid(which='major', axis='both')
    ax.grid(which='minor', axis='both', linestyle=':')

    # Axis labels:
    ax.set_xlabel("Field segment, $nT$", **font)
    ax.set_ylabel(r'$P(F_k)$', **font)

    print(experimentData.probability_distribution)  # initial
    print(experimentData.fields_number)
    ax.plot([
        experimentData.F_min + i * experimentData.delta_F
        for i in range(experimentData.fields_number)
    ], experimentData.probability_distribution,
            label='k=0')

    #answers = qubit.randbin2(experimentData, experimentData.F)
    #answers = real_experiment.output(experimentData)
    for step in range(N):

        # NOTE: every renew_probalities call below is commented out, so the
        # probability distribution never updates in this snippet; uncomment
        # one variant to make the loop meaningful.
        #bayesians_learning.renew_probalities(answers[experimentData.t], experimentData)
        #bayesians_learning.renew_probalities(1, experimentData)
        #bayesians_learning.renew_probalities(qubit.randbin2(experimentData, F), experimentData)
        #bayesians_learning.renew_probalities(ramsey_qubit.output(experimentData.t), experimentData)
        t_sum += experimentData.t * experimentData.num_of_repetitions

        x_peak, y_peak = find_peak(experimentData)
        num_of_peaks = find_num_of_peaks(experimentData, y_peak)
        pseudo_entropy = pseudo_entropy_count(experimentData, y_peak)
        current_sigma = find_sigma(
            x_peak, y_peak, experimentData) / experimentData.gained_degree

        #a_from_t_sum[t_sum] = current_sigma * (t_sum) ** 0.5
        #a_from_step[step] = current_sigma * (t_sum) ** 0.5
        a_from_t_sum[experimentData.t] = current_sigma * t_sum**0.5
        #a_from_t_sum[experimentData.t] = max(abs(experimentData.F - x_peak), current_sigma) * (t_sum)**0.5
        #a_from_t_sum[experimentData.t] = max(abs(experimentData.F - x_peak), current_sigma)

        if current_sigma != 0:
            sigma[t_sum] = current_sigma

        if step <= 50 and prev_sigma == experimentData.F_max - experimentData.F_min and current_sigma != 0:
            flag = True

        if flag and \
                step - prev_step >= 1: # and \
            #prev_sigma + experimentData.delta_F/experimentData.gained_degree > 2 * current_sigma:# and \
            #experimentData.const * F * experimentData.F_degree * experimentData.t <= 3.14:
            prev_sigma = current_sigma
            prev_step = step
            experimentData.t *= experimentData.time_const
            #print(step)

        if flag and prev_sigma < current_sigma:
            prev_sigma = current_sigma

        if step % 10 == 0:
            ax.plot([
                experimentData.F_min + i * experimentData.delta_F
                for i in range(experimentData.fields_number)
            ], experimentData.probability_distribution,
                    label='k={}'.format(step + 1))  # plot distribution every 10 steps

        if (step + 1) % 1 == 0:  # always true: log every step
            print(num_of_peaks, pseudo_entropy, x_peak, y_peak, step,
                  current_sigma, prev_sigma, experimentData.t,
                  experimentData.const * experimentData.F * experimentData.t *
                  experimentData.F_degree, flag)  # checking ~ 1

        if pseudo_entropy == 1 or num_of_peaks == 1:
            experimentData.num_of_repetitions = n_rep

        if pseudo_entropy > 1 and step - prev_entropy_step > 1 and num_of_peaks == 1:
            experimentData.num_of_repetitions = n_rep
            experimentData.t /= experimentData.time_const**0  # exponent 0: currently a no-op, kept as a tuning knob
            prev_entropy_step = step

        if num_of_peaks > 1:
            experimentData.t /= experimentData.time_const**0  # no-op with exponent 0
            experimentData.num_of_repetitions = n_rep

        if pseudo_entropy > 2 or num_of_peaks > 2:
            experimentData.t /= experimentData.time_const**1

        if pseudo_entropy > 3 or num_of_peaks > 3:
            experimentData.t /= experimentData.time_const**1  # stacks with the branch above, so t shrinks twice when > 3

        if (flag and current_sigma * experimentData.gained_degree <= 5 * experimentData.delta_F) or num_of_peaks > 1:
            #plt.plot([experimentData.F_min + i * experimentData.delta_F for i in range(experimentData.fields_number)],
            #         [each for each in experimentData.probability_distribution], label='k={}'.format(step+1))
            plt.legend(loc="best")
            plt.show()
            plt.close()

            fig, ax = plt.subplots()
            ax.minorticks_on()
            ax.set_title(r'b)')
            ax.grid(which='major', axis='both')
            ax.grid(which='minor', axis='both', linestyle=':')

            # Axis labels:
            ax.set_xlabel("Field segment, $nT$", **font)
            ax.set_ylabel(r'$P(F_k)$', **font)

            expand_2(x_peak, y_peak, current_sigma, experimentData)
            plt.plot([
                experimentData.F_min + i * experimentData.delta_F
                for i in range(experimentData.fields_number)
            ], experimentData.probability_distribution,
                     label='k={}'.format(step + 1))
            #print(experimentData.probability_distribution)

        if experimentData.t >= experimentData.T_2:
            break
    '''ax.plot([experimentData.F_min + i*experimentData.delta_F for i in range(experimentData.fields_number)], experimentData.probability_distribution, label='final') # final distr
    plt.legend(loc="best")

    plt.show()
    #fig.savefig('distr_' + '.png', dpi=500)
    plt.close()'''
    print("t_sum: ",
          list(sigma.keys())[-1], ', sigma:',
          list(sigma.values())[-1])
    print("t_coh_max: ",
          list(a_from_t_sum.keys())[-1] * 10**6, ", sensitivity: ",
          list(a_from_t_sum.values())[-1] * experimentData.F_degree)
    '''try:
        plotter.plotting_sensitivity(a_from_step, r'$N$')
    except Exception:
        pass
    try:
        plotter.plotting_sensitivity(a_from_t_sum, r'$t_{sum}$')
    except Exception:
        pass'''
    try:
        plotter.plotting_sensitivity(a_from_t_sum,
                                     r'$t_{coherence\_max}, \, \mu s$')
    except Exception:
        pass

    #print("final sensitivity: ", a_from_t_sum[t_sum]*10**(-9))

    x_peak, y_peak = find_peak(experimentData)
    succeeded = 0
    if abs(x_peak - experimentData.F) <= epsilon:
        succeeded = 1

    plotter.plotting(sigma)
    return succeeded
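
Note: the initial interaction time in this example is chosen so that the mid-range field F_max / 2 accumulates a phase of exactly pi/2. A quick check with the constants set in the snippet:

import math

mu = 10**5 * 927 * 10**(-26)   # magnetic moment, as in the snippet
h = 6.62 * 10**(-34)           # Planck's constant
const = mu / h
F_degree = 10**(-9)
F_max = 50

t = math.pi / (const * F_degree * F_max / 2) * 2**(-1)
print(const * (F_max / 2) * F_degree * t, math.pi / 2)  # both equal pi/2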