import qubit          # local module: simulated qubit measurements
import distr_storage  # local module: keeps the previous distribution between updates


def renew_probalities(new_qubit_state, data):
    '''
    :param new_qubit_state: measured qubit state, 0 or 1
    :param data: experiment data holding the current distribution of all fields
    :return: None; data.probability_distribution is replaced by the updated distribution
    '''
    # Alternative outcome models, kept for reference:
    #new_qubit_state = int(round(sum([qubit.randbin(data, data.F) for i in range(data.num_of_repetitions)]) / data.num_of_repetitions))
    #new_qubit_state = [qubit.randbin(data, data.F) for i in range(data.num_of_repetitions)]
    distr_storage.old_distr = data.probability_distribution.copy()  # save the current values so every P(F_i) is recomputed from the same prior
    for i in range(data.fields_number - 1, -1, -1):
        data.probability_distribution[i] = reaccount_P_F_i(
            i, new_qubit_state, data.F_min + data.delta_F * i, data)
    data.probability_distribution = normalise(data)
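reaccount_P_F_i and normalise are defined elsewhere in the project. The sketch below shows the Bayes update they are assumed to perform, with a cosine Ramsey likelihood inferred from the phase expression experimentData.const * F * experimentData.F_degree * experimentData.t printed in perform() below; the project's actual formula may differ (e.g. by a decoherence envelope).

import math


def P_qubit_state_on_F_i(qubit_state, F_i, data):
    # Assumed Ramsey likelihood P(outcome | F_i): the ground-state probability
    # oscillates with the accumulated phase const * F_i * F_degree * t.
    p0 = (1 + math.cos(data.const * F_i * data.F_degree * data.t)) / 2
    return p0 if qubit_state == 0 else 1 - p0


def reaccount_P_F_i(i, new_qubit_state, F_i, data):
    # One Bayes step: unnormalised posterior = likelihood * saved prior.
    return P_qubit_state_on_F_i(new_qubit_state, F_i, data) * distr_storage.old_distr[i]


def normalise(data):
    # Rescale so the distribution sums to 1 again.
    total = sum(data.probability_distribution)
    return [p / total for p in data.probability_distribution]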
import matplotlib.pyplot as plt

import bayesians_learning
import plotter  # local plotting helpers
import qubit
# ExperimentData, find_peak and find_sigma_2 come from local project modules.


def perform():
    experimentData = ExperimentData()
    F = experimentData.F
    sigma = {}
    a_from_t_sum = {}  # sensitivity
    a_from_step = {}   # sensitivity
    N = 90
    t_sum = 0
    epsilon = 10**(-3)
    prev_sigma = experimentData.F_max - experimentData.F_min
    flag = False
    prev_step = 0
    # print(experimentData.probability_distribution)  # initial distribution
    print(experimentData.fields_number)
    for step in range(N):
        bayesians_learning.renew_probalities(qubit.randbin(experimentData, F), experimentData)
        # Alternative outcome sources, kept for reference:
        #bayesians_learning.renew_probalities(qubit.randbin3(experimentData, F), experimentData)
        #bayesians_learning.renew_probalities(qubit.randbin2(experimentData, F), experimentData)
        #bayesians_learning.renew_probalities(ramsey_qubit.output(experimentData.t), experimentData)
        t_sum += experimentData.t
        x_peak, y_peak = find_peak(experimentData)
        current_sigma = find_sigma_2(x_peak, y_peak, experimentData)
        a_from_t_sum[t_sum] = current_sigma * t_sum**0.5
        a_from_step[step] = current_sigma * t_sum**0.5
        if current_sigma != 0:
            sigma[t_sum] = current_sigma
        # Start adapting t only once the posterior has narrowed for the first time.
        if step <= 50 and prev_sigma == experimentData.F_max - experimentData.F_min and current_sigma != 0:
            flag = True
        if flag and \
                step - prev_step >= 2 and \
                prev_sigma + experimentData.delta_F > 2 * current_sigma:
            # dropped extra condition:
            # experimentData.const * F * experimentData.F_degree * experimentData.t <= 3.14
            prev_sigma = current_sigma
            prev_step = step
            experimentData.t *= experimentData.time_const  # lengthen the evolution time once sigma has roughly halved
            print(step)
        if flag and prev_sigma < current_sigma:
            prev_sigma = current_sigma
        if step % 5 == 0:
            plt.plot([
                experimentData.F_min + i * experimentData.delta_F
                for i in range(experimentData.fields_number)
            ], experimentData.probability_distribution)  # distribution every 5 steps
        print(sum(experimentData.probability_distribution), x_peak, y_peak, step,
              current_sigma, prev_sigma, t_sum,
              experimentData.const * F * experimentData.t * experimentData.F_degree,
              flag)  # printed every step; the sum checks normalisation ~ 1
        if y_peak >= 1.0 - epsilon or t_sum >= 8 * 10**(-6):
            break
    plt.plot([
        experimentData.F_min + i * experimentData.delta_F
        for i in range(experimentData.fields_number)
    ], experimentData.probability_distribution)  # final distribution
    plt.show()
    #fig.savefig('distr_' + '.png', dpi=500)
    plt.close()
    print(list(sigma.keys())[-1], list(sigma.values())[-1])
    plotter.plotting_sensitivity(a_from_step, r'$N$')
    plotter.plotting_sensitivity(a_from_t_sum, r'$t_{sum}$')
    x_peak, y_peak = find_peak(experimentData)
    plotter.plotting(sigma)
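find_peak and find_sigma_2 are helpers over the discretised posterior: its mode and a width estimate around it. A minimal sketch is below; the standard-deviation choice is an assumption, and the repo may instead fit the peak's shape.

def find_peak(data):
    # Mode of the posterior: (field value, posterior probability) at the maximum.
    i_max = max(range(data.fields_number),
                key=lambda i: data.probability_distribution[i])
    return data.F_min + data.delta_F * i_max, data.probability_distribution[i_max]


def find_sigma_2(x_peak, y_peak, data):
    # Width of the posterior around the peak, here its standard deviation;
    # y_peak is unused in this sketch.
    var = sum(data.probability_distribution[i]
              * (data.F_min + data.delta_F * i - x_peak) ** 2
              for i in range(data.fields_number))
    return var ** 0.5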
def perform():
    experimentData = ExperimentData()
    sigma = {}
    a_from_t_sum = {}  # sensitivity
    a_from_step = {}   # sensitivity
    N = 40
    t_sum = 0
    epsilon = 10**(-5)
    for step in range(N):
        # Majority vote over num_of_repetitions single-shot measurements.
        outcome = int(round(sum([
            qubit.randbin(experimentData, experimentData.F)
            for i in range(experimentData.num_of_repetitions)
        ]) / experimentData.num_of_repetitions))
        # Bisection: keep the half of [F_min, F_max] whose edge explains the outcome better.
        if bayesians_learning.P_qubit_state_on_F_i(outcome, experimentData.F_min, experimentData) \
                > bayesians_learning.P_qubit_state_on_F_i(outcome, experimentData.F_max, experimentData):
            experimentData.F_max = (experimentData.F_max + experimentData.F_min) / 2
        else:
            experimentData.F_min = (experimentData.F_max + experimentData.F_min) / 2
        t_sum += experimentData.t * experimentData.num_of_repetitions
        current_sigma = (experimentData.F_max - experimentData.F_min) / 2
        sigma[t_sum] = current_sigma
        center = (experimentData.F_max + experimentData.F_min) / 2
        a_from_t_sum[experimentData.t] = max(abs(center - experimentData.F),
                                             current_sigma) * t_sum**0.5
        #a_from_step[step] = current_sigma * t_sum**0.5
        experimentData.t *= experimentData.time_const
        print(step, center, current_sigma, experimentData.t * 10**6, t_sum)
        #x = np.arange(experimentData.F_min, experimentData.F_max, 0.01)
        #plt.plot(x, 1 / (current_sigma * np.sqrt(2 * np.pi)) *
        #         np.exp(-(x - center)**2 / (2 * current_sigma**2)),
        #         linewidth=2, color='r')
        if current_sigma <= epsilon or experimentData.t >= 200 * 10**(-6):
            break
    #plt.show()
    #plt.close()
    print(list(sigma.keys())[-1], list(sigma.values())[-1])
    #try:
    #    plotter.plotting_sensitivity(a_from_step, r'$N$')
    #except Exception:
    #    pass
    try:
        plotter.plotting_sensitivity(a_from_t_sum, r'$t_{coherence\_max}, \, \mu s$')
    except Exception:
        pass
    plotter.plotting(sigma)
    return a_from_t_sum
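qubit.randbin(data, F) supplies the simulated single-shot outcomes used by both variants of perform(). A plausible sketch under the same assumed cosine likelihood as above; the real qubit module may also model decoherence or readout error.

import math
import random


def randbin(data, F):
    # Hypothetical single-shot Ramsey measurement: return 1 with the
    # excited-state probability for the true field F and evolution time data.t.
    p1 = (1 - math.cos(data.const * F * data.F_degree * data.t)) / 2
    return 1 if random.random() < p1 else 0

In both variants, multiplying experimentData.t by time_const whenever the interval or posterior width halves keeps the accumulated phase on a steep part of the fringe, which is what the a_from_t_sum sensitivity traces are meant to probe.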