config.read(full_path.with_suffix('.cfg'))  # read config from respective file
tau = config['DEFAULT'].getfloat('tau', fallback=0.1)

# calculate integrated autocorrelation time
ydata_mean = ydata_sum / data.shape[1]
tint, dtint, w_max = getIntegratedCorrelationTime(ydata_mean, factor=8)

# plot and fit
plt.errorbar(xdata[:w_max * 2], ydata_mean[:w_max * 2],
             label=r'Autocorrelation function, $\tau_{int} = %0.2f \pm %0.2f$' % (tint, dtint),
             fmt='.', color=color_plot)

if args.fit:
    xdata, ydata_mean = xdata[xdata < 50], ydata_mean[xdata < 50]
    xdata, ydata_mean = xdata[ydata_mean > 0], ydata_mean[ydata_mean > 0]
    parameters, parameters_error = op.curve_fit(linear, xdata, np.log(ydata_mean), p0=[1, 1])
    parameters_error = np.sqrt(np.diag(parameters_error))
    xdata_fit = np.linspace(min(xdata), max(xdata), 1000)
    ydata_fit = linear(xdata_fit, *parameters)
    plt.plot(xdata_fit, np.exp(ydata_fit), color=color_fit)

plt.xlabel('Metropolis iteration')
plt.ylabel(r'$\Gamma(t)$')
plt.yscale('log')
plt.legend()

# filesystem stuff
out_filename = getOutputFilename(relative_path, 'autocorrelation_metropolis', args.output)

# write to disk
plt.savefig(out_filename)
print('done')
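# ---------------------------------------------------------------------------
# The helpers autoCorrelationNormalized and getIntegratedCorrelationTime are
# used throughout these scripts but are not part of this listing.  Below is a
# minimal sketch of what they are assumed to do (normalized autocorrelation
# plus Madras-Sokal style automatic windowing); the actual helpers may differ
# in detail, hence the _sketch suffix on the names.
# ---------------------------------------------------------------------------
import numpy as np


def autoCorrelationNormalized_sketch(y, lags):
    """Normalized autocorrelation Gamma(t) / Gamma(0) of a 1D series."""
    y = np.asarray(y, dtype=float) - np.mean(y)
    gamma0 = np.mean(y * y)
    return np.array([np.mean(y[:len(y) - t] * y[t:]) / gamma0 if t > 0 else 1.0
                     for t in lags])


def getIntegratedCorrelationTime_sketch(gamma, factor=8):
    """Integrated autocorrelation time tau_int with automatic windowing:
    stop summing Gamma(t) at the first window W with W >= factor * tau_int(W)."""
    n = len(gamma)
    tau = 0.5
    w = 1
    for w in range(1, n):
        tau += gamma[w]
        if w >= factor * tau:
            break
    dtau = tau * np.sqrt((4 * w + 2) / n)  # Madras-Sokal style error estimate
    return tau, dtau, w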
tint, dtint, w_max = getIntegratedCorrelationTime(ydata_mean, factor=8)

# plot
plt.errorbar(xdata_times, ydata_mean,
             label=r'autocorrelation after %d iteration%s, $\tau_{int} = %0.4f \pm %0.4f$'
             % (iteration, 's' if iteration > 1 else '', tint, dtint),
             fmt='.', color=color_plot)

plt.xlabel('Time t')
plt.ylabel(r'$\Gamma(t)$')
plt.yscale('log')
plt.xlim(-0.1, 1)
plt.legend()

# filesystem stuff
out_filename = getOutputFilename(
    relative_path,
    'autocorrelation_%s' % ('-'.join([str(i) for i in iterations_used])),
    args.output)
if args.output:
    out_filename = args.output
out_filename.parent.mkdir(parents=True, exist_ok=True)

# write to disk
plt.savefig(out_filename)
print('done')
color_iterator = getColorIterator()
color_plot, color_fit = next(color_iterator)['color']

plt.errorbar(distances, transitions / N, yerr=dtransitions / N, fmt='.', color=color_plot)
plt.xlabel('Distance')
plt.ylabel('tunnelling rate')
if args.log:
    plt.yscale('log')

# filesystem stuff
out_filename = getOutputFilename(relative_path, 'tunnelling_current', args.output)

if args.fit:
    def exp_decay(x, *p):
        A, c = p
        return A * np.exp(-x / c)

    initvals = [1, 1]
    filter_ = (distances > args.fit) & (transitions > 5)
    parameters, parameters_error = op.curve_fit(exp_decay,
                                                distances[filter_],
                                                transitions[filter_] / N,
                                                p0=initvals,
                                                sigma=dtransitions[filter_] / N)
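    # -----------------------------------------------------------------------
    # The original continuation of this fit is not part of this listing.  As
    # a sketch only, the fitted exponential decay could be overlaid on the
    # data like this (parameter errors taken from the covariance matrix, as
    # in the other scripts); the labelling used originally may differ.
    perr = np.sqrt(np.diag(parameters_error))
    xdata_fit = np.linspace(min(distances[filter_]), max(distances[filter_]), 1000)
    plt.plot(xdata_fit, exp_decay(xdata_fit, *parameters), color=color_fit,
             label=r'fit $A\,\mathrm{e}^{-x/c}$, $c = %0.2f \pm %0.2f$' % (parameters[1], perr[1]))
    plt.legend()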
# locate the start of the thermalised region from the running mean of successive differences
d = ydata[:-1] - ydata[1:]
da = running_mean(d, 30)
print(da)
if da[0] > 0:
    start = np.argmax(da < 0) + 10
else:
    start = np.argmax(da > 0) + 10
print(start)
to_use = ydata[start::30]

# filesystem stuff
out_filename = getOutputFilename(relative_path, 'tunnelling_current_thermalisation', args.output)
out_filename_autocorrelation = pathlib.Path('%s_autocorrelation.pdf' % out_filename.with_suffix(''))

# calculate tunnelling current
tunnelling_current, dtunnelling_current = np.mean(to_use), np.std(to_use)
print(tunnelling_current, dtunnelling_current)

xdata_cut = xdata[start::30]
ydata_cut = autoCorrelationNormalized(ydata[start::30], np.arange(len(xdata_cut)))

# create autocorrelation plot
plt.figure()
plt.errorbar(xdata_cut, ydata_cut)
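# ---------------------------------------------------------------------------
# running_mean is used above (and in the scripts below) to smooth a series
# before locating the thermalised region or counting transitions.  It is not
# part of this listing; a minimal sketch, assuming a plain moving average over
# a window of n consecutive samples:
# ---------------------------------------------------------------------------
import numpy as np


def running_mean_sketch(x, n):
    """Moving average of x over windows of n consecutive samples."""
    x = np.asarray(x, dtype=float)
    return np.convolve(x, np.ones(n) / n, mode='valid')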
    exit(-1)

min_time_position = max_time_position = 0
number_of_transitions = {}
while max_time_position < num_time_lattice_positions:
    max_time_position += time_lattice_positions_to_use
    # count the transitions of the running mean of the track data
    number_of_transitions[(min_time_position, max_time_position)] = countTransitions(
        running_mean(data[iteration_count - 1][min_time_position:max_time_position], 10))
    min_time_position = max_time_position

# use the time slice with the most transitions
min_time_position, max_time_position = max(number_of_transitions, key=number_of_transitions.get)

plt.figure()
for iteration in iterations_used:
    iteration = int(iteration)
    # plot
    plt.errorbar(data[iteration - 1][min_time_position:max_time_position],
                 numbers[min_time_position:max_time_position],
                 label='path after %d iteration%s' % (iteration, 's' if iteration > 1 else ''))

plt.xlabel('Position')
plt.ylabel('Number')
plt.title('time slice %d:%d' % (min_time_position, max_time_position))
plt.legend()

# filesystem stuff
out_filename = getOutputFilename(relative_path, 'track_pretty_%s' % ('-'.join([str(i) for i in iterations_used])), args.output)

# write to disk
plt.savefig(out_filename)
print('done')
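# ---------------------------------------------------------------------------
# countTransitions is assumed to count how often the (smoothed) path crosses
# zero, i.e. tunnels between the two wells of the double-well potential.  The
# helper itself is not listed; a minimal sketch of that idea (the threshold
# and exact convention of the original may differ):
# ---------------------------------------------------------------------------
import numpy as np


def countTransitions_sketch(path):
    """Number of sign changes (zero crossings) along a 1D path."""
    signs = np.sign(np.asarray(path, dtype=float))
    signs = signs[signs != 0]  # ignore samples that sit exactly at zero
    return int(np.count_nonzero(signs[1:] != signs[:-1]))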
p = Potential(mu, lambda_)
e = Energy(k, p)
ydata = np.array([e(data[x]) for x in xdata])

# locate the start of the thermalised region from the running mean of successive differences
d = ydata[:-1] - ydata[1:]
da = running_mean(d, 10)
if da[0] > 0:
    start = np.argmax(da < 0) + 10
else:
    start = np.argmax(da > 0) + 10

# filesystem stuff
out_filename = getOutputFilename(relative_path, 'thermalisation', args.output)
out_filename_autocorrelation = pathlib.Path('%s_autocorrelation.pdf' % out_filename.with_suffix(''))

ydata_cut = autoCorrelationNormalized(ydata, np.arange(len(ydata)))

# calculate integrated autocorrelation time
tint, dtint, w_max = getIntegratedCorrelationTime(ydata_cut, factor=8)
step_size = int((tint + dtint) * 2 + 1)
xdata_cut = xdata[start::step_size]

# calculate mean over blocked data
ydata_mean = block(ydata[start:], step_size)
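# ---------------------------------------------------------------------------
# block() reduces a series to averages over consecutive, non-overlapping bins
# (data blocking), so the blocked values are roughly independent once the
# block size exceeds about 2 * tau_int.  The helper itself is not listed; a
# minimal sketch, assuming trailing samples that do not fill a block are dropped:
# ---------------------------------------------------------------------------
import numpy as np


def block_sketch(x, block_size):
    """Mean of x over consecutive, non-overlapping blocks of block_size samples."""
    x = np.asarray(x, dtype=float)
    n_blocks = len(x) // block_size
    return x[:n_blocks * block_size].reshape(n_blocks, block_size).mean(axis=1)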
p = Potential(mu, lambda_)

block_size = 10
xdata_cut = xdata[50::block_size]
kineticE = block(
    np.array([getTotalKineticEnergy(data[x], k) for x in xdata[50:]]),
    block_size)
potentialE = block(
    np.array([getTotalPotentialEnergy(data[x], p) for x in xdata[50:]]),
    block_size)

# filesystem stuff
out_filename = getOutputFilename(relative_path, 'virial', args.output)

start = 100 // block_size
potentialE_mean = np.mean(potentialE[start:])
potentialE_error = np.std(potentialE[start:])
kineticE_mean = np.mean(kineticE[start:])
kineticE_error = np.std(kineticE[start:])

# plot
plt.figure()
plt.fill_between(xdata_cut, potentialE + kineticE, kineticE,
                 alpha=0.75,
                 label=r'potential energy $\bar E = (%0.1f \pm %0.1f)$'
                 % (potentialE_mean, potentialE_error))
        datas.append(data)
        transitions.append(float(row[-2]))
        dtransitions.append(float(row[-1]))

fig, ax = plt.subplots(figsize=(6, 6))
cs = ax.imshow(datas,
               extent=[min(header_min), max(header_max), max(distances), min(distances)],
               norm=LogNorm())
cbar = fig.colorbar(cs)
cbar.ax.minorticks_off()

# plot the classical minima on top of the histogram
plt.plot([+d / 2 for d in distances], distances, color='black', label='Classical Minima')
plt.plot([-d / 2 for d in distances], distances, color='black')

x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
plt.xlabel('position')
plt.ylabel('minima distance')
plt.legend()

# filesystem stuff
out_filename = getOutputFilename(relative_path, 'lambda', args.output)

# write to disk
plt.savefig(out_filename)
print('done')
    if i == 0:
        header_min = [float(v) for v in row[1:]]
    elif i == 1:
        header_max = [float(v) for v in row[1:]]
    else:
        hbar = float(row[0])
        data = [int(v) for v in row[1:]]
        hbars.append(hbar)
        datas.append(data)

# plot
fig, ax = plt.subplots(figsize=(6, 6))
cs = ax.imshow(datas,
               extent=[min(header_min), max(header_max), 2.0, 0.0],
               norm=LogNorm())
cbar = fig.colorbar(cs)
cbar.ax.minorticks_off()

x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
plt.xlabel('position')
plt.ylabel('$\\hbar$')

# filesystem stuff
out_filename = getOutputFilename(relative_path, 'classical', args.output)

# write to disk
plt.savefig(out_filename)
print('done')