def plot_psds(psd_plots, frequencies, labels, index):
    colorList = get_color_map(len(psd_plots))
    plt.figure()
    for currLabel, psd, color in zip(labels, psd_plots, colorList):
        plt.loglog(frequencies[index], psd[index], "x", color=color)
        plt.loglog(frequencies[2 * index + 1], psd[2 * index + 1], "x", color=color)
        #plt.loglog(frequencies, drive, color = color)
        plt.loglog(frequencies, psd, color=color, label=currLabel)
    plt.title("Noise level integrated over " + str(integrationTime) + " seconds")
    plt.xlabel("Frequencies [Hz]")
    plt.xlim([20, 100])
    plt.ylabel("Noise Level [N]")
    plt.ylim([
        0,
        np.amax(psd_plots[-1][np.argmin(np.abs(frequencies - 20)):
                              np.argmin(np.abs(frequencies - 100))])
    ])
    #plt.title(path[path.rfind('\\'):])
    plt.legend()
    plt.show(block=False)
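
### A minimal usage sketch for plot_psds(). The trace list, sampling rate, and
### drive-line index below are hypothetical placeholders; plot_psds() also
### assumes 'integrationTime' and get_color_map() exist at module scope.
#
# freqs = np.fft.rfftfreq(2**14, d=1.0/5000.0)      # assumed 5 kHz sampling
# psds = [np.abs(np.fft.rfft(trace))**2 for trace in traces]
# drive_index = np.argmin(np.abs(freqs - 41.0))     # assumed 41 Hz drive line
# plot_psds(psds, freqs, ['trace %i' % i for i in range(len(psds))], drive_index)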
def plot_profs(fp_arr):
    # plots the average profile from different heights
    i = 1
    colors = bu.get_color_map(len(fp_arr), cmap='plasma')
    fp_arr_sort = sorted(fp_arr, key=lambda fp: fp.cant_height)
    for fp_ind, fp in enumerate(fp_arr_sort):
        color = colors[fp_ind]
        #plt.errorbar(fp.bins, fp.y, fp.errors, label = str(np.round(fp.cant_height)) + 'um')
        # if multi_dir:
        #     lab = 'dir' + str(i)
        # else:
        lab = str(np.round(fp.cant_height)) + 'um'
        i += 1
        # if multi_dir:
        #     plt.plot(fp.bins, fp.y / np.max(fp.y), 'o', label = lab, color=color)
        #     plt.ylim(10**(-5), 10)
        # else:
        plt.plot(fp.profile[0], fp.profile[1], 'o', label=lab, color=color)
    plt.xlabel("Position [um]")
    plt.ylabel("Marginalized irradiance ~[W/m]")
    if log_profs:
        plt.gca().set_yscale('log')
    else:
        plt.gca().set_yscale('linear')
    plt.legend()
    plt.show()
def plot_profs(fp_arr, title='', show=True):
    # plots the average profile from different heights
    i = 1
    colors = bu.get_color_map(len(fp_arr), cmap='plasma')
    fp_arr_sort = sorted(fp_arr, key=lambda fp: fp.cant_height)
    plt.figure()
    for fp_ind, fp in enumerate(fp_arr_sort):
        color = colors[fp_ind]
        #plt.errorbar(fp.bins, fp.y, fp.errors, label = str(np.round(fp.cant_height)) + 'um')
        lab = str(np.round(fp.cant_height)) + 'um'
        if multi_dir:
            plt.plot(fp.bins, fp.y / np.max(fp.y), 'o', label=lab, color=color)
            plt.ylim(10**(-5), 10)
        else:
            plt.plot(fp.bins, fp.y, 'o', label=lab, color=color)
    plt.xlabel("Knife-edge Position [$\\mu$m]")
    plt.ylabel("Marginalized Irradiance [~W/m]")
    if log_profs:
        plt.gca().set_yscale('log')
    else:
        plt.gca().set_yscale('linear')
    plt.legend(loc='lower right', ncol=2)
    if title:
        plt.title(title)
    plt.tight_layout()

    heights = []
    means = []
    for fp_ind, fp in enumerate(fp_arr_sort):
        heights.append(fp.cant_height)
        means.append(fp.mean)

    plt.figure()
    plt.plot(heights, means, 'o', ms=10)
    plt.xlabel("Knife-edge Height [$\\mu$m]")
    plt.ylabel("Profile mean [$\\mu$m]")
    if title:
        plt.title(title)
    plt.tight_layout()

    if show:
        plt.show()
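
### A minimal usage sketch for the knife-edge profile plotter above. The
### profile objects and pickle path are hypothetical; plot_profs() assumes
### each element exposes .cant_height, .bins, .y, and .mean, and that
### 'multi_dir' and 'log_profs' are defined at module scope.
#
# fp_arr = pickle.load(open('/path/to/measured_profiles.p', 'rb'))
# plot_profs(fp_arr, title='20180101 knife-edge scan', show=True)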
def plot_2W_curves(path, file_name):
    """ plot all curves with 'file_name' in each sub-folder of 'path' """
    #plot_2W_curves(path, file_name)
    file_list = glob.glob(path + "/*/" + file_name)
    N = len(file_list)
    colormap = get_color_map(N)
    plt.figure()
    for name, color in zip(file_list, colormap):
        file_label = name[len(path):name.rfind(file_name)]
        pathname = name[:name.rfind(file_name)]
        Ea_order, force_2W_order, popt_2W, alpha0, error = get_stuff_for_2W(
            pathname, file_name)
        plt.plot(Ea_order, force_2W_order, ".", color=color, label=file_label)
        plt.plot(Ea_order, F2w((np.array(Ea_order), np.array(alpha0)), *popt_2W),
                 color=color)
    plt.ylabel("Force (N)")
    plt.xlabel("AC field amplitude (N/e)")
    plt.legend()
    plt.title(path[path.rfind('\\'):])
    plt.show()
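
### Hedged usage sketch for plot_2W_curves(). The directory layout below is
### hypothetical; the function only assumes each sub-folder of 'path' contains
### a file named 'file_name' that get_stuff_for_2W() can parse.
#
# plot_2W_curves(r'C:\data\20180601\2W_measurements', 'force_v_field.txt')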
    xpsd_old, freqs = matplotlib.mlab.psd(dat[:, a] - numpy.mean(dat[:, a]),
                                          Fs=Fs, NFFT=NFFT)

    # Ddrive = dat[:, bu.drive]*np.gradient(dat[:,bu.drive])
    # DdrivePSD, freqs = matplotlib.mlab.psd(Ddrive-numpy.mean(Ddrive), Fs = Fs, NFFT = NFFT)

    #for h in [xpsd, ypsd, zpsd]:
    #    h /= numpy.median(dat[:,bu.zi])**2

    return [freqs, 0, 0, 0, 0, xpsd_old, Press, Volt, Time]


freq = getdata(file_list[0])[0]
time0 = getdata(file_list[0])[8]

N = 1
cmap = bu.get_color_map(len(file_list) // N)

for idx, c in zip(range(len(file_list) // N), cmap):  # integer division so range() gets an int
    i = file_list[idx]
    aux_press = str("mbar")
    aa = getdata(i)
    aux = str(aa[6])
    aux_angle = str("_angle=")
    aux2 = i[i.rfind('_') + 1:i.rfind('.h5')]
    v = ""
    if VEOM_h5:
        v = ", v=" + str("%.1f" % aa[7])
    tot_psd = 0
    for j in range(N):
        cpsd = getdata(file_list[idx * N + j])[5]
        tot_psd += cpsd
        time = getdata(file_list[idx * N + j])[8] - time0
Ar_lab_str = 'Ar: $p_{\\mathrm{max}}$' \
    + '$ = {0} \\times 10^{{{1}}}~$ mbar'.format('{:0.2f}'.format(Ar_sv[0]), \
                                                 '{:d}'.format(Ar_sv[1]))
Ar_lab_str = 'Ar: $p_{\\mathrm{max}}$' + '$={:0.3f}$ mbar'.format(Ar_pmax)

SF6_lab_str = 'SF$_6$: $p_{\\mathrm{max}}$' \
    + '$ = {0} \\times 10^{{{1}}}~$ mbar'.format('{:0.2f}'.format(SF6_sv[0]), \
                                                 '{:d}'.format(SF6_sv[1]))
SF6_lab_str = 'SF$_6$: $p_{\\mathrm{max}}$' + '$={:0.3f}$ mbar'.format(SF6_pmax)

stuff = [[He_pmax, He_dat, He_lab_str, 'C0'], \
         [Ar_pmax, Ar_dat, Ar_lab_str, 'C1'], \
         [SF6_pmax, SF6_dat, SF6_lab_str, 'C2']]

maxp = 0
colors = bu.get_color_map(7, cmap='inferno')[::-1]
for i in [0, 1, 2]:
    pmax = stuff[i][0]
    dat = stuff[i][1]
    lab_str = stuff[i][2]
    #color = stuff[i][3]
    color = colors[2*i+1]

    if np.max(dat[0]) > maxp:
        maxp = np.max(dat[0])

    fitp = np.linspace(0, np.max(dat[0]), 100)
    fit = np.array(phi_ffun(fitp, pmax, 0))

    ax.scatter(dat[0], dat[1] / np.pi, edgecolors=color, facecolors='none', alpha=0.5)
    ax.plot(fitp, fit / np.pi, '-', color=color, lw=3, label=lab_str)
#####################################################################
#####################################################################
#####################################################################

lambind = np.argmin(np.abs(gfuncs_class.lambdas - yuklambda))

sep_fig, sep_axarr = plt.subplots(3, 1, sharex=True, sharey=True, \
                                  figsize=(7,7))
height_fig, height_axarr = plt.subplots(3, 1, sharex=True, sharey=True, \
                                        figsize=(8,8))
modamp_fig, modamp_ax = plt.subplots(1, 1)

minsep = np.min(seps)
colors = bu.get_color_map(len(seps), cmap='plasma')
for sepind, sep in enumerate(seps):
    ones = np.ones_like(posvec)
    pts = np.stack((sep * ones + rbead, posvec, 5.0 * ones), axis=-1)
    for resp in [0, 1, 2]:
        yukforce = gfuncs_class.yukfuncs[resp][lambind](pts * 1.0e-6)
        sep_axarr[resp].plot(posvec, fac*yukforce, color=colors[sepind], \
                             label='$\\Delta x = {:0.1f}~\\mu$m'.format(sep))

ax_dict = {0: 'X', 1: 'Y', 2: 'Z'}
for resp in [0, 1, 2]:
    sep_axarr[resp].set_ylabel('{:s} Force [N]'.format(ax_dict[resp]))
    #print sep
    #print maxval

    def fitfunc(x, a, b, c, d):
        return ffn_wlin(x, a, b, c, d, sep=sep, maxval=maxval)

    if subtract_background:
        bobj = bdir_objs[objind]

    keys = list(obj.avg_diag_force_v_pos.keys())
    cal_facs = obj.conv_facs
    #cal_facs = [1.,1.,1.]
    keycolors = bu.get_color_map(len(keys))
    keys.sort(key=lambda x: float(x))
    for keyind, key in enumerate(keys):
        color = keycolors[keyind]
        # Force objects are indexed as follows:
        # data[response axis][velocity mult.][bins, data, or errs]
        #     response axis  : X=0, Y=1, Z=2
        #     velocity mult. : both=0, forward=1, backward=-1
        #     b, d, or e     : bins=0, data=1, errors=2
        diagdat = obj.avg_diag_force_v_pos[key]
        dat = obj.avg_force_v_pos[key]
        if subtract_background:
            diagbdat = bobj.avg_diag_force_v_pos[key]
            bdat = bobj.avg_force_v_pos[key]
mbead = bu.get_mbead(date)
rbead = bu.get_rbead(mbead)
Ibead = bu.get_Ibead(mbead)

print('Optical torque estimate: ', Ibead['val'] * 20.0e3 / 1500.0)

kappa = {}
kappa['val'] = 6.47e11
kappa['sterr'] = 0.06e11
kappa['syserr'] = 0.25e11

kappa_calc = bu.get_kappa(mbead)

#colors = bu.get_color_map(len(newpaths)*2 + 1, cmap='plasma')
colors = bu.get_color_map(len(newpaths), cmap='plasma')

two_point_times = []
two_point_estimates = []
two_point_errors = []
two_point_estimates_2 = []
two_point_errors_2 = []
linear_estimates = []
linear_errors = []
all_fits = []

for fileind, file in enumerate(newpaths):
    try:
        #if fileind < 3:
plot_base = '/home/cblakemore/plots/20200727/spinning/'
ringdown_fig_path = os.path.join(
    plot_base,
    '20200727-20200924_libration_impulse_damping_time_with_feedback.svg')
spectra_fig_path = os.path.join(
    plot_base, '20200727_libration_spectra_with_feedback_fewer.svg')
save = True

# ringdown_data_path = os.path.join(processed_base, 'dds_libration_ringdowns.p')
# spectra_save_path = os.path.join(processed_base, 'dds_feedback_spectra.p')

# ringdown_dict = pickle.load( open(ringdown_data_path, 'rb') )
# spectra_dict = pickle.load( open(spectra_save_path, 'rb') )

ringdown_colors = bu.get_color_map(len(ringdown_paths), cmap='plasma')

fig, ax = plt.subplots(1, 1, figsize=(6, 4))

for ringdown_ind, ringdown_path in enumerate(ringdown_paths):
    color = ringdown_colors[ringdown_ind]
    max_phi_dg = max_phi_dgs[ringdown_ind]

    ringdown_dict = pickle.load(open(ringdown_path, 'rb'))

    phi_dgs = list(ringdown_dict.keys())
    phi_dgs.sort()
    phi_dgs = np.array(phi_dgs)

    inds = (phi_dgs > 0.0) * (phi_dgs <= max_phi_dg)
    phi_dgs = phi_dgs[inds]
def fit_monochromatic_line(files, data_axes=[0,1], drive_axes=[6], diag=True, \
                           minfreq=2000, maxfreq=8000, pickfirst=True, \
                           colormap='jet', sort='time', file_inds=(0,10000), \
                           dirlengths=[]):
    '''Loops over a list of file names, loads each file, diagonalizes,
       then plots the amplitude spectral density of any number of data
       or cantilever/electrode drive signals

       INPUTS: files, list of file names to extract data from
               data_axes, list of pos_data axes to plot
               diag, boolean specifying whether to diagonalize
               colormap, matplotlib colormap string for sort
               sort, sorting keyword
               file_inds, indices for min and max file

       OUTPUTS: none, plots stuff
    '''

    files = [(os.stat(path), path) for path in files]
    files = [(stat.st_ctime, path) for stat, path in files]
    files.sort(key=lambda x: (x[0]))
    files = [obj[1] for obj in files]

    files = files[file_inds[0]:file_inds[1]]
    if step10:
        files = files[::10]
    if invert_order:
        files = files[::-1]

    times = []
    peak_pos = []
    drive_pos = []
    errs = []
    drive_errs = []

    colors = bu.get_color_map(len(files), cmap=colormap)

    bad_inds = []

    oldtime = 0
    old_per = 0
    print(files[-1])
    print("Processing %i files..." % len(files))
    print("Percent complete: ")
    for fil_ind, fil in enumerate(files):

        # Display percent completion
        per = int(100. * float(fil_ind) / float(len(files)))
        if per > old_per:
            print(old_per, end=' ')
            sys.stdout.flush()
            old_per = per

        if fil in computed_freq_dict and not recompute:
            soln = computed_freq_dict[fil]
            times.append(soln[0])
            peak_pos.append(soln[1])
            errs.append(soln[2])
            drive_pos.append(soln[3])
            drive_errs.append(soln[4])
            old = soln[1]
            old_drive = soln[3]
            continue
        else:
            newsoln = [0, 0, 0, 0, 0]

        # Load data
        df = bu.DataFile()
        try:
            df.load(fil)
        except:
            continue

        if len(drive_axes) > 0:
            df.load_other_data()

        ctime = time.mktime(df.time.timetuple())
        times.append(ctime)
        newsoln[0] = ctime

        cpos = []
        errvals = []
        for axind, ax in enumerate(data_axes):
            #fac = df.conv_facs[ax]
            if fullNFFT:
                NFFT = len(df.pos_data[ax])
            else:
                NFFT = userNFFT

            psd, freqs = mlab.psd(df.pos_data[ax], Fs=df.fsamp, NFFT=NFFT)
            fitbool = (freqs > minfreq) * (freqs < maxfreq)

            maxval = np.max(psd[fitbool])
            delta = delta_per * maxval
            peaks = pdet.peakdetect(psd[fitbool], lookahead=lookahead, delta=delta)

            pos_peaks = peaks[0]
            neg_peaks = peaks[1]

            if plot_peaks:
                for peakind, pos_peak in enumerate(pos_peaks):
                    try:
                        neg_peak = neg_peaks[peakind]
                    except:
                        continue
                    plt.loglog(freqs[fitbool][pos_peak[0]], pos_peak[1], 'x', color='r')
                    plt.loglog(freqs[fitbool][neg_peak[0]], neg_peak[1], 'x', color='b')
                plt.loglog(freqs[fitbool], psd[fitbool])
                plt.show()

            np_pos_peaks = np.array(pos_peaks)

            try:
                if fil_ind == 0:
                    ucutoff = 100000
                    lcutoff = 0
                else:
                    ucutoff = (1.0 + percent_band) * old
                    lcutoff = (1.0 - percent_band) * old

                vals = []
                for peakind, peak in enumerate(pos_peaks):
                    newval = freqs[fitbool][peak[0]]
                    if newval > ucutoff:
                        continue
                    if newval < lcutoff:
                        continue
                    vals.append(newval)
                cpos.append(np.mean(vals))
                for val in vals:
                    errvals.append(val)
            except:
                print('FAILED')
                continue

        drive_cpos = []
        drive_errvals = []
        for axind, ax in enumerate(drive_axes):
            ax = ax - 3
            if fullNFFT:
                NFFT = len(df.other_data[ax])
            else:
                NFFT = userNFFT

            psd, freqs = mlab.psd(df.other_data[ax], Fs=df.fsamp, NFFT=NFFT)
            fitbool = (freqs > minfreq) * (freqs < maxfreq)

            maxval = np.max(psd[fitbool])
            delta = delta_per * maxval
            peaks = pdet.peakdetect(psd[fitbool], lookahead=lookahead, delta=delta)

            pos_peaks = peaks[0]
            neg_peaks = peaks[1]
            np_pos_peaks = np.array(pos_peaks)

            if plot_drive_peaks:
                for peakind, pos_peak in enumerate(pos_peaks):
                    try:
                        neg_peak = neg_peaks[peakind]
                    except:
                        continue
                    plt.loglog(freqs[fitbool][pos_peak[0]], pos_peak[1], 'x', color='r')
                    plt.loglog(freqs[fitbool][neg_peak[0]], neg_peak[1], 'x', color='b')
                plt.loglog(freqs[fitbool], psd[fitbool])
                plt.show()

            try:
                maxind = np.argmax(np_pos_peaks[:, 1])
                maxpeak = pos_peaks[maxind]
                vals = []
                if maxpeak[1] < np.mean(psd[fitbool]) * (1.0 / drive_thresh):
                    vals.append(np.nan)
                else:
                    vals.append(freqs[fitbool][maxpeak[0]])

                #for peakind, peak in enumerate(pos_peaks):
                #    if peak[0] < 1e-2:
                #        continue
                #    newval = freqs[fitbool][peak[0]]
                #    if newval > ucutoff:
                #        continue
                #    if newval < lcutoff:
                #        continue
                #
                #    vals.append(newval)

                drive_cpos.append(np.mean(vals))
                for val in vals:
                    drive_errvals.append(val)
            except:
                print('FAILED DRIVE ANALYSIS')
                continue

        freqval = np.mean(cpos)
        errval = np.std(errvals)
        drive_freqval = np.mean(drive_cpos)
        drive_errval = np.std(drive_errvals)

        # Mark files for which no peak survived the cuts
        if len(cpos) == 0:
            bad_inds.append(fil_ind)
        else:
            peak_pos.append(freqval)
            drive_pos.append(drive_freqval)
            errs.append(errval)
            drive_errs.append(drive_errval)
            old = np.mean(cpos)
            old_drive = np.mean(drive_cpos)

            newsoln[1] = freqval
            newsoln[2] = errval
            newsoln[3] = drive_freqval
            newsoln[4] = drive_errval

        oldtime = ctime
        computed_freq_dict[fil] = newsoln

    times2 = np.delete(times, bad_inds)
    times2 = times2 - np.min(times)

    peak_pos = np.array(peak_pos)
    drive_pos = np.array(drive_pos)

    sortinds = np.argsort(times2)
    times2 = times2[sortinds]
    peak_pos = peak_pos[sortinds]
    drive_pos = drive_pos[sortinds]

    times2 = np.array(times2)
    peak_pos = np.array(peak_pos)
    drive_pos = np.array(drive_pos)
    bad_inds = np.array(bad_inds)

    max_hours = np.max(times2 * (1.0 / 3600))
    plot_ind = np.argmin(np.abs(times2 * (1.0 / 3600) - (max_hours - plot_lastn_hours)))

    if not plot_together:
        fig, ax = plt.subplots(2, 1, figsize=(10, 10), sharex=True, sharey=True)
    elif plot_together:
        fig, ax = plt.subplots(1, 1, figsize=(10, 5), sharex=True, sharey=True)
        ax = [ax]

    ax[0].errorbar(times2[plot_ind:]*(1.0/3600), peak_pos[plot_ind:], \
                   yerr=errs[plot_ind:], fmt='o', \
                   color='C0', label='Bead Rotation')
    if plot_together:
        ax[0].errorbar(times2[plot_ind:]*(1.0/3600), drive_pos[plot_ind:], \
                       yerr=drive_errs[plot_ind:], fmt='o', alpha=0.15, \
                       color='C1', label='Drive')
    elif not plot_together:
        ax[1].errorbar(times2[plot_ind:]*(1.0/3600), drive_pos[plot_ind:], \
                       yerr=drive_errs[plot_ind:], fmt='o', color='C1', label='Drive')

    if logtime:
        ax[0].set_xscale("log")
        if not plot_together:
            ax[1].set_xscale("log")

    if not plot_together:
        ax[1].set_xlabel('Elapsed Time [hrs]', fontsize=14)
    elif plot_together:
        ax[0].set_xlabel('Elapsed Time [hrs]', fontsize=14)
    ax[0].set_ylabel('Rotation Frequency [Hz]', fontsize=14)
    if not plot_together:
        ax[1].set_ylabel('Rotation Frequency [Hz]', fontsize=14)

    plt.setp(ax[0].get_xticklabels(), fontsize=14, visible=True)
    plt.setp(ax[0].get_yticklabels(), fontsize=14, visible=True)
    if not plot_together:
        plt.setp(ax[1].get_xticklabels(), fontsize=14, visible=True)
        plt.setp(ax[1].get_yticklabels(), fontsize=14, visible=True)

    ax[0].yaxis.grid(which='major', color='k', linestyle='--', linewidth=0.5)
    ax[0].xaxis.grid(which='major', color='k', linestyle='--', linewidth=0.5)
    if not plot_together:
        ax[1].yaxis.grid(which='major', color='k', linestyle='--', linewidth=0.5)
        ax[1].xaxis.grid(which='major', color='k', linestyle='--', linewidth=0.5)

    label_keys = list(dirmarkers.keys())
    plot_first = max_hours <= plot_lastn_hours

    if field_on_at_beginning and plot_first:
        ax[0].axvline(x=times2[0], lw=2, label='Field On', color='r', ls='-')

    if len(dirlengths) != 0:
        oldlength = 0
        for dirind, length in enumerate(dirlengths):
            oldlength += length
            tlength = oldlength - np.sum(bad_inds < oldlength)
            if tlength < plot_ind:
                continue
            if dirind + 2 in label_keys:
                ax[0].axvline(x=times2[tlength]*(1.0/3600), lw=2, \
                              label=dirmarkers[dirind+2][0], \
                              color=dirmarkers[dirind+2][1], ls=dirmarkers[dirind+2][2])

    ax[0].legend()
    plt.tight_layout()

    pickle.dump(computed_freq_dict, open(computed_freq_path, 'wb'))

    plt.show()
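
### Hedged usage sketch for fit_monochromatic_line(). The data directory is
### hypothetical, and the function leans on module-level configuration
### (step10, invert_order, recompute, computed_freq_dict, computed_freq_path,
### fullNFFT/userNFFT, delta_per, lookahead, percent_band, drive_thresh,
### plot_together, logtime, plot_lastn_hours, dirmarkers, ...) that must be
### defined before calling.
#
# allfiles, lengths = bu.find_all_fnames('/data/20180601/bead1/spinning/', ext='.h5')
# fit_monochromatic_line(allfiles, data_axes=[0, 1], drive_axes=[6],
#                        minfreq=2000, maxfreq=8000, dirlengths=lengths)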
def get_alpha_vs_file(fildat, diag=True, ignoreX=False, ignoreY=False, ignoreZ=False, \
                      plot=True, save=False, savepath='', confidence_level=0.95, \
                      only_closest=False, ax1='x', ax2='z', lamb_range=(1e-9, 1e-2)):
    '''Loops over a list of file names, loads each file, diagonalizes,
       then performs an optimal filter using the cantilever drive and
       a theoretical force vs position to generate the filter/template.
       The result of the optimal filtering is stored, and the data
       released from memory

       INPUTS: fildat

       OUTPUTS:
    '''

    # For the confidence interval, compute the inverse CDF of a
    # chi^2 distribution at the given confidence level and compare to the
    # likelihood ratio via a goodness-of-fit parameter.
    # Refer to the scipy.stats documentation to understand chi2
    chi2dist = stats.chi2(1)
    # factor of 0.5 from Wilks's theorem: -2 log (Likelihood) ~ chi^2(1)
    con_val = 0.5 * chi2dist.ppf(confidence_level)

    colors = bu.get_color_map(len(lambdas))

    alphas = np.zeros_like(lambdas)
    diagalphas = np.zeros_like(lambdas)
    testalphas = np.linspace(-10**10, 10**10, 11)

    biasvec = list(fildat.keys())
    biasvec.sort()
    ax1posvec = list(fildat[biasvec[0]].keys())
    ax1posvec.sort()
    ax2posvec = list(fildat[biasvec[0]][ax1posvec[0]].keys())
    ax2posvec.sort()

    if only_closest:
        if ax1 == 'x' and ax2 == 'z':
            seps = minsep + (maxthrow - np.array(ax1posvec))
            heights = np.array(ax2posvec) - beadheight
            sind = np.argmin(seps)
            hind = np.argmin(np.abs(heights - beadheight))
            ax1posvec = [ax1posvec[sind]]
            ax2posvec = [ax2posvec[hind]]
        elif ax1 == 'z' and ax2 == 'x':
            seps = minsep + (maxthrow - np.array(ax2posvec))
            heights = np.array(ax1pos) - beadheight
            sind = np.argmin(seps)
            hind = np.argmin(np.abs(heights - beadheight))
            ax1posvec = [ax1posvec[hind]]
            ax2posvec = [ax2posvec[sind]]

    newlamb = lambdas[(lambdas > lamb_range[0]) * (lambdas < lamb_range[-1])]
    tot_iterations = len(biasvec) * len(ax1posvec) * len(ax2posvec) * \
                     len(newlamb) * len(testalphas)
    i = -1
    for lambind, yuklambda in enumerate(lambdas):
        # only process a single hard-coded lambda index
        if lambind != 48:
            continue
        if (yuklambda < lamb_range[0]) or (yuklambda > lamb_range[1]):
            continue

        test = fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][lambind]
        test_yukdat = test[-1]
        test_dat = test[1]

        newalpha = 1e-4 * np.sqrt(np.mean(np.abs(test_dat) / np.abs(test_yukdat)))
        testalphas = np.linspace(-1.0 * newalpha, newalpha, 11)

        for bias, ax1pos, ax2pos in itertools.product(biasvec, ax1posvec, ax2posvec):
            i += 1
            bu.progress_bar(i, tot_iterations)

            minalphas = [0] * len(fildat[bias][ax1pos][ax2pos])
            diag_minalphas = [0] * len(fildat[bias][ax1pos][ax2pos])

            for fil_ind in range(len(fildat[bias][ax1pos][ax2pos])):
                dat = fildat[bias][ax1pos][ax2pos][fil_ind][lambind]
                assert dat[0] == yuklambda

                _, datfft, diagdatfft, daterr, diagdaterr, gfft, yukfft = dat

                chi_sqs = np.zeros(len(testalphas))
                diagchi_sqs = np.zeros(len(testalphas))

                for alphaind, testalpha in enumerate(testalphas):
                    chi_sq = 0
                    diagchi_sq = 0
                    N = 0

                    for resp in [0, 1, 2]:
                        if (ignoreX and resp == 0) or \
                           (ignoreY and resp == 1) or \
                           (ignoreZ and resp == 2):
                            continue
                        re_diff = datfft[resp].real - \
                                  (gfft[resp].real + testalpha * yukfft[resp].real)
                        im_diff = datfft[resp].imag - \
                                  (gfft[resp].imag + testalpha * yukfft[resp].imag)
                        if diag:
                            diag_re_diff = diagdatfft[resp].real - \
                                           (gfft[resp].real + testalpha * yukfft[resp].real)
                            diag_im_diff = diagdatfft[resp].imag - \
                                           (gfft[resp].imag + testalpha * yukfft[resp].imag)

                        #plt.plot(np.abs(re_diff))
                        #plt.plot(daterr[resp])
                        #plt.show()

                        chi_sq += ( np.sum( np.abs(re_diff)**2 / (0.5*(daterr[resp]**2)) ) + \
                                    np.sum( np.abs(im_diff)**2 / (0.5*(daterr[resp]**2)) ) )
                        if diag:
                            diagchi_sq += ( np.sum( np.abs(diag_re_diff)**2 / \
                                                    (0.5*(diagdaterr[resp]**2)) ) + \
                                            np.sum( np.abs(diag_im_diff)**2 / \
                                                    (0.5*(diagdaterr[resp]**2)) ) )
                        N += len(re_diff) + len(im_diff)

                    chi_sqs[alphaind] = chi_sq / (N - 1)
                    if diag:
                        diagchi_sqs[alphaind] = diagchi_sq / (N - 1)

                max_chi = np.max(chi_sqs)
                if diag:
                    max_diagchi = np.max(diagchi_sqs)

                max_alpha = np.max(testalphas)

                p0 = [max_chi / max_alpha**2, 0, 1]
                if diag:
                    diag_p0 = [max_diagchi / max_alpha**2, 0, 1]

                try:
                    popt, pcov = opti.curve_fit(parabola, testalphas, chi_sqs, \
                                                p0=p0, maxfev=100000)
                    if diag:
                        diagpopt, diagpcov = opti.curve_fit(parabola, testalphas, diagchi_sqs, \
                                                            p0=diag_p0, maxfev=1000000)
                except:
                    print("Couldn't fit")
                    popt = [0, 0, 0]
                    popt[2] = np.mean(chi_sqs)

                regular_con_val = con_val + np.min(chi_sqs)
                if diag:
                    diag_con_val = con_val + np.min(diagchi_sqs)

                # Select the positive root for the non-diagonalized data
                soln1 = ( -1.0 * popt[1] + np.sqrt( popt[1]**2 - \
                          4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])
                soln2 = ( -1.0 * popt[1] - np.sqrt( popt[1]**2 - \
                          4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])
                if diag:
                    diagsoln1 = ( -1.0 * diagpopt[1] + np.sqrt( diagpopt[1]**2 - \
                                  4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])
                    diagsoln2 = ( -1.0 * diagpopt[1] - np.sqrt( diagpopt[1]**2 - \
                                  4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])

                if soln1 > soln2:
                    alpha_con = soln1
                else:
                    alpha_con = soln2

                if diag:
                    if diagsoln1 > diagsoln2:
                        diagalpha_con = diagsoln1
                    else:
                        diagalpha_con = diagsoln2

                minalphas[fil_ind] = alpha_con
                if diag:
                    diag_minalphas[fil_ind] = diagalpha_con

            if plot:
                minfig, minaxarr = plt.subplots(1, 2, figsize=(10, 5), dpi=150)
                minaxarr[0].plot(minalphas)
                minaxarr[0].set_title('Min $\\alpha$ vs. Time', fontsize=18)
                minaxarr[0].set_xlabel('File Num', fontsize=16)
                minaxarr[0].set_ylabel('$\\alpha$ [arb]', fontsize=16)
                minaxarr[1].hist(minalphas, bins=20)
                minaxarr[1].set_xlabel('$\\alpha$ [arb]', fontsize=16)
                plt.tight_layout()
                plt.show()

    return minalphas
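
### Hedged usage sketch for get_alpha_vs_file(). The nested dictionary
### 'fildat' (indexed as fildat[bias][ax1pos][ax2pos][file][lambda]) would
### come from an upstream file-processing step not shown here, and 'lambdas',
### 'minsep', 'maxthrow', and 'beadheight' must exist at module scope.
#
# minalphas = get_alpha_vs_file(fildat, diag=True, only_closest=True,
#                               confidence_level=0.95, lamb_range=(1e-8, 1e-4))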
def plot_vs_time(files, data_axes=[0,1,2], cant_axes=[], elec_axes=[], \
                 diag=True, colormap='jet', sort='time', file_inds=(0,10000)):
    '''Loops over a list of file names, loads each file, diagonalizes,
       then plots the amplitude spectral density of any number of data
       or cantilever/electrode drive signals

       INPUTS: files, list of file names to extract data from
               data_axes, list of pos_data axes to plot
               cant_axes, list of cant_data axes to plot
               elec_axes, list of electrode_data axes to plot
               diag, boolean specifying whether to diagonalize

       OUTPUTS: none, plots stuff
    '''

    if diag:
        dfig, daxarr = plt.subplots(len(data_axes), 2, sharex=True, sharey=True, \
                                    figsize=(8,8))
    else:
        dfig, daxarr = plt.subplots(len(data_axes), 1, sharex=True, sharey=True, \
                                    figsize=(8,8))

    if len(cant_axes):
        cfig, caxarr = plt.subplots(len(data_axes), 1, sharex=True, sharey=True)
    if len(elec_axes):
        efig, eaxarr = plt.subplots(len(data_axes), 1, sharex=True, sharey=True)

    files = [(os.stat(path), path) for path in files]
    files = [(stat.st_ctime, path) for stat, path in files]
    files.sort(key=lambda x: (x[0]))
    files = [obj[1] for obj in files]

    files = files[file_inds[0]:file_inds[1]]
    if step10:
        files = files[::10]
    if invert_order:
        files = files[::-1]

    colors = bu.get_color_map(len(files), cmap=colormap)

    old_per = 0
    print("Processing %i files..." % len(files))
    print("Percent complete: ")
    for fil_ind, fil in enumerate(files):
        color = colors[fil_ind]

        # Display percent completion
        per = int(100. * float(fil_ind) / float(len(files)))
        if per > old_per:
            print(old_per, end=' ')
            sys.stdout.flush()
            old_per = per

        # Load data
        df = bu.DataFile()
        df.load(fil)

        df.calibrate_stage_position()
        df.calibrate_phase()

        #df.high_pass_filter(fc=1)
        #df.detrend_poly()

        plt.figure()
        #plt.plot((df.daqmx_time-df.daqmx_time[0])*1e-9, \
        #         (df.pos_data[2]-np.mean(df.pos_data[2])) * (2.0**3 / 100.0))
        plt.plot((df.daqmx_time-df.daqmx_time[0])*1e-9, \
                 (df.phase[4] - df.phase[4][0]))
        plt.show()

        df.diagonalize(maxfreq=lpf, interpolate=False)

        # Only load the auxiliary data once, even if several axes need it
        loaded_other = False
        for ax in data_axes:
            if ax > 2 and not loaded_other:
                df.load_other_data()
                loaded_other = True

        for axind, ax in enumerate(data_axes):
            if ax <= 2:
                data = df.pos_data[ax]
                fac = df.conv_facs[ax]
            if ax > 2:
                data = df.other_data[ax - 3]
                fac = 1.0

            t = np.arange(len(data)) * (1.0 / df.fsamp)

            if diag:
                daxarr[axind, 0].plot(t, data * fac, color=color)
                daxarr[axind, 0].grid(alpha=0.5)
                daxarr[axind, 1].plot(t, data, color=color)
                daxarr[axind, 1].grid(alpha=0.5)
                daxarr[axind, 0].set_ylabel('[N]', fontsize=10)
                if ax == data_axes[-1]:
                    daxarr[axind, 0].set_xlabel('t [s]', fontsize=10)
                    daxarr[axind, 1].set_xlabel('t [s]', fontsize=10)
            else:
                daxarr[axind].plot(t, data * fac, color=color)
                daxarr[axind].grid(alpha=0.5)
                daxarr[axind].set_ylabel('[N]', fontsize=10)
                if ax == data_axes[-1]:
                    daxarr[axind].set_xlabel('t [s]', fontsize=10)

        if len(cant_axes):
            for axind, ax in enumerate(cant_axes):
                t = np.arange(len(df.cant_data[ax])) * (1.0 / df.fsamp)
                caxarr[axind].plot(t, df.cant_data[ax], color=color)
        if len(elec_axes):
            for axind, ax in enumerate(elec_axes):
                t = np.arange(len(df.electrode_data[ax])) * (1.0 / df.fsamp)
                eaxarr[axind].plot(t, df.electrode_data[ax], color=color)

    #daxarr[0].set_xlim(0.5, 25000)
    #daxarr[0].set_ylim(1e-21, 1e-14)
    plt.tight_layout()
    plt.show()
force_dic = select_allbias_onepos(force_dic, ax1_toplot, ax2_toplot)
diag_force_dic = select_allbias_onepos(diag_force_dic, ax1_toplot, ax2_toplot)

if plot:
    cantVvec = list(force_dic.keys())
    cantVvec.sort()

    fig, axarr = plt.subplots(3, 2, sharex=True, sharey=True, figsize=(6,8), dpi=150)
    colors = bu.get_color_map(len(cantVvec))

    for cantind, cantV in enumerate(cantVvec):
        color = colors[cantind]
        lab = '%0.2f V' % cantV
        for resp in [0, 1, 2]:
            bins = force_dic[cantV][resp][0]
            dat = force_dic[cantV][resp][1]
            errs = force_dic[cantV][resp][2]

            diag_bins = diag_force_dic[cantV][resp][0]
            diag_dat = diag_force_dic[cantV][resp][1]
            diag_errs = diag_force_dic[cantV][resp][2]

            offset = 0
def get_alpha_lambda(fildat, diag=True, ignoreX=False, ignoreY=False, ignoreZ=False, \
                     plot=True, save=False, savepath='', confidence_level=0.95, \
                     only_closest=False, ax1='x', ax2='z', lamb_range=(1e-9, 1e-2)):
    '''Loops over a list of file names, loads each file, diagonalizes,
       then performs an optimal filter using the cantilever drive and
       a theoretical force vs position to generate the filter/template.
       The result of the optimal filtering is stored, and the data
       released from memory

       INPUTS: fildat

       OUTPUTS:
    '''

    # For the confidence interval, compute the inverse CDF of a
    # chi^2 distribution at the given confidence level and compare to the
    # likelihood ratio via a goodness-of-fit parameter.
    # Refer to the scipy.stats documentation to understand chi2
    chi2dist = stats.chi2(1)
    # factor of 0.5 from Wilks's theorem: -2 log (Likelihood) ~ chi^2(1)
    con_val = 0.5 * chi2dist.ppf(confidence_level)

    colors = bu.get_color_map(len(lambdas))

    alphas = np.zeros_like(lambdas)
    diagalphas = np.zeros_like(lambdas)
    testalphas = np.linspace(-10**10, 10**10, 11)

    minalphas = [[]] * len(lambdas)

    biasvec = list(fildat.keys())
    biasvec.sort()
    ax1posvec = list(fildat[biasvec[0]].keys())
    ax1posvec.sort()
    ax2posvec = list(fildat[biasvec[0]][ax1posvec[0]].keys())
    ax2posvec.sort()

    if only_closest:
        if ax1 == 'x' and ax2 == 'z':
            seps = minsep + (maxthrow - np.array(ax1posvec))
            heights = np.array(ax2posvec) - beadheight
            sind = np.argmin(seps)
            hind = np.argmin(np.abs(heights - beadheight))
            ax1posvec = [ax1posvec[sind]]
            ax2posvec = [ax2posvec[hind]]
        elif ax1 == 'z' and ax2 == 'x':
            seps = minsep + (maxthrow - np.array(ax2posvec))
            heights = np.array(ax1pos) - beadheight
            sind = np.argmin(seps)
            hind = np.argmin(np.abs(heights - beadheight))
            ax1posvec = [ax1posvec[hind]]
            ax2posvec = [ax2posvec[sind]]

    newlamb = lambdas[(lambdas > lamb_range[0]) * (lambdas < lamb_range[-1])]
    tot_iterations = len(biasvec) * len(ax1posvec) * len(ax2posvec) * \
                     len(newlamb) * len(testalphas) + 1
    i = -1

    # To test the chi2 fit against "fake" data, uncomment the lines below
    # that consume these random draws
    rands = np.random.randn(*fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][0][1].shape)
    rands2 = np.random.randn(*fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][0][1].shape)

    for lambind, yuklambda in enumerate(lambdas):
        #if lambind != 48:
        #    continue
        if (yuklambda < lamb_range[0]) or (yuklambda > lamb_range[1]):
            continue

        test = fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][lambind]
        test_yukdat = test[-1]
        test_dat = test[1]

        newalpha = 1e-4 * np.sqrt(np.mean(np.abs(test_dat) / np.abs(test_yukdat)))
        testalphas = np.linspace(-1.0 * newalpha, newalpha, 21)

        chi_sqs = np.zeros(len(testalphas))
        diagchi_sqs = np.zeros(len(testalphas))

        for alphaind, testalpha in enumerate(testalphas):
            N = 0
            chi_sq = 0
            diagchi_sq = 0

            for bias, ax1pos, ax2pos in itertools.product(biasvec, ax1posvec, ax2posvec):
                i += 1
                bu.progress_bar(i, tot_iterations, suffix=' Fitting the Data for Chi^2')

                for fil_ind in range(len(fildat[bias][ax1pos][ax2pos])):
                    dat = fildat[bias][ax1pos][ax2pos][fil_ind][lambind]
                    assert dat[0] == yuklambda

                    _, datfft, diagdatfft, daterr, diagdaterr, gfft, yukfft = dat

                    # To test the chi2 fit against "fake" data, uncomment these lines
                    #datfft = yukfft * -0.5e9
                    #datfft += (1.0 / np.sqrt(2)) * daterr * rands + \
                    #          (1.0 / np.sqrt(2)) * daterr * rands2 * 1.0j
                    #gfft = np.zeros_like(datfft)

                    for resp in [0, 1, 2]:
                        if (ignoreX and resp == 0) or \
                           (ignoreY and resp == 1) or \
                           (ignoreZ and resp == 2):
                            print(ignoreX, ignoreY, ignoreZ, resp)
                            continue
                        re_diff = datfft[resp].real - \
                                  (gfft[resp].real + testalpha * yukfft[resp].real)
                        im_diff = datfft[resp].imag - \
                                  (gfft[resp].imag + testalpha * yukfft[resp].imag)
                        if diag:
                            diag_re_diff = diagdatfft[resp].real - \
                                           (gfft[resp].real + testalpha * yukfft[resp].real)
                            diag_im_diff = diagdatfft[resp].imag - \
                                           (gfft[resp].imag + testalpha * yukfft[resp].imag)

                        #plt.plot(np.abs(re_diff))
                        #plt.plot(daterr[resp])
                        #plt.show()

                        chi_sq += ( np.sum( np.abs(re_diff)**2 / (0.5*daterr[resp]**2) ) + \
                                    np.sum( np.abs(im_diff)**2 / (0.5*daterr[resp]**2) ) )
                        if diag:
                            diagchi_sq += ( np.sum( np.abs(diag_re_diff)**2 / \
                                                    (0.5*diagdaterr[resp]**2) ) + \
                                            np.sum( np.abs(diag_im_diff)**2 / \
                                                    (0.5*diagdaterr[resp]**2) ) )
                        N += len(re_diff) + len(im_diff)

            chi_sqs[alphaind] = chi_sq / (N - 1)
            if diag:
                diagchi_sqs[alphaind] = diagchi_sq / (N - 1)

        max_chi = np.max(chi_sqs)
        if diag:
            max_diagchi = np.max(diagchi_sqs)

        max_alpha = np.max(testalphas)

        p0 = [max_chi / max_alpha**2, 0, 1]
        if diag:
            diag_p0 = [max_diagchi / max_alpha**2, 0, 1]

        #if lambind == 0:
        #    p0 = [0.15e9, 0, 5]
        #else:
        #    p0 = p0_old

        if plot:
            plt.figure(1)
            plt.plot(testalphas, chi_sqs, color=colors[lambind])
            if diag:
                plt.figure(2)
                plt.plot(testalphas, diagchi_sqs, color=colors[lambind])

        try:
            popt, pcov = opti.curve_fit(parabola, testalphas, chi_sqs, \
                                        p0=p0, maxfev=100000)
            if diag:
                diagpopt, diagpcov = opti.curve_fit(parabola, testalphas, diagchi_sqs, \
                                                    p0=diag_p0, maxfev=1000000)
        except:
            print("Couldn't fit")
            popt = [0, 0, 0]
            popt[2] = np.mean(chi_sqs)

        regular_con_val = con_val + np.min(chi_sqs)
        if diag:
            diag_con_val = con_val + np.min(diagchi_sqs)

        # Select the positive root for the non-diagonalized data
        soln1 = ( -1.0 * popt[1] + np.sqrt( popt[1]**2 - \
                  4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])
        soln2 = ( -1.0 * popt[1] - np.sqrt( popt[1]**2 - \
                  4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])
        if diag:
            diagsoln1 = ( -1.0 * diagpopt[1] + np.sqrt( diagpopt[1]**2 - \
                          4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])
            diagsoln2 = ( -1.0 * diagpopt[1] - np.sqrt( diagpopt[1]**2 - \
                          4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])

        if soln1 > soln2:
            alpha_con = soln1
        else:
            alpha_con = soln2

        if diag:
            if diagsoln1 > diagsoln2:
                diagalpha_con = diagsoln1
            else:
                diagalpha_con = diagsoln2

        alphas[lambind] = alpha_con
        if diag:
            diagalphas[lambind] = diagalpha_con

    if plot:
        plt.figure(1)
        plt.title('Goodness of Fit for Various Lambda', fontsize=16)
        plt.xlabel('Alpha Parameter [arb]', fontsize=14)
        plt.ylabel('$\\chi^2$', fontsize=18)
        if diag:
            plt.figure(2)
            plt.title('Goodness of Fit for Various Lambda - DIAG', fontsize=16)
            plt.xlabel('Alpha Parameter [arb]', fontsize=14)
            plt.ylabel('$\\chi^2$', fontsize=18)
        plt.show()

    if not diag:
        diagalphas = np.zeros_like(alphas)

    if save:
        if savepath == '':
            print('No save path given, type full path here')
            savepath = input('path: ')
        np.save(savepath, [lambdas, alphas, diagalphas])

    return lambdas, alphas, diagalphas
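
### The two routines above both extract a limit by fitting a parabola to
### chi^2(alpha) and solving for where it crosses min(chi^2) + con_val. A
### compact, self-contained restatement of that quadratic-root step (not
### called anywhere, just illustrative):

def _alpha_limit_from_parabola(popt, chi_min, con_val):
    '''Given parabola coefficients popt = [a, b, c] for
       chi^2(alpha) = a*alpha^2 + b*alpha + c, return the larger root of
       chi^2(alpha) = chi_min + con_val, i.e. the one-sided upper limit.'''
    a, b, c = popt
    target = chi_min + con_val
    disc = b**2 - 4.0 * a * (c - target)
    roots = ((-b + np.sqrt(disc)) / (2.0 * a),
             (-b - np.sqrt(disc)) / (2.0 * a))
    return max(roots)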
def plot_many_spectra(files, data_axes=[0,1,2], colormap='jet', \
                      sort='time', plot_freqs=(0.0,1000000.0), labels=[]):
    '''Loops over a list of file names, loads each file, then plots the
       amplitude spectral density of any number of data channels

       INPUTS: files, list of file names to extract data from
               data_axes, list of pos_data axes to plot

       OUTPUTS: none, plots stuff
    '''

    dfig, daxarr = plt.subplots(len(data_axes), sharex=True, sharey=False, \
                                figsize=(8,8))
    if len(data_axes) == 1:
        daxarr = [daxarr]

    colors = bu.get_color_map(len(files), cmap=colormap)
    #colors = ['C0', 'C1', 'C2']

    if track_feature:
        times = []
        feature_locs = []

    old_per = 0
    print("Processing %i files..." % len(files))
    for fil_ind, fil in enumerate(files):
        print(fil)
        color = colors[fil_ind]

        # Display percent completion
        bu.progress_bar(fil_ind, len(files))

        # Load data
        obj = bu.hsDat(fil, load=True)

        #plt.figure()
        #plt.plot(df.pos_data[0])
        #plt.show()

        fsamp = obj.attribs['fsamp']
        nsamp = obj.attribs['nsamp']
        t = obj.attribs['time']

        freqs = np.fft.rfftfreq(nsamp, d=1.0/fsamp)

        if waterfall:
            fac = waterfall_fac**fil_ind
        else:
            fac = 1.0

        if not fullNFFT:
            NFFT = userNFFT
        else:
            NFFT = nsamp

        for axind, ax in enumerate(data_axes):
            # if fullNFFT:
            #     NFFT = len(df.pos_data[ax])
            # else:
            #     NFFT = userNFFT

            # asd = np.abs(np.fft.rfft(obj.dat[:,axind]))
            psd, freqs = mlab.psd(obj.dat[:,axind], Fs=obj.attribs['fsamp'], \
                                  NFFT=NFFT, window=window)
            asd = np.sqrt(psd)

            plot_inds = (freqs > plot_freqs[0]) * (freqs < plot_freqs[1])

            if len(labels):
                daxarr[axind].loglog(freqs[plot_inds], asd[plot_inds]*fac, \
                                     label=labels[fil_ind], color=colors[fil_ind])
            else:
                daxarr[axind].loglog(freqs[plot_inds], asd[plot_inds]*fac, \
                                     color=colors[fil_ind])
            daxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$')
            if ax == data_axes[-1]:
                daxarr[axind].set_xlabel('Frequency [Hz]')

    if len(axes_labels):
        for labelind, label in enumerate(axes_labels):
            daxarr[labelind].set_title(label)

    if len(labels):
        daxarr[0].legend(fontsize=10)
    if len(xlim):
        daxarr[0].set_xlim(xlim[0], xlim[1])
    if len(ylim):
        daxarr[0].set_ylim(ylim[0], ylim[1])

    plt.tight_layout()

    if savefig:
        dfig.savefig(fig_savename)

    plt.show()
    def analyze_background(self, data_axes=[0,1,2], lpf=2500, \
                           diag=False, colormap='jet', \
                           file_inds=(0,10000), unwrap=False, \
                           harms_to_track=[1, 2, 3], \
                           ext_cant_drive=False, ext_cant_ind=0, \
                           plot_first_drive=False, sub_cant_phase=True, \
                           progstr=''):
        '''Loops over a list of file names, loads each file, diagonalizes,
           then plots the amplitude spectral density of any number of data
           or cantilever/electrode drive signals

           INPUTS: files, list of file names to extract data from
                   data_axes, list of pos_data axes to plot
                   ax_labs, dict with labels for plotted axes
                   diag, bool specifying whether to diagonalize
                   unwrap, bool to unwrap phase of background
                   harms, harmonics to label in ASD

           OUTPUTS: none, generates class attributes
        '''

        files = bu.sort_files_by_timestamp(self.relevant_files)
        files = files[file_inds[0]:file_inds[1]]

        nfreq = len(harms_to_track)
        nax = len(data_axes)
        nfiles = len(files)

        colors = bu.get_color_map(nfiles, cmap=colormap)

        avg_asd = [[]] * nax
        diag_avg_asd = [[]] * nax
        Nasds = [[]] * nax

        amps = np.zeros((nax, nfreq, nfiles))
        amp_errs = np.zeros((nax, nfreq, nfiles))
        phases = np.zeros((nax, nfreq, nfiles))
        phase_errs = np.zeros((nax, nfreq, nfiles))

        temps = np.zeros((2, nfiles))
        times = np.zeros(nfiles)

        print("Processing %i files..." % nfiles)
        for fil_ind, fil in enumerate(files):
            color = colors[fil_ind]

            # Display percent completion
            bu.progress_bar(fil_ind, nfiles, suffix=progstr)

            # Load data
            df = bu.DataFile()
            df.load(fil)

            try:
                temps[0, fil_ind] = df.temps[0]
                temps[1, fil_ind] = df.temps[1]
            except:
                temps[:, fil_ind] = 0.0

            if fil_ind == 0:
                self.fsamp = df.fsamp
                init_time = df.time
                times[0] = 0.0
            else:
                times[fil_ind] = (df.time - init_time).total_seconds()

            df.calibrate_stage_position()

            #df.high_pass_filter(fc=1)
            #df.detrend_poly()

            df.diagonalize(maxfreq=lpf, interpolate=False)

            Nsamp = len(df.pos_data[0])

            if len(harms_to_track):
                harms = harms_to_track
            else:
                harms = [1]

            ginds, driveind, drive_freq, drive_ax = \
                df.get_boolean_cantfilt(ext_cant_drive=ext_cant_drive, \
                                        ext_cant_ind=ext_cant_ind, \
                                        nharmonics=10, harms=harms)

            if fil_ind == 0:
                if plot_first_drive:
                    df.plot_cant_asd(drive_ax)
                freqs = np.fft.rfftfreq(Nsamp, d=1.0/df.fsamp)
                bin_sp = freqs[1] - freqs[0]

            datfft, diagdatfft, daterr, diagdaterr = \
                df.get_datffts_and_errs(ginds, drive_freq, plot=False)

            harm_freqs = freqs[ginds]

            for axind, ax in enumerate(data_axes):
                print(ax, df.conv_facs[ax])

                asd = np.abs(np.fft.rfft(df.pos_data[ax])) * \
                      bu.fft_norm(Nsamp, df.fsamp) * df.conv_facs[ax]
                diag_asd = np.abs(np.fft.rfft(df.diag_pos_data[ax])) * \
                           bu.fft_norm(Nsamp, df.fsamp)

                if not len(avg_asd[axind]):
                    avg_asd[axind] = asd
                    diag_avg_asd[axind] = diag_asd
                    Nasds[axind] = 1
                else:
                    avg_asd[axind] += asd
                    diag_avg_asd[axind] += diag_asd
                    Nasds[axind] += 1

                for freqind, freq in enumerate(harm_freqs):
                    phase = np.angle(datfft[axind][freqind])
                    if sub_cant_phase:
                        cantfft = np.fft.rfft(df.cant_data[drive_ax])
                        cantphase = np.angle(cantfft[driveind])
                        phases[axind][freqind][fil_ind] = phase - cantphase
                    else:
                        phases[axind][freqind][fil_ind] = phase

                    sig_re = daterr[axind][freqind] / np.sqrt(2)
                    sig_im = np.copy(sig_re)
                    im = np.imag(datfft[axind][freqind])
                    re = np.real(datfft[axind][freqind])
                    phase_var = np.mean((im**2 * sig_re**2 + re**2 * sig_im**2) / \
                                        (re**2 + im**2)**2)
                    phase_errs[axind][freqind][fil_ind] = np.sqrt(phase_var)

                    amps[axind][freqind][fil_ind] = np.abs(datfft[axind][freqind] * \
                                                           np.sqrt(bin_sp) * \
                                                           bu.fft_norm(Nsamp, df.fsamp))
                    amp_errs[axind][freqind][fil_ind] = daterr[axind][freqind] * \
                                                        np.sqrt(bin_sp) * \
                                                        bu.fft_norm(Nsamp, df.fsamp)

        for axind, ax in enumerate(data_axes):
            avg_asd[axind] *= (1.0 / Nasds[axind])
            diag_avg_asd[axind] *= (1.0 / Nasds[axind])

        self.freqs = freqs
        self.ginds = ginds
        self.avg_asd = avg_asd
        self.diag_avg_asd = diag_avg_asd
        self.amps = amps
        self.phases = phases
        self.amp_errs = amp_errs
        self.phase_errs = phase_errs
        self.temps = temps
        self.times = times
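
    ### Hedged usage sketch for analyze_background(). The class name and file
    ### attribute are hypothetical stand-ins for whatever object carries
    ### 'relevant_files' in this codebase:
    #
    # bg = BackgroundData()                      # hypothetical container class
    # bg.relevant_files = bu.find_all_fnames('/data/20180601/background/')[0]
    # bg.analyze_background(data_axes=[0, 1, 2], harms_to_track=[1, 2, 3])
    # print(bg.amps.shape)                       # (n_axes, n_harmonics, n_files)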
        print('STUPIDITYERROR: Multiple drive amplitudes in directory')

    newlist = []
    for i in [0, 1, 2]:
        if i == straighten_axis:
            newlist.append(uamps[0])
        else:
            newlist.append(0.0)
    dir_obj.drive_amplitude = newlist

    return dir_obj

dir_objs = list(map(proc_dir, dirs))

colors_yeay = bu.get_color_map(len(dir_objs))

psds = {}
pows = {}
bpows = {}
for ind, obj in enumerate(dir_objs):
    psd = []
    col = colors_yeay[ind]
    amp = obj.drive_amplitude[straighten_axis]

    filcount = 0
    for fobj in obj.fobjs:
        filcount += 1
time_dict = {}
for obj in dir_objs:
    for fobj in obj.fobjs:
        fobj.detrend()
        time = fobj.Time
        time_dict[time] = fobj

for fobj in fil_objs:
    fobj.detrend()
    time = fobj.Time
    time_dict[time] = fobj

times = list(time_dict.keys())
times.sort()

colors_yeay = bu.get_color_map(len(times))
colors_elecs = bu.get_color_map(8)
#colors_yeay = ['b', 'r', 'g']

#if plot:
#    f, axarr = plt.subplots(4,2,sharex='all',sharey='all')

avgs = []
for i, time in enumerate(times):
    col = colors_yeay[i]
    cfobj = time_dict[time]

    elecdat = cfobj.electrode_data

    # avgs starts as an empty list and becomes an ndarray after the first pass
    if not len(avgs):
        avgs = np.mean(elecdat, axis=-1)
    else:
                       dpi=100)

if calibrate:
    cal_facs = obj.conv_facs
else:
    cal_facs = [1., 1., 1.]

obj.get_avg_force_v_pos(cant_axis=cant_axis, bin_size=bin_size, bias=True)
obj.get_avg_diag_force_v_pos(cant_axis=cant_axis, bin_size=bin_size, bias=True)

keys = list(obj.avg_force_v_pos.keys())
keys.sort()

colors_yeay = bu.get_color_map(len(keys))
for keyind, key in enumerate(keys):
    lab = str(key) + ' V'
    col = colors_yeay[keyind]

    for resp_axis in [0, 1, 2]:
        #offset = 0
        #offset_d = 0
        offset = -1.0 * obj.avg_force_v_pos[key][resp_axis, 0][1][-1]
        offset_d = -1.0 * obj.avg_diag_force_v_pos[key][resp_axis, 0][1][-1]

        xdat = obj.avg_force_v_pos[key][resp_axis, 0][0]
        ydat = (obj.avg_force_v_pos[key][resp_axis, 0][1] + offset) * cal_facs[resp_axis]
def sqrt(x, A, x0, b):
    return A * np.sqrt(x - x0) + b

for gas in gases:
    fig, ax = plt.subplots(1, 1)
    paths = path_dict[gas]
    for pathind, path in enumerate(paths):
        dipole_filename = path[:-1] + '.dipole'
        color = 'C' + str(pathind)

        files, lengths = bu.find_all_fnames(path, ext='.npy')
        if one_path:
            colors = bu.get_color_map(len(files), cmap='inferno')

        popt_arr = []
        pcov_arr = []
        max_field = 0

        A_arr = []
        A_sterr_arr = []
        A_syserr_arr = []
        x0_arr = []
        x0_err_arr = []

        for fileind, file in enumerate(files):
            if one_path:
                color = colors[fileind]
### Define some constants. Later simulations will have these saved with the
### data, but I forgot on the first few so....
mbead_dic = {'val': 84.3e-15, 'sterr': 1.0e-15, 'syserr': 1.5e-15}
Ibead = bu.get_Ibead(mbead=mbead_dic)['val']
kappa = bu.get_kappa(mbead=mbead_dic)['val']

m0 = 18.0 * constants.atomic_mass  # residual gas particle mass, in kg

### Define an exponential for fitting the ringdown
def exponential(x, a, b, c):
    return a * np.exp(-1.0 * b * x) + c

# plt.figure(figsize=(10,8))

### Colors for plotting
colors = bu.get_color_map(len(results_cut), cmap='plasma')

# fig = plt.figure(figsize=(8,5))

### Loop over the simulation results and fit each one
for ind, result in enumerate(results_cut):
    pressure, all_t, all_amp = result

    beta_rot = pressure * np.sqrt(m0) / kappa
    tau_calc = Ibead / beta_rot

    fit_inds = all_t <= 4.0 * tau_calc

    ### Guess some of the exponential fit parameters
    a_guess = all_amp[0]
    b_guess = 0.2 * all_t[-1]
    c_guess = all_amp[-1]
            cpos = cpos * 80. / 10.  # 80um travel per 10V control
            if cpos not in pos_dict:
                pos_dict[cpos] = []
            pos_dict[cpos].append(fobj.fname)

if subtract_background:
    bpos_dict = {}
    for obj in bdir_objs:
        for fobj in obj.fobjs:
            cpos = fobj.get_stage_settings(axis=step_axis)[0]
            cpos = cpos * 80. / 10.  # 80um travel per 10V control
            if cpos not in bpos_dict:
                bpos_dict[cpos] = []
            bpos_dict[cpos].append(fobj.fname)

colors = bu.get_color_map(len(list(pos_dict.keys())))

# Obtain the unique cantilever positions and sort them
pos_keys = list(pos_dict.keys())
pos_keys.sort()

# initialize some dictionaries that will be updated in a for-loop
force_at_closest = {}
fits = {}
diag_fits = {}

f, axarr = plt.subplots(3, 2, sharex='all', sharey='all', figsize=(7, 8),
def find_step_cal_response(file_obj, bandwidth=1., include_in_phase=False, \
                           using_tabor=False, tabor_ind=3, mon_fac=100, \
                           ecol=-1, pcol=-1, new_trap=False, plot=False, \
                           userphase=0.0, nearest=True):
    '''Analyze a step-calibration data file: find the drive frequency,
       then correlate the response to the drive

       INPUTS: file_obj, input file object
               bandwidth, bandpass filter bandwidth

       OUTPUTS: H, (response / drive)'''

    if new_trap:
        using_tabor = False

    if not using_tabor:
        if pcol == -1:
            if ecol == -1:
                ecol = np.argmax(file_obj.electrode_settings['driven'])
            pcol = config.elec_map[ecol]

        efield = bu.trap_efield(file_obj.electrode_data, new_trap=new_trap)
        drive = efield[pcol]
        #drive = file_obj.electrode_data[ecol]

        if plot:
            fig, axarr = plt.subplots(2, 1, sharex=True)
            tvec = np.arange(file_obj.nsamp) * (1.0 / file_obj.fsamp)
            colors = bu.get_color_map(len(file_obj.electrode_data), cmap='plasma')
            for i in range(len(file_obj.electrode_data)):
                try:
                    if file_obj.electrode_settings['driven'][i]:
                        ext = ' - driven'
                    else:
                        ext = ''
                    axarr[0].plot(tvec, file_obj.electrode_data[i], color=colors[i],
                                  label='Elec. {:s}{:s}'.format(str(i), ext))
                except:
                    pass
            axarr[0].set_title('Electrode and Efield Data')
            axarr[0].set_ylabel('Voltage [V]')
            axarr[0].legend(fontsize=10, ncol=2, loc='upper right')
            for i, ax in enumerate(['X', 'Y', 'Z']):
                axarr[1].plot(tvec, efield[i], label=ax)
            axarr[1].set_ylabel('Efield [V/m]')
            axarr[1].set_xlabel('Time [s]')
            axarr[1].legend(fontsize=10, loc='upper right')
            fig.tight_layout()
            plt.show()
            input()

        # plt.plot(drive)
        # plt.show()

        #drive = efield[ecol]

    elif using_tabor:
        pcol = 0
        v3 = file_obj.other_data[tabor_ind] * mon_fac
        v4 = file_obj.other_data[tabor_ind + 1] * mon_fac
        zeros = np.zeros(len(v3))

        if plot:
            colors = bu.get_color_map(2, cmap='plasma')
            plt.figure()
            plt.plot(v3, color=colors[0], label='Elec. {:s}'.format(str(tabor_ind)))
            plt.plot(v4, color=colors[1], label='Elec. {:s}'.format(str(tabor_ind + 1)))
            plt.title('Electrode data [V]')
            plt.legend()
            plt.tight_layout()
            plt.show()
            input()

        fac = 1.0
        if np.std(v4) < 0.5 * np.std(v3):
            # print('Only one Tabor drive channel being digitized...')
            v4 = zeros
            fac = 2.0
        elif np.std(v3) < 0.5 * np.std(v4):
            # print('Only one Tabor drive channel being digitized...')
            v3 = zeros
            fac = 2.0

        voltages = []
        for i in range(8):
            if i == tabor_ind:
                voltages.append(v3)
            elif i == (tabor_ind + 1):
                voltages.append(v4)
            else:
                voltages.append(zeros)
        drive = bu.trap_efield(voltages, new_trap=new_trap)[pcol] * fac

    # try:
    #     power = np.mean(file_obj.power)
    # except Exception:
    #     power = 0.0
    #     traceback.print_exc()

    zpos = np.mean(file_obj.pos_data[2])

    #drive = bu.detrend_poly(drive, order=1.0, plot=True)
    drive_fft = np.fft.rfft(drive)

    ### Find the drive frequency
    freqs = np.fft.rfftfreq(len(drive), d=1. / file_obj.fsamp)
    drive_freq = freqs[np.argmax(np.abs(drive_fft[1:])) + 1]

    # plt.plot(drive)
    # plt.show()
    # input()

    # print(drive_freq)
    # for i in range(3):
    #     plt.plot(efield[i], label=str(i))
    # plt.legend()
    # plt.show()

    ### Extract the response and detrend
    # response = file_obj.pos_data[pcol]
    if new_trap:
        response = file_obj.pos_data_3[pcol]
    else:
        response = file_obj.pos_data[pcol]
    #response = bu.detrend_poly(response, order=1.0, plot=True)

    ### Configure a time array for plotting and fitting
    cut_samp = config.adc_params["ignore_pts"]
    N = len(drive)
    dt = 1. / file_obj.fsamp
    t = np.linspace(0, (N + cut_samp - 1) * dt, N + cut_samp)
    t = t[cut_samp:]

    # print(drive_freq)
    # if drive_freq < 10.0:
    #     print(file_obj.fname)
    #     plt.plot(t, drive)
    #     plt.figure()
    #     plt.loglog(freqs, np.abs(drive_fft))
    #     plt.show()

    if drive_freq < 0.5 * bandwidth:
        apply_filter = False
    else:
        apply_filter = True

    ### Bandpass filter the response
    if apply_filter:
        b, a = signal.butter(3, [2.*(drive_freq-bandwidth/2.)/file_obj.fsamp, \
                                 2.*(drive_freq+bandwidth/2.)/file_obj.fsamp], \
                             btype='bandpass')
        responsefilt = signal.filtfilt(b, a, response)
    else:
        responsefilt = np.copy(response)

    if plot:
        plt.figure()
        plt.loglog(freqs, np.abs(np.fft.rfft(drive)))
        plt.loglog(freqs, np.abs(np.fft.rfft(responsefilt)))
        plt.show()
        input()

    ### Compute the full, normalized correlation and extract amplitude
    corr_full = bu.correlation(drive, responsefilt, file_obj.fsamp, drive_freq)
    ncorr = len(corr_full)

    phase_ratio = userphase / (2.0 * np.pi)
    phase_inds = np.array([np.floor(phase_ratio * ncorr),
                           np.ceil(phase_ratio * ncorr)], dtype='int')

    response_inphase = corr_full[0]
    response_max = np.max(corr_full)

    # try:
    response_userphase = np.interp([phase_ratio * ncorr], phase_inds,
                                   corr_full[phase_inds])[0]
    # except:
    #     response_userphase = corr_full[phase_inds[0]]

    ### Compute the drive amplitude, assuming it's a sine wave
    drive_amp = np.sqrt(2) * np.std(drive)  # Assume drive is sinusoidal
    # print(drive_amp)

    outdict = {}
    outdict['inphase'] = response_inphase / drive_amp
    outdict['max'] = response_max / drive_amp
    outdict['userphase'] = response_userphase / drive_amp
    outdict['userphase_nonorm'] = response_userphase
    outdict['drive'] = drive_amp
    outdict['drive_freq'] = drive_freq
    outdict['pcol'] = pcol

    return outdict
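
### Hedged usage sketch for find_step_cal_response(). The file path is
### hypothetical; the correlation amplitudes in the returned dictionary are
### normalized by the inferred drive amplitude, so outdict['inphase'] is
### the response per unit of drive field.
#
# df = bu.DataFile()
# df.load('/data/20180601/bead1/discharge/turbombar_xyzcool_0.h5')
# out = find_step_cal_response(df, bandwidth=1.0, plot=False)
# print(out['drive_freq'], out['inphase'], out['max'])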
time_dict = {}
for obj in dir_objs:
    for find, fobj in enumerate(obj.fobjs):
        if find not in files:
            continue
        time = fobj.Time
        if time not in time_dict:
            time_dict[time] = []
            time_dict[time].append(fobj.fname)
        else:
            time_dict[time].append(fobj.fname)

times = list(time_dict.keys())

colors_yeay = bu.get_color_map(len(times))

f, axarr = plt.subplots(len(axes_to_plot), 2, sharey='row', sharex='all', \
                        figsize=(10,12), dpi=100)

for i, time in enumerate(times):
    newobj = cu.Data_dir(0, init_data, time)
    newobj.files = time_dict[time]
    newobj.load_dir(cu.diag_loader, maxfiles=maxfiles)

    newobj.load_H(tf_path)
    newobj.load_step_cal(step_cal_path)
    newobj.calibrate_H()

    newobj.diagonalize_files(reconstruct_lowf=True, lowf_thresh=200., # plot_Happ=True, \
                             build_conv_facs=True, drive_freq=cal_drive_freq, \
                             close_dat=False)
#ind_offset = 3
savefig = True
base_plot_path = '/home/cblakemore/plots/20191017/pramp/combined_reverse'.format(date)
#base_plot_path = '/home/cblakemore/plots/{:s}/pramp'.format(date)

# base_path = '/data/old_trap_processed/spinning/pramp_data/20190905'
# base_dipole_path = '/data/old_trap_processed/spinning/wobble/20190905/'

# mbead = 84.2e-15  # convert picograms to kg
# mbead_err = 1.5e-15

# ind_offset = 1
# #ind_offset = 3

# base_plot_path = '/home/cblakemore/plots/20190905/pramp'

colors = bu.get_color_map(3, cmap='plasma')

include_other_beads = True
other_paths = ['/data/old_trap_processed/spinning/pramp_data/20190905', \
               '/data/old_trap_processed/spinning/pramp_data/20191017']
other_markers = ['x', '+']
other_linestyles = [':', '-.']
other_linestyles = [(0, (1, 1)), (0, (3, 1, 1, 1))]
other_dipole_paths = ['/data/old_trap_processed/spinning/wobble/20190905/', \
                      '/data/old_trap_processed/spinning/wobble/20191017/']

other_masses = []
for path in other_paths:
    date_o = path.split('/')[-1]
    mbead_o = bu.get_mbead(date_o)
    other_masses.append(mbead_o)
        np.save(save_paths[pathind], out_arr)
        # print('Saving: ', save_path)
        # np.save(save_path, out_arr)
        all_data.append(out_arr)

if load:
    for save_path in save_paths:
        saved_arr = np.load(save_path)
        #field_strength, field_err, wobble_freq, wobble_err = np.load(save_path)
        #arr = np.array([field_strength, field_err, wobble_freq, wobble_err])
        #arr = saved_arr.T
        all_data.append(saved_arr)

popt_arr = []
colors = bu.get_color_map(len(all_data), cmap='inferno')
for arrind, arr in enumerate(all_data):
    field_strength = arr[0]
    field_err = arr[1]
    wobble_freq = arr[2]
    wobble_err = arr[3]

    # plt.scatter(np.arange(arr.shape[1]), field_strength)
    # plt.figure()
    plt.errorbar(field_strength, 2*np.pi*wobble_freq, alpha=0.6, \
                 yerr=wobble_err, color=colors[arrind])

    p0 = [10, 0, 0]
    try:
        popt, pcov = opti.curve_fit(sqrt, field_strength, 2*np.pi*wobble_freq, \
def plot_many_spectra(files, data_axes=[0,1,2], cant_axes=[], elec_axes=[], other_axes=[], \
                      fb_axes=[], plot_power=False, diag=True, colormap='plasma', \
                      sort='time', file_inds=(0,10000)):
    '''Loops over a list of file names, loads each file, diagonalizes,
       then plots the amplitude spectral density of any number of data
       or cantilever/electrode drive signals

       INPUTS: files, list of file names to extract data from
               data_axes, list of pos_data axes to plot
               cant_axes, list of cant_data axes to plot
               elec_axes, list of electrode_data axes to plot
               diag, boolean specifying whether to diagonalize

       OUTPUTS: none, plots stuff
    '''

    if diag:
        dfig, daxarr = plt.subplots(len(data_axes), 2, sharex=True, sharey=True, \
                                    figsize=figsize)
    else:
        dfig, daxarr = plt.subplots(len(data_axes), 1, sharex=True, sharey=True, \
                                    figsize=figsize)
    dfig.suptitle('XYZ Data', fontsize=18)

    if len(cant_axes):
        cfig, caxarr = plt.subplots(len(cant_axes), 1, sharex=True, sharey=True)
        if len(cant_axes) == 1:
            caxarr = [caxarr]
        cfig.suptitle('Attractor Data', fontsize=18)

    if len(elec_axes):
        efig, eaxarr = plt.subplots(len(elec_axes), 1, sharex=True, sharey=True)
        if len(elec_axes) == 1:
            eaxarr = [eaxarr]
        efig.suptitle('Electrode Data', fontsize=18)

    if len(other_axes):
        ofig, oaxarr = plt.subplots(len(other_axes), 1, sharex=True, sharey=True)
        if len(other_axes) == 1:
            oaxarr = [oaxarr]
        ofig.suptitle('Other Data', fontsize=18)

    if len(fb_axes):
        fbfig, fbaxarr = plt.subplots(len(fb_axes), 1, sharex=True, sharey=True, \
                                      figsize=figsize)
        if len(fb_axes) == 1:
            fbaxarr = [fbaxarr]
        fbfig.suptitle('Feedback Data', fontsize=18)

    if plot_power:
        pfig, paxarr = plt.subplots(2, 1, sharex=True, figsize=(6, 6))
        pfig.suptitle('Power/Power Feedback Data', fontsize=18)

    kludge_fig, kludge_ax = plt.subplots(1, 1)

    files = files[file_inds[0]:file_inds[1]]
    if step10:
        files = files[::10]
    if invert_order:
        files = files[::-1]

    colors = bu.get_color_map(len(files), cmap=colormap)
    #colors = ['C0', 'C1', 'C2']

    old_per = 0
    print("Processing %i files..." % len(files))
    for fil_ind, fil in enumerate(files):
        color = colors[fil_ind]

        # Display percent completion
        bu.progress_bar(fil_ind, len(files))

        # Load data
        df = bu.DataFile()
        if new_trap:
            df.load_new(fil)
        else:
            df.load(fil)

        if len(other_axes):
            df.load_other_data()

        df.calibrate_stage_position()

        #df.high_pass_filter(fc=1)
        #df.detrend_poly()

        #plt.figure()
        #plt.plot(df.pos_data[0])
        #plt.show()

        if cascade:
            cascade_scale = (cascade_fac)**fil_ind
        else:
            cascade_scale = 1.0

        freqs = np.fft.rfftfreq(len(df.pos_data[0]), d=1.0/df.fsamp)

        if diag:
            df.diagonalize(maxfreq=lpf, date=tfdate, plot=tf_plot)

        if fil_ind == 0 and len(cant_axes):
            drivepsd = np.abs(np.fft.rfft(df.cant_data[drive_ax]))
            driveind = np.argmax(drivepsd[1:]) + 1
            drive_freq = freqs[driveind]

        for axind, ax in enumerate(data_axes):

            try:
                fac = cascade_scale * df.conv_facs[ax]  # * (1.0 / 0.12e-12)
            except:
                fac = cascade_scale

            if fullNFFT:
                NFFT = len(df.pos_data[ax])
            else:
                NFFT = userNFFT

            psd, freqs = mlab.psd(df.pos_data[ax], Fs=df.fsamp, \
                                  NFFT=NFFT, window=window)

            norm = bu.fft_norm(df.nsamp, df.fsamp)
            new_freqs = np.fft.rfftfreq(df.nsamp, d=1.0/df.fsamp)

            #fac = 1.0
            kludge_fac = 1.0
            #kludge_fac = 1.0 / np.sqrt(10)

            if diag:
                dpsd, dfreqs = mlab.psd(df.diag_pos_data[ax], Fs=df.fsamp, \
                                        NFFT=NFFT, window=window)
                kludge_ax.loglog(freqs, np.sqrt(dpsd) * kludge_fac, \
                                 color='C'+str(axind), label=posdic[axind])
                kludge_ax.set_ylabel(
                    '$\\sqrt{\\mathrm{PSD}}$ $[\\mathrm{N}/\\sqrt{\\mathrm{Hz}}]$')
                kludge_ax.set_xlabel('Frequency [Hz]')

                # daxarr[axind,0].loglog(new_freqs, \
                #     fac*norm*np.abs(np.fft.rfft(df.pos_data[ax]))*kludge_fac, \
                #     color='k', label='np.fft with manual normalization')
                daxarr[axind, 0].loglog(freqs, np.sqrt(psd) * fac * kludge_fac,
                                        color=color, label=df.fname)  #'mlab.psd')
                daxarr[axind, 0].grid(alpha=0.5)
                daxarr[axind, 1].loglog(
                    new_freqs,
                    norm * np.abs(np.fft.rfft(df.diag_pos_data[ax])) * kludge_fac,
                    color='k')
                daxarr[axind, 1].loglog(freqs, np.sqrt(dpsd) * kludge_fac, color=color)
                daxarr[axind, 1].grid(alpha=0.5)
                daxarr[axind, 0].set_ylabel(
                    '$\\sqrt{\\mathrm{PSD}}$ $[\\mathrm{N}/\\sqrt{\\mathrm{Hz}}]$')
                if ax == data_axes[-1]:
                    daxarr[axind, 0].set_xlabel('Frequency [Hz]')
                    daxarr[axind, 1].set_xlabel('Frequency [Hz]')
            else:
                # daxarr[axind].loglog(new_freqs, norm*np.abs(np.fft.rfft(df.pos_data[ax])), \
                #                      color='k', label='np.fft with manual normalization')
                daxarr[axind].loglog(freqs, np.sqrt(psd) * fac,
                                     color=color, label=df.fname)  #'mlab.psd')
                daxarr[axind].grid(alpha=0.5)
                daxarr[axind].set_ylabel(
                    '$\\sqrt{\\mathrm{PSD}}$ $[\\mathrm{Arb}/\\sqrt{\\mathrm{Hz}}]$')
                #daxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$ $[\\mathrm{N}/\\sqrt{\\mathrm{Hz}}]$')
                if ax == data_axes[-1]:
                    daxarr[axind].set_xlabel('Frequency [Hz]')

        if len(fb_axes):
            for axind, ax in enumerate(fb_axes):
                fb_psd, freqs = mlab.psd(df.pos_fb[ax], Fs=df.fsamp, \
                                         NFFT=NFFT, window=window)
                fbaxarr[axind].loglog(freqs, np.sqrt(fb_psd) * fac, color=color)
                fbaxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$')

        if len(cant_axes):
            for axind, ax in enumerate(cant_axes):
                psd, freqs = mlab.psd(df.cant_data[ax], Fs=df.fsamp, \
                                      NFFT=NFFT, window=window)
                caxarr[axind].loglog(freqs, np.sqrt(psd), color=color)
                caxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$')

        if len(elec_axes):
            for axind, ax in enumerate(elec_axes):
                psd, freqs = mlab.psd(df.electrode_data[ax], Fs=df.fsamp, \
                                      NFFT=NFFT, window=window)
                eaxarr[axind].loglog(freqs, np.sqrt(psd), color=color)
                eaxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$')

        if len(other_axes):
            for axind, ax in enumerate(other_axes):
                #ax = ax - 3
                psd, freqs = mlab.psd(df.other_data[ax], Fs=df.fsamp, \
                                      NFFT=NFFT, window=window)
                oaxarr[axind].loglog(freqs, np.sqrt(psd), color=color)
                oaxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$')

        if plot_power:
            psd, freqs = mlab.psd(df.power, Fs=df.fsamp, \
                                  NFFT=NFFT, window=window)
            psd_fb, freqs_fb = mlab.psd(df.power_fb, Fs=df.fsamp, \
                                        NFFT=NFFT, window=window)
            paxarr[0].loglog(freqs, np.sqrt(psd), color=color)
            paxarr[1].loglog(freqs_fb, np.sqrt(psd_fb), color=color)
            for axind in [0, 1]:
                paxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$')

    if filename_labels:
        daxarr[0].legend(fontsize=10)
    if len(fb_axes):
        fbaxarr[0].legend(fontsize=10)

    #daxarr[0].set_xlim(0.5, 25000)

    if diag:
        derp_ax = daxarr[0, 0]
    else:
        derp_ax = daxarr[0]
    # derp_ax.legend(fontsize=10)

    if len(ylim):
        derp_ax.set_ylim(*ylim)
        kludge_ax.set_ylim(*ylim)
    if len(xlim):
        derp_ax.set_xlim(*xlim)
        kludge_ax.set_xlim(1, 500)

    dfig.tight_layout()
    dfig.subplots_adjust(top=0.91)

    kludge_ax.grid()
    kludge_ax.legend()
    kludge_fig.tight_layout()

    if plot_power:
        paxarr[-1].set_xlabel('Frequency [Hz]')
        pfig.tight_layout()
        pfig.subplots_adjust(top=0.91)

    if len(cant_axes):
        caxarr[-1].set_xlabel('Frequency [Hz]')
        cfig.tight_layout()
        cfig.subplots_adjust(top=0.91)
    if len(elec_axes):
        eaxarr[-1].set_xlabel('Frequency [Hz]')
        efig.tight_layout()
        efig.subplots_adjust(top=0.91)
    if len(other_axes):
        oaxarr[-1].set_xlabel('Frequency [Hz]')
        ofig.tight_layout()
        ofig.subplots_adjust(top=0.91)
    if len(fb_axes):
        fbaxarr[-1].set_xlabel('Frequency [Hz]')
        fbfig.tight_layout()
        fbfig.subplots_adjust(top=0.91)

    if savefigs:
        plt.savefig(title_pre + '.png')

        daxarr[0].set_xlim(2000, 25000)
        plt.tight_layout()
        plt.savefig(title_pre + '_zoomhf.png')

        daxarr[0].set_xlim(1, 80)
        plt.tight_layout()
        plt.savefig(title_pre + '_zoomlf.png')

        daxarr[0].set_xlim(0.5, 25000)

    if not savefigs:
        plt.show()
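
### Hedged usage sketch for plot_many_spectra(). The path is hypothetical,
### and the function relies on module-level switches (new_trap, cascade,
### cascade_fac, fullNFFT/userNFFT, window, lpf, tfdate, tf_plot, figsize,
### posdic, filename_labels, xlim, ylim, savefigs, title_pre, step10,
### invert_order, drive_ax) that must be configured above.
#
# allfiles, _ = bu.find_all_fnames('/data/20180601/bead1/grav_data/', ext='.h5')
# plot_many_spectra(allfiles, data_axes=[0, 1, 2], cant_axes=[0, 1, 2],
#                   diag=True, colormap='plasma')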
                                 figsize=(5, 3), dpi=150)
        zfigs.append(zfig)
        zaxarrs.append(zaxarr)

zfits = []
diag_zfits = []

stage_settings = list(force_dic[bias].keys())
stage_settings.sort()
stage_settings = np.array(stage_settings)
nsettings = len(stage_settings)

if nsettings < 10:
    colors = ['C' + str(i) for i in range(nsettings)]
else:
    colors = bu.get_color_map(nsettings, cmap='jet')

for posind, pos in enumerate(stage_settings):
    color = colors[posind]
    lab = str(pos) + ' um'

    if fit_xdat:
        xfits.append(xdat[bias][pos][1])
        diag_xfits.append(diag_xdat[bias][pos][1])
    if fit_zdat:
        zfits.append(zdat[bias][pos][1])
        diag_zfits.append(diag_zdat[bias][pos][1])

    for resp in [0, 1, 2]:
        bins = force_dic[bias][pos][resp][0]
alphas = np.zeros_like(lambdas)
simp_alphas = np.zeros_like(lambdas)
diagalphas = np.zeros_like(lambdas)
simp_diagalphas = np.zeros_like(lambdas)

lambdas = lambdas[::-1]

testalphas = np.linspace(0, 14, 10000)

# For the confidence interval, compute the inverse CDF of a
# chi^2 distribution at 0.95 and compare to the likelihood ratio
# via a goodness-of-fit parameter
chi2dist = stats.chi2(1)
# factor of 0.5 from Wilks's theorem: -2 log (Likelihood) ~ chi^2(1)
con_val = 0.5 * chi2dist.ppf(confidence_level)

colors = bu.get_color_map(len(lambdas))

for ind, yuklambda in enumerate(lambdas):

    fcurve = fcurve_obj.mod_grav_force(bins*1e-6, sep=SEP, alpha=1., \
                                       yuklambda=yuklambda, rbead=RBEAD, nograv=True)
    diagfcurve = fcurve_obj.mod_grav_force(diagbins*1e-6, sep=SEP, alpha=1., \
                                           yuklambda=yuklambda, rbead=RBEAD, nograv=True)

    fcurve = signal.detrend(fcurve)
    diagfcurve = signal.detrend(diagfcurve)

    fft = np.fft.rfft(fcurve)
    asd = np.sqrt(fft.conj() * fft)
    diagfft = np.fft.rfft(diagfcurve)
    diagasd = np.sqrt(diagfft.conj() * diagfft)