def plot_xy_orbit(dirname, allfiles=True, user_filind=0, filter=True, fdrive=41.0):
    '''Plot the x and y spectra and the x-y orbit for files in a directory.
       Note: the `filter` and `fdrive` arguments are currently unused.'''
    print('Analyzing: ', dirname, ' ...')

    files, lengths = bu.find_all_fnames(dirname)
    nfiles = len(files)

    for filind, fil in enumerate(files):
        if not allfiles:
            if filind != user_filind:
                continue

        bu.progress_bar(filind, nfiles)

        df = bu.DataFile()
        df.load(fil)

        freqs = np.fft.rfftfreq(df.nsamp, d=1.0/df.fsamp)

        plt.loglog(freqs, np.abs(np.fft.rfft(df.pos_data[0])))
        plt.loglog(freqs, np.abs(np.fft.rfft(df.pos_data[1])))

        plt.figure()
        plt.scatter(df.pos_data[0], df.pos_data[1])
        plt.show()
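# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original analysis code): plot_xy_orbit()
# above plots raw, un-normalized |rfft| magnitudes.  The helper below shows
# the usual one-sided amplitude-spectral-density normalization, which other
# functions in this repo obtain from bu.fft_norm(nsamp, fsamp).  The sample
# rate, length, and test tone in the example are arbitrary illustration values.
import numpy as np

def asd_sketch(x, fsamp):
    '''Return (freqs, asd) with a one-sided ASD normalization: a pure tone of
       amplitude A recovers an rms of A/sqrt(2) when its bins are summed in
       quadrature with the bin width.'''
    nsamp = len(x)
    freqs = np.fft.rfftfreq(nsamp, d=1.0/fsamp)
    asd = np.abs(np.fft.rfft(x)) * np.sqrt(2.0 / (nsamp * fsamp))
    return freqs, asd

# Example (illustration values only):
#   fsamp, nsamp = 5000.0, 50000
#   t = np.arange(nsamp) / fsamp
#   freqs, asd = asd_sketch(np.sin(2.0*np.pi*41.0*t), fsamp)
#   print(freqs[np.argmax(asd)])   # -> 41.0
# ---------------------------------------------------------------------------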
def find_stage_positions(self, find_again=False):
    '''Loops over a list of file names, loads the attributes of each file,
       then extracts the DC stage position to sort through data.'''
    axvecs = [{}, {}, {}]
    nfiles = len(self.allfiles)
    for fil_ind, fil in enumerate(self.allfiles):
        bu.progress_bar(fil_ind, nfiles, suffix='sorting by stage pos')

        df = bu.DataFile()
        df.load_only_attribs(fil)

        if df.badfile:
            continue

        df.calibrate_stage_position()

        for axind, axstr in enumerate(['x', 'y', 'z']):
            axpos = df.stage_settings[axstr + ' DC']
            if axpos not in list(axvecs[axind].keys()):
                axvecs[axind][axpos] = []
            axvecs[axind][axpos].append(fil)

    pickle.dump(axvecs, open('/backgrounds/axvecs/' + self.bead + '_' + \
                             self.parent_dir + '_axvecs.p', 'wb'))

    self.axvecs = axvecs
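# ---------------------------------------------------------------------------
# Hedged usage sketch (not original code): find_stage_positions() pickles a
# list of three dicts, one per axis ('x', 'y', 'z'), each mapping a DC stage
# position to the list of files taken at that position.  Reading the pickle
# back and summarizing it looks like this; the path follows the pattern used
# above with hypothetical bead/parent_dir values.
import pickle

def summarize_axvecs(axvec_path):
    '''Load a pickled axvecs list and report how many files were taken at
       each DC stage position along each axis.'''
    with open(axvec_path, 'rb') as fobj:
        axvecs = pickle.load(fobj)
    for axind, axstr in enumerate(['x', 'y', 'z']):
        for axpos in sorted(axvecs[axind].keys()):
            print('%s DC = %s : %i files' % (axstr, str(axpos), len(axvecs[axind][axpos])))
    return axvecs

# summarize_axvecs('/backgrounds/axvecs/<bead>_<parent_dir>_axvecs.p')
# ---------------------------------------------------------------------------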
def profile(fname, data_column=3):
    '''Extract a single profile (the gradient of the filtered monitor signal)
       as a function of stage position from one sweep file.'''
    df = bu.DataFile()
    df.load(fname)
    df.load_other_data()
    df.calibrate_stage_position()

    if 'ysweep' in fname:
        stage_column = 1
        if 'left' in fname:
            sign = -1.0
        elif 'right' in fname:
            sign = 1.0
        else:
            sign = 1.0
    else:
        stage_column = 0
        sign = 1.0

    b, a = sig.butter(1, 0.5)

    #shape = np.shape(df.other_data)
    #for i in range(shape[0]):
    #    plt.plot(df.other_data[i, :], label = str(i))
    #plt.legend()
    #plt.show()

    int_filt = sig.filtfilt(b, a, df.other_data[data_column, :])
    proft = np.gradient(int_filt)

    stage_filt = sig.filtfilt(b, a, df.cant_data[stage_column, :])
    dir_sign = np.sign(np.gradient(stage_filt)) * sign

    xvec = df.cant_data[stage_column, :]
    # Keep only the gradient referenced to the sweep direction; this
    # expression is algebraically equivalent to -proft * dir_sign
    yvec = (proft - proft * dir_sign) * 0.5 - (proft + proft * dir_sign) * 0.5

    b, y, e = spatial_bin(xvec, yvec)

    return b, y, e
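# ---------------------------------------------------------------------------
# Hedged note (not original code): the yvec expression in profile() above
# reduces algebraically to -proft * dir_sign, i.e. it flips the sign of the
# gradient on sweeps moving against the chosen direction so that forward and
# backward passes add coherently.  Quick numerical check with made-up arrays:
import numpy as np

_proft = np.array([0.1, -0.2, 0.3, 0.5])
_dir_sign = np.array([1.0, 1.0, -1.0, -1.0])

_yvec_orig = (_proft - _proft * _dir_sign) * 0.5 - (_proft + _proft * _dir_sign) * 0.5
_yvec_simple = -_proft * _dir_sign

assert np.allclose(_yvec_orig, _yvec_simple)
# ---------------------------------------------------------------------------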
def profile_directory(prof_dir, raw_dat_col=0, drum_diam=3.25e-2, \
                      return_pos=False, plot_peaks=False, guess=3e-3):
    '''Takes a directory path, profiles each file, and averages for a final result.

       INPUTS: prof_dir, directory path
               raw_dat_col, column in 'other_data' with raw WM100 monitor
               drum_diam, diameter of the optical head that rotates
               return_pos, boolean to specify if return in raw time or
                           calibrated drum position using the drum_diam argument

       OUTPUTS: tot_x, all t/disp associated with profiles, overlain/sorted
                tot_prof, all profiles overlain and sorted
    '''
    prof_files = []
    for root, dirnames, filenames in os.walk(prof_dir):
        for filename in fnmatch.filter(filenames, '*' + config.extensions['data']):
            prof_files.append(os.path.join(root, filename))

    tot_x = []
    tot_prof = []

    nfiles = len(prof_files)
    for fil_ind, fil_path in enumerate(prof_files):
        bu.progress_bar(fil_ind, nfiles)

        prof_df = bu.DataFile()
        prof_df.load(fil_path, skip_fpga=True)
        prof_df.load_other_data()

        x, prof, popt = profile(prof_df, raw_dat_col=raw_dat_col, \
                                drum_diam=drum_diam, return_pos=return_pos, \
                                fit_intensity=True, plot_peaks=plot_peaks, \
                                guess=guess)

        #plt.plot(x, prof)
        #plt.show()

        #x, prof, errs = bu.rebin(x, prof, numbins=5000)

        #plt.plot(x, prof)
        #plt.show()

        if not len(tot_x):
            tot_x = x
            tot_prof = prof
            tot_popt = [popt]
        else:
            tot_x = np.block([tot_x, x])           # = np.hstack((tot_x, x))
            tot_prof = np.block([tot_prof, prof])  # = np.hstack((tot_prof, prof))
            tot_popt.append(popt)                  # = np.concatenate((tot_popt, popt), axis=0)

    #tot_x = np.concatenate(tot_x)
    #tot_prof = np.concatenate(tot_x)

    tot_popt = np.array(tot_popt)
    tot_popt_mean = np.mean(tot_popt, axis=0)

    sort_inds = tot_x.argsort()

    return tot_x[sort_inds], tot_prof[sort_inds], tot_popt_mean
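# ---------------------------------------------------------------------------
# Hedged sketch (not original code): for 1-D arrays np.block([a, b]) is plain
# concatenation, so overlaying several profiles and sorting them by position,
# as done at the end of profile_directory(), amounts to the following.  The
# arrays are made-up stand-ins for the (x, prof) pairs from two files.
import numpy as np

_x1, _p1 = np.array([0.0, 2.0, 4.0]), np.array([1.0, 3.0, 1.0])
_x2, _p2 = np.array([1.0, 3.0, 5.0]), np.array([2.0, 2.5, 0.5])

_tot_x = np.block([_x1, _x2])          # identical to np.hstack((_x1, _x2))
_tot_prof = np.block([_p1, _p2])

_sort_inds = _tot_x.argsort()
_tot_x, _tot_prof = _tot_x[_sort_inds], _tot_prof[_sort_inds]
# ---------------------------------------------------------------------------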
def getNanoStage(fname):
    '''Takes image filename as argument. Gets the nano-positioning stage DC
       settings by opening the .h5 file associated with the image file.
       Returns the median voltage times the stage calibration from V to um.'''
    h5fname = os.path.splitext(fname)[0]
    df = bu.DataFile()
    df.load(h5fname)
    df.calibrate_stage_position()
    return np.median(df.cant_data, axis=-1)
def make_file_objs(datadir, hpf=False, hpf_freq=1.0, \
                   detrend=False, diag=False):
    objs = []
    files = bu.find_all_fnames(datadir)
    for fil in files:
        df = bu.DataFile()
        df.load(fil)
        df.calibrate_stage_position()

        if hpf:
            df.high_pass_filter(fc=hpf_freq)
        if detrend:
            df.detrend_poly()

        objs.append(df)
    return objs
def get_dir_data(files, drive_range=[1., 1500.], drive_amp=5000.):
    nf = len(files)
    ns = 50000
    nfreq = len(freqs)    # `freqs` is expected to exist at module scope
    d_ave = np.zeros((nf, nfreq), dtype=complex)
    pb = []
    pc = []
    pp = []
    t = []
    xs = np.zeros((nf, nfreq), dtype=complex)
    ys = np.zeros((nf, nfreq), dtype=complex)
    zs = np.zeros((nf, nfreq), dtype=complex)

    for i, f in enumerate(files):
        df = bu.DataFile()
        df.load(f)
        df.load_other_data()

        drive = df.other_data[2]
        dbool = (np.abs(np.fft.rfft(drive)) > drive_amp) * \
                (freqs > drive_range[0]) * (freqs < drive_range[1])

        if not np.sum(dbool):
            plt.loglog(freqs, np.abs(np.fft.rfft(drive)))
            plt.loglog(freqs[dbool], np.abs(np.fft.rfft(drive))[dbool], 'o')
            plt.show()

        # Reference every spectrum to the phase of the drive tone
        phi = np.angle(np.mean(np.fft.rfft(drive)[dbool]))

        d_ave[i, :] = np.fft.rfft(drive) * np.exp(-1.j * phi)
        xs[i, :] = np.fft.rfft(df.pos_data[0]) * np.exp(-1.j * phi)
        ys[i, :] = np.fft.rfft(df.pos_data[1]) * np.exp(-1.j * phi)
        zs[i, :] = np.fft.rfft(df.pos_data[2]) * np.exp(-1.j * phi)

        pb.append(df.pressures['baratron'])
        pc.append(df.pressures['cold_cathode'])
        pp.append(df.pressures['pirani'])
        t.append(df.time)

    return {'d': d_ave, 'x': xs, 'y': ys, 'z': zs,
            'p': np.array([pb, pc, pp]), 't': t}
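# ---------------------------------------------------------------------------
# Hedged sketch (not original code): get_dir_data() rotates every spectrum by
# the phase of the drive tone (multiplying by exp(-1j*phi)) so spectra from
# different files can be averaged coherently.  Self-contained illustration
# with a synthetic drive and response; the sample rate, drive frequency, and
# phases are arbitrary.
import numpy as np

_fsamp, _nsamp = 5000.0, 50000
_t = np.arange(_nsamp) / _fsamp
_freqs = np.fft.rfftfreq(_nsamp, d=1.0/_fsamp)

_drive = np.sin(2.0*np.pi*41.0*_t + 0.7)              # drive with a 0.7 rad offset
_resp = 0.1 * np.sin(2.0*np.pi*41.0*_t + 0.7 - 1.2)   # response lags the drive by 1.2 rad

_drive_fft = np.fft.rfft(_drive)
_dbool = np.abs(_drive_fft) > 0.5 * np.max(np.abs(_drive_fft))

_phi = np.angle(np.mean(_drive_fft[_dbool]))
_resp_rot = np.fft.rfft(_resp) * np.exp(-1.j * _phi)

_k0 = np.argmax(np.abs(_drive_fft))
print(_freqs[_k0], np.angle(_resp_rot[_k0]))          # -> 41.0, ~ -1.2 rad
# ---------------------------------------------------------------------------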
def __init__(self, fname, tfdate='', tophatf=2500, plot_tf=False):
    '''Load an hdf5 file into a bead_util.DataFile obj. Calibrate the stage
       position. Calibrate the microsphere response with the transfer
       function.'''
    df = bu.DataFile()
    try:
        df.load(fname)
        self.badfile = False
    except:
        self.badfile = True
        return

    df.calibrate_stage_position()
    df.diagonalize(date=tfdate, maxfreq=tophatf, plot=plot_tf)

    self.time = df.time
    self.fsamp = df.fsamp
    self.nsamp = df.nsamp
    self.phi_cm = df.phi_cm
    self.df = df

    self.data_closed = False
def plot_vs_time(files, data_axes=[0,1,2], cant_axes=[], elec_axes=[], \ diag=True, colormap='jet', sort='time', file_inds=(0,10000)): '''Loops over a list of file names, loads each file, diagonalizes, then plots the amplitude spectral density of any number of data or cantilever/electrode drive signals INPUTS: files, list of files names to extract data data_axes, list of pos_data axes to plot cant_axes, list of cant_data axes to plot elec_axes, list of electrode_data axes to plot diag, boolean specifying whether to diagonalize OUTPUTS: none, plots stuff ''' if diag: dfig, daxarr = plt.subplots(len(data_axes),2,sharex=True,sharey=True, \ figsize=(8,8)) else: dfig, daxarr = plt.subplots(len(data_axes),1,sharex=True,sharey=True, \ figsize=(8,8)) if len(cant_axes): cfig, caxarr = plt.subplots(len(data_axes), 1, sharex=True, sharey=True) if len(elec_axes): efig, eaxarr = plt.subplots(len(data_axes), 1, sharex=True, sharey=True) files = [(os.stat(path), path) for path in files] files = [(stat.st_ctime, path) for stat, path in files] files.sort(key=lambda x: (x[0])) files = [obj[1] for obj in files] files = files[file_inds[0]:file_inds[1]] if step10: files = files[::10] if invert_order: files = files[::-1] colors = bu.get_color_map(len(files), cmap=colormap) old_per = 0 print("Processing %i files..." % len(files)) print("Percent complete: ") for fil_ind, fil in enumerate(files): color = colors[fil_ind] # Display percent completion per = int(100. * float(fil_ind) / float(len(files))) if per > old_per: print(old_per, end=' ') sys.stdout.flush() old_per = per # Load data df = bu.DataFile() df.load(fil) df.calibrate_stage_position() df.calibrate_phase() #df.high_pass_filter(fc=1) #df.detrend_poly() plt.figure() #plt.plot((df.daqmx_time-df.daqmx_time[0])*1e-9, \ # (df.pos_data[2]-np.mean(df.pos_data[2])) * (2.0**3 / 100.0)) plt.plot((df.daqmx_time-df.daqmx_time[0])*1e-9, \ (df.phase[4] - df.phase[4][0])) plt.show() df.diagonalize(maxfreq=lpf, interpolate=False) loaded_other = False for ax in data_axes: if ax > 2 and not loaded_other: df.load_other_data() for axind, ax in enumerate(data_axes): if ax <= 2: data = df.pos_data[ax] fac = df.conv_facs[ax] if ax > 2: data = df.other_data[ax - 3] fac = 1.0 t = np.arange(len(data)) * (1.0 / df.fsamp) if diag: daxarr[axind, 0].plot(t, data * fac, color=color) daxarr[axind, 0].grid(alpha=0.5) daxarr[axind, 1].plot(t, data, color=color) daxarr[axind, 1].grid(alpha=0.5) daxarr[axind, 0].set_ylabel('[N]', fontsize=10) if ax == data_axes[-1]: daxarr[axind, 0].set_xlabel('t [s]', fontsize=10) daxarr[axind, 1].set_xlabel('t [s]', fontsize=10) else: daxarr[axind].plot(t, data * fac, color=color) daxarr[axind].grid(alpha=0.5) daxarr[axind].set_ylabel('[N]', fontsize=10) if ax == data_axes[-1]: daxarr[axind].set_xlabel('t [s]', fontsize=10) if len(cant_axes): for axind, ax in enumerate(cant_axes): t = np.arange(len(df.cant_data[ax])) * (1.0 / df.fsamp) caxarr[axind].plot(t, df.cant_data[ax], color=color) if len(elec_axes): for axind, ax in enumerate(elec_axes): t = np.arange(len(df.electrode_data[ax])) * (1.0 / df.fsamp) eaxarr[axind].plot(t, df.electrode_data[ax], color=color) #daxarr[0].set_xlim(0.5, 25000) #daxarr[0].set_ylim(1e-21, 1e-14) plt.tight_layout() plt.show()
def get_data_at_harms(files, gfuncs, yukfuncs, lambdas, lims, \ minsep=20, maxthrow=80, beadheight=5,\ cantind=0, ax1='x', ax2='z', diag=True, plottf=False, \ width=0, nharmonics=10, harms=[], \ ext_cant_drive=False, ext_cant_ind=1, \ ignoreX=False, ignoreY=False, ignoreZ=False, noiseband=10): '''Loops over a list of file names, loads each file, diagonalizes, then performs an optimal filter using the cantilever drive and a theoretical force vs position to generate the filter/template. The result of the optimal filtering is stored, and the data released from memory INPUTS: files, list of files names to extract data cantind, cantilever electrode index ax1, axis with different DC positions ax2, 2nd axis with different DC positions OUTPUTS: ''' #parts = data_dir.split('/') #prefix = parts[-1] #savepath = '/processed_data/grav_data/' + prefix + '_fildat.p' #try: # fildat = pickle.load(open(savepath, 'rb')) # return fildat #except: # print 'Loading data from: ', data_dir fildat = {} temp_gdat = {} for fil_ind, fil in enumerate(files): bu.progress_bar(fil_ind, len(files), suffix=' Sorting Files, Extracting Data') ### Load data df = bu.DataFile() df.load(fil) df.calibrate_stage_position() cantbias = df.electrode_settings['dc_settings'][0] ax1pos = df.stage_settings[ax1 + ' DC'] ax2pos = df.stage_settings[ax2 + ' DC'] if cantbias not in list(fildat.keys()): fildat[cantbias] = {} if ax1pos not in list(fildat[cantbias].keys()): fildat[cantbias][ax1pos] = {} if ax2pos not in list(fildat[cantbias][ax1pos].keys()): fildat[cantbias][ax1pos][ax2pos] = [] if ax1pos not in list(temp_gdat.keys()): temp_gdat[ax1pos] = {} if ax2pos not in list(temp_gdat[ax1pos].keys()): temp_gdat[ax1pos][ax2pos] = [[], []] temp_gdat[ax1pos][ax2pos][1] = [[]] * len(lambdas) cfind = len(fildat[cantbias][ax1pos][ax2pos]) fildat[cantbias][ax1pos][ax2pos].append([]) if fil_ind == 0 and plottf: df.diagonalize(date=tfdate, maxfreq=tophatf, plot=True) else: df.diagonalize(date=tfdate, maxfreq=tophatf) if fil_ind == 0: ginds, fund_ind, drive_freq, drive_ind = \ df.get_boolean_cantfilt(ext_cant_drive=ext_cant_drive, ext_cant_ind=ext_cant_ind, \ nharmonics=nharmonics, harms=harms, width=width) datffts, diagdatffts, daterrs, diagdaterrs = \ df.get_datffts_and_errs(ginds, drive_freq, noiseband=noiseband, plot=False, \ diag=diag) drivevec = df.cant_data[drive_ind] mindrive = np.min(drivevec) maxdrive = np.max(drivevec) posvec = np.linspace(mindrive, maxdrive, 500) ones = np.ones_like(posvec) start = time.time() for lambind, yuklambda in enumerate(lambdas): if ax1 == 'x' and ax2 == 'z': newxpos = minsep + (maxthrow - ax1pos) newheight = ax2pos - beadheight elif ax1 =='z' and ax2 == 'x': newxpos = minsep + (maxthrow - ax2pos) newheight = ax1pos - beadheight else: print("Coordinate axes don't make sense for gravity data...") print("Proceeding anyway, but results might be hard to interpret") newxpos = ax1pos newheight = ax2pos if (newxpos < lims[0][0]*1e6) or (newxpos > lims[0][1]*1e6): #print 'skipped x' continue if (newheight < lims[2][0]*1e6) or (newheight > lims[2][1]*1e6): #print 'skipped z' continue pts = np.stack((newxpos*ones, posvec, newheight*ones), axis=-1) gfft = [[], [], []] yukfft = [[], [], []] for resp in [0,1,2]: if (ignoreX and resp == 0) or (ignoreY and resp == 1) or (ignoreZ and resp == 2): gfft[resp] = np.zeros(np.sum(ginds)) yukfft[resp] = np.zeros(np.sum(ginds)) continue if len(temp_gdat[ax1pos][ax2pos][0]): gfft[resp] = temp_gdat[ax1pos][ax2pos][0][resp] else: gforcevec = gfuncs[resp](pts*1e-6) gforcefunc = 
interp.interp1d(posvec, gforcevec) gforcet = gforcefunc(drivevec) gfft[resp] = np.fft.rfft(gforcet)[ginds] if len(temp_gdat[ax1pos][ax2pos][1][lambind]): yukfft[resp] = temp_gdat[ax1pos][ax2pos][1][lambind][resp] else: yukforcevec = yukfuncs[resp][lambind](pts*1e-6) yukforcefunc = interp.interp1d(posvec, yukforcevec) yukforcet = yukforcefunc(drivevec) yukfft[resp] = np.fft.rfft(yukforcet)[ginds] gfft = np.array(gfft) yukfft = np.array(yukfft) temp_gdat[ax1pos][ax2pos][0] = gfft temp_gdat[ax1pos][ax2pos][1][lambind] = yukfft outdat = (yuklambda, datffts, diagdatffts, daterrs, diagdaterrs, gfft, yukfft) fildat[cantbias][ax1pos][ax2pos][cfind].append(outdat) stop = time.time() #print 'func eval time: ', stop-start return fildat
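# ---------------------------------------------------------------------------
# Hedged sketch (not original code): the core template construction in
# get_data_at_harms() evaluates a modeled force-vs-position curve along the
# measured drive trajectory (via interp1d) and keeps only the FFT bins at the
# drive harmonics (the 'ginds' filter).  Here the force curve and drive are
# made-up stand-ins for gfuncs/yukfuncs and df.cant_data[drive_ind].
import numpy as np
from scipy import interpolate as interp

_fsamp, _nsamp = 5000.0, 50000
_t = np.arange(_nsamp) / _fsamp
_freqs = np.fft.rfftfreq(_nsamp, d=1.0/_fsamp)

_drivevec = 40.0 * np.sin(2.0*np.pi*17.0*_t) + 40.0     # toy cantilever sweep [um]
_posvec = np.linspace(_drivevec.min(), _drivevec.max(), 500)
_forcevec = 1e-17 * np.exp(-_posvec / 20.0)             # toy force vs position [N]

_forcefunc = interp.interp1d(_posvec, _forcevec)
_forcet = _forcefunc(_drivevec)                         # expected force vs time
_forcefft = np.fft.rfft(_forcet)

_harms = np.arange(1, 11) * 17.0                        # fundamental + harmonics
_ginds = np.zeros(len(_freqs), dtype=bool)
for _h in _harms:
    _ginds[np.argmin(np.abs(_freqs - _h))] = True

_template = _forcefft[_ginds]                           # template at the harmonics
# ---------------------------------------------------------------------------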
def plot_spectra_3d(files, ax_to_plot=0, diag=False, colormap='plasma'): '''Makes a cool 3d plot since waterfalls/cascaded plots end up kind being f****d up. ''' res_freqs = [] powers = [] fig = plt.figure(figsize=(7, 5)) ax = fig.gca(projection='3d') ax.get_proj = lambda: np.dot(Axes3D.get_proj(ax), np.diag([1, 1, 0.6, 1])) # fig.suptitle('XYZ Data', fontsize=18) files = files colors = bu.get_color_map(len(files_to_plot), cmap=colormap) i = 0 #colors = ['C0', 'C1', 'C2'] print("Processing %i files..." % len(files)) for fil_ind, fil in enumerate(files): # Display percent completion bu.progress_bar(fil_ind, len(files)) # Load data df = bu.DataFile() if new_trap: df.load_new(fil) else: df.load(fil) df.calibrate_stage_position() if diag: df.diagonalize(maxfreq=lpf, date=tfdate, plot=tf_plot) try: fac = df.conv_facs[ax_to_plot] # * (1.0 / 0.12e-12) except: fac = 1.0 if fullNFFT: NFFT = len(df.pos_data[ax_to_plot]) else: NFFT = userNFFT if diag: psd, freqs = mlab.psd(df.diag_pos_data[ax_to_plot], Fs=df.fsamp, \ NFFT=NFFT, window=window) else: psd, freqs = mlab.psd(df.pos_data[ax_to_plot], Fs=df.fsamp, \ NFFT=NFFT, window=window) inds = (freqs > ylim[0]) * (freqs < ylim[1]) * ( np.sqrt(psd) > zlim[0] * fac_for_resfreq) freqs = freqs[inds] psd = psd[inds] norm = bu.fft_norm(df.nsamp, df.fsamp) new_freqs = np.fft.rfftfreq(df.nsamp, d=1.0 / df.fsamp) xs = np.zeros_like(freqs) + fil_ind if fil_ind in files_to_plot: popt, pcov = bu.fit_damped_osc_amp(df.pos_data[ax_to_plot], fit_band=[10, 2000], \ fsamp=df.fsamp, plot=False) res_freqs.append(popt[1]) color = colors[i] i += 1 ax.plot(xs, np.log10(freqs), np.log10(np.sqrt(psd)), color=color) zlim_actual = (zlim[0] * fac_for_resfreq, zlim[1]) x = np.arange(len(res_freqs)) interpfunc = interpolate.UnivariateSpline(x, res_freqs, k=2) ax.scatter(x, np.log10(res_freqs), zs=np.log10(zlim_actual[0]), \ zdir='z', s=25, c=colors, alpha=1) ax.plot(x, np.log10(interpfunc(x)), zs=np.log10(zlim_actual[0]), \ zdir='z', lw=2, color='k', zorder=1) # ax.grid() if ylim: ax.set_ylim(np.log10(ylim[0]), np.log10(ylim[1])) if zlim: ax.set_zlim(np.log10(zlim_actual[0]), np.log10(zlim_actual[1])) ax.set_xticks([]) ax.set_yticks(np.log10(yticks)) ax.set_yticklabels(yticks) ax.set_zticks(np.log10(zticks)) ax.set_zticklabels(zticklabels) # ax.ticklabel_format(axis='z', style='sci') ax.set_xlabel('Closer to Focus $\\rightarrow$', labelpad=0) ax.set_ylabel('Frequency [Hz]', labelpad=20) ax.set_zlabel('ASD [Arb/$\\sqrt{\\rm Hz}$]', labelpad=15) # if xlim: # ax.set_xlim(*xlim) # if ylim: # ax.set_ylim(*ylim) # if zlim: # ax.set_zlim(*zlim) # fig.tight_layout() ax.view_init(elev=15, azim=-15) fig.tight_layout() fig.subplots_adjust(top=1.35, left=-0.07, right=0.95, bottom=-0.05) plt.show()
def profile(fname, data_column=0, plot=False, nbins=200):
    df = bu.DataFile()
    df.load(fname, skip_fpga=True)
    df.load_other_data()
    df.calibrate_stage_position()

    dt = 1.0 / df.fsamp

    # `ysign` and `vec_inds` are module-level settings
    if 'ysweep' in fname:
        stage_column = 1
        if not ysign:
            sign = 1.0
        else:
            sign = ysign
    else:
        stage_column = 0
        sign = 1.0

    b, a = sig.butter(1, 0.5)

    if plot:
        shape = np.shape(df.other_data)
        for i in range(shape[0]):
            plt.plot(df.other_data[i, :], label=str(i))
        plt.title('Data Columns')
        plt.legend()
        plt.tight_layout()

        plt.figure()
        for j in range(3):
            plt.plot(df.cant_data[j, :], label=str(j))
        plt.title('Attractor Coordinates')
        plt.legend()
        plt.tight_layout()

        plt.show()

        input()

    h = np.mean(df.cant_data[2, :])
    h_round = bu.round_sig(h, sig=3)
    if h_round < 10.0:
        h_round = bu.round_sig(h_round, sig=2)

    int_filt = sig.filtfilt(b, a, df.other_data[data_column])
    proft = np.gradient(int_filt)
    # proft = np.gradient(df.other_data[data_column])

    #plt.plot(df.other_data[0])
    #plt.show()

    stage_filt = sig.filtfilt(b, a, df.cant_data[stage_column, :])
    dir_sign = np.sign(np.gradient(stage_filt)) * sign
    # NOTE: this unfiltered assignment overrides the filtered dir_sign above
    dir_sign = np.sign(np.gradient(df.cant_data[stage_column])) * sign

    xvec = df.cant_data[stage_column, :]
    yvec = (proft - proft * dir_sign) * 0.5 - (proft + proft * dir_sign) * 0.5

    # sort_inds = np.argsort(xvec)
    # b, y, e = bu.spatial_bin(xvec, yvec, dt, nbins=300, nharmonics=300, add_mean=True)

    b, y, e = bu.rebin(xvec[vec_inds[0]:vec_inds[1]], \
                       yvec[vec_inds[0]:vec_inds[1]], \
                       nbins=nbins, plot=False, correlated_errs=True)

    return b, y, e, h_round
def close_datafile(self):
    '''Clear the old DataFile class by assigning an empty class to self.df
       and assuming the python garbage collector will take care of it.'''
    self.data_closed = True
    self.df = bu.DataFile()
def weigh_bead_efield(files, colormap='jet', sort='time', file_inds=(0,10000), \ pos=False): '''Loops over a list of file names, loads each file, diagonalizes, then plots the amplitude spectral density of any number of data or cantilever/electrode drive signals INPUTS: files, list of files names to extract data data_axes, list of pos_data axes to plot cant_axes, list of cant_data axes to plot elec_axes, list of electrode_data axes to plot diag, boolean specifying whether to diagonalize OUTPUTS: none, plots stuff ''' files = [(os.stat(path), path) for path in files] files = [(stat.st_ctime, path) for stat, path in files] files.sort(key=lambda x: (x[0])) files = [obj[1] for obj in files] files = files[file_inds[0]:file_inds[1]] #files = files[::10] date = files[0].split('/')[2] charge_file = '/calibrations/charges/' + date if pos: charge_file += '_recharge.charge' else: charge_file += '.charge' q_bead = np.load(charge_file)[0] * constants.elementary_charge print(q_bead / constants.elementary_charge) run_index = 0 masses = [] nfiles = len(files) print("Processing %i files..." % nfiles) eforce = [] power = [] for fil_ind, fil in enumerate(files): #files[56*(i):56*(i+1)]): bu.progress_bar(fil_ind, nfiles) # Load data df = bu.DataFile() try: df.load(fil, load_other=True) except: continue df.calibrate_stage_position() df.calibrate_phase() if fil_ind == 0: init_phi = np.mean(df.zcal) top_elec = mon_fac * np.mean(df.other_data[6]) bot_elec = mon_fac * np.mean(df.other_data[7]) # Synth plugged in negative so just adding instead of subtracting negative Vdiff = V2 + amp_gain * df.synth_settings[0] Vdiff = np.mean(df.electrode_data[2]) - np.mean(df.electrode_data[1]) Vdiff = top_elec - bot_elec force = -(Vdiff / (4.0e-3)) * q_bead force2 = (top_elec * e_top_func(0.0) + bot_elec * e_bot_func(0.0)) * q_bead try: mean_fb = np.mean(df.pos_fb[2]) mean_pow = bits_to_power(mean_fb) except: continue #eforce.append(force) eforce.append(force2) power.append(mean_pow) eforce = np.array(eforce) power = np.array(power) power = power / np.mean(power) inds = np.abs(eforce) < 2e-13 eforce = eforce[inds] power = power[inds] popt, pcov = opti.curve_fit(line, eforce*1e13, power, \ absolute_sigma=False, maxfev=10000) test_vals = np.linspace(np.min(eforce * 1e13), np.max(eforce * 1e13), 100) fit = line(test_vals, *popt) lev_force = -popt[1] / (popt[0] * 1e13) mass = lev_force / (9.806) mass_err = np.sqrt( pcov[0,0] / popt[0]**2 + \ pcov[1,1] / popt[1]**2 + \ np.abs(pcov[0,1]) / np.abs(popt[0]*popt[1]) ) * mass #masses.append(mass) print(mass * 1e12) print(mass_err * 1e12) plt.figure() plt.plot(eforce, power, 'o') plt.xlabel('Elec. Force [N]', fontsize=14) plt.ylabel('Levitation Power [arb]', fontsize=14) plt.tight_layout() plt.plot(test_vals*1e-13, fit, lw=2, color='r', \ label='Implied mass: %0.3f ng' % (mass*1e12)) plt.legend() plt.show()
def weigh_bead_efield(files, elec_ind, pow_ind, colormap='plasma', sort='time',\ file_inds=(0,10000), plot=True, print_res=False, pos=False, \ save_mass=False, new_trap=False, correct_phase_shift=False): '''Loops over a list of file names, loads each file, diagonalizes, then plots the amplitude spectral density of any number of data or cantilever/electrode drive signals INPUTS: files, list of files names to extract data data_axes, list of pos_data axes to plot cant_axes, list of cant_data axes to plot elec_axes, list of electrode_data axes to plot diag, boolean specifying whether to diagonalize OUTPUTS: none, plots stuff ''' date = re.search(r"\d{8,}", files[0])[0] suffix = files[0].split('/')[-2] if new_trap: trap_str = 'new_trap' else: trap_str = 'old_trap' charge_file = '/data/{:s}_processed/calibrations/charges/'.format( trap_str) + date save_filename = '/data/{:s}_processed/calibrations/masses/'.format(trap_str) \ + date + '_' + suffix + '.mass' bu.make_all_pardirs(save_filename) if pos: charge_file += '_recharge.charge' else: charge_file += '.charge' try: nq = np.load(charge_file)[0] found_charge = True except: found_charge = False if not found_charge or manual_charge: user_nq = input('No charge file or manual requested. Guess q: ') nq = int(user_nq) if correct_phase_shift: print('Correcting anomalous phase-shift during analysis.') # nq = -16 print('qbead: {:d} e'.format(int(nq))) q_bead = nq * constants.elementary_charge run_index = 0 masses = [] nfiles = len(files) if not print_res: print("Processing %i files..." % nfiles) all_eforce = [] all_power = [] all_param = [] mass_vec = [] p_ac = [] p_dc = [] e_ac = [] e_dc = [] pressure_vec = [] zamp_avg = 0 zphase_avg = 0 zamp_N = 0 zfb_avg = 0 zfb_N = 0 power_avg = 0 power_N = 0 Nbad = 0 powpsd = [] for fil_ind, fil in enumerate(files): # 15-65 # 4 # if fil_ind == 16 or fil_ind == 4: # continue bu.progress_bar(fil_ind, nfiles) # Load data df = bu.DataFile() try: if new_trap: df.load_new(fil) else: df.load(fil, load_other=True) except Exception: traceback.print_exc() continue try: # df.calibrate_stage_position() df.calibrate_phase() except Exception: traceback.print_exc() continue if ('20181129' in fil) and ('high' in fil): pressure_vec.append(1.5) else: try: pressure_vec.append(df.pressures['pirani']) except Exception: pressure_vec.append(0.0) ### Extract electrode data if new_trap: top_elec = df.electrode_data[1] bot_elec = df.electrode_data[2] else: top_elec = mon_fac * df.other_data[elec_ind] bot_elec = mon_fac * df.other_data[elec_ind + 1] fac = 1.0 if np.std(top_elec) < 0.5 * np.std(bot_elec) \ or np.std(bot_elec) < 0.5 * np.std(top_elec): print( 'Adjusting electric field since only one electrode was digitized.' 
) fac = 2.0 nsamp = len(top_elec) zeros = np.zeros(nsamp) voltages = [zeros, top_elec, bot_elec, zeros, \ zeros, zeros, zeros, zeros] efield = bu.trap_efield(voltages, new_trap=new_trap) eforce2 = fac * sign * efield[2] * q_bead tarr = np.arange(0, df.nsamp / df.fsamp, 1.0 / df.fsamp) # fig, axarr = plt.subplots(2,1,sharex=True,figsize=(10,8)) # axarr[0].plot(tarr, top_elec, label='Top elec.') # axarr[0].plot(tarr, bot_elec, label='Bottom elec.') # axarr[0].set_ylabel('Apparent Voltages [V]') # axarr[0].legend(fontsize=12, loc='upper right') # axarr[1].plot(tarr, efield[2]) # axarr[1].set_xlabel('Time [s]') # axarr[1].set_ylabel('Apparent Electric Field [V/m]') # fig.tight_layout() # plt.show() # input() freqs = np.fft.rfftfreq(df.nsamp, d=1.0 / df.fsamp) drive_ind = np.argmax(np.abs(np.fft.rfft(eforce2))) drive_freq = freqs[drive_ind] zamp = np.abs( np.fft.rfft(df.zcal) * bu.fft_norm(df.nsamp, df.fsamp) * \ np.sqrt(freqs[1] - freqs[0]) ) zamp *= (1064.0e-9 / 2.0) * (1.0 / (2.9 * np.pi)) zphase = np.angle(np.fft.rfft(df.zcal)) zamp_avg += zamp[drive_ind] zamp_N += 1 #plt.loglog(freqs, zamp) #plt.scatter(freqs[drive_ind], zamp[drive_ind], s=10, color='r') #plt.show() zfb = np.abs(np.fft.rfft(df.pos_fb[2]) * bu.fft_norm(df.nsamp, df.fsamp) * \ np.sqrt(freqs[1] - freqs[0]) ) zfb_avg += zfb[drive_ind] zfb_N += 1 #eforce2 = (top_elec * e_top_func(0.0) + bot_elec * e_bot_func(0.0)) * q_bead if noise: e_dc.append(np.mean(eforce2)) e_ac_val = np.abs(np.fft.rfft(eforce2))[drive_ind] e_ac.append(e_ac_val * bu.fft_norm(df.nsamp, df.fsamp) \ * np.sqrt(freqs[1] - freqs[0]) ) zphase_avg += (zphase[drive_ind] - np.angle(eforce2)[drive_ind]) if np.sum(df.power) == 0.0: current = np.abs(df.other_data[pow_ind]) / trans_gain else: fac = 1e-6 current = fac * df.power / trans_gain power = current / pd_gain power = power / line_filter_trans power = power / bs_fac power_avg += np.mean(power) power_N += 1 if noise: p_dc.append(np.mean(power)) p_ac_val = np.abs(np.fft.rfft(power))[drive_ind] p_ac.append(p_ac_val * bu.fft_norm(df.nsamp, df.fsamp) \ * np.sqrt(freqs[1] - freqs[0]) ) fft1 = np.fft.rfft(power) fft2 = np.fft.rfft(df.pos_fb[2]) if not len(powpsd): powpsd = np.abs(fft1) Npsd = 1 else: powpsd += np.abs(fft1) Npsd += 1 # freqs = np.fft.rfftfreq(df.nsamp, d=1.0/df.fsamp) # plt.loglog(freqs, np.abs(np.fft.rfft(eforce2))) # plt.loglog(freqs, np.abs(np.fft.rfft(power))) # plt.show() # input() # fig, axarr = plt.subplots(2,1,sharex=True,figsize=(10,8)) # axarr[0].plot(tarr, power) # axarr[0].set_ylabel('Measured Power [Arb.]') # axarr[1].plot(tarr, power) # axarr[1].set_xlabel('Time [s]') # axarr[1].set_ylabel('Measured Power [Arb.]') # bot, top = axarr[1].get_ylim() # axarr[1].set_ylim(1.05*bot, 0) # fig.tight_layout() # plt.show() # input() bins, dat, errs = bu.spatial_bin(eforce2, power, nbins=200, width=0.0, #width=0.05, \ dt=1.0/df.fsamp, harms=[1], \ add_mean=True, verbose=False, \ correct_phase_shift=correct_phase_shift, \ grad_sign=0) dat = dat / np.mean(dat) #plt.plot(bins, dat, 'o') #plt.show() popt, pcov = opti.curve_fit(line, bins*1.0e13, dat, \ absolute_sigma=False, maxfev=10000) test_vals = np.linspace(np.min(eforce2 * 1.0e13), np.max(eforce2 * 1.0e13), 100) fit = line(test_vals, *popt) lev_force = -popt[1] / (popt[0] * 1.0e13) mass = lev_force / (9.806) #umass = ulev_force / 9.806 #lmass = llev_force / 9.806 if mass > upper_outlier or mass < lower_outlier: print('Crazy mass: {:0.2f} pg.... 
ignoring'.format(mass * 1e15)) # fig, axarr = plt.subplots(3,1,sharex=True) # axarr[0].plot(eforce2) # axarr[1].plot(power) # axarr[2].plot(df.pos_data[2]) # ylims = axarr[1].get_ylim() # axarr[1].set_ylim(ylims[0], 0) # plt.show() continue all_param.append(popt) all_eforce.append(bins) all_power.append(dat) mass_vec.append(mass) if noise: print('DC power: ', np.mean(p_dc), np.std(p_dc)) print('AC power: ', np.mean(p_ac), np.std(p_ac)) print('DC field: ', np.mean(e_dc), np.std(e_dc)) print('AC field: ', np.mean(e_ac), np.std(e_ac)) return #plt.plot(mass_vec) mean_popt = np.mean(all_param, axis=0) mean_lev = np.mean(mass_vec) * 9.806 plot_vec = np.linspace(np.min(all_eforce), mean_lev, 100) if plot: fig = plt.figure(dpi=200, figsize=(6, 4)) ax = fig.add_subplot(111) ### Plot force (in pN / g = pg) vs power plt.plot(np.array(all_eforce).flatten()[::5]*1e15*(1.0/9.806), \ np.array(all_power).flatten()[::5], \ 'o', alpha = 0.5) #for params in all_param: # plt.plot(plot_vec, line(plot_vec, params[0]*1e13, params[1]), \ # '--', color='r', lw=1, alpha=0.05) plt.plot(plot_vec*1e12*(1.0/9.806)*1e3, \ line(plot_vec, mean_popt[0]*1e13, mean_popt[1]), \ '--', color='k', lw=2, \ label='Implied mass: %0.1f pg' % (np.mean(mass_vec)*1e15)) left, right = ax.get_xlim() # ax.set_xlim((left, 500)) ax.set_xlim(*xlim) bot, top = ax.get_ylim() ax.set_ylim((0, top)) plt.legend() plt.xlabel('Applied electrostatic force/$g$ (pg)') plt.ylabel('Optical power (arb. units)') plt.grid() plt.tight_layout() if save_example: fig.savefig(example_filename) fig.savefig(example_filename[:-4] + '.pdf') fig.savefig(example_filename[:-4] + '.svg') x_plotvec = np.array(all_eforce).flatten() y_plotvec = np.array(all_power).flatten() yresid = (y_plotvec - line(x_plotvec, mean_popt[0] * 1e13, mean_popt[1])) / y_plotvec plt.figure(dpi=200, figsize=(3, 2)) plt.hist(yresid * 100, bins=30) plt.legend() plt.xlabel('Resid. Power [%]') plt.ylabel('Counts') plt.grid() plt.tight_layout() plt.figure(dpi=200, figsize=(3, 2)) plt.plot(x_plotvec * 1e15, yresid * 100, 'o') plt.legend() plt.xlabel('E-Force [pN]') plt.ylabel('Resid. Pow. 
[%]') plt.grid() plt.tight_layout() derpfig = plt.figure(dpi=200, figsize=(3, 2)) #derpfig.patch.set_alpha(0.0) plt.hist(np.array(mass_vec) * 1e15, bins=10) plt.xlabel('Mass (pg)') plt.ylabel('Count') plt.grid() #plt.title('Implied Masses, Each from 50s Integration') #plt.xlim(0.125, 0.131) plt.tight_layout() if save_example: derpfig.savefig(example_filename[:-4] + '_hist.png') derpfig.savefig(example_filename[:-4] + '_hist.pdf') derpfig.savefig(example_filename[:-4] + '_hist.svg') plt.show() final_mass = np.mean(mass_vec) final_err_stat = 0.5 * np.std(mass_vec) #/ np.sqrt(len(mass_vec)) final_err_sys = np.sqrt((0.015**2 + 0.01**2) * final_mass**2) final_pressure = np.mean(pressure_vec) if save_mass: save_arr = [final_mass, final_err_stat, final_err_sys] np.save(open(save_filename, 'wb'), save_arr) print('Bad Files: %i / %i' % (Nbad, nfiles)) if print_res: gresid_fac = (2.0 * np.pi * freqs[drive_ind])**2 / 9.8 print(' mass [pg]: {:0.1f}'.format(final_mass * 1e15)) print(' st.err [pg]: {:0.2f}'.format(final_err_stat * 1e15)) print(' sys.err [pg]: {:0.2f}'.format(final_err_sys * 1e15)) print(' qbead [e]: {:d}'.format( int(round(q_bead / constants.elementary_charge)))) print(' P [mbar]: {:0.2e}'.format(final_pressure)) print(' <P> [arb]: {:0.2e}'.format(power_avg / power_N)) print(' zresid [g]: {:0.3e}'.format( (zamp_avg / zamp_N) * gresid_fac)) print(' zphase [rad]: {:0.3e}'.format(zphase_avg / zamp_N)) print(' zfb [arb]: {:0.3e}'.format(zfb_avg / zfb_N)) outarr = [ final_mass*1e15, final_err_stat*1e15, final_err_sys*1e15, \ q_bead/constants.elementary_charge, \ final_pressure, power_avg / power_N, \ (zamp_avg / zamp_N) * gresid_fac, \ zphase_avg / zamp_N, zfb_avg / zfb_N ] return outarr else: scaled_params = np.array(all_param) scaled_params[:, 0] *= 1e13 outdic = {'eforce': all_eforce, 'power': all_power, \ 'linear_fit_params': scaled_params, \ 'ext_masses': mass_vec} return outdic
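# ---------------------------------------------------------------------------
# Hedged sketch (not original code): the mass extraction in weigh_bead_efield()
# is a straight-line fit of normalized optical power vs applied electrostatic
# force; the force at which the fit extrapolates to zero power is the
# levitation force, and mass = lev_force / g.  Synthetic illustration of just
# that step, using the same 1e13 pre-scaling trick; all values are made up.
import numpy as np
import scipy.optimize as opti

def line_model(x, a, b):
    return a * x + b

_g = 9.806
_true_mass = 0.1e-12                                    # kg, illustration only
_eforce = np.linspace(-2e-13, 2e-13, 50)                # applied force [N]
_power = 1.0 - _eforce / (_true_mass * _g)              # power -> 0 at F = m*g

_popt, _pcov = opti.curve_fit(line_model, _eforce*1e13, _power/np.mean(_power),
                              maxfev=10000)

_lev_force = -_popt[1] / (_popt[0] * 1e13)
print('implied mass: %0.3f pg' % (_lev_force / _g * 1e15))   # -> ~100.000 pg
# ---------------------------------------------------------------------------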
def analyze_background(self, data_axes=[0,1,2], lpf=2500, \ diag=False, colormap='jet', \ file_inds=(0,10000), unwrap=False, \ harms_to_track = [1, 2, 3], \ ext_cant_drive=False, ext_cant_ind=0, \ plot_first_drive=False, sub_cant_phase=True, \ progstr=''): '''Loops over a list of file names, loads each file, diagonalizes, then plots the amplitude spectral density of any number of data or cantilever/electrode drive signals INPUTS: files, list of files names to extract data data_axes, list of pos_data axes to plot ax_labs, dict with labels for plotted axes diag, bool specifying whether to diagonalize unwrap, bool to unwrap phase of background harms, harmonics to label in ASD OUTPUTS: none, generates class attributes ''' files = bu.sort_files_by_timestamp(self.relevant_files) files = files[file_inds[0]:file_inds[1]] nfreq = len(harms_to_track) nax = len(data_axes) nfiles = len(files) colors = bu.get_color_map(nfiles, cmap=colormap) avg_asd = [[]] * nax diag_avg_asd = [[]] * nax Nasds = [[]] * nax amps = np.zeros((nax, nfreq, nfiles)) amp_errs = np.zeros((nax, nfreq, nfiles)) phases = np.zeros((nax, nfreq, nfiles)) phase_errs = np.zeros((nax, nfreq, nfiles)) temps = np.zeros((2, nfiles)) times = np.zeros(nfiles) print("Processing %i files..." % nfiles) for fil_ind, fil in enumerate(files): color = colors[fil_ind] # Display percent completion bu.progress_bar(fil_ind, nfiles, suffix=progstr) # Load data df = bu.DataFile() df.load(fil) try: temps[0, fil_ind] = df.temps[0] temps[1, fil_ind] = df.temps[1] except: temps[:, fil_ind] = 0.0 if fil_ind == 0: self.fsamp = df.fsamp init_time = df.time times[0] = 0.0 else: times[fil_ind] = (df.time - init_time).total_seconds() df.calibrate_stage_position() #df.high_pass_filter(fc=1) #df.detrend_poly() df.diagonalize(maxfreq=lpf, interpolate=False) Nsamp = len(df.pos_data[0]) if len(harms_to_track): harms = harms_to_track else: harms = [1] ginds, driveind, drive_freq, drive_ax = \ df.get_boolean_cantfilt(ext_cant_drive=ext_cant_drive, \ ext_cant_ind=ext_cant_ind, \ nharmonics=10, harms=harms) if fil_ind == 0: if plot_first_drive: df.plot_cant_asd(drive_ax) freqs = np.fft.rfftfreq(Nsamp, d=1.0 / df.fsamp) bin_sp = freqs[1] - freqs[0] datfft, diagdatfft, daterr, diagdaterr = \ df.get_datffts_and_errs(ginds, drive_freq, plot=False) harm_freqs = freqs[ginds] for axind, ax in enumerate(data_axes): print(ax, df.conv_facs[ax]) asd = np.abs( np.fft.rfft(df.pos_data[ax]) ) * \ bu.fft_norm(Nsamp, df.fsamp) * df.conv_facs[ax] diag_asd = np.abs( np.fft.rfft(df.diag_pos_data[ax]) ) * \ bu.fft_norm(Nsamp, df.fsamp) if not len(avg_asd[axind]): avg_asd[axind] = asd diag_avg_asd[axind] = diag_asd Nasds[axind] = 1 else: avg_asd[axind] += asd diag_avg_asd[axind] += diag_asd Nasds[axind] += 1 for freqind, freq in enumerate(harm_freqs): phase = np.angle(datfft[axind][freqind]) if sub_cant_phase: cantfft = np.fft.rfft(df.cant_data[drive_ax]) cantphase = np.angle(cantfft[driveind]) phases[axind][freqind][fil_ind] = phase - cantphase else: phases[axind][freqind][fil_ind] = phase sig_re = daterr[axind][freqind] / np.sqrt(2) sig_im = np.copy(sig_re) im = np.imag(datfft[axind][freqind]) re = np.real(datfft[axind][freqind]) phase_var = np.mean((im**2 * sig_re**2 + re**2 * sig_im**2) / \ (re**2 + im**2)**2) phase_errs[axind][freqind][fil_ind] = np.sqrt(phase_var) amps[axind][freqind][fil_ind] = np.abs(datfft[axind][freqind] * \ np.sqrt(bin_sp) * \ bu.fft_norm(Nsamp, df.fsamp)) amp_errs[axind][freqind][fil_ind] = daterr[axind][freqind] * \ np.sqrt(bin_sp) * \ bu.fft_norm(Nsamp, 
df.fsamp) for axind, ax in enumerate(data_axes): avg_asd[axind] *= (1.0 / Nasds[axind]) diag_avg_asd[axind] *= (1.0 / Nasds[axind]) self.freqs = freqs self.ginds = ginds self.avg_asd = avg_asd self.diag_avg_asd = diag_avg_asd self.amps = amps self.phases = phases self.amp_errs = amp_errs self.phase_errs = phase_errs self.temps = temps self.times = times
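# ---------------------------------------------------------------------------
# Hedged note (not original code): analyze_background() converts the per-bin
# complex amplitude uncertainty into a phase uncertainty with the first-order
# propagation for phi = arctan(Im/Re):
#     var(phi) = (Im^2 * sig_Re^2 + Re^2 * sig_Im^2) / (Re^2 + Im^2)^2
# Quick Monte-Carlo cross-check with made-up numbers:
import numpy as np

_re, _im = 3.0e-17, 1.0e-17          # made-up real/imag parts of one FFT bin
_sig_re = _sig_im = 2.0e-18          # made-up (equal) uncertainties

_phase_var = (_im**2 * _sig_re**2 + _re**2 * _sig_im**2) / (_re**2 + _im**2)**2

_rng = np.random.default_rng(0)
_samps = (_re + _rng.normal(0, _sig_re, 100000)) \
         + 1.j * (_im + _rng.normal(0, _sig_im, 100000))
print(np.sqrt(_phase_var), np.std(np.angle(_samps)))   # agree to a few percent
# ---------------------------------------------------------------------------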
def plot_many_spectra(files, data_axes=[0,1,2], cant_axes=[], elec_axes=[], other_axes=[], \ fb_axes=[], plot_power=False, diag=True, colormap='plasma', \ sort='time', file_inds=(0,10000)): '''Loops over a list of file names, loads each file, diagonalizes, then plots the amplitude spectral density of any number of data or cantilever/electrode drive signals INPUTS: files, list of files names to extract data data_axes, list of pos_data axes to plot cant_axes, list of cant_data axes to plot elec_axes, list of electrode_data axes to plot diag, boolean specifying whether to diagonalize OUTPUTS: none, plots stuff ''' if diag: dfig, daxarr = plt.subplots(len(data_axes),2,sharex=True,sharey=True, \ figsize=figsize) else: dfig, daxarr = plt.subplots(len(data_axes),1,sharex=True,sharey=True, \ figsize=figsize) dfig.suptitle('XYZ Data', fontsize=18) if len(cant_axes): cfig, caxarr = plt.subplots(len(data_axes), 1, sharex=True, sharey=True) if len(cant_axes) == 1: caxarr = [caxarr] cfig.suptitle('Attractor Data', fontsize=18) if len(elec_axes): efig, eaxarr = plt.subplots(len(elec_axes), 1, sharex=True, sharey=True) if len(elec_axes) == 1: eaxarr = [eaxarr] efig.suptitle('Electrode Data', fontsize=18) if len(other_axes): ofig, oaxarr = plt.subplots(len(other_axes), 1, sharex=True, sharey=True) if len(other_axes) == 1: oaxarr = [oaxarr] ofig.suptitle('Other Data', fontsize=18) if len(fb_axes): fbfig, fbaxarr = plt.subplots(len(fb_axes),1,sharex=True,sharey=True, \ figsize=figsize) if len(fb_axes) == 1: fbaxarr = [fbaxarr] fbfig.suptitle('Feedback Data', fontsize=18) if plot_power: pfig, paxarr = plt.subplots(2, 1, sharex=True, figsize=(6, 6)) pfig.suptitle('Power/Power Feedback Data', fontsize=18) kludge_fig, kludge_ax = plt.subplots(1, 1) files = files[file_inds[0]:file_inds[1]] if step10: files = files[::10] if invert_order: files = files[::-1] colors = bu.get_color_map(len(files), cmap=colormap) #colors = ['C0', 'C1', 'C2'] old_per = 0 print("Processing %i files..." 
% len(files)) for fil_ind, fil in enumerate(files): color = colors[fil_ind] # Display percent completion bu.progress_bar(fil_ind, len(files)) # Load data df = bu.DataFile() if new_trap: df.load_new(fil) else: df.load(fil) if len(other_axes): df.load_other_data() df.calibrate_stage_position() #df.high_pass_filter(fc=1) #df.detrend_poly() #plt.figure() #plt.plot(df.pos_data[0]) #plt.show() if cascade: cascade_scale = (cascade_fac)**fil_ind else: cascade_scale = 1.0 freqs = np.fft.rfftfreq(len(df.pos_data[0]), d=1.0 / df.fsamp) if diag: df.diagonalize(maxfreq=lpf, date=tfdate, plot=tf_plot) if fil_ind == 0 and len(cant_axes): drivepsd = np.abs(np.fft.rfft(df.cant_data[drive_ax])) driveind = np.argmax(drivepsd[1:]) + 1 drive_freq = freqs[driveind] for axind, ax in enumerate(data_axes): try: fac = cascade_scale * df.conv_facs[ax] # * (1.0 / 0.12e-12) except: fac = cascade_scale if fullNFFT: NFFT = len(df.pos_data[ax]) else: NFFT = userNFFT psd, freqs = mlab.psd(df.pos_data[ax], Fs=df.fsamp, \ NFFT=NFFT, window=window) norm = bu.fft_norm(df.nsamp, df.fsamp) new_freqs = np.fft.rfftfreq(df.nsamp, d=1.0 / df.fsamp) #fac = 1.0 kludge_fac = 1.0 #kludge_fac = 1.0 / np.sqrt(10) if diag: dpsd, dfreqs = mlab.psd(df.diag_pos_data[ax], Fs=df.fsamp, \ NFFT=NFFT, window=window) kludge_ax.loglog(freqs, np.sqrt(dpsd) *kludge_fac, color='C'+str(axind), \ label=posdic[axind]) kludge_ax.set_ylabel( '$\sqrt{\mathrm{PSD}}$ $[\mathrm{N}/\sqrt{\mathrm{Hz}}]$') kludge_ax.set_xlabel('Frequency [Hz]') # daxarr[axind,0].loglog(new_freqs, fac*norm*np.abs(np.fft.rfft(df.pos_data[ax]))*kludge_fac, color='k', label='np.fft with manual normalization') daxarr[axind, 0].loglog(freqs, np.sqrt(psd) * fac * kludge_fac, color=color, label=df.fname) #'mlab.psd') daxarr[axind, 0].grid(alpha=0.5) daxarr[axind, 1].loglog( new_freqs, norm * np.abs(np.fft.rfft(df.diag_pos_data[ax])) * kludge_fac, color='k') daxarr[axind, 1].loglog(freqs, np.sqrt(dpsd) * kludge_fac, color=color) daxarr[axind, 1].grid(alpha=0.5) daxarr[axind, 0].set_ylabel( '$\sqrt{\mathrm{PSD}}$ $[\mathrm{N}/\sqrt{\mathrm{Hz}}]$') if ax == data_axes[-1]: daxarr[axind, 0].set_xlabel('Frequency [Hz]') daxarr[axind, 1].set_xlabel('Frequency [Hz]') else: # daxarr[axind].loglog(new_freqs, norm*np.abs(np.fft.rfft(df.pos_data[ax])), color='k', label='np.fft with manual normalization') daxarr[axind].loglog(freqs, np.sqrt(psd) * fac, color=color, label=df.fname) #'mlab.psd') daxarr[axind].grid(alpha=0.5) daxarr[axind].set_ylabel( '$\\sqrt{\mathrm{PSD}}$ $[\\mathrm{Arb}/\\sqrt{\mathrm{Hz}}]$' ) #daxarr[axind].set_ylabel('$\sqrt{\mathrm{PSD}}$ $[\mathrm{N}/\sqrt{\mathrm{Hz}}]$') if ax == data_axes[-1]: daxarr[axind].set_xlabel('Frequency [Hz]') if len(fb_axes): for axind, ax in enumerate(fb_axes): fb_psd, freqs = mlab.psd(df.pos_fb[ax], Fs=df.fsamp, \ NFFT=NFFT, window=window) fbaxarr[axind].loglog(freqs, np.sqrt(fb_psd) * fac, color=color) fbaxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$') if len(cant_axes): for axind, ax in enumerate(cant_axes): psd, freqs = mlab.psd(df.cant_data[ax], Fs=df.fsamp, \ NFFT=NFFT, window=window) caxarr[axind].loglog(freqs, np.sqrt(psd), color=color) caxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$') if len(elec_axes): for axind, ax in enumerate(elec_axes): psd, freqs = mlab.psd(df.electrode_data[ax], Fs=df.fsamp, \ NFFT=NFFT, window=window) eaxarr[axind].loglog(freqs, np.sqrt(psd), color=color) eaxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$') if len(other_axes): for axind, ax in enumerate(other_axes): #ax = ax - 3 psd, freqs = 
mlab.psd(df.other_data[ax], Fs=df.fsamp, \ NFFT=NFFT, window=window) oaxarr[axind].loglog(freqs, np.sqrt(psd), color=color) oaxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$') if plot_power: psd, freqs = mlab.psd(df.power, Fs=df.fsamp, \ NFFT=NFFT, window=window) psd_fb, freqs_fb = mlab.psd(df.power_fb, Fs=df.fsamp, \ NFFT=NFFT, window=window) paxarr[0].loglog(freqs, np.sqrt(psd), color=color) paxarr[1].loglog(freqs_fb, np.sqrt(psd_fb), color=color) for axind in [0, 1]: paxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$') if filename_labels: daxarr[0].legend(fontsize=10) if len(fb_axes): fbaxarr[0].legend(fontsize=10) #daxarr[0].set_xlim(0.5, 25000) if diag: derp_ax = daxarr[0, 0] else: derp_ax = daxarr[0] # derp_ax.legend(fontsize=10) if len(ylim): derp_ax.set_ylim(*ylim) kludge_ax.set_ylim(*ylim) if len(xlim): derp_ax.set_xlim(*xlim) kludge_ax.set_xlim(1, 500) dfig.tight_layout() dfig.subplots_adjust(top=0.91) kludge_ax.grid() kludge_ax.legend() kludge_fig.tight_layout() if plot_power: paxarr[-1].set_xlabel('Frequency [Hz]') pfig.tight_layout() pfig.subplots_adjust(top=0.91) if len(cant_axes): caxarr[-1].set_xlabel('Frequency [Hz]') cfig.tight_layout() cfig.subplots_adjust(top=0.91) if len(elec_axes): eaxarr[-1].set_xlabel('Frequency [Hz]') efig.tight_layout() efig.subplots_adjust(top=0.91) if len(other_axes): oaxarr[-1].set_xlabel('Frequency [Hz]') ofig.tight_layout() ofig.subplots_adjust(top=0.91) if len(fb_axes): fbaxarr[-1].set_xlabel('Frequency [Hz]') fbfig.tight_layout() fbfig.subplots_adjust(top=0.91) if savefigs: plt.savefig(title_pre + '.png') daxarr[0].set_xlim(2000, 25000) plt.tight_layout() plt.savefig(title_pre + '_zoomhf.png') daxarr[0].set_xlim(1, 80) plt.tight_layout() plt.savefig(title_pre + '_zoomlf.png') daxarr[0].set_xlim(0.5, 25000) if not savefigs: plt.show()
def fit_monochromatic_line(files, data_axes=[0,1], drive_axes=[6], diag=True, \ minfreq=2000, maxfreq=8000, pickfirst=True, \ colormap='jet', sort='time', file_inds=(0,10000), \ dirlengths=[]): '''Loops over a list of file names, loads each file, diagonalizes, then plots the amplitude spectral density of any number of data or cantilever/electrode drive signals INPUTS: files, list of files names to extract data data_axes, list of pos_data axes to plot diag, boolean specifying whether to diagonalize colormap, matplotlib colormap string for sort sort, sorting key word file_inds, indices for min and max file OUTPUTS: none, plots stuff ''' files = [(os.stat(path), path) for path in files] files = [(stat.st_ctime, path) for stat, path in files] files.sort(key = lambda x: (x[0])) files = [obj[1] for obj in files] files = files[file_inds[0]:file_inds[1]] if step10: files = files[::10] if invert_order: files = files[::-1] times = [] peak_pos = [] drive_pos = [] errs = [] drive_errs = [] colors = bu.get_color_map(len(files), cmap=colormap) bad_inds = [] oldtime = 0 old_per = 0 print(files[-1]) print("Processing %i files..." % len(files)) print("Percent complete: ") for fil_ind, fil in enumerate(files): # Display percent completion per = int(100. * float(fil_ind) / float(len(files)) ) if per > old_per: print(old_per, end=' ') sys.stdout.flush() old_per = per if fil in computed_freq_dict and not recompute: soln = computed_freq_dict[fil] times.append(soln[0]) peak_pos.append(soln[1]) errs.append(soln[2]) drive_pos.append(soln[3]) drive_errs.append(soln[4]) old = soln[1] old_drive = soln[3] continue else: newsoln = [0, 0, 0, 0, 0] # Load data df = bu.DataFile() try: df.load(fil) except: continue if len(drive_axes) > 0: df.load_other_data() ctime = time.mktime(df.time.timetuple()) times.append(ctime) newsoln[0] = ctime cpos = [] errvals = [] for axind, ax in enumerate(data_axes): #fac = df.conv_facs[ax] if fullNFFT: NFFT = len(df.pos_data[ax]) else: NFFT = userNFFT psd, freqs = mlab.psd(df.pos_data[ax], Fs=df.fsamp, NFFT=NFFT) fitbool = (freqs > minfreq) * (freqs < maxfreq) maxval = np.max(psd[fitbool]) delta = delta_per*maxval peaks = pdet.peakdetect(psd[fitbool], lookahead=lookahead, delta=delta) pos_peaks = peaks[0] neg_peaks = peaks[1] if plot_peaks: for peakind, pos_peak in enumerate(pos_peaks): try: neg_peak = neg_peaks[peakind] except: continue plt.loglog(freqs[fitbool][pos_peak[0]], pos_peak[1], 'x', color='r') plt.loglog(freqs[fitbool][neg_peak[0]], neg_peak[1], 'x', color='b') plt.loglog(freqs[fitbool], psd[fitbool]) plt.show() np_pos_peaks = np.array(pos_peaks) try: if fil_ind == 0: ucutoff = 100000 lcutoff = 0 else: ucutoff = (1.0 + percent_band) * old lcutoff = (1.0 - percent_band) * old vals = [] for peakind, peak in enumerate(pos_peaks): newval = freqs[fitbool][peak[0]] if newval > ucutoff: continue if newval < lcutoff: continue vals.append(newval) cpos.append(np.mean(vals)) for val in vals: errvals.append(val) except: print('FAILED') continue drive_cpos = [] drive_errvals = [] for axind, ax in enumerate(drive_axes): ax = ax - 3 if fullNFFT: NFFT = len(df.other_data[ax]) else: NFFT = userNFFT psd, freqs = mlab.psd(df.other_data[ax], Fs=df.fsamp, NFFT=NFFT) fitbool = (freqs > minfreq) * (freqs < maxfreq) maxval = np.max(psd[fitbool]) delta = delta_per*maxval peaks = pdet.peakdetect(psd[fitbool], lookahead=lookahead, delta=delta) pos_peaks = peaks[0] neg_peaks = peaks[1] np_pos_peaks = np.array(pos_peaks) if plot_drive_peaks: for peakind, pos_peak in enumerate(pos_peaks): try: neg_peak = 
neg_peaks[peakind] except: continue plt.loglog(freqs[fitbool][pos_peak[0]], pos_peak[1], 'x', color='r') plt.loglog(freqs[fitbool][neg_peak[0]], neg_peak[1], 'x', color='b') plt.loglog(freqs[fitbool], psd[fitbool]) plt.show() try: maxind = np.argmax(np_pos_peaks[:,1]) maxpeak = pos_peaks[maxind] vals = [] if maxpeak[1] < np.mean(psd[fitbool]) * (1.0 / drive_thresh): vals.append(np.nan) else: vals.append(freqs[fitbool][maxpeak[0]]) #for peakind, peak in enumerate(pos_peaks): # if peak[0] < 1e-2: # continue # newval = freqs[fitbool][peak[0]] # if newval > ucutoff: # continue # if newval < lcutoff: # continue # # vals.append(newval) drive_cpos.append(np.mean(vals)) for val in vals: drive_errvals.append(val) except: print('FAILED DRIVE ANALYSIS') continue freqval = np.mean(cpos) errval = np.std(errvals) drive_freqval = np.mean(drive_cpos) drive_errval = np.std(drive_errvals) if len(cpos) < 0: bad_inds.append(fil_ind) else: peak_pos.append(freqval) drive_pos.append(drive_freqval) errs.append(errval) drive_errs.append(drive_errval) old = np.mean(cpos) old_drive = np.mean(drive_cpos) newsoln[1] = freqval newsoln[2] = errval newsoln[3] = drive_freqval newsoln[4] = drive_errval oldtime = ctime computed_freq_dict[fil] = newsoln times2 = np.delete(times, bad_inds) times2 = times2 - np.min(times) peak_pos = np.array(peak_pos) drive_pos = np.array(drive_pos) sortinds = np.argsort(times2) times2 = times2[sortinds] peak_pos = peak_pos[sortinds] drive_pos = drive_pos[sortinds] times2 = np.array(times2) peak_pos = np.array(peak_pos) drive_pos = np.array(drive_pos) bad_inds = np.array(bad_inds) max_hours = np.max( times2*(1.0/3600) ) plot_ind = np.argmin(np.abs(times2*(1.0/3600) - (max_hours - plot_lastn_hours) ) ) if not plot_together: fig, ax = plt.subplots(2,1,figsize=(10,10), sharex=True, sharey=True) elif plot_together: fig, ax = plt.subplots(1,1,figsize=(10,5), sharex=True, sharey=True) ax = [ax] ax[0].errorbar(times2[plot_ind:]*(1.0/3600), peak_pos[plot_ind:], yerr=errs[plot_ind:], fmt='o', \ color='C0', label='Bead Rotation') if plot_together: ax[0].errorbar(times2[plot_ind:]*(1.0/3600), drive_pos[plot_ind:], \ yerr=drive_errs[plot_ind:], fmt='o', alpha=0.15, \ color='C1', label='Drive') elif not plot_together: ax[1].errorbar(times2[plot_ind:]*(1.0/3600), drive_pos[plot_ind:], \ yerr=drive_errs[plot_ind:], fmt='o', color='C1', label='Drive') if logtime: ax[0].set_xscale("log") if not plot_together: ax[1].set_xscale("log") if not plot_together: ax[1].set_xlabel('Elapsed Time [hrs]', fontsize=14) elif plot_together: ax[0].set_xlabel('Elapsed Time [hrs]', fontsize=14) ax[0].set_ylabel('Rotation Frequency [Hz]', fontsize=14) if not plot_together: ax[1].set_ylabel('Rotation Frequency [Hz]', fontsize=14) plt.setp(ax[0].get_xticklabels(), fontsize=14, visible = True) plt.setp(ax[0].get_yticklabels(), fontsize=14, visible = True) if not plot_together: plt.setp(ax[1].get_xticklabels(), fontsize=14, visible = True) plt.setp(ax[1].get_yticklabels(), fontsize=14, visible = True) ax[0].yaxis.grid(which='major', color='k', linestyle='--', linewidth=0.5) ax[0].xaxis.grid(which='major', color='k', linestyle='--', linewidth=0.5) if not plot_together: ax[1].yaxis.grid(which='major', color='k', linestyle='--', linewidth=0.5) ax[1].xaxis.grid(which='major', color='k', linestyle='--', linewidth=0.5) label_keys = list(dirmarkers.keys()) plot_first = max_hours <= plot_lastn_hours if field_on_at_beginning and plot_first: ax[0].axvline(x=times2[0], lw=2, label='Field On', color='r', ls='-') if len(dirlengths) != 0: 
oldlength = 0 for dirind, length in enumerate(dirlengths): oldlength += length tlength = oldlength - np.sum(bad_inds < oldlength) if tlength < plot_ind: continue if dirind+2 in label_keys: ax[0].axvline(x=times2[tlength]*(1.0/3600), lw=2, label=dirmarkers[dirind+2][0], \ color=dirmarkers[dirind+2][1], ls=dirmarkers[dirind+2][2]) ax[0].legend() plt.tight_layout() pickle.dump(computed_freq_dict, open(computed_freq_path, 'wb')) plt.show()
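# ---------------------------------------------------------------------------
# Hedged alternative sketch (not the repo's method): fit_monochromatic_line()
# locates the rotation line with pdet.peakdetect.  A similar band-limited
# search can be written with scipy.signal.find_peaks; the thresholding here
# loosely mirrors the delta_per logic above, but the details are assumptions.
import numpy as np
import scipy.signal as _spsig

def find_line_freq_sketch(psd, freqs, minfreq=2000.0, maxfreq=8000.0, delta_per=0.05):
    '''Return the frequency of the most prominent PSD peak inside a band,
       or NaN if no peak clears the prominence threshold.'''
    fitbool = (freqs > minfreq) * (freqs < maxfreq)
    band_psd = psd[fitbool]
    peaks, props = _spsig.find_peaks(band_psd, prominence=delta_per*np.max(band_psd))
    if not len(peaks):
        return np.nan
    best = peaks[np.argmax(band_psd[peaks])]
    return freqs[fitbool][best]
# ---------------------------------------------------------------------------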
def get_profile(self, fname, nbins=300, plot_raw_data=False):
    df = bu.DataFile()
    df.load_new(fname)
    df.calibrate_stage_position()

    dt = 1.0 / df.fsamp

    if '_Y_' in fname:
        stage_column = 1
        if 'left' in fname:
            sign = -1.0
        elif 'right' in fname:
            sign = 1.0
        else:
            sign = -1.0
    else:
        stage_column = 0
        sign = 1.0

    if plot_raw_data:
        plt.plot(np.sum(df.amp[:4], axis=0))
        plt.figure()
        for j in range(3):
            plt.plot(df.cant_data[j, :], label=str(j))
        plt.legend()
        plt.show()

    h = np.mean(df.cant_data[2, :])
    h_round = bu.round_sig(h, sig=2)
    if h_round < 10.0:
        h_round = bu.round_sig(h_round, sig=1)

    if use_quad_sum:
        sig = np.sum(df.amp[:4], axis=0)
    else:
        sig_hf = df.other_data
        sig_ds = signal.resample(sig_hf, len(df.cant_data[stage_column]), window=None)
        sig = -1.0 * sig_ds + np.max(sig_ds)

    # plt.plot(sig)
    # plt.show()
    # input()

    proft = np.gradient(sig)

    dir_sign = np.sign(np.gradient(df.cant_data[stage_column])) * sign

    xvec = df.cant_data[stage_column, :]
    yvec = (proft - proft * dir_sign) * 0.5 - (proft + proft * dir_sign) * 0.5

    b_int, y_int, e_int = bu.spatial_bin(xvec, sig, dt, nbins=nbins, \
                                         nharmonics=300, \
                                         add_mean=True, plot=False)

    b, y, e = bu.spatial_bin(xvec, yvec, dt, nbins=nbins, nharmonics=300, \
                             add_mean=True, plot=False)

    self.profile = [b, y, e]
    self.integral = [b_int, y_int, e_int]
    self.cant_height = h_round

    self.prof_dx = np.abs(self.profile[0][1] - self.profile[0][0])
    self.int_dx = np.abs(self.integral[0][1] - self.integral[0][0])

    result = {}
    result['profile'] = self.profile
    result['integral'] = self.integral
    result['height'] = self.cant_height

    return result
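# ---------------------------------------------------------------------------
# Hedged sketch (not original code): get_profile() recovers a beam profile by
# differentiating the measured transmission with respect to stage position,
# the same trick as a textbook knife-edge scan where an erf-shaped integral
# differentiates back to the Gaussian beam.  The waist and positions below
# are illustration values, not the repo's optics.
import numpy as np
from scipy import special

_x = np.linspace(-50.0, 50.0, 2001)                  # stage position [um]
_w = 10.0                                            # 1/e^2 radius [um], made up
_integral = 0.5 * (1.0 + special.erf(np.sqrt(2.0) * _x / _w))   # transmitted power

_profile = np.gradient(_integral, _x)                # d(power)/dx -> Gaussian

print(_x[np.argmax(_profile)])                       # beam center, ~ 0.0
# ---------------------------------------------------------------------------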
def get_force_curve_dictionary(files, ax1='x', ax2='z', fullax1=True, fullax2=True, \
                               ax1val=0, ax2val=0, spacing=1e-6, diag=False):
    '''Loops over a list of file names, loads each file, diagonalizes,
       computes force vs. position, then discards the raw data to avoid
       filling memory. Returns the result as a nested dictionary with the
       first level of keys the ax1 positions and the second level of keys
       the ax2 positions.

       INPUTS: files, list of file names to extract data
               ax1, first axis in output array
               ax2, second axis in output array
               fullax1, boolean specifying to loop over all values of ax1
               fullax2, boolean specifying to loop over all values of ax2
               ax1val, if not fullax1 -> value to keep
               ax2val, if not fullax2 -> value to keep
               spacing, spacing around ax1val or ax2val to keep
               diag, boolean specifying whether to diagonalize

       OUTPUTS: outdic, output dictionary with the following indexing
                        outdic[ax1pos][ax2pos][resp(0,1,2)][bins(0) or dat(1)]
                        ax1pos and ax2pos are dictionary keys, resp and bins/dat
                        are array indices (native python lists)
                diagoutdic, if diag=True second dictionary with diagonalized data
    '''
    # NOTE: relies on module-level configuration (lpf, nbins, nharmonics, width,
    # fakedrive, fakefreq, fakeamp, ax2_toplot) and module-level diagnostic
    # containers (test_posvec, test_arr, etc.) defined elsewhere in this script.

    if len(files) == 0:
        print("No Files Found!!")
        return

    ### Do initial looping over files to concatenate data at the same
    ### heights and separations
    force_curves = {}
    if diag:
        diag_force_curves = {}

    old_per = 0
    print()
    print(os.path.dirname(files[0]))
    print("Processing %i files" % len(files))
    print("Percent complete: ")
    for fil_ind, fil in enumerate(files):
        bu.progress_bar(fil_ind, len(files))

        # Load data
        df = bu.DataFile()
        df.load(fil)
        df.calibrate_stage_position()

        # Pick out height and separation
        ax1pos = df.stage_settings[ax1 + ' DC']
        ax2pos = df.stage_settings[ax2 + ' DC']

        # If subselection is desired, do that now
        if not fullax1:
            dif1 = np.abs(ax1pos - ax1val)
            if dif1 > spacing:
                continue
        if not fullax2:
            dif2 = np.abs(ax2pos - ax2val)
            if dif2 > spacing:
                continue

        if diag:
            df.diagonalize(maxfreq=lpf)

        df.get_force_v_pos(verbose=False, nbins=nbins, nharmonics=nharmonics, \
                           width=width, fakedrive=fakedrive, fakefreq=fakefreq, fakeamp=fakeamp)

        # Add the current data to the output dictionary
        if ax1pos not in list(force_curves.keys()):
            force_curves[ax1pos] = {}
            if diag:
                diag_force_curves[ax1pos] = {}
        if ax2pos not in list(force_curves[ax1pos].keys()):
            # If this height and separation were not found, add them to the dictionary
            force_curves[ax1pos][ax2pos] = [[], [], []]
            if diag:
                diag_force_curves[ax1pos][ax2pos] = [[], [], []]
            for resp in [0, 1, 2]:
                force_curves[ax1pos][ax2pos][resp] = \
                    [df.binned_data[resp][0], \
                     df.binned_data[resp][1] * df.conv_facs[resp]]
                if diag:
                    diag_force_curves[ax1pos][ax2pos][resp] = \
                        [df.diag_binned_data[resp][0], \
                         df.diag_binned_data[resp][1]]
        else:
            for resp in [0, 1, 2]:
                # If this combination of height and separation has already been
                # recorded, concatenate and sort the data from multiple files
                old_bins = force_curves[ax1pos][ax2pos][resp][0]
                old_dat = force_curves[ax1pos][ax2pos][resp][1]
                new_bins = np.hstack((old_bins, df.binned_data[resp][0]))
                new_dat = np.hstack((old_dat, df.binned_data[resp][1] * df.conv_facs[resp]))

                sort_inds = np.argsort(new_bins)

                force_curves[ax1pos][ax2pos][resp] = \
                    [new_bins[sort_inds], new_dat[sort_inds]]

                if diag:
                    old_diag_bins = diag_force_curves[ax1pos][ax2pos][resp][0]
                    old_diag_dat = diag_force_curves[ax1pos][ax2pos][resp][1]
                    new_diag_bins = np.hstack((old_diag_bins, df.diag_binned_data[resp][0]))
                    new_diag_dat = np.hstack((old_diag_dat, df.diag_binned_data[resp][1]))

                    diag_sort_inds = np.argsort(new_diag_bins)

                    diag_force_curves[ax1pos][ax2pos][resp] = \
                        [new_diag_bins[diag_sort_inds], new_diag_dat[diag_sort_inds]]

    ax1_keys = list(force_curves.keys())
    ax2_keys = list(force_curves[ax1_keys[0]].keys())

    print()
    print('Averaging files and building standard deviations')
    sys.stdout.flush()

    # Pick out one (ax1, ax2) combination to save into the module-level
    # diagnostic containers below
    test_ax1 = 38
    max_ax1 = ax1_keys[np.argmin(np.abs(test_ax1 - np.array(ax1_keys)))]
    ax2pos = ax2_keys[np.argmin(np.abs(ax2_toplot - np.array(ax2_keys)))]

    ax1_keys.sort()
    ax2_keys.sort()

    for ax1_k in ax1_keys:
        for ax2_k in ax2_keys:
            for resp in [0, 1, 2]:
                old_bins = force_curves[ax1_k][ax2_k][resp][0]
                old_dat = force_curves[ax1_k][ax2_k][resp][1]

                new_bins = np.linspace(np.min(old_bins) + 1e-9, np.max(old_bins) - 1e-9, nbins)
                bin_sp = new_bins[1] - new_bins[0]

                int_bins = []
                int_dat = []
                num_files = int(np.sum(np.abs(old_bins - old_bins[0]) <= 0.2 * bin_sp))

                # Rebin by averaging all points within half a bin spacing of
                # each new bin center; the scatter gives the per-bin error
                new_dat = np.zeros_like(new_bins)
                new_errs = np.zeros_like(new_bins)
                for binind, binval in enumerate(new_bins):
                    inds = np.abs(old_bins - binval) <= 0.5 * bin_sp
                    new_dat[binind] = np.mean(old_dat[inds])
                    new_errs[binind] = np.std(old_dat[inds])

                if ax1_k == max_ax1 and ax2_k == ax2pos:
                    test_posvec[resp] = old_bins
                    test_posvec_int[resp] = int_bins
                    test_posvec_final[resp] = new_bins
                    test_arr[resp] = old_dat
                    test_arr_int[resp] = int_dat
                    test_arr_final[resp] = new_dat

                force_curves[ax1_k][ax2_k][resp] = [new_bins, new_dat, new_errs]

                if diag:
                    old_diag_bins = diag_force_curves[ax1_k][ax2_k][resp][0]
                    old_diag_dat = diag_force_curves[ax1_k][ax2_k][resp][1]

                    if ax1_k == max_ax1 and ax2_k == ax2pos:
                        diag_test_posvec[resp] = old_diag_bins
                        diag_test_arr[resp] = old_diag_dat

                    new_diag_bins = np.linspace(np.min(old_diag_bins)+1e-9, \
                                                np.max(old_diag_bins)-1e-9, nbins)
                    diag_bin_sp = new_diag_bins[1] - new_diag_bins[0]

                    new_diag_dat = np.zeros_like(new_diag_bins)
                    new_diag_errs = np.zeros_like(new_diag_bins)
                    for binind, binval in enumerate(new_diag_bins):
                        inds = np.abs(old_diag_bins - binval) <= 0.5 * diag_bin_sp
                        new_diag_dat[binind] = np.mean(old_diag_dat[inds])
                        new_diag_errs[binind] = np.std(old_diag_dat[inds])

                    diag_force_curves[ax1_k][ax2_k][resp] = \
                        [new_diag_bins, new_diag_dat, new_diag_errs]

    if diag:
        return force_curves, diag_force_curves
    else:
        return force_curves
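### Usage sketch (not part of the original module): build the nested dictionary
### for one directory and plot a single averaged force curve. The directory
### path and the choice of keys are illustrative placeholders, the module-level
### binning globals (nbins, lpf, etc.) are assumed to be defined, and
### bu.find_all_fnames is unpacked as (files, lengths) following the plotting
### helper at the top of this module.
example_files, _ = bu.find_all_fnames('/data/20180618/bead1/grav_data/example_dir')
example_curves = get_force_curve_dictionary(example_files, ax1='x', ax2='z', diag=False)

example_ax1 = sorted(example_curves.keys())[-1]
example_ax2 = sorted(example_curves[example_ax1].keys())[0]

bins, dat, errs = example_curves[example_ax1][example_ax2][0]   # x response
plt.errorbar(bins, dat * 1e15, errs * 1e15, fmt='.')
plt.xlabel('Cantilever position [um]')
plt.ylabel('Force [fN]')
plt.show()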
def weigh_bead(files, pcol=0, colormap='plasma', sort='time', file_inds=(0, 10000)):
    '''Loops over a list of file names, loads each file, and fits the z
       response at the electrode-drive harmonics to a driven harmonic
       oscillator in order to extract the mass of the bead.

       INPUTS: files, list of file names to extract data
               pcol, unused
               colormap, matplotlib colormap string for coloring by file
               sort, unused (files are always sorted by creation time)
               file_inds, indices of first and last file to process

       OUTPUTS: none, prints the implied mass and plots the fit
    '''
    # Sort files by creation time
    files = [(os.stat(path), path) for path in files]
    files = [(stat.st_ctime, path) for stat, path in files]
    files.sort(key=lambda x: (x[0]))
    files = [obj[1] for obj in files]

    files = files[file_inds[0]:file_inds[1]]
    if step10:
        files = files[::10]
    if invert_order:
        files = files[::-1]

    date = re.search(r"\d{8,}", files[0])[0]
    charge_dat = np.load(open('/calibrations/charges/' + date + '.charge', 'rb'))
    q_bead = -1.0 * charge_dat[0] * constants.elementary_charge
    # q_bead = -25.0 * 1.602e-19

    nfiles = len(files)
    colors = bu.get_color_map(nfiles, cmap=colormap)

    avg_fft = []

    print("Processing %i files..." % nfiles)
    for fil_ind, fil in enumerate(files):
        color = colors[fil_ind]

        bu.progress_bar(fil_ind, nfiles)

        # Load data
        df = bu.DataFile()
        df.load(fil)

        df.calibrate_stage_position()
        df.calibrate_phase()

        freqs = np.fft.rfftfreq(df.nsamp, d=1.0 / df.fsamp)

        fft = np.fft.rfft(df.zcal) * bu.fft_norm(df.nsamp, df.fsamp) \
              * np.sqrt(freqs[1] - freqs[0])
        fft2 = np.fft.rfft(df.phase[4]) * bu.fft_norm(df.nsamp, df.fsamp) \
               * np.sqrt(freqs[1] - freqs[0])
        fftd = np.fft.rfft(df.zcal - np.pi * df.phase[4]) * bu.fft_norm(df.nsamp, df.fsamp) \
               * np.sqrt(freqs[1] - freqs[0])

        drive_fft = np.fft.rfft(df.electrode_data[1])

        # Select the drive harmonics between 2 and 300 Hz, plus a set of
        # nearby "noise" bins offset by 5 frequency bins
        inds = np.abs(drive_fft) > 1e4
        inds *= (freqs > 2.0) * (freqs < 300.0)
        inds = np.arange(len(inds))[inds]
        ninds = inds + 5

        drive_amp = np.abs(drive_fft[inds][0] * bu.fft_norm(df.nsamp, df.fsamp) \
                           * np.sqrt(freqs[1] - freqs[0]))

        if not len(avg_fft):
            avg_fft = fft
            avg_drive_fft = drive_fft
            ratio = fft[inds] / drive_fft[inds]
        else:
            avg_fft += fft
            avg_drive_fft += drive_fft
            ratio += fft[inds] / drive_fft[inds]

    fac = bu.fft_norm(df.nsamp, df.fsamp) * np.sqrt(freqs[1] - freqs[0])

    avg_fft *= (1.0 / nfiles)
    avg_drive_fft *= (1.0 / nfiles)

    # Convert the phase response to displacement: lambda/2 per 2*pi of phase.
    # NOTE: resp and noise use the fft of the last file in the loop above.
    resp = fft[inds] * (1064.0e-9 / 2.0) * (1.0 / (2.0 * np.pi))
    noise = fft[ninds] * (1064.0e-9 / 2.0) * (1.0 / (2.0 * np.pi))

    drive_noise = np.abs(np.median(avg_drive_fft[ninds] * fac))

    resp_sc = resp * 1e9   # put resp in units of nm
    noise_sc = noise * 1e9

    def amp_sc(f, d_accel, f0, g):
        return np.abs(harmonic_osc(f, d_accel, f0, g)) * 1e9

    def phase_sc(f, d_accel, f0, g):
        return np.angle(harmonic_osc(f, d_accel, f0, g))

    popt, pcov = opti.curve_fit(amp_sc, freqs[inds], np.abs(resp_sc), sigma=np.abs(noise_sc), \
                                absolute_sigma=True, p0=[1e-3, 160, 750], maxfev=10000)
    #popt2, pcov2 = opti.curve_fit(phase_sc, freqs[inds], np.angle(resp_sc), p0=[1e-3, 160, 750])

    print(popt)
    print(pcov)

    plt.figure()
    plt.errorbar(freqs[inds], np.abs(resp), np.abs(noise), fmt='.', ms=10, lw=2)
    plt.loglog(freqs, np.abs(harmonic_osc(freqs, *popt)))
    plt.xlabel('Frequency [Hz]', fontsize=16)
    plt.ylabel('Z Amplitude [m]', fontsize=16)

    # Electric force per unit drive voltage: E = V / (4 mm plate separation)
    force = (drive_amp / (4.0e-3)) * q_bead
    mass = np.abs(popt[0]**(-1) * force) * 10**12   # in ng

    fit_err = np.sqrt(pcov[0, 0] / popt[0])
    charge_err = 0.1
    drive_err = drive_noise / drive_amp
    print(drive_err)

    mass_err = np.sqrt((fit_err)**2 + (charge_err)**2 + (drive_err)**2) * mass

    print('%0.3f ng, %0.2f e^-, %0.1f V' % (mass, q_bead * (1.602e-19)**(-1), drive_amp))
    print('%0.6f ng' % (mass_err))

    plt.tight_layout()
    plt.show()
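### The mass fit above relies on harmonic_osc(), which is not defined in this
### file. Below is a minimal sketch of the assumed model: the complex response
### of a damped, driven harmonic oscillator, parameterized by a drive
### acceleration so that mass = force / popt[0], consistent with how popt[0]
### is used above. The exact definition in the original codebase may differ.
def harmonic_osc(f, d_accel, f0, g):
    '''Complex displacement response of a damped, driven harmonic oscillator.

       f, drive frequency [Hz]
       d_accel, drive acceleration amplitude [m/s^2]
       f0, resonance frequency [Hz]
       g, damping rate [Hz]
    '''
    omega = 2.0 * np.pi * f
    omega0 = 2.0 * np.pi * f0
    gamma = 2.0 * np.pi * g
    return d_accel / (omega0**2 - omega**2 + 1.0j * gamma * omega)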
def weigh_bead(files, colormap='jet', sort='time', file_inds=(0, 10000)):
    '''Loops over a list of file names, loads each file, fits the z response
       at the electrode-drive harmonics to a driven harmonic oscillator, and
       tracks the implied mass and charge as a function of time.

       INPUTS: files, list of file names to extract data
               colormap, matplotlib colormap string
               sort, unused (files are always sorted by creation time)
               file_inds, indices of first and last file to process

       OUTPUTS: none, plots mass vs. time, a mass histogram with a Gaussian
                fit, and mass vs. mean z position
    '''
    # Sort files by creation time
    files = [(os.stat(path), path) for path in files]
    files = [(stat.st_ctime, path) for stat, path in files]
    files.sort(key=lambda x: (x[0]))
    files = [obj[1] for obj in files]

    #files = files[file_inds[0]:file_inds[1]]
    #files = [files[0], files[-1]]
    #files = files[::10]

    date = files[0].split('/')[2]
    charge_dat = np.load(open('/calibrations/charges/' + date + '.charge', 'rb'))
    #q_bead = -1.0 * charge_dat[0] * 1.602e-19
    q_bead = 25.0 * 1.602e-19

    nfiles = len(files)
    colors = bu.get_color_map(nfiles, cmap=colormap)

    avg_fft = []
    mass_arr = []
    times = []
    q_arr = []

    print("Processing %i files..." % nfiles)
    for fil_ind, fil in enumerate(files):
        date = fil.split('/')[2]
        charge_dat = np.load(open('/calibrations/charges/' + date + '.charge', 'rb'))
        q_bead = -1.0 * charge_dat[0] * 1.602e-19

        color = colors[fil_ind]
        bu.progress_bar(fil_ind, nfiles)

        # Load data
        df = bu.DataFile()
        try:
            df.load(fil)
        except Exception:
            continue

        df.calibrate_stage_position()
        df.calibrate_phase()
        #df.diagonalize()

        if fil_ind == 0:
            init_phi = np.mean(df.zcal)

        freqs = np.fft.rfftfreq(df.nsamp, d=1.0 / df.fsamp)
        fac = bu.fft_norm(df.nsamp, df.fsamp) * np.sqrt(freqs[1] - freqs[0])

        fft = np.fft.rfft(df.zcal) * fac
        fft2 = np.fft.rfft(df.phase[4]) * fac
        fftd = np.fft.rfft(df.zcal - np.pi * df.phase[4]) * fac

        drive_fft = np.fft.rfft(df.electrode_data[1])

        # Select the drive harmonics between 2 and 300 Hz, plus nearby noise bins
        inds = np.abs(drive_fft) > 1e4
        inds *= (freqs > 2.0) * (freqs < 300.0)
        inds = np.arange(len(inds))[inds]
        ninds = inds + 5

        drive_amp = np.abs(drive_fft[inds][0] * fac)

        # Convert the phase response to displacement: lambda/2 per 2*pi of phase
        resp = fft[inds] * (1064.0e-9 / 2.0) * (1.0 / (2.0 * np.pi))
        noise = fft[ninds] * (1064.0e-9 / 2.0) * (1.0 / (2.0 * np.pi))
        drive_noise = np.abs(np.median(drive_fft[ninds] * fac))

        resp_sc = resp * 1e9   # put resp in units of nm
        noise_sc = noise * 1e9

        def amp_sc(f, d_accel, f0, g):
            return np.abs(harmonic_osc(f, d_accel, f0, g)) * 1e9

        def phase_sc(f, d_accel, f0, g):
            return np.angle(harmonic_osc(f, d_accel, f0, g))

        popt, pcov = opti.curve_fit(amp_sc, freqs[inds], np.abs(resp_sc), \
                                    sigma=np.abs(noise_sc), absolute_sigma=True,
                                    p0=[1e-3, 160, 750], maxfev=10000)

        # For the first 100 files assume 25 charges; afterwards, estimate the
        # charge by projecting each response onto the mean of the early responses
        if fil_ind == 0:
            q_bead = 25.0 * 1.602e-19
            resps = [resp]
            N = 1
        elif fil_ind < 100:
            q_bead = 25.0 * 1.602e-19
            resps.append(resp)
        else:
            mean_resp = np.mean(np.array(resps), axis=0)
            inner_prod = np.abs(np.vdot(resp, mean_resp))
            proj = inner_prod / np.abs(np.vdot(mean_resp, mean_resp))
            q_bead = (proj * 25.0) * 1.602e-19

        q_arr.append(q_bead / (1.602e-19))

        # Electric force per unit drive voltage: E = V / (4 mm plate separation)
        force = (drive_amp / (4.0e-3)) * q_bead
        mass = np.abs(popt[0]**(-1) * force) * 10**12   # in ng

        if fil_ind == 0:
            delta_phi = [0.0]
        else:
            delta_phi.append(np.mean(df.zcal) - init_phi)

        mass_arr.append(mass)
        times.append(df.time)

    times = np.array(times)   # allow vectorized time differences below

    plt.plot((times - times[0]) * 1e-9, q_arr)
    plt.grid(axis='y')
    plt.xlabel('Time')
    plt.ylabel('Charge [e]')

    err_bars = 0.002 * np.ones(len(delta_phi))

    fig, axarr = plt.subplots(2, 1, sharey=True)
    axarr[0].errorbar((times - times[0]) * 1e-9, mass_arr, err_bars, fmt='-o', markersize=5)
    axarr[0].set_xlabel('Time [s]', fontsize=14)
    axarr[0].set_ylabel('Measured Mass [ng]', fontsize=14)
    plt.tight_layout()

    plt.figure(2)
    n, bin_edge, patch = plt.hist(mass_arr, bins=20, \
                                  color='w', edgecolor='k', linewidth=2)
    real_bins = bin_edge[:-1] + 0.5 * (bin_edge[1] - bin_edge[0])
    popt, pcov = opti.curve_fit(gauss, real_bins, n, p0=[100, 0.08, 0.01], maxfev=10000)
    lab = r'$\mu=%0.3f~\rm{ng}$, $\sigma=%0.3f~\rm{ng}$' % (popt[1], popt[2])

    test_vals = np.linspace(np.min(mass_arr), np.max(mass_arr), 100)
    plt.plot(test_vals, gauss(test_vals, *popt), color='r', linewidth=2, \
             label=lab)
    plt.legend()
    plt.xlabel('Measured Mass [ng]', fontsize=14)
    plt.ylabel('Arb', fontsize=14)
    plt.tight_layout()

    axarr[1].errorbar(np.array(delta_phi) * (1.0 / (2 * np.pi)) * (1064.0e-9 / 2) * 1e6, \
                      mass_arr, err_bars, fmt='o', markersize=5)
    axarr[1].set_xlabel('Mean z-position (arb. offset) [um]', fontsize=14)
    axarr[1].set_ylabel('Measured Mass [ng]', fontsize=14)
    plt.tight_layout()

    plt.show()
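### The histogram fit above uses gauss(), which is not defined in this file.
### A standard three-parameter Gaussian, consistent with the initial guess
### p0=[100, 0.08, 0.01] (amplitude, mean [ng], width [ng]), would be:
def gauss(x, A, mu, sigma):
    '''Gaussian with amplitude A, mean mu, and standard deviation sigma.'''
    return A * np.exp(-(x - mu)**2 / (2.0 * sigma**2))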
import numpy as np
import matplotlib.pyplot as plt
import matplotlib

import bead_util as bu
import scipy.signal as ss

path_0 = "/data/20181030/bead1/spinning_trans_data/0Hz_div4"
path_50k = "/data/20181030/bead1/spinning_trans_data/50kKz_div4"

files_0 = bu.find_all_fnames(path_0)
files_50k = bu.find_all_fnames(path_50k)

df_0 = bu.DataFile()
df_0.load(files_0[0])
df_0.diagonalize()

df_50k = bu.DataFile()
df_50k.load(files_50k[0])
df_50k.diagonalize()

ns_0 = np.shape(df_0.pos_data)[-1]
ns_50k = np.shape(df_50k.pos_data)[-1]

# Detrend each axis, FFT along the time axis, normalize by the number of
# samples, and scale each axis by its calibration factor via the einsum
fft_0 = np.einsum('ij, i->ij',
                  np.fft.rfft(ss.detrend(df_0.pos_data, axis=-1), axis=-1) * 2. / ns_0,
                  df_0.conv_facs)
fft_50k = np.einsum('ij, i->ij',
                    np.fft.rfft(ss.detrend(df_50k.pos_data, axis=-1), axis=-1) * 2. / ns_50k,
                    df_50k.conv_facs)
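### Sketch (not in the original script) of one way to compare the two spectra
### computed above: overlay the 0 Hz and 50 kHz rotation data for each axis.
### The frequency axes assume df_0.fsamp / df_50k.fsamp as the sample rates,
### and the y-axis units depend on what conv_facs calibrates to.
freqs_0 = np.fft.rfftfreq(ns_0, d=1.0 / df_0.fsamp)
freqs_50k = np.fft.rfftfreq(ns_50k, d=1.0 / df_50k.fsamp)

fig, axarr = plt.subplots(3, 1, sharex=True, figsize=(8, 8))
for ax_ind, lab in enumerate(['x', 'y', 'z']):
    axarr[ax_ind].loglog(freqs_0, np.abs(fft_0[ax_ind]), label='0 Hz')
    axarr[ax_ind].loglog(freqs_50k, np.abs(fft_50k[ax_ind]), label='50 kHz', alpha=0.7)
    axarr[ax_ind].set_ylabel(lab + ' response [arb]')
axarr[0].legend()
axarr[-1].set_xlabel('Frequency [Hz]')
plt.tight_layout()
plt.show()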
filesubs = ['pos0', 'pos1', 'pos2', 'pos3', 'pos4', \
            'pos5', 'pos6', 'pos7', 'pos8']

x = np.array([26.5, 36.7, 49.5, 75.5, 93.5, 107.7, 129.3, 146.4, 167.2])
x *= 1e-2

fit_pts = x == x
#fit_pts = x > np.mean(x)

wx = []
wy = []
for sub in filesubs:
    xfil = bu.DataFile()
    xfil.load(filebase + 'xprof_' + sub + '.h5')
    xfil.load_other_data()
    x_d, x_prof, x_popt = chopfuncs.profile(xfil, raw_dat_col = 4, \
                                            return_pos = True, numbins = 500, \
                                            fit_intensity=True, drum_diam=3.17e-2)

    yfil = bu.DataFile()
    yfil.load(filebase + 'yprof_' + sub + '.h5')
    yfil.load_other_data()
    y_d, y_prof, y_popt = chopfuncs.profile(yfil, raw_dat_col = 4, \
                                            return_pos = True, numbins = 500, \
                                            fit_intensity=True, drum_diam=3.17e-2)
def check_backscatter(files, colormap='jet', sort='time', file_inds=(0, 10000)):
    '''Loops over a list of file names, loads each file, converts the measured
       phase to a naive displacement calibration, and compares it against the
       cantilever monitor in the time domain.

       INPUTS: files, list of file names to extract data
               colormap, matplotlib colormap string
               sort, unused (files are always sorted by creation time)
               file_inds, indices of first and last file to process

       OUTPUTS: none, plots stuff
    '''
    # Sort files by creation time
    files = [(os.stat(path), path) for path in files]
    files = [(stat.st_ctime, path) for stat, path in files]
    files.sort(key=lambda x: (x[0]))
    files = [obj[1] for obj in files]

    files = files[file_inds[0]:file_inds[1]]
    #files = files[::10]

    date = files[0].split('/')[2]

    nfiles = len(files)
    amps = []

    print("Processing %i files..." % nfiles)
    for fil_ind, fil in enumerate(files):
        bu.progress_bar(fil_ind, nfiles)

        # Load data
        df = bu.DataFile()
        try:
            df.load(fil)
        except Exception:
            continue

        df.calibrate_stage_position()
        df.calibrate_phase()

        # Naive phase -> displacement calibration: lambda/2 per 2*pi of phase
        dz_dphi = (1064e-9 / 2.0) / (2.0 * np.pi)

        dat1 = df.zcal * dz_dphi * 1e6   # in microns
        dat2 = df.cant_data[2]

        freqs = np.fft.rfftfreq(df.nsamp, d=1.0 / df.fsamp)
        fft = np.fft.rfft(dat1)
        fft_fb = np.fft.rfft(df.pos_fb[2])

        times = (df.daqmx_time - df.daqmx_time[0]) * 1e-9

        plt.plot(times, dat1 - np.mean(dat1), label='Phase Measurement, Naive Calibration')
        plt.plot(times, dat2 - np.mean(dat2), label='Cantilever Monitor', ls='--')
        plt.xlabel('Time [s]', fontsize=14)
        plt.ylabel(r'Amplitude [$\mu$m]', fontsize=14)
        plt.legend(loc=1)
        plt.tight_layout()
        plt.show()
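### The "naive calibration" above converts interferometer phase to
### displacement: one full fringe (2*pi of phase) corresponds to lambda/2 of
### path length at lambda = 1064 nm. A quick standalone check of that factor
### (sketch only, not part of the original analysis):
import numpy as np
print('dz/dphi = %0.1f nm/rad' % ((1064e-9 / 2.0) / (2.0 * np.pi) * 1e9))   # ~84.7 nm/rad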
import bead_util as bu
import configuration as config

import numpy as np
import matplotlib.pyplot as plt

import time

dirname = '/data/20180618/bead1/discharge/fine3/'
files = bu.find_all_fnames(dirname)
#files = ['/data/20180618/bead1/discharge/fine3/turbombar_xyzcool_elec3_10000mV41Hz0mVdc_56.h5']

print(files[:5])

for filname in files[:1000]:
    df = bu.DataFile()
    df.load(filname, plot_sync=True)
    print(filname)

    posdat_range = np.max(df.pos_data[0]) - np.min(df.pos_data[0])
    cantdat_range = np.max(df.electrode_data[3]) - np.min(df.electrode_data[3])
    fac = cantdat_range / posdat_range

    #for point in df.pos_data[2][:100]:
    #    print np.binary_repr(point.astype(np.int32))
    #    time.sleep(0.5)

    fig, ax = plt.subplots(1, 1)
    #ax.plot((df.pos_data[2]-np.mean(df.pos_data[2])) * fac)
    ax.plot((df.pos_data[0] - np.mean(df.pos_data[0]))[:1500] * fac, '-', \
def build_hvamp_tf(files, hvamp_key='trek', drive_axes=[4], resp_axes=[6],\
                   monitor_div=200, file_inds=(0,10000), save=True, \
                   title=''):
    '''Loops over a list of file names, loads each file, and builds the
       transfer function of a high-voltage amplifier from the drive signal
       and the amplifier's divided-down monitor output.

       INPUTS: hvamp_key, string specifying which hvamp (to store tf)
                          e.g. 'burleigh', 'trek'
               files, list of file names to extract data
               drive_axes, list of other_data axes with drive
               resp_axes, list of other_data axes with HV monitor
               monitor_div, divider ratio of the HV monitor output
               file_inds, indices for min and max file
               save, boolean to save the computed TF to disk
               title, optional figure title

       OUTPUTS: none, plots stuff
    '''
    # Sort files by creation time
    files = [(os.stat(path), path) for path in files]
    files = [(stat.st_ctime, path) for stat, path in files]
    files.sort(key=lambda x: (x[0]))
    files = [obj[1] for obj in files]

    files = files[file_inds[0]:file_inds[1]]

    tffreqs = []
    tfvals = []

    old_per = 0
    print("Processing %i files..." % len(files))
    print("Percent complete: ")
    for fil_ind, fil in enumerate(files):
        # Display percent completion
        per = int(100. * float(fil_ind) / float(len(files)))
        if per > old_per:
            print(old_per, end=' ')
            sys.stdout.flush()
            old_per = per

        # If this amplifier's TF was computed previously, use the stored copy
        if hvamp_key in computed_tf_dict and not recompute:
            tf = computed_tf_dict[hvamp_key]
            tffreqs = tf[0]
            tfvals = tf[1]
            break

        # Load data
        df = bu.DataFile()
        try:
            df.load(fil)
            df.load_other_data()
        except Exception:
            continue

        df.detrend_poly()

        for axind, ax in enumerate(drive_axes):
            ax = ax - 3
            rax = resp_axes[axind] - 3

            normfac = len(df.other_data[ax]) * df.fsamp * 0.5
            freqs = np.fft.rfftfreq(len(df.other_data[ax]), d=1.0 / df.fsamp)

            fft = np.fft.rfft(df.other_data[ax])
            rfft = np.fft.rfft(df.other_data[rax])

            maxind = np.argmax(np.abs(fft[1:])) + 1   # ignore dc bin

            # TF value at the drive frequency, rescaled by the monitor divider
            tfval = rfft[maxind] * monitor_div / fft[maxind]
            tfvals.append(tfval)
            tffreqs.append(freqs[maxind])

    tffreqs = np.array(tffreqs)
    tfvals = np.array(tfvals)

    sortinds = np.argsort(tffreqs)
    tffreqs = tffreqs[sortinds]
    tfvals = tfvals[sortinds]

    computed_tf_dict[hvamp_key] = (tffreqs, tfvals)

    fig, ax = plt.subplots(2, 1, figsize=(10, 6), sharex=True)

    magfit = lambda x, a, fc: np.abs(lowpass_filter(x, a, fc))
    guess = [np.abs(tfvals)[0], 1000]
    popt1, pcov1 = opti.curve_fit(magfit, tffreqs, np.abs(tfvals))

    phasefit = lambda x, a, fc: np.angle(lowpass_filter(x, a, fc))
    #phasefit = lambda x,a: np.angle(lowpass_filter(x,a,popt1[1]))
    popt2, pcov2 = opti.curve_fit(phasefit, tffreqs, np.angle(tfvals))

    ax[0].loglog(tffreqs, np.abs(tfvals), linewidth=2, label='data')
    ax[0].loglog(tffreqs, magfit(tffreqs, *popt1), '--', label='fit')
    ax[1].semilogx(tffreqs, np.angle(tfvals) * (180.0 / np.pi), linewidth=2)
    ax[1].semilogx(tffreqs, phasefit(tffreqs, *popt1) * (180.0 / np.pi), '--')
    ax[0].set_ylabel('TF Mag [abs]', fontsize=14)
    ax[1].set_xlabel('Frequency [Hz]', fontsize=14)
    ax[1].set_ylabel('TF Phase [deg]', fontsize=14)

    plt.setp(ax[0].get_xticklabels(), fontsize=14, visible=True)
    plt.setp(ax[0].get_yticklabels(), fontsize=14, visible=True)
    plt.setp(ax[1].get_xticklabels(), fontsize=14, visible=True)
    plt.setp(ax[1].get_yticklabels(), fontsize=14, visible=True)

    for axi in ax:
        axi.yaxis.grid(which='major', color='k', linestyle='--', linewidth=0.5)
        axi.xaxis.grid(which='major', color='k', linestyle='--', linewidth=0.5)

    ax[0].legend()

    plt.tight_layout()
    if len(title):
        plt.subplots_adjust(top=0.92)
        plt.suptitle(title, fontsize=18)

    if save:
        pickle.dump(computed_tf_dict, open(computed_tf_path, 'wb'))

    print()
    print("AMP FIT:   %0.1e gain, %0.1e cutoff" % (popt1[0], popt1[1]))
    print("PHASE FIT: %0.1e gain, %0.1e cutoff" % (popt2[0], popt2[1]))
    sys.stdout.flush()

    plt.show()
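### The transfer-function fit above calls lowpass_filter(), which is not
### defined in this span. A minimal single-pole form consistent with how it is
### used (np.abs for the magnitude fit, np.angle for the phase fit), with DC
### gain a and cutoff frequency fc; the original definition may differ:
def lowpass_filter(f, a, fc):
    '''Single-pole low-pass response: DC gain a, -3 dB cutoff fc [Hz].'''
    return a / (1.0 + 1.0j * (f / fc))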
def get_force_curve_dictionary(files, cantind=0, ax1='z', fullax1=True, \
                               ax1val=0, spacing=1e-6, diag=False, fit_xdat=False, \
                               fit_zdat=False, plottf=False):
    '''Loops over a list of file names, loads each file, diagonalizes,
       computes force vs. position, then discards the raw data to avoid
       filling memory. Returns the result as a nested dictionary with the
       first level of keys the cantilever biases and the second level of
       keys the height.

       INPUTS: files, list of file names to extract data
               cantind, cantilever electrode index
               ax1, axis with different DC positions, usually the height
               fullax1, boolean specifying to loop over all values of ax1
               ax1val, if not fullax1 -> value to keep
               spacing, spacing around ax1val to keep
               diag, boolean specifying whether to diagonalize
               fit_xdat, boolean to fit the x response vs. cantilever position
               fit_zdat, boolean to fit the z response vs. cantilever position
               plottf, boolean to plot the transfer function when diagonalizing

       OUTPUTS: outdic, output dictionary with the following indexing
                        outdic[cantbias][ax1pos][resp(0,1,2)][bins(0) or dat(1)]
                        cantbias and ax1pos are dictionary keys, resp and bins/dat
                        are array indices (native python lists)
                diagoutdic, if diag=True second dictionary with diagonalized data
    '''
    force_curves = {}
    if diag:
        diag_force_curves = {}

    old_per = 0
    for fil_ind, fil in enumerate(files):
        # Display percent completion
        bu.progress_bar(fil_ind, len(files))

        # Load data
        df = bu.DataFile()
        df.load(fil)

        df.calibrate_stage_position()

        cantbias = df.electrode_settings['dc_settings'][0]
        ax1pos = df.stage_settings[ax1 + ' DC']

        # If subselection is desired, do that now
        if not fullax1:
            dif1 = np.abs(ax1pos - ax1val)
            if dif1 > spacing:
                continue

        if diag:
            if fil_ind == 0 and plottf:
                df.diagonalize(date=tfdate, maxfreq=tophatf, plot=True)
            else:
                df.diagonalize(date=tfdate, maxfreq=tophatf)

        df.get_force_v_pos(verbose=False, nbins=nbins)

        # Add the current data to the output dictionary
        if cantbias not in list(force_curves.keys()):
            force_curves[cantbias] = {}
            if diag:
                diag_force_curves[cantbias] = {}
        if ax1pos not in list(force_curves[cantbias].keys()):
            # If this bias and height were not found, add them to the dictionary
            force_curves[cantbias][ax1pos] = [[], [], []]
            if diag:
                diag_force_curves[cantbias][ax1pos] = [[], [], []]
            for resp in [0, 1, 2]:
                force_curves[cantbias][ax1pos][resp] = \
                    [df.binned_data[resp][0], \
                     df.binned_data[resp][1] * df.conv_facs[resp]]
                if diag:
                    diag_force_curves[cantbias][ax1pos][resp] = \
                        [df.diag_binned_data[resp][0], \
                         df.diag_binned_data[resp][1]]
        else:
            for resp in [0, 1, 2]:
                # If this combination of bias and height has already been recorded,
                # concatenate and sort the data from multiple files
                old_bins = force_curves[cantbias][ax1pos][resp][0]
                old_dat = force_curves[cantbias][ax1pos][resp][1]
                new_bins = np.hstack((old_bins, df.binned_data[resp][0]))
                new_dat = np.hstack((old_dat, df.binned_data[resp][1] * df.conv_facs[resp]))

                sort_inds = np.argsort(new_bins)

                force_curves[cantbias][ax1pos][resp] = \
                    [new_bins[sort_inds], new_dat[sort_inds]]

                if diag:
                    old_diag_bins = diag_force_curves[cantbias][ax1pos][resp][0]
                    old_diag_dat = diag_force_curves[cantbias][ax1pos][resp][1]
                    new_diag_bins = np.hstack((old_diag_bins, df.diag_binned_data[resp][0]))
                    new_diag_dat = np.hstack((old_diag_dat, df.diag_binned_data[resp][1]))

                    diag_sort_inds = np.argsort(new_diag_bins)

                    diag_force_curves[cantbias][ax1pos][resp] = \
                        [new_diag_bins[diag_sort_inds], new_diag_dat[diag_sort_inds]]

    cantV_keys = list(force_curves.keys())
    ax1_keys = list(force_curves[cantV_keys[0]].keys())

    print()
    print('Averaging files and building standard deviations')
    sys.stdout.flush()

    if fit_xdat:
        xdat = {'fit': dipole_force}
        diag_xdat = {'fit': dipole_force}
    if fit_zdat:
        zdat = {'fit': dipole_force}
        diag_zdat = {'fit': dipole_force}

    for cantV_k in cantV_keys:
        if fit_zdat:
            if cantV_k not in zdat:
                zdat[cantV_k] = {}
                if diag:
                    diag_zdat[cantV_k] = {}
        if fit_xdat:
            if cantV_k not in xdat:
                xdat[cantV_k] = {}
                if diag:
                    diag_xdat[cantV_k] = {}

        for ax1_k in ax1_keys:
            for resp in [0, 1, 2]:
                old_bins = force_curves[cantV_k][ax1_k][resp][0]
                old_dat = force_curves[cantV_k][ax1_k][resp][1]

                #dat_func = interp.interp1d(old_bins, old_dat, kind='cubic')

                # Rebin onto a regular grid; the scatter gives the per-bin error
                new_bins = np.linspace(np.min(old_bins) + 1e-9, np.max(old_bins) - 1e-9, nbins)
                new_dat = np.zeros_like(new_bins)
                new_errs = np.zeros_like(new_bins)

                bin_sp = new_bins[1] - new_bins[0]
                for binind, binval in enumerate(new_bins):
                    inds = np.abs(old_bins - binval) < bin_sp
                    new_dat[binind] = np.mean(old_dat[inds])
                    new_errs[binind] = np.std(old_dat[inds])

                force_curves[cantV_k][ax1_k][resp] = [new_bins, new_dat, new_errs]

                if fit_xdat and resp == 0:
                    x0 = np.max(new_bins) + closest_sep
                    p0 = [np.max(new_dat) / closest_sep**2, 0, 0]
                    fitfun = lambda x, a, b, c: xdat['fit'](x, a, b, c, x0=x0)
                    popt, pcov = opti.curve_fit(fitfun, new_bins, new_dat)
                    val = fitfun(np.max(new_bins), popt[0], popt[1], 0)
                    xdat[cantV_k][ax1_k] = (popt, val)

                if fit_zdat and resp == 2:
                    x0 = np.max(new_bins) + closest_sep
                    p0 = [np.max(new_dat) / closest_sep**2, 0, 0]
                    fitfun = lambda x, a, b, c: zdat['fit'](x, a, b, c, x0=x0)
                    popt, pcov = opti.curve_fit(fitfun, new_bins, new_dat)
                    val = fitfun(np.max(new_bins), popt[0], popt[1], 0)
                    zdat[cantV_k][ax1_k] = (popt, val)

                if diag:
                    old_diag_bins = diag_force_curves[cantV_k][ax1_k][resp][0]
                    old_diag_dat = diag_force_curves[cantV_k][ax1_k][resp][1]

                    #diag_dat_func = interp.interp1d(old_diag_bins, old_diag_dat, kind='cubic')

                    new_diag_bins = np.linspace(np.min(old_diag_bins)+1e-9, \
                                                np.max(old_diag_bins)-1e-9, nbins)
                    new_diag_dat = np.zeros_like(new_diag_bins)
                    new_diag_errs = np.zeros_like(new_diag_bins)

                    diag_bin_sp = new_diag_bins[1] - new_diag_bins[0]
                    for binind, binval in enumerate(new_diag_bins):
                        diaginds = np.abs(old_diag_bins - binval) < diag_bin_sp
                        new_diag_dat[binind] = np.mean(old_diag_dat[diaginds])
                        new_diag_errs[binind] = np.std(old_diag_dat[diaginds])

                    diag_force_curves[cantV_k][ax1_k][resp] = \
                        [new_diag_bins, new_diag_dat, new_diag_errs]

                    if fit_xdat and resp == 0:
                        x0 = np.max(new_diag_bins) + closest_sep
                        p0 = [np.max(new_diag_dat) / closest_sep**2, 0, 0]
                        fitfun = lambda x, a, b, c: diag_xdat['fit'](x, a, b, c, x0=x0)
                        popt, pcov = opti.curve_fit(fitfun, new_diag_bins, new_diag_dat)
                        val = fitfun(np.max(new_diag_bins), popt[0], popt[1], 0)
                        diag_xdat[cantV_k][ax1_k] = (popt, val)

                    if fit_zdat and resp == 2:
                        x0 = np.max(new_diag_bins) + closest_sep
                        p0 = [np.max(new_diag_dat) / closest_sep**2, 0, 0]
                        fitfun = lambda x, a, b, c: diag_zdat['fit'](x, a, b, c, x0=x0)
                        popt, pcov = opti.curve_fit(fitfun, new_diag_bins, new_diag_dat)
                        val = fitfun(np.max(new_diag_bins), popt[0], popt[1], 0)
                        diag_zdat[cantV_k][ax1_k] = (popt, val)

    fits = {}
    if fit_xdat:
        if diag:
            fits['x'] = (xdat, diag_xdat)
        else:
            fits['x'] = (xdat)
    if fit_zdat:
        if diag:
            fits['z'] = (zdat, diag_zdat)
        else:
            fits['z'] = (zdat)

    if diag:
        return force_curves, diag_force_curves, fits
    else:
        return force_curves, fits
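### The background fits above use dipole_force(x, a, b, c, x0=...), which is
### not defined in this file. One plausible form, guessed from the initial
### parameter choice p0 = [np.max(new_dat) / closest_sep**2, 0, 0], is an
### inverse-power expansion in the bead-cantilever separation (x0 - x); the
### actual function in the original codebase may differ:
def dipole_force(x, a, b, c, x0=0.0):
    '''Guessed background model: a/(x0-x)^2 + b/(x0-x) + c.'''
    sep = x0 - x
    return a / sep**2 + b / sep + c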
def get_force_curve_dictionary(files, ax1='x', ax2='z', ax1val=0, ax2val=0, \
                               diag=False):
    '''Loops over a list of file names, loads each file, diagonalizes,
       computes force vs. position, then discards the raw data to avoid
       filling memory. Returns the result as a nested dictionary with
       cantilever biases as the first level of keys.

       INPUTS: files, list of file names to extract data
               ax1, first axis in output array
               ax2, second axis in output array
               ax1val, value to keep (or closest)
               ax2val, value to keep
               diag, boolean specifying whether to diagonalize

       OUTPUTS: outdic, output dictionary with the following indexing
                        outdic[cantbias][ax1pos][ax2pos][resp(0,1,2)][bins(0) or dat(1)]
                        cantbias, ax1pos and ax2pos are dictionary keys, resp and
                        bins/dat are array indices (native python lists)
                diagoutdic, if diag=True second dictionary with diagonalized data
    '''
    force_curves = {}
    if diag:
        diag_force_curves = {}

    old_per = 0
    print("Percent complete: ")
    for fil_ind, fil in enumerate(files):
        # Display percent completion
        per = int(100. * float(fil_ind) / float(len(files)))
        if per > old_per:
            print(old_per, end=' ')
            sys.stdout.flush()
            old_per = per

        # Load data
        df = bu.DataFile()
        df.load(fil)

        df.calibrate_stage_position()
        df.high_pass_filter(fc=5)

        #cantbias = df.electrode_settings['dc_settings'][0]
        cantbias = df.electrode_settings['amplitudes'][0]

        ax1pos = df.stage_settings[ax1 + ' DC']
        ax2pos = df.stage_settings[ax2 + ' DC']

        if diag:
            df.diagonalize(maxfreq=lpf)

        df.get_force_v_pos(verbose=False, nbins=nbins, nharmonics=10, width=0)

        if cantbias not in list(force_curves.keys()):
            force_curves[cantbias] = {}
            if diag:
                diag_force_curves[cantbias] = {}
        if ax1pos not in list(force_curves[cantbias].keys()):
            force_curves[cantbias][ax1pos] = {}
            if diag:
                diag_force_curves[cantbias][ax1pos] = {}
        if ax2pos not in list(force_curves[cantbias][ax1pos].keys()):
            force_curves[cantbias][ax1pos][ax2pos] = [[], [], []]
            if diag:
                diag_force_curves[cantbias][ax1pos][ax2pos] = [[], [], []]
            for resp in [0, 1, 2]:
                force_curves[cantbias][ax1pos][ax2pos][resp] = \
                    [df.binned_data[resp][0], \
                     df.binned_data[resp][1] * df.conv_facs[resp]]
                if diag:
                    diag_force_curves[cantbias][ax1pos][ax2pos][resp] = \
                        [df.diag_binned_data[resp][0], \
                         df.diag_binned_data[resp][1]]
        else:
            for resp in [0, 1, 2]:
                # Concatenate and sort data recorded at the same bias and positions
                old_bins = force_curves[cantbias][ax1pos][ax2pos][resp][0]
                old_dat = force_curves[cantbias][ax1pos][ax2pos][resp][1]
                new_bins = np.hstack((old_bins, df.binned_data[resp][0]))
                new_dat = np.hstack((old_dat, df.binned_data[resp][1] * df.conv_facs[resp]))

                sort_inds = np.argsort(new_bins)

                force_curves[cantbias][ax1pos][ax2pos][resp] = \
                    [new_bins[sort_inds], new_dat[sort_inds]]

                if diag:
                    old_diag_bins = diag_force_curves[cantbias][ax1pos][ax2pos][resp][0]
                    old_diag_dat = diag_force_curves[cantbias][ax1pos][ax2pos][resp][1]
                    new_diag_bins = np.hstack((old_diag_bins, df.diag_binned_data[resp][0]))
                    new_diag_dat = np.hstack((old_diag_dat, df.diag_binned_data[resp][1]))

                    diag_sort_inds = np.argsort(new_diag_bins)

                    diag_force_curves[cantbias][ax1pos][ax2pos][resp] = \
                        [new_diag_bins[diag_sort_inds], new_diag_dat[diag_sort_inds]]

    cantV_keys = list(force_curves.keys())
    ax1_keys = list(force_curves[cantV_keys[0]].keys())
    ax2_keys = list(force_curves[cantV_keys[0]][ax1_keys[0]].keys())

    for cantV in cantV_keys:
        for ax1 in ax1_keys:
            for ax2 in ax2_keys:
                for resp in [0, 1, 2]:
                    old_bins = force_curves[cantV][ax1][ax2][resp][0]
                    old_dat = force_curves[cantV][ax1][ax2][resp][1]

                    # Interpolate onto a regular grid and estimate per-bin scatter
                    dat_func = interp.interp1d(old_bins, old_dat, kind='cubic')

                    new_bins = np.linspace(np.min(old_bins)+1e-9, np.max(old_bins)-1e-9, nbins)
                    new_dat = dat_func(new_bins)
                    new_errs = np.zeros_like(new_dat)

                    bin_sp = new_bins[1] - new_bins[0]
                    for binind, binval in enumerate(new_bins):
                        inds = np.abs(old_bins - binval) < bin_sp
                        new_errs[binind] = np.std(old_dat[inds])

                    force_curves[cantV][ax1][ax2][resp] = \
                        [new_bins, new_dat, new_errs]

                    if diag:
                        old_diag_bins = diag_force_curves[cantV][ax1][ax2][resp][0]
                        old_diag_dat = diag_force_curves[cantV][ax1][ax2][resp][1]

                        diag_dat_func = interp.interp1d(old_diag_bins, \
                                                        old_diag_dat, kind='cubic')

                        new_diag_bins = np.linspace(np.min(old_diag_bins)+1e-9, \
                                                    np.max(old_diag_bins)-1e-9, nbins)
                        new_diag_dat = diag_dat_func(new_diag_bins)
                        new_diag_errs = np.zeros_like(new_diag_dat)

                        diag_bin_sp = new_diag_bins[1] - new_diag_bins[0]
                        for binind, binval in enumerate(new_diag_bins):
                            inds = np.abs(old_diag_bins - binval) < diag_bin_sp
                            new_diag_errs[binind] = np.std(old_diag_dat[inds])

                        diag_force_curves[cantV][ax1][ax2][resp] = \
                            [new_diag_bins, new_diag_dat, new_diag_errs]

    if diag:
        return force_curves, diag_force_curves
    else:
        return force_curves
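### Usage sketch (not part of the original module): iterate the three-level
### dictionary returned above and overlay the x-axis force curves for each
### cantilever bias at one (height, separation) combination. The directory
### path is a placeholder and the module-level globals (nbins, lpf) are
### assumed to be defined.
example_files, _ = bu.find_all_fnames('/data/20180618/bead1/dipole_vs_height/example_dir')
example_curves = get_force_curve_dictionary(example_files, ax1='x', ax2='z', diag=False)

for cantbias in sorted(example_curves.keys()):
    ax1pos = sorted(example_curves[cantbias].keys())[0]
    ax2pos = sorted(example_curves[cantbias][ax1pos].keys())[0]
    bins, dat, errs = example_curves[cantbias][ax1pos][ax2pos][0]
    plt.errorbar(bins, dat * 1e15, errs * 1e15, fmt='.-', label='%0.1f V' % cantbias)
plt.xlabel('Cantilever position [um]')
plt.ylabel('X force [fN]')
plt.legend()
plt.show()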