Example #1
    def __init__(self, fnames, p0_bead=[16, 0, 20], tophatf=2500, harms=[]):

        self.fnames = fnames
        self.p0_bead = p0_bead
        self.file_data_objs = []

        Nnames = len(self.fnames)

        suff = 'Processing %i files' % Nnames
        for name_ind, name in enumerate(self.fnames):
            bu.progress_bar(name_ind, Nnames, suffix=suff)

            # Initialize FileData obj, extract the data, then close the big file
            try:
                new_obj = FileData(name, tophatf=tophatf)
                new_obj.extract_data(harms=harms)
                new_obj.load_position_and_bias()

                new_obj.close_datafile()

                self.file_data_objs.append(new_obj)

            except Exception:
                # Skip files that fail to load or process
                continue

        self.grav_loaded = False

        self.alpha_dict = ''
        self.agg_dict = ''
def profile_directory(prof_dir, raw_dat_col = 0, drum_diam=3.25e-2, \
                      return_pos=False, plot_peaks=False, guess=3e-3):
    ''' Takes a directory path, profiles each file, and averages the
        results

    INPUTS:  prof_dir, directory path
             raw_dat_col, column in 'other_data' with raw WM100 monitor
             drum_diam, diameter of the optical head that rotates
             return_pos, boolean specifying whether to return raw time or
                         calibrated drum position using the drum_diam argument

    OUTPUTS: tot_x, all t/disp associated with profiles, overlain/sorted
             tot_prof, all profiles overlain and sorted
    '''
    prof_files = []
    for root, dirnames, filenames in os.walk(prof_dir):
        for filename in fnmatch.filter(filenames,
                                       '*' + config.extensions['data']):
            prof_files.append(os.path.join(root, filename))

    tot_x = []
    tot_prof = []
    nfiles = len(prof_files)
    for fil_ind, fil_path in enumerate(prof_files):
        bu.progress_bar(fil_ind, nfiles)
        prof_df = bu.DataFile()
        prof_df.load(fil_path, skip_fpga=True)
        prof_df.load_other_data()

        x, prof, popt = profile(prof_df, raw_dat_col = raw_dat_col, \
                                drum_diam = drum_diam, return_pos = return_pos, \
                                fit_intensity=True, plot_peaks=plot_peaks, \
                                guess=guess)

        #plt.plot(x, prof)
        #plt.show()

        #x, prof, errs = bu.rebin(x, prof, numbins=5000)
        #plt.plot(x, prof)
        #plt.show()

        if not len(tot_x):
            tot_x = x
            tot_prof = prof
            tot_popt = [popt]
        else:
            tot_x = np.block([tot_x, x])  # = np.hstack((tot_x, x))
            tot_prof = np.block([tot_prof,
                                 prof])  # = np.hstack((tot_prof, prof))
            tot_popt.append(popt)  # = np.concatenate((tot_popt, popt), axis=0)

    #tot_x = np.concatenate(tot_x)
    #tot_prof = np.concatenate(tot_prof)

    tot_popt = np.array(tot_popt)
    tot_popt_mean = np.mean(tot_popt, axis=0)

    sort_inds = tot_x.argsort()

    return tot_x[sort_inds], tot_prof[sort_inds], tot_popt_mean
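The accumulate-then-sort pattern above can be isolated into a small helper. A minimal sketch (hypothetical standalone function, not part of the original code; assumes 1-D arrays per file):

import numpy as np

def overlay_profiles(xs, profs):
    """Concatenate per-file (x, profile) pairs and sort by x so the
    overlaid traces read as one monotonic profile."""
    tot_x = np.concatenate(xs)        # same effect as the chained np.block calls
    tot_prof = np.concatenate(profs)
    order = tot_x.argsort()
    return tot_x[order], tot_prof[order]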
def plot_xy_orbit(dirname, allfiles=True, user_filind=0, filter=True, fdrive=41.0):  # NOTE: 'filter' and 'fdrive' are unused in the body shown

    print('Analyzing: ', dirname, '  ...')

    files, lengths = bu.find_all_fnames(dirname)
    nfiles = len(files)

    for filind, fil in enumerate(files):
        if not allfiles:
            if filind != user_filind:
                continue

        bu.progress_bar(filind, nfiles)

        df = bu.DataFile()
        df.load(fil)

        freqs = np.fft.rfftfreq(df.nsamp, d=1.0/df.fsamp)

        plt.loglog(freqs, np.abs(np.fft.rfft(df.pos_data[0])))
        plt.loglog(freqs, np.abs(np.fft.rfft(df.pos_data[1])))

        plt.figure()
        plt.scatter(df.pos_data[0], df.pos_data[1])
        plt.show()
    def find_stage_positions(self, find_again=False):
        '''Loops over a list of file names, loads the attributes of each file, 
           then extracts the DC stage position to sort through data.'''

        axvecs = [{}, {}, {}]
        nfiles = len(self.allfiles)
        for fil_ind, fil in enumerate(self.allfiles):
            bu.progress_bar(fil_ind, nfiles, suffix='sorting by stage pos')

            df = bu.DataFile()
            df.load_only_attribs(fil)

            if df.badfile:
                continue

            df.calibrate_stage_position()

            for axind, axstr in enumerate(['x', 'y', 'z']):
                axpos = df.stage_settings[axstr + ' DC']
                if axpos not in list(axvecs[axind].keys()):
                    axvecs[axind][axpos] = []
                axvecs[axind][axpos].append(fil)
        pickle.dump(axvecs, open('/backgrounds/axvecs/' + self.bead + '_' + \
                                 self.parent_dir + '_axvecs.p', 'wb'))

        self.axvecs = axvecs
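The nested axvecs bookkeeping above is a group-by over stage setpoints. A minimal equivalent sketch using collections.defaultdict (hypothetical helper, assuming the same 'x/y/z DC' stage settings):

from collections import defaultdict

def group_by_stage_pos(settings_per_file):
    """Map each axis's DC setpoint to the list of files taken there.

    settings_per_file: iterable of (fname, {'x DC': ..., 'y DC': ..., 'z DC': ...})
    """
    axvecs = [defaultdict(list), defaultdict(list), defaultdict(list)]
    for fname, settings in settings_per_file:
        for axind, axstr in enumerate(['x', 'y', 'z']):
            axvecs[axind][settings[axstr + ' DC']].append(fname)
    return axvecs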
Example #5
    def find_alpha_vs_time(self,
                           br_temps=[],
                           single_lambda=True,
                           lambda_value=25E-6):

        print("Computing alpha as a function of time...")

        if not self.grav_loaded:
            print("Must load theory data first...")
            return

        Nobj = len(self.file_data_objs)

        dft = pd.DataFrame()
        for objind, file_data_obj in enumerate(self.file_data_objs):
            bu.progress_bar(objind, Nobj, suffix='Fitting Alpha vs. Time')

            t = file_data_obj.time
            phi = file_data_obj.phi_cm

            ## Get sep and height from axis positions

            full_pts = file_data_obj.generate_pts(self.p0_bead)

            ## Loop over lambdas and
            lambda_inds = np.arange(len(self.lambdas))
            if single_lambda:
                lind = np.argmin((lambda_value - self.lambdas)**2)
                lambda_inds = [lambda_inds[lind]]
                n_lam = 1
            else:
                n_lam = len(self.lambdas)

            for i, lambind in enumerate(lambda_inds):
                yukfft = [[], [], []]
                for resp in [0, 1, 2]:
                    yukforcet = self.yukfuncs[resp][lambind](full_pts * 1.0e-6)
                    yukfft[resp] = np.fft.rfft(yukforcet)[file_data_obj.ginds]
                yukfft = np.array(yukfft)

                dfl = file_data_obj.fit_alpha_xyz([yukfft] + br_temps)
                dfl["lambda"] = self.lambdas[lambind]
                index = [[objind], [lambind]]
                dfl.index = index
                dft = dft.append(dfl)

        return dft
def proc_dir(files, T=10., G=15., tuning=.14):
    gf = al2.GravFile()

    gf.load(files[0])
    amps = np.zeros((len(files), 3, gf.num_harmonics))
    delta_f = np.zeros(len(files))
    phis = np.zeros((len(files), 3, gf.num_harmonics))
    sig_amps = np.zeros((len(files), 3, gf.num_harmonics))
    sig_phis = np.zeros((len(files), 3, gf.num_harmonics))
    ps = np.zeros(len(files))
    n = len(files)
    for i, f in enumerate(files[:-1]):  # NOTE: skips the last file; its preallocated rows stay zero
        bu.progress_bar(i, n)
        gf_temp = al2.GravFile()
        gf_temp.load(f)
        gf_temp.estimate_sig(use_diag=True)
        N = len(gf_temp.freq_vector)
        amps[i, :, :] = gf_temp.amps_at_harms / N
        phis[i, :, :] = gf_temp.phis_at_harms
        delta_f[i] = np.mean(gf_temp.electrode_data[3, :]) * G * tuning
        sig_amps[i, :, :] = gf_temp.noise / (N * np.sqrt(2))
        sig_phis[i, :, :] = gf_temp.sigma_phis_at_harms
        ps[i] = float(gf_temp.pressures['pirani'])

    def line(x, m, b):
        return m * x + b

    fnum = np.arange(len(files))
    popt, pcov = curve_fit(line, fnum, ps)
    pfit = line(fnum, *popt)
    tarr = T * np.arange(len(files))

    return {"amps [N]": amps, "sig_amps [N]":sig_amps, "phis [rad]": phis, \
            "sig_phis [rad]": sig_phis, "p [Torr]": pfit, "t [s]": tarr,\
            "delta_f [GHz]": delta_f}
def get_alpha_vs_file(fildat, diag=True, ignoreX=False, ignoreY=False, ignoreZ=False, \
                     plot=True, save=False, savepath='', confidence_level=0.95, \
                     only_closest=False, ax1='x', ax2='z', lamb_range=(1e-9, 1e-2)):
    '''Loops over a list of file names, loads each file, diagonalizes,
       then performs an optimal filter using the cantilever drive and 
       a theoretical force vs position to generate the filter/template.
       The result of the optimal filtering is stored, and the data 
       released from memory

       INPUTS: fildat

       OUTPUTS: 
    '''

    # For the confidence interval, compute the inverse CDF of a
    # chi^2 distribution at the given confidence level and compare to the
    # likelihood ratio via a goodness-of-fit parameter.
    # Refer to the scipy.stats documentation to understand chi2
    chi2dist = stats.chi2(1)
    # factor of 0.5 from Wilks' theorem: -2 log(likelihood) ~ chi^2(1)
    con_val = 0.5 * chi2dist.ppf(confidence_level)
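    # e.g. confidence_level=0.95 gives chi2dist.ppf(0.95) ~ 3.84, so con_val ~ 1.92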

    colors = bu.get_color_map(len(lambdas))

    alphas = np.zeros_like(lambdas)
    diagalphas = np.zeros_like(lambdas)
    testalphas = np.linspace(-10**10, 10**10, 11)

    biasvec = list(fildat.keys())
    biasvec.sort()
    ax1posvec = list(fildat[biasvec[0]].keys())
    ax1posvec.sort()
    ax2posvec = list(fildat[biasvec[0]][ax1posvec[0]].keys())
    ax2posvec.sort()

    if only_closest:
        if ax1 == 'x' and ax2 == 'z':
            seps = minsep + (maxthrow - np.array(ax1posvec))
            heights = np.array(ax2posvec) - beadheight
            sind = np.argmin(seps)
            hind = np.argmin(np.abs(heights - beadheight))
            ax1posvec = [ax1posvec[sind]]
            ax2posvec = [ax2posvec[hind]]

        elif ax1 =='z' and ax2 == 'x':
            seps = minsep + (maxthrow - np.array(ax2posvec))
            heights = np.array(ax1posvec) - beadheight
            sind = np.argmin(seps)
            hind = np.argmin(np.abs(heights - beadheight))
            ax1posvec = [ax1posvec[hind]]
            ax2posvec = [ax2posvec[sind]]
        

    newlamb = lambdas[(lambdas > lamb_range[0]) * (lambdas < lamb_range[-1])]
    tot_iterations = len(biasvec) * len(ax1posvec) * len(ax2posvec) * len(newlamb) * len(testalphas)
    i = -1

    for lambind, yuklambda in enumerate(lambdas):
        # NOTE: hard-coded to analyze only a single lambda index
        if lambind != 48:
            continue

        if (yuklambda < lamb_range[0]) or (yuklambda > lamb_range[1]):
            continue

        test = fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][lambind]
        test_yukdat = test[-1]
        test_dat = test[1]
        
        newalpha = 1e-4 * np.sqrt(np.mean(np.abs(test_dat) / np.abs(test_yukdat)))
        testalphas = np.linspace(-1.0*newalpha, newalpha, 11)

        for bias, ax1pos, ax2pos in itertools.product(biasvec, ax1posvec, ax2posvec):
            i += 1
            bu.progress_bar(i, tot_iterations)
 
            minalphas = [0] * len(fildat[bias][ax1pos][ax2pos])
            diag_minalphas = [0] * len(fildat[bias][ax1pos][ax2pos])

            for fil_ind in range(len(fildat[bias][ax1pos][ax2pos])):
                dat = fildat[bias][ax1pos][ax2pos][fil_ind][lambind]
                assert dat[0] == yuklambda
                _, datfft, diagdatfft, daterr, diagdaterr, gfft, yukfft = dat

                chi_sqs = np.zeros(len(testalphas))
                diagchi_sqs = np.zeros(len(testalphas))

                for alphaind, testalpha in enumerate(testalphas):

                    chi_sq = 0
                    diagchi_sq = 0
                    N = 0
                
                    for resp in [0,1,2]:
                        if (ignoreX and resp == 0) or \
                           (ignoreY and resp == 1) or \
                           (ignoreZ and resp == 2):
                            continue
                        re_diff = datfft[resp].real - \
                                  (gfft[resp].real + testalpha * yukfft[resp].real )
                        im_diff = datfft[resp].imag - \
                                  (gfft[resp].imag + testalpha * yukfft[resp].imag )
                        if diag:
                            diag_re_diff = diagdatfft[resp].real - \
                                           (gfft[resp].real + testalpha * yukfft[resp].real )
                            diag_im_diff = diagdatfft[resp].imag - \
                                           (gfft[resp].imag + testalpha * yukfft[resp].imag )

                        #plt.plot(np.abs(re_diff))
                        #plt.plot(daterr[resp])
                        #plt.show()

                        chi_sq += ( np.sum( np.abs(re_diff)**2 / (0.5*(daterr[resp]**2)) ) + \
                                  np.sum( np.abs(im_diff)**2 / (0.5*(daterr[resp]**2)) ) )
                        if diag:
                            diagchi_sq += ( np.sum( np.abs(diag_re_diff)**2 / \
                                                    (0.5*(diagdaterr[resp]**2)) ) + \
                                            np.sum( np.abs(diag_im_diff)**2 / \
                                                    (0.5*(diagdaterr[resp]**2)) ) )

                        N += len(re_diff) + len(im_diff)

                    chi_sqs[alphaind] = chi_sq / (N - 1)
                    if diag:
                        diagchi_sqs[alphaind] = diagchi_sq / (N - 1)

                max_chi = np.max(chi_sqs)
                if diag:
                    max_diagchi = np.max(diagchi_sqs)

                max_alpha = np.max(testalphas)

                p0 = [max_chi/max_alpha**2, 0, 1]
                if diag:
                    diag_p0 = [max_diagchi/max_alpha**2, 0, 1]
    
                try:
                    popt, pcov = opti.curve_fit(parabola, testalphas, chi_sqs, \
                                                p0=p0, maxfev=100000)
                    if diag:
                        diagpopt, diagpcov = opti.curve_fit(parabola, testalphas, diagchi_sqs, \
                                                            p0=diag_p0, maxfev=1000000)
                except Exception:
                    print("Couldn't fit")
                    popt = [0, 0, np.mean(chi_sqs)]
                    if diag:
                        diagpopt = [0, 0, np.mean(diagchi_sqs)]

                regular_con_val = con_val + np.min(chi_sqs)
                if diag:
                    diag_con_val = con_val + np.min(diagchi_sqs)

                # Find where the fitted chi^2 parabola crosses the confidence threshold
                soln1 = ( -1.0 * popt[1] + np.sqrt( popt[1]**2 - \
                        4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])
                soln2 = ( -1.0 * popt[1] - np.sqrt( popt[1]**2 - \
                        4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])

                if diag:
                    diagsoln1 = ( -1.0 * diagpopt[1] + np.sqrt( diagpopt[1]**2 - \
                            4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])
                    diagsoln2 = ( -1.0 * diagpopt[1] - np.sqrt( diagpopt[1]**2 - \
                            4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])

                if soln1 > soln2:
                    alpha_con = soln1
                else:
                    alpha_con = soln2

                if diag:
                    if diagsoln1 > diagsoln2:
                        diagalpha_con = diagsoln1
                    else:
                        diagalpha_con = diagsoln2

                minalphas[fil_ind] = alpha_con
                if diag:
                    diag_minalphas[fil_ind] = diagalpha_con


            if plot:
                minfig, minaxarr = plt.subplots(1,2,figsize=(10,5),dpi=150)
                minaxarr[0].plot(minalphas)
                minaxarr[0].set_title('Min $\\alpha$ vs. Time', fontsize=18)
                minaxarr[0].set_xlabel('File Num', fontsize=16)
                minaxarr[0].set_ylabel('$\\alpha$ [arb]', fontsize=16)

                minaxarr[1].hist(minalphas, bins=20)
                minaxarr[1].set_xlabel('$\\alpha$ [arb]', fontsize=16)

                plt.tight_layout()
                plt.show()


    return minalphas  # NOTE: holds values only for the last (bias, ax1pos, ax2pos) combination
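The limit extraction above fits a parabola to chi^2(alpha) and solves for where it rises con_val above its minimum. A hedged sketch of just that step, approximating min(chi_sqs) by the parabola's own minimum and assuming the [a, b, c] parameterization implied by p0, with a > 0:

import numpy as np

def parabola(x, a, b, c):
    return a * x**2 + b * x + c

def alpha_limit(a, b, c, con_val):
    """Larger root of a*alpha^2 + b*alpha + c = chi^2_min + con_val, i.e.
    where the fitted chi^2 curve crosses the confidence threshold."""
    target = (c - b**2 / (4.0 * a)) + con_val   # parabola minimum plus offset
    disc = b**2 - 4.0 * a * (c - target)        # reduces to 4*a*con_val >= 0
    return (-b + np.sqrt(disc)) / (2.0 * a)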
def get_data_at_harms(files, gfuncs, yukfuncs, lambdas, lims, \
                      minsep=20, maxthrow=80, beadheight=5,\
                      cantind=0, ax1='x', ax2='z', diag=True, plottf=False, \
                      width=0, nharmonics=10, harms=[], \
                      ext_cant_drive=False, ext_cant_ind=1, \
                      ignoreX=False, ignoreY=False, ignoreZ=False,  noiseband=10):
    '''Loops over a list of file names, loads each file, diagonalizes,
       then performs an optimal filter using the cantilever drive and 
       a theoretical force vs position to generate the filter/template.
       The result of the optimal filtering is stored, and the data 
       released from memory

       INPUTS: files, list of files names to extract data
               cantind, cantilever electrode index
               ax1, axis with different DC positions
               ax2, 2nd axis with different DC positions

       OUTPUTS: 
    '''

    #parts = data_dir.split('/')
    #prefix = parts[-1]
    #savepath = '/processed_data/grav_data/' + prefix + '_fildat.p'
    #try:
    #    fildat = pickle.load(open(savepath, 'rb'))
    #    return fildat
    #except:
    #    print 'Loading data from: ', data_dir

    fildat = {}
    temp_gdat = {}
    for fil_ind, fil in enumerate(files):
        bu.progress_bar(fil_ind, len(files), suffix=' Sorting Files, Extracting Data')

        ### Load data
        df = bu.DataFile()
        df.load(fil)

        df.calibrate_stage_position()
    
        cantbias = df.electrode_settings['dc_settings'][0]
        ax1pos = df.stage_settings[ax1 + ' DC']
        ax2pos = df.stage_settings[ax2 + ' DC']
        
        if cantbias not in list(fildat.keys()):
            fildat[cantbias] = {}
        if ax1pos not in list(fildat[cantbias].keys()):
            fildat[cantbias][ax1pos] = {}
        if ax2pos not in list(fildat[cantbias][ax1pos].keys()):
            fildat[cantbias][ax1pos][ax2pos] = []

        if ax1pos not in list(temp_gdat.keys()):
            temp_gdat[ax1pos] = {}
        if ax2pos not in list(temp_gdat[ax1pos].keys()):
            temp_gdat[ax1pos][ax2pos] = [[], []]
            temp_gdat[ax1pos][ax2pos][1] = [[]] * len(lambdas)

        cfind = len(fildat[cantbias][ax1pos][ax2pos])
        fildat[cantbias][ax1pos][ax2pos].append([])

        if fil_ind == 0 and plottf:
            df.diagonalize(date=tfdate, maxfreq=tophatf, plot=True)
        else:
            df.diagonalize(date=tfdate, maxfreq=tophatf)

        if fil_ind == 0:
            ginds, fund_ind, drive_freq, drive_ind = \
                df.get_boolean_cantfilt(ext_cant_drive=ext_cant_drive, ext_cant_ind=ext_cant_ind, \
                                        nharmonics=nharmonics, harms=harms, width=width)

        datffts, diagdatffts, daterrs, diagdaterrs = \
                    df.get_datffts_and_errs(ginds, drive_freq, noiseband=noiseband, plot=False, \
                                            diag=diag)

        drivevec = df.cant_data[drive_ind]
        
        mindrive = np.min(drivevec)
        maxdrive = np.max(drivevec)

        posvec = np.linspace(mindrive, maxdrive, 500)
        ones = np.ones_like(posvec)

        start = time.time()
        for lambind, yuklambda in enumerate(lambdas):

            if ax1 == 'x' and ax2 == 'z':
                newxpos = minsep + (maxthrow - ax1pos)
                newheight = ax2pos - beadheight
            elif ax1 =='z' and ax2 == 'x':
                newxpos = minsep + (maxthrow - ax2pos)
                newheight = ax1pos - beadheight
            else:
                print("Coordinate axes don't make sense for gravity data...")
                print("Proceeding anyway, but results might be hard to interpret")
                newxpos = ax1pos
                newheight = ax2pos

            if (newxpos < lims[0][0]*1e6) or (newxpos > lims[0][1]*1e6):
                #print 'skipped x'
                continue

            if (newheight < lims[2][0]*1e6) or (newheight > lims[2][1]*1e6):
                #print 'skipped z'
                continue

            pts = np.stack((newxpos*ones, posvec, newheight*ones), axis=-1)

            gfft = [[], [], []]
            yukfft = [[], [], []]
            for resp in [0,1,2]:
                if (ignoreX and resp == 0) or (ignoreY and resp == 1) or (ignoreZ and resp == 2):
                    gfft[resp] = np.zeros(np.sum(ginds))
                    yukfft[resp] = np.zeros(np.sum(ginds))
                    continue

                if len(temp_gdat[ax1pos][ax2pos][0]):
                    gfft[resp] = temp_gdat[ax1pos][ax2pos][0][resp]
                else:
                    gforcevec = gfuncs[resp](pts*1e-6)
                    gforcefunc = interp.interp1d(posvec, gforcevec)
                    gforcet = gforcefunc(drivevec)

                    gfft[resp] =  np.fft.rfft(gforcet)[ginds]

                if len(temp_gdat[ax1pos][ax2pos][1][lambind]):
                    yukfft[resp] = temp_gdat[ax1pos][ax2pos][1][lambind][resp]
                else:
                    yukforcevec = yukfuncs[resp][lambind](pts*1e-6)
                    yukforcefunc = interp.interp1d(posvec, yukforcevec)
                    yukforcet = yukforcefunc(drivevec)

                    yukfft[resp] = np.fft.rfft(yukforcet)[ginds]

            gfft = np.array(gfft)
            yukfft = np.array(yukfft)

            temp_gdat[ax1pos][ax2pos][0] = gfft
            temp_gdat[ax1pos][ax2pos][1][lambind] = yukfft

            outdat = (yuklambda, datffts, diagdatffts, daterrs, diagdaterrs, gfft, yukfft)

            fildat[cantbias][ax1pos][ax2pos][cfind].append(outdat)

        stop = time.time()
        #print 'func eval time: ', stop-start

    return fildat
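Inside the lambda loop, each template follows one pattern: evaluate the force model along the measured drive trajectory, FFT it, and keep only the drive-harmonic bins. A minimal sketch of that pattern (hypothetical force_func; assumes ginds is a boolean mask over rfft bins, as above):

import numpy as np
from scipy import interpolate as interp

def template_fft(force_func, posvec, drivevec, ginds):
    """FFT of the modeled force evaluated along the drive, at harmonic bins."""
    force_on_grid = force_func(posvec)            # model on a coarse position grid
    force_vs_time = interp.interp1d(posvec, force_on_grid)(drivevec)
    return np.fft.rfft(force_vs_time)[ginds]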
def get_alpha_lambda(fildat, diag=True, ignoreX=False, ignoreY=False, ignoreZ=False, \
                     plot=True, save=False, savepath='', confidence_level=0.95, \
                     only_closest=False, ax1='x', ax2='z', lamb_range=(1e-9, 1e-2)):
    '''Loops over a list of file names, loads each file, diagonalizes,
       then performs an optimal filter using the cantilever drive and 
       a theoretical force vs position to generate the filter/template.
       The result of the optimal filtering is stored, and the data 
       released from memory

       INPUTS: fildat

       OUTPUTS: 
    '''

    # For the confidence interval, compute the inverse CDF of a
    # chi^2 distribution at the given confidence level and compare to the
    # likelihood ratio via a goodness-of-fit parameter.
    # Refer to the scipy.stats documentation to understand chi2
    chi2dist = stats.chi2(1)
    # factor of 0.5 from Wilks' theorem: -2 log(likelihood) ~ chi^2(1)
    con_val = 0.5 * chi2dist.ppf(confidence_level)

    colors = bu.get_color_map(len(lambdas))

    alphas = np.zeros_like(lambdas)
    diagalphas = np.zeros_like(lambdas)
    testalphas = np.linspace(-10**10, 10**10, 11)

    minalphas = [[]] * len(lambdas)

    biasvec = list(fildat.keys())
    biasvec.sort()
    ax1posvec = list(fildat[biasvec[0]].keys())
    ax1posvec.sort()
    ax2posvec = list(fildat[biasvec[0]][ax1posvec[0]].keys())
    ax2posvec.sort()

    if only_closest:
        if ax1 == 'x' and ax2 == 'z':
            seps = minsep + (maxthrow - np.array(ax1posvec))
            heights = np.array(ax2posvec) - beadheight
            sind = np.argmin(seps)
            hind = np.argmin(np.abs(heights - beadheight))
            ax1posvec = [ax1posvec[sind]]
            ax2posvec = [ax2posvec[hind]]

        elif ax1 =='z' and ax2 == 'x':
            seps = minsep + (maxthrow - np.array(ax2posvec))
            heights = np.array(ax1posvec) - beadheight
            sind = np.argmin(seps)
            hind = np.argmin(np.abs(heights - beadheight))
            ax1posvec = [ax1posvec[hind]]
            ax2posvec = [ax2posvec[sind]]
        

    newlamb = lambdas[(lambdas > lamb_range[0]) * (lambdas < lamb_range[-1])]
    tot_iterations = len(biasvec) * len(ax1posvec) * len(ax2posvec) * \
                         len(newlamb) * len(testalphas) + 1
    i = -1

    # Random draws used only by the commented-out "fake"-data chi2 test below
    rands = np.random.randn(*fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][0][1].shape)
    rands2 = np.random.randn(*fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][0][1].shape)

    for lambind, yuklambda in enumerate(lambdas):
        #if lambind != 48:
        #    continue

        if (yuklambda < lamb_range[0]) or (yuklambda > lamb_range[1]):
            continue

        test = fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][lambind]
        test_yukdat = test[-1]
        test_dat = test[1]

        newalpha = 1e-4 * np.sqrt(np.mean(np.abs(test_dat) / np.abs(test_yukdat)))
        testalphas = np.linspace(-1.0*newalpha, newalpha, 21)

        chi_sqs = np.zeros(len(testalphas))
        diagchi_sqs = np.zeros(len(testalphas))

        for alphaind, testalpha in enumerate(testalphas):
            N = 0
            chi_sq = 0
            diagchi_sq = 0

            for bias, ax1pos, ax2pos in itertools.product(biasvec, ax1posvec, ax2posvec):
                i += 1
                bu.progress_bar(i, tot_iterations, suffix=' Fitting the Data for Chi^2')

                for fil_ind in range(len(fildat[bias][ax1pos][ax2pos])):
                    dat = fildat[bias][ax1pos][ax2pos][fil_ind][lambind]
                    assert dat[0] == yuklambda
                    _, datfft, diagdatfft, daterr, diagdaterr, gfft, yukfft = dat

                    # To test chi2 fit against "fake" data, uncomment these lines
                    #datfft = yukfft * -0.5e9
                    #datfft += (1.0 / np.sqrt(2)) * daterr * rands + \
                    #          (1.0 / np.sqrt(2)) * daterr * rands2 * 1.0j
                    #gfft = np.zeros_like(datfft)

                    for resp in [0,1,2]:
                        if (ignoreX and resp == 0) or \
                           (ignoreY and resp == 1) or \
                           (ignoreZ and resp == 2):
                            print(ignoreX, ignoreY, ignoreZ, resp)
                            continue
                        re_diff = datfft[resp].real - \
                                  (gfft[resp].real + testalpha * yukfft[resp].real )
                        im_diff = datfft[resp].imag - \
                                  (gfft[resp].imag + testalpha * yukfft[resp].imag )
                        if diag:
                            diag_re_diff = diagdatfft[resp].real - \
                                           (gfft[resp].real + testalpha * yukfft[resp].real )
                            diag_im_diff = diagdatfft[resp].imag - \
                                           (gfft[resp].imag + testalpha * yukfft[resp].imag )

                        #plt.plot(np.abs(re_diff))
                        #plt.plot(daterr[resp])
                        #plt.show()

                        chi_sq += ( np.sum( np.abs(re_diff)**2 / (0.5*daterr[resp]**2) ) + \
                                  np.sum( np.abs(im_diff)**2 / (0.5*daterr[resp]**2) ) )
                        if diag:
                            diagchi_sq += ( np.sum( np.abs(diag_re_diff)**2 / \
                                                    (0.5*diagdaterr[resp]**2) ) + \
                                            np.sum( np.abs(diag_im_diff)**2 / \
                                                    (0.5*diagdaterr[resp]**2) ) )

                        N += len(re_diff) + len(im_diff)

            chi_sqs[alphaind] = chi_sq / (N - 1)
            if diag:
                diagchi_sqs[alphaind] = diagchi_sq / (N - 1)

        max_chi = np.max(chi_sqs)
        if diag:
            max_diagchi = np.max(diagchi_sqs)

        max_alpha = np.max(testalphas)

        p0 = [max_chi/max_alpha**2, 0, 1]
        if diag:
            diag_p0 = [max_diagchi/max_alpha**2, 0, 1]

        #if lambind == 0:
        #    p0 = [0.15e9, 0, 5]
        #else:
        #    p0 = p0_old

        if plot:
            plt.figure(1)
            plt.plot(testalphas, chi_sqs, color = colors[lambind])
            if diag:
                plt.figure(2)
                plt.plot(testalphas, diagchi_sqs, color = colors[lambind])
    
        try:
            popt, pcov = opti.curve_fit(parabola, testalphas, chi_sqs, \
                                            p0=p0, maxfev=100000)
            if diag:
                diagpopt, diagpcov = opti.curve_fit(parabola, testalphas, diagchi_sqs, \
                                                    p0=diag_p0, maxfev=1000000)
        except Exception:
            print("Couldn't fit")
            popt = [0, 0, np.mean(chi_sqs)]
            if diag:
                diagpopt = [0, 0, np.mean(diagchi_sqs)]

        regular_con_val = con_val + np.min(chi_sqs)
        if diag:
            diag_con_val = con_val + np.min(diagchi_sqs)

        # Find where the fitted chi^2 parabola crosses the confidence threshold
        soln1 = ( -1.0 * popt[1] + np.sqrt( popt[1]**2 - \
                        4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])
        soln2 = ( -1.0 * popt[1] - np.sqrt( popt[1]**2 - \
                        4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])

        if diag:
            diagsoln1 = ( -1.0 * diagpopt[1] + np.sqrt( diagpopt[1]**2 - \
                            4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])
            diagsoln2 = ( -1.0 * diagpopt[1] - np.sqrt( diagpopt[1]**2 - \
                            4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])

        if soln1 > soln2:
            alpha_con = soln1
        else:
            alpha_con = soln2

        if diag:
            if diagsoln1 > diagsoln2:
                diagalpha_con = diagsoln1
            else:
                diagalpha_con = diagsoln2

        alphas[lambind] = alpha_con
        if diag:
            diagalphas[lambind] = diagalpha_con


    if plot:
        plt.figure(1)
        plt.title('Goodness of Fit for Various Lambda', fontsize=16)
        plt.xlabel('Alpha Parameter [arb]', fontsize=14)
        plt.ylabel('$\\chi^2$', fontsize=18)

        if diag:
            plt.figure(2)
            plt.title('Goodness of Fit for Various Lambda - DIAG', fontsize=16)
            plt.xlabel('Alpha Parameter [arb]', fontsize=14)
            plt.ylabel('$\\chi^2$', fontsize=18)

        plt.show()

    if not diag:
        diagalphas = np.zeros_like(alphas)

    if save:
        if savepath == '':
            print('No save path given, type full path here')
            savepath = input('path: ')
        
        np.save(savepath, [lambdas, alphas, diagalphas])


    return lambdas, alphas, diagalphas
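Taken together, the intended pipeline appears to be: extract per-file FFTs and force templates once, then scan alpha at each lambda. A hedged usage sketch (the harmonic choice is hypothetical; gfuncs, yukfuncs, lambdas, and lims are assumed to come from the theory grid):

fildat = get_data_at_harms(files, gfuncs, yukfuncs, lambdas, lims, \
                           ax1='x', ax2='z', harms=[2, 3, 4])
lambdas_out, alphas, diagalphas = get_alpha_lambda(fildat, diag=True, \
                                                   confidence_level=0.95)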
Example #10
    def find_mean_alpha_vs_position(self, ignoreXYZ=(0, 0, 0)):

        print('Finding alpha vs. height/separation...')

        if not self.grav_loaded:
            print("FAILED: Must load thoery data first!")
            return

        alpha_dict = {}
        for bias in list(self.agg_dict.keys()):
            alpha_dict[bias] = {}
            for ax1key in self.ax1vec:
                alpha_dict[bias][ax1key] = {}
                for ax2key in self.ax2vec:
                    alpha_dict[bias][ax1key][ax2key] = []

        i = 0
        totlen = len(list(self.agg_dict.keys())) * len(self.ax1vec) * len(
            self.ax2vec)
        for bias, ax1, ax2 in itertools.product(list(self.agg_dict.keys()),
                                                self.ax1vec, self.ax2vec):
            i += 1
            suff = '%i / %i position combinations' % (i, totlen)
            newline = False
            if i == totlen:
                newline = True

            objs = self.agg_dict[bias][ax1][ax2]

            nfiles = len(objs)
            filfac = 1.0 / float(nfiles)

            xpos = 0.0
            height = 0.0

            ### Initialize average arrays
            drivevec_avg = np.zeros_like(objs[0].rebuild_drive())
            posvec_avg = np.zeros_like(objs[0].posvec)
            datfft_avg = np.zeros_like(objs[0].datffts)
            daterr_avg = np.zeros_like(objs[0].daterrs)
            binned_avg = np.zeros_like(objs[0].binned)
            old_ginds = []

            # Average over integrations at the same position
            for obj in objs:
                xpos += filfac * (self.p0_bead[0] + (80 - obj.ax1pos))
                height += filfac * (obj.ax2pos - self.p0_bead[2])

                if not len(old_ginds):
                    old_ginds = obj.ginds
                np.testing.assert_array_equal(
                    obj.ginds,
                    old_ginds,
                    err_msg='notch filter changes between files...')
                old_ginds = obj.ginds

                drivevec = obj.rebuild_drive()
                drivevec_avg += filfac * drivevec

                posvec_avg += filfac * obj.posvec
                datfft_avg += filfac * obj.datffts
                daterr_avg += filfac * obj.daterrs
                binned_avg += filfac * obj.binned

            full_ones = np.ones_like(drivevec_avg)
            full_pts = np.stack(
                (xpos * full_ones, drivevec_avg, height * full_ones), axis=-1)

            ones = np.ones_like(posvec_avg)
            pts = np.stack((xpos * ones, posvec_avg, height * ones), axis=-1)

            ## Include normal gravity in fit. But why???
            gfft = [[], [], []]
            for resp in [0, 1, 2]:
                if ignoreXYZ[resp]:
                    gfft[resp] = np.zeros(np.sum(old_ginds))
                    continue
                gforcet = self.gfuncs[resp](full_pts * 1.0e-6)
                gfft[resp] = np.fft.rfft(gforcet)[old_ginds]
            gfft = np.array(gfft)

            best_fit_alphas = np.zeros(len(self.lambdas))
            best_fit_errs = np.zeros(len(self.lambdas))

            ## Loop over lambdas and

            for lambind, yuklambda in enumerate(self.lambdas):
                bu.progress_bar(lambind,
                                len(self.lambdas),
                                suffix=suff,
                                newline=newline)

                yukfft = [[], [], []]
                start_yuk2 = time.time()
                for resp in [0, 1, 2]:
                    if ignoreXYZ[resp]:
                        yukfft[resp] = np.zeros(np.sum(old_ginds))
                        continue
                    yukforce = self.yukfuncs[resp][lambind](pts * 1.0e-6)
                    yukforce_func = interp.interp1d(posvec_avg, yukforce)

                    yukforcet = yukforce_func(drivevec_avg)
                    yukfft[resp] = np.fft.rfft(yukforcet)[old_ginds]
                yukfft = np.array(yukfft)

                newalpha = 2.0 * np.mean(np.abs(datfft_avg[0])) / np.mean(
                    np.abs(yukfft[0])) * 1.5 * 10**(-1)
                testalphas = np.linspace(-1.0 * newalpha, newalpha, 51)

                chi_sq_dat = get_chi2_vs_param_complex(datfft_avg, daterr_avg,
                                                       ignoreXYZ, yukfft,
                                                       testalphas)
                chi_sqs = chi_sq_dat['red_chi_sqs']
                fit_result = fit_parabola_to_chi2(testalphas, chi_sqs)

                best_fit_alphas[lambind] = fit_result['best_fit_param']
                best_fit_errs[lambind] = fit_result['param95']

            alpha_dict[bias][ax1][ax2] = [best_fit_alphas, best_fit_errs]

        print('Done!')
        self.alpha_dict = alpha_dict
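get_chi2_vs_param_complex is not shown on this page; based on the inline version in get_alpha_vs_file above, a hedged sketch of the scan it presumably performs (treating real and imaginary FFT parts as independent Gaussian samples):

import numpy as np

def chi2_vs_param(datfft, daterr, template_fft, test_params):
    """Reduced chi^2 of (data - param*template) over complex FFT bins."""
    red_chi_sqs = np.zeros(len(test_params))
    ndof = 2 * datfft.size - 1                    # real + imaginary samples
    for i, param in enumerate(test_params):
        resid = datfft - param * template_fft
        chi2 = np.sum(resid.real**2 / (0.5 * daterr**2)) + \
               np.sum(resid.imag**2 / (0.5 * daterr**2))
        red_chi_sqs[i] = chi2 / ndof
    return red_chi_sqs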
def plot_many_spectra(files, data_axes=[0,1,2], colormap='jet', \
                      sort='time', plot_freqs=(0.0,1000000.0), labels=[]):
    '''Loops over a list of file names, loads each file,
       then plots the amplitude spectral density of any number 
       of data channels

       INPUTS: files, list of files names to extract data
               data_axes, list of pos_data axes to plot

       OUTPUTS: none, plots stuff
    '''



    dfig, daxarr = plt.subplots(len(data_axes),sharex=True,sharey=False, \
                                figsize=(8,8))
    if len(data_axes) == 1:
        daxarr = [daxarr]

    colors = bu.get_color_map(len(files), cmap=colormap)
    #colors = ['C0', 'C1', 'C2']

    if track_feature:
        times = []
        feature_locs = []

    old_per = 0
    print("Processing %i files..." % len(files))
    for fil_ind, fil in enumerate(files):
        print(fil)

        color = colors[fil_ind]

        # Display percent completion
        bu.progress_bar(fil_ind, len(files))

        # Load data
        obj = bu.hsDat(fil, load=True)

        #plt.figure()
        #plt.plot(df.pos_data[0])
        #plt.show()

        fsamp = obj.attribs['fsamp']
        nsamp = obj.attribs['nsamp']
        t = obj.attribs['time']

        freqs = np.fft.rfftfreq(nsamp, d=1.0 / fsamp)

        if waterfall:
            fac = waterfall_fac**fil_ind
        else:
            fac = 1.0

        if not fullNFFT:
            NFFT = userNFFT
        else:
            NFFT = nsamp

        for axind, ax in enumerate(data_axes):
            # if fullNFFT:
            #     NFFT = len(df.pos_data[ax])
            # else:
            #     NFFT = userNFFT

            # asd = np.abs(np.fft.rfft(obj.dat[:,axind]))

            psd, freqs = mlab.psd(obj.dat[:,axind], Fs=obj.attribs['fsamp'], \
                                    NFFT=NFFT, window=window)
            asd = np.sqrt(psd)

            plot_inds = (freqs > plot_freqs[0]) * (freqs < plot_freqs[1])

            if len(labels):
                daxarr[axind].loglog(freqs[plot_inds], asd[plot_inds]*fac, \
                                     label=labels[fil_ind], color=colors[fil_ind])
            else:
                daxarr[axind].loglog(freqs[plot_inds], asd[plot_inds]*fac, \
                                     color=colors[fil_ind])

            daxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$')
            if ax == data_axes[-1]:
                daxarr[axind].set_xlabel('Frequency [Hz]')

    if len(axes_labels):
        for labelind, label in enumerate(axes_labels):
            daxarr[labelind].set_title(label)

    if len(labels):
        daxarr[0].legend(fontsize=10)
    if len(xlim):
        daxarr[0].set_xlim(xlim[0], xlim[1])
    if len(ylim):
        daxarr[0].set_ylim(ylim[0], ylim[1])
    plt.tight_layout()

    if savefig:
        dfig.savefig(fig_savename)

    plt.show()
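Throughout these examples the ASD is the square root of a Welch-averaged PSD. A minimal standalone version of that step (assuming matplotlib's default Hann window):

import numpy as np
from matplotlib import mlab

def amplitude_spectral_density(x, fsamp, nfft=None):
    """ASD in units of [x]/sqrt(Hz), from mlab's Welch-averaged PSD."""
    psd, freqs = mlab.psd(x, Fs=fsamp, NFFT=nfft or len(x))
    return np.sqrt(psd), freqs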
Example #12
    plot_x = np.linspace(all_time_flat[0],
                         all_time_flat[int(np.sum(fit_inds) - 1)], 500)

    last_ind = np.sum(times < exp_fit_end_time) + 1

    time_many = []
    tau_many = []
    tau_upper = []
    tau_lower = []
    for i in range(ndat):
        t_mean = np.mean(all_time[i])
        t_last = np.max(all_time[i]) + 0.25
        if i > last_ind:
            break
        bu.progress_bar(i, last_ind + 1)

        fit_inds = all_time_flat < t_last
        derp_inds = all_time_flat < fit_end_time

        npts = np.sum(fit_inds)
        xdat = all_time_flat[fit_inds]
        ydat = all_freq_flat[fit_inds]
        yerr = all_freq_err_flat[fit_inds]

        #yerr = np.sqrt(ydat)
        #yerr = np.random.randn(npts) * np.std(yerr) + np.mean(yerr)


        def chisquare_1d(f0, tau, fopt):
            resid = ydat - exp_decay(xdat, f0, tau, fopt)
            # Assumed completion (snippet truncated here): standard chi^2
            return np.sum(resid**2 / yerr**2)
Example #13
def plot_spectra_3d(files, ax_to_plot=0, diag=False, colormap='plasma'):
    '''Makes a cool 3d plot since waterfalls/cascaded plots end up kind
       of being f****d up.
    '''

    res_freqs = []
    powers = []

    fig = plt.figure(figsize=(7, 5))
    ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in newer matplotlib

    # Squash the z-axis of the 3D projection to 60% of its default height
    ax.get_proj = lambda: np.dot(Axes3D.get_proj(ax), np.diag([1, 1, 0.6, 1]))

    # fig.suptitle('XYZ Data', fontsize=18)


    colors = bu.get_color_map(len(files_to_plot), cmap=colormap)
    i = 0
    #colors = ['C0', 'C1', 'C2']

    print("Processing %i files..." % len(files))
    for fil_ind, fil in enumerate(files):

        # Display percent completion
        bu.progress_bar(fil_ind, len(files))

        # Load data
        df = bu.DataFile()
        if new_trap:
            df.load_new(fil)
        else:
            df.load(fil)

        df.calibrate_stage_position()

        if diag:
            df.diagonalize(maxfreq=lpf, date=tfdate, plot=tf_plot)

        try:
            fac = df.conv_facs[ax_to_plot]  # * (1.0 / 0.12e-12)
        except:
            fac = 1.0

        if fullNFFT:
            NFFT = len(df.pos_data[ax_to_plot])
        else:
            NFFT = userNFFT

        if diag:
            psd, freqs = mlab.psd(df.diag_pos_data[ax_to_plot], Fs=df.fsamp, \
                                    NFFT=NFFT, window=window)
        else:
            psd, freqs = mlab.psd(df.pos_data[ax_to_plot], Fs=df.fsamp, \
                                  NFFT=NFFT, window=window)

        inds = (freqs > ylim[0]) * (freqs < ylim[1]) * (
            np.sqrt(psd) > zlim[0] * fac_for_resfreq)

        freqs = freqs[inds]
        psd = psd[inds]

        norm = bu.fft_norm(df.nsamp, df.fsamp)
        new_freqs = np.fft.rfftfreq(df.nsamp, d=1.0 / df.fsamp)

        xs = np.zeros_like(freqs) + fil_ind

        if fil_ind in files_to_plot:
            popt, pcov = bu.fit_damped_osc_amp(df.pos_data[ax_to_plot], fit_band=[10, 2000], \
                                                fsamp=df.fsamp, plot=False)
            res_freqs.append(popt[1])

            color = colors[i]
            i += 1
            ax.plot(xs, np.log10(freqs), np.log10(np.sqrt(psd)), color=color)

    zlim_actual = (zlim[0] * fac_for_resfreq, zlim[1])

    x = np.arange(len(res_freqs))
    interpfunc = interpolate.UnivariateSpline(x, res_freqs, k=2)

    ax.scatter(x, np.log10(res_freqs), zs=np.log10(zlim_actual[0]), \
                zdir='z', s=25, c=colors, alpha=1)
    ax.plot(x, np.log10(interpfunc(x)), zs=np.log10(zlim_actual[0]), \
            zdir='z', lw=2, color='k', zorder=1)

    # ax.grid()

    if ylim:
        ax.set_ylim(np.log10(ylim[0]), np.log10(ylim[1]))

    if zlim:
        ax.set_zlim(np.log10(zlim_actual[0]), np.log10(zlim_actual[1]))

    ax.set_xticks([])

    ax.set_yticks(np.log10(yticks))
    ax.set_yticklabels(yticks)

    ax.set_zticks(np.log10(zticks))
    ax.set_zticklabels(zticklabels)

    # ax.ticklabel_format(axis='z', style='sci')

    ax.set_xlabel('Closer to Focus $\\rightarrow$', labelpad=0)
    ax.set_ylabel('Frequency [Hz]', labelpad=20)
    ax.set_zlabel('ASD [Arb/$\\sqrt{\\rm Hz}$]', labelpad=15)

    # if xlim:
    #     ax.set_xlim(*xlim)

    # if ylim:
    #     ax.set_ylim(*ylim)

    # if zlim:
    #     ax.set_zlim(*zlim)

    # fig.tight_layout()

    ax.view_init(elev=15, azim=-15)

    fig.tight_layout()

    fig.subplots_adjust(top=1.35, left=-0.07, right=0.95, bottom=-0.05)
    plt.show()
Example #14
def plot_many_spectra(files, data_axes=[0,1,2], cant_axes=[], elec_axes=[], other_axes=[], \
                      fb_axes=[], plot_power=False, diag=True, colormap='plasma', \
                      sort='time', file_inds=(0,10000)):
    '''Loops over a list of file names, loads each file, diagonalizes,
       then plots the amplitude spectral density of any number of data
       or cantilever/electrode drive signals

       INPUTS: files, list of files names to extract data
               data_axes, list of pos_data axes to plot
               cant_axes, list of cant_data axes to plot
               elec_axes, list of electrode_data axes to plot
               diag, boolean specifying whether to diagonalize

       OUTPUTS: none, plots stuff
    '''

    if diag:
        dfig, daxarr = plt.subplots(len(data_axes),2,sharex=True,sharey=True, \
                                    figsize=figsize)
    else:
        dfig, daxarr = plt.subplots(len(data_axes),1,sharex=True,sharey=True, \
                                    figsize=figsize)
    dfig.suptitle('XYZ Data', fontsize=18)

    if len(cant_axes):
        cfig, caxarr = plt.subplots(len(data_axes),
                                    1,
                                    sharex=True,
                                    sharey=True)
        if len(cant_axes) == 1:
            caxarr = [caxarr]
        cfig.suptitle('Attractor Data', fontsize=18)
    if len(elec_axes):
        efig, eaxarr = plt.subplots(len(elec_axes),
                                    1,
                                    sharex=True,
                                    sharey=True)
        if len(elec_axes) == 1:
            eaxarr = [eaxarr]
        efig.suptitle('Electrode Data', fontsize=18)
    if len(other_axes):
        ofig, oaxarr = plt.subplots(len(other_axes),
                                    1,
                                    sharex=True,
                                    sharey=True)
        if len(other_axes) == 1:
            oaxarr = [oaxarr]
        ofig.suptitle('Other Data', fontsize=18)
    if len(fb_axes):
        fbfig, fbaxarr = plt.subplots(len(fb_axes),1,sharex=True,sharey=True, \
                                    figsize=figsize)
        if len(fb_axes) == 1:
            fbaxarr = [fbaxarr]
        fbfig.suptitle('Feedback Data', fontsize=18)
    if plot_power:
        pfig, paxarr = plt.subplots(2, 1, sharex=True, figsize=(6, 6))
        pfig.suptitle('Power/Power Feedback Data', fontsize=18)

    kludge_fig, kludge_ax = plt.subplots(1, 1)

    files = files[file_inds[0]:file_inds[1]]
    if step10:
        files = files[::10]
    if invert_order:
        files = files[::-1]

    colors = bu.get_color_map(len(files), cmap=colormap)
    #colors = ['C0', 'C1', 'C2']

    old_per = 0
    print("Processing %i files..." % len(files))
    for fil_ind, fil in enumerate(files):
        color = colors[fil_ind]

        # Display percent completion
        bu.progress_bar(fil_ind, len(files))

        # Load data
        df = bu.DataFile()
        if new_trap:
            df.load_new(fil)
        else:
            df.load(fil)

        if len(other_axes):
            df.load_other_data()

        df.calibrate_stage_position()

        #df.high_pass_filter(fc=1)
        #df.detrend_poly()

        #plt.figure()
        #plt.plot(df.pos_data[0])
        #plt.show()

        if cascade:
            cascade_scale = (cascade_fac)**fil_ind
        else:
            cascade_scale = 1.0

        freqs = np.fft.rfftfreq(len(df.pos_data[0]), d=1.0 / df.fsamp)

        if diag:
            df.diagonalize(maxfreq=lpf, date=tfdate, plot=tf_plot)

        if fil_ind == 0 and len(cant_axes):
            drivepsd = np.abs(np.fft.rfft(df.cant_data[drive_ax]))
            driveind = np.argmax(drivepsd[1:]) + 1
            drive_freq = freqs[driveind]

        for axind, ax in enumerate(data_axes):

            try:
                fac = cascade_scale * df.conv_facs[ax]  # * (1.0 / 0.12e-12)
            except:
                fac = cascade_scale

            if fullNFFT:
                NFFT = len(df.pos_data[ax])
            else:
                NFFT = userNFFT

            psd, freqs = mlab.psd(df.pos_data[ax], Fs=df.fsamp, \
                                  NFFT=NFFT, window=window)

            norm = bu.fft_norm(df.nsamp, df.fsamp)
            new_freqs = np.fft.rfftfreq(df.nsamp, d=1.0 / df.fsamp)
            #fac = 1.0

            kludge_fac = 1.0
            #kludge_fac = 1.0 / np.sqrt(10)
            if diag:
                dpsd, dfreqs = mlab.psd(df.diag_pos_data[ax], Fs=df.fsamp, \
                                        NFFT=NFFT, window=window)
                kludge_ax.loglog(freqs, np.sqrt(dpsd) *kludge_fac, color='C'+str(axind), \
                                    label=posdic[axind])
                kludge_ax.set_ylabel(
                    '$\\sqrt{\\mathrm{PSD}}$ $[\\mathrm{N}/\\sqrt{\\mathrm{Hz}}]$')
                kludge_ax.set_xlabel('Frequency [Hz]')

                # daxarr[axind,0].loglog(new_freqs, fac*norm*np.abs(np.fft.rfft(df.pos_data[ax]))*kludge_fac, color='k', label='np.fft with manual normalization')
                daxarr[axind, 0].loglog(freqs,
                                        np.sqrt(psd) * fac * kludge_fac,
                                        color=color,
                                        label=df.fname)  #'mlab.psd')
                daxarr[axind, 0].grid(alpha=0.5)
                daxarr[axind, 1].loglog(
                    new_freqs,
                    norm * np.abs(np.fft.rfft(df.diag_pos_data[ax])) *
                    kludge_fac,
                    color='k')
                daxarr[axind, 1].loglog(freqs,
                                        np.sqrt(dpsd) * kludge_fac,
                                        color=color)
                daxarr[axind, 1].grid(alpha=0.5)
                daxarr[axind, 0].set_ylabel(
                    '$\\sqrt{\\mathrm{PSD}}$ $[\\mathrm{N}/\\sqrt{\\mathrm{Hz}}]$')
                if ax == data_axes[-1]:
                    daxarr[axind, 0].set_xlabel('Frequency [Hz]')
                    daxarr[axind, 1].set_xlabel('Frequency [Hz]')
            else:
                # daxarr[axind].loglog(new_freqs, norm*np.abs(np.fft.rfft(df.pos_data[ax])), color='k', label='np.fft with manual normalization')
                daxarr[axind].loglog(freqs,
                                     np.sqrt(psd) * fac,
                                     color=color,
                                     label=df.fname)  #'mlab.psd')
                daxarr[axind].grid(alpha=0.5)
                daxarr[axind].set_ylabel(
                    '$\\sqrt{\\mathrm{PSD}}$ $[\\mathrm{Arb}/\\sqrt{\\mathrm{Hz}}]$'
                )
                #daxarr[axind].set_ylabel('$\sqrt{\mathrm{PSD}}$ $[\mathrm{N}/\sqrt{\mathrm{Hz}}]$')

                if ax == data_axes[-1]:
                    daxarr[axind].set_xlabel('Frequency [Hz]')

        if len(fb_axes):
            for axind, ax in enumerate(fb_axes):
                fb_psd, freqs = mlab.psd(df.pos_fb[ax], Fs=df.fsamp, \
                                      NFFT=NFFT, window=window)
                fbaxarr[axind].loglog(freqs,
                                      np.sqrt(fb_psd) * fac,
                                      color=color)
                fbaxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$')

        if len(cant_axes):
            for axind, ax in enumerate(cant_axes):
                psd, freqs = mlab.psd(df.cant_data[ax], Fs=df.fsamp, \
                                      NFFT=NFFT, window=window)
                caxarr[axind].loglog(freqs, np.sqrt(psd), color=color)
                caxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$')

        if len(elec_axes):
            for axind, ax in enumerate(elec_axes):
                psd, freqs = mlab.psd(df.electrode_data[ax], Fs=df.fsamp, \
                                      NFFT=NFFT, window=window)
                eaxarr[axind].loglog(freqs, np.sqrt(psd), color=color)
                eaxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$')

        if len(other_axes):
            for axind, ax in enumerate(other_axes):
                #ax = ax - 3
                psd, freqs = mlab.psd(df.other_data[ax], Fs=df.fsamp, \
                                      NFFT=NFFT, window=window)
                oaxarr[axind].loglog(freqs, np.sqrt(psd), color=color)
                oaxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$')

        if plot_power:
            psd, freqs = mlab.psd(df.power, Fs=df.fsamp, \
                                        NFFT=NFFT, window=window)
            psd_fb, freqs_fb = mlab.psd(df.power_fb, Fs=df.fsamp, \
                                        NFFT=NFFT, window=window)
            paxarr[0].loglog(freqs, np.sqrt(psd), color=color)
            paxarr[1].loglog(freqs_fb, np.sqrt(psd_fb), color=color)
            for axind in [0, 1]:
                paxarr[axind].set_ylabel('$\\sqrt{\\mathrm{PSD}}$')

    if filename_labels:
        daxarr[0].legend(fontsize=10)
    if len(fb_axes):
        fbaxarr[0].legend(fontsize=10)

    #daxarr[0].set_xlim(0.5, 25000)

    if diag:
        derp_ax = daxarr[0, 0]
    else:
        derp_ax = daxarr[0]

    # derp_ax.legend(fontsize=10)

    if len(ylim):
        derp_ax.set_ylim(*ylim)
        kludge_ax.set_ylim(*ylim)
    if len(xlim):
        derp_ax.set_xlim(*xlim)
        kludge_ax.set_xlim(1, 500)

    dfig.tight_layout()
    dfig.subplots_adjust(top=0.91)

    kludge_ax.grid()
    kludge_ax.legend()
    kludge_fig.tight_layout()

    if plot_power:
        paxarr[-1].set_xlabel('Frequency [Hz]')
        pfig.tight_layout()
        pfig.subplots_adjust(top=0.91)
    if len(cant_axes):
        caxarr[-1].set_xlabel('Frequency [Hz]')
        cfig.tight_layout()
        cfig.subplots_adjust(top=0.91)
    if len(elec_axes):
        eaxarr[-1].set_xlabel('Frequency [Hz]')
        efig.tight_layout()
        efig.subplots_adjust(top=0.91)
    if len(other_axes):
        oaxarr[-1].set_xlabel('Frequency [Hz]')
        ofig.tight_layout()
        ofig.subplots_adjust(top=0.91)
    if len(fb_axes):
        fbaxarr[-1].set_xlabel('Frequency [Hz]')
        fbfig.tight_layout()
        fbfig.subplots_adjust(top=0.91)

    if savefigs:
        plt.savefig(title_pre + '.png')

        daxarr[0].set_xlim(2000, 25000)
        plt.tight_layout()

        plt.savefig(title_pre + '_zoomhf.png')

        daxarr[0].set_xlim(1, 80)
        plt.tight_layout()

        plt.savefig(title_pre + '_zoomlf.png')

        daxarr[0].set_xlim(0.5, 25000)

    if not savefigs:
        plt.show()

files = bu.find_all_fnames(dirname)
files = bu.sort_files_by_timestamp(files)
nfiles = len(files)

avg_asd = []

for fileind, filname in enumerate(files):
    bu.progress_bar(fileind, nfiles)

    df = bu.DataFile()
    df.load(filname)

    df.diagonalize(plot=False)

    drive = df.electrode_data[elec_ind]
    resp = df.pos_data[pos_ind]
    diag_resp = df.diag_pos_data[pos_ind]

    normfac = bu.fft_norm(df.nsamp, df.fsamp)

    if len(resp) != len(drive):
        continue
def check_backscatter(files,
                      colormap='jet',
                      sort='time',
                      file_inds=(0, 10000)):
    '''Loops over a list of file names, loads each file, then compares the
       naively calibrated phase measurement to the cantilever monitor to
       check for backscatter

       INPUTS: files, list of file names to extract data

       OUTPUTS: none, plots stuff
    '''

    files = [(os.stat(path), path) for path in files]
    files = [(stat.st_ctime, path) for stat, path in files]
    files.sort(key=lambda x: (x[0]))
    files = [obj[1] for obj in files]

    files = files[file_inds[0]:file_inds[1]]
    #files = files[::10]

    date = files[0].split('/')[2]

    nfiles = len(files)

    amps = []

    print("Processing %i files..." % nfiles)
    for fil_ind, fil in enumerate(files):

        bu.progress_bar(fil_ind, nfiles)

        # Load data
        df = bu.DataFile()
        try:
            df.load(fil)
        except Exception:
            continue

        df.calibrate_stage_position()
        df.calibrate_phase()

        # Interferometric calibration: 2*pi of phase corresponds to
        # lambda/2 = 532 nm of displacement at the 1064 nm wavelength
        dz_dphi = (1064e-9 / 2.0) / (2.0 * np.pi)

        dat1 = df.zcal * dz_dphi * 1e6
        dat2 = df.cant_data[2]

        freqs = np.fft.rfftfreq(df.nsamp, d=1.0 / df.fsamp)
        fft = np.fft.rfft(dat1)
        fft_fb = np.fft.rfft(df.pos_fb[2])

        #plt.loglog(freqs, np.abs(fft))
        #plt.loglog(freqs, np.abs(fft_fb))
        #plt.show()

        times = (df.daqmx_time - df.daqmx_time[0]) * 1e-9

        plt.plot(times,
                 dat1 - np.mean(dat1),
                 label='Phase Measurement, Naive Calibration')
        plt.plot(times,
                 dat2 - np.mean(dat2),
                 label='Cantilever Monitor',
                 ls='--')
        plt.xlabel('Time [s]', fontsize=14)
        plt.ylabel('Amplitude [$\\mu$m]', fontsize=14)
        plt.legend(loc=1)
        plt.tight_layout()
        plt.show()
    def analyze_background(self, data_axes=[0,1,2], lpf=2500, \
                           diag=False, colormap='jet', \
                           file_inds=(0,10000), unwrap=False, \
                           harms_to_track = [1, 2, 3], \
                           ext_cant_drive=False, ext_cant_ind=0, \
                           plot_first_drive=False, sub_cant_phase=True, \
                           progstr=''):
        '''Loops over the class's relevant files, loads each one,
           diagonalizes, and accumulates averaged amplitude spectral
           densities together with the amplitude and phase (and errors)
           at each tracked harmonic of the cantilever drive.

           INPUTS: data_axes, list of pos_data axes to analyze
                   lpf, low-pass cutoff [Hz] passed to the diagonalization
                   diag, bool specifying whether to use diagonalized data
                   colormap, matplotlib colormap for file coloring
                   file_inds, (start, stop) indices to subselect files
                   unwrap, bool to unwrap phase of background
                   harms_to_track, drive harmonics to track
                   ext_cant_drive / ext_cant_ind, external cantilever drive
                       options passed to the boolean filter construction
                   plot_first_drive, bool to plot the first file's drive ASD
                   sub_cant_phase, bool to subtract the cantilever phase
                   progstr, suffix string for the progress bar

           OUTPUTS: none, generates class attributes
        '''

        files = bu.sort_files_by_timestamp(self.relevant_files)
        files = files[file_inds[0]:file_inds[1]]

        nfreq = len(harms_to_track)
        nax = len(data_axes)
        nfiles = len(files)

        colors = bu.get_color_map(nfiles, cmap=colormap)

        avg_asd = [[]] * nax
        diag_avg_asd = [[]] * nax
        Nasds = [[]] * nax

        amps = np.zeros((nax, nfreq, nfiles))
        amp_errs = np.zeros((nax, nfreq, nfiles))
        phases = np.zeros((nax, nfreq, nfiles))
        phase_errs = np.zeros((nax, nfreq, nfiles))

        temps = np.zeros((2, nfiles))
        times = np.zeros(nfiles)

        print("Processing %i files..." % nfiles)
        for fil_ind, fil in enumerate(files):
            color = colors[fil_ind]

            # Display percent completion
            bu.progress_bar(fil_ind, nfiles, suffix=progstr)

            # Load data
            df = bu.DataFile()
            df.load(fil)

            try:
                temps[0, fil_ind] = df.temps[0]
                temps[1, fil_ind] = df.temps[1]
            except:
                temps[:, fil_ind] = 0.0

            if fil_ind == 0:
                self.fsamp = df.fsamp
                init_time = df.time
                times[0] = 0.0
            else:
                times[fil_ind] = (df.time - init_time).total_seconds()

            df.calibrate_stage_position()

            #df.high_pass_filter(fc=1)
            #df.detrend_poly()

            df.diagonalize(maxfreq=lpf, interpolate=False)

            Nsamp = len(df.pos_data[0])

            if len(harms_to_track):
                harms = harms_to_track
            else:
                harms = [1]

            ginds, driveind, drive_freq, drive_ax = \
                        df.get_boolean_cantfilt(ext_cant_drive=ext_cant_drive, \
                                                ext_cant_ind=ext_cant_ind, \
                                                nharmonics=10, harms=harms)

            if fil_ind == 0:
                if plot_first_drive:
                    df.plot_cant_asd(drive_ax)
                freqs = np.fft.rfftfreq(Nsamp, d=1.0 / df.fsamp)
                bin_sp = freqs[1] - freqs[0]

            datfft, diagdatfft, daterr, diagdaterr = \
                         df.get_datffts_and_errs(ginds, drive_freq, plot=False)

            harm_freqs = freqs[ginds]
            for axind, ax in enumerate(data_axes):
                print(ax, df.conv_facs[ax])

                asd = np.abs( np.fft.rfft(df.pos_data[ax]) ) * \
                      bu.fft_norm(Nsamp, df.fsamp) * df.conv_facs[ax]
                diag_asd = np.abs( np.fft.rfft(df.diag_pos_data[ax]) ) * \
                          bu.fft_norm(Nsamp, df.fsamp)

                if not len(avg_asd[axind]):
                    avg_asd[axind] = asd
                    diag_avg_asd[axind] = diag_asd
                    Nasds[axind] = 1
                else:
                    avg_asd[axind] += asd
                    diag_avg_asd[axind] += diag_asd
                    Nasds[axind] += 1

                for freqind, freq in enumerate(harm_freqs):
                    phase = np.angle(datfft[axind][freqind])
                    if sub_cant_phase:
                        cantfft = np.fft.rfft(df.cant_data[drive_ax])
                        cantphase = np.angle(cantfft[driveind])
                        phases[axind][freqind][fil_ind] = phase - cantphase
                    else:
                        phases[axind][freqind][fil_ind] = phase

                    sig_re = daterr[axind][freqind] / np.sqrt(2)
                    sig_im = np.copy(sig_re)

                    im = np.imag(datfft[axind][freqind])
                    re = np.real(datfft[axind][freqind])

                    # Standard error propagation through phi = arctan2(im, re):
                    # var(phi) = (im^2 sig_re^2 + re^2 sig_im^2) / (re^2 + im^2)^2
                    phase_var = np.mean((im**2 * sig_re**2 + re**2 * sig_im**2) / \
                                        (re**2 + im**2)**2)
                    phase_errs[axind][freqind][fil_ind] = np.sqrt(phase_var)

                    # |FFT| * fft_norm is an amplitude spectral density, so
                    # multiplying by sqrt(bin_sp) integrates over one frequency
                    # bin to give an absolute amplitude
                    amps[axind][freqind][fil_ind] = np.abs(datfft[axind][freqind] * \
                                                           np.sqrt(bin_sp) * \
                                                           bu.fft_norm(Nsamp, df.fsamp))
                    amp_errs[axind][freqind][fil_ind] = daterr[axind][freqind] * \
                                                        np.sqrt(bin_sp) * \
                                                        bu.fft_norm(Nsamp, df.fsamp)

        for axind, ax in enumerate(data_axes):
            avg_asd[axind] *= (1.0 / Nasds[axind])
            diag_avg_asd[axind] *= (1.0 / Nasds[axind])

        self.freqs = freqs
        self.ginds = ginds
        self.avg_asd = avg_asd
        self.diag_avg_asd = diag_avg_asd

        self.amps = amps
        self.phases = phases
        self.amp_errs = amp_errs
        self.phase_errs = phase_errs

        self.temps = temps
        self.times = times
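
    # A hedged usage sketch for analyze_background (the enclosing class is not
    # shown in this excerpt; 'bkg' stands for an instance that defines
    # 'relevant_files'):
    #
    #     bkg.analyze_background(data_axes=[0, 1, 2], harms_to_track=[1, 2, 3])
    #     print(bkg.amps.shape)    # (n_axes, n_harmonics, n_files)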
Exemplo n.º 18
0
def weigh_bead(files,
               pcol=0,
               colormap='plasma',
               sort='time',
               file_inds=(0, 10000)):
    '''Loops over a list of file names, loads each file, extracts the
       electrostatically driven z response, fits the averaged response to a
       driven harmonic oscillator, and infers the bead mass from the known
       drive force.

       INPUTS: files, list of file names to extract data
               pcol, power-column index (unused in this body)
               colormap, matplotlib colormap for file coloring
               sort, sorting scheme (files are sorted by creation time)
               file_inds, (start, stop) indices to subselect files

       OUTPUTS: none, plots stuff
    '''

    files = [(os.stat(path), path) for path in files]
    files = [(stat.st_ctime, path) for stat, path in files]
    files.sort(key=lambda x: (x[0]))
    files = [obj[1] for obj in files]

    files = files[file_inds[0]:file_inds[1]]
    if step10:    # 'step10' and 'invert_order' are assumed module-level flags
        files = files[::10]
    if invert_order:
        files = files[::-1]

    date = re.search(r"\d{8,}", files[0])[0]
    charge_dat = np.load(
        open('/calibrations/charges/' + date + '.charge', 'rb'))
    q_bead = -1.0 * charge_dat[0] * constants.elementary_charge
    # q_bead = -25.0 * 1.602e-19

    nfiles = len(files)
    colors = bu.get_color_map(nfiles, cmap=colormap)

    avg_fft = []

    print("Processing %i files..." % nfiles)
    for fil_ind, fil in enumerate(files):
        color = colors[fil_ind]

        bu.progress_bar(fil_ind, nfiles)

        # Load data
        df = bu.DataFile()
        df.load(fil)

        df.calibrate_stage_position()

        df.calibrate_phase()

        #plt.hist( df.zcal / df.phase[4] )
        #plt.show()

        #print np.mean(df.zcal / df.phase[4]), np.std(df.zcal / df.phase[4])

        freqs = np.fft.rfftfreq(df.nsamp, d=1.0 / df.fsamp)
        fft = np.fft.rfft(df.zcal) * bu.fft_norm(df.nsamp, df.fsamp) \
              * np.sqrt(freqs[1] - freqs[0])
        fft2 = np.fft.rfft(df.phase[4]) * bu.fft_norm(df.nsamp, df.fsamp) \
              * np.sqrt(freqs[1] - freqs[0])

        fftd = np.fft.rfft(df.zcal - np.pi*df.phase[4]) * bu.fft_norm(df.nsamp, df.fsamp) \
               * np.sqrt(freqs[1] - freqs[0])

        #plt.plot(np.pi * df.phase[4])
        #plt.plot(df.zcal)

        #plt.figure()
        #plt.loglog(freqs, np.abs(fft))
        #plt.loglog(freqs, np.pi * np.abs(fft2))
        #plt.loglog(freqs, np.abs(fftd))
        #plt.show()

        drive_fft = np.fft.rfft(df.electrode_data[1])

        #plt.figure()
        #plt.loglog(freqs, np.abs(drive_fft))
        #plt.show()

        # Select bins where the drive has appreciable power, restricted to
        # the 2-300 Hz band; the same bins shifted up by 5 provide a nearby
        # noise estimate
        inds = np.abs(drive_fft) > 1e4
        inds *= (freqs > 2.0) * (freqs < 300.0)
        inds = np.arange(len(inds))[inds]

        ninds = inds + 5

        drive_amp = np.abs( drive_fft[inds][0] * bu.fft_norm(df.nsamp, df.fsamp) \
                            * np.sqrt(freqs[1] - freqs[0]) )

        if not len(avg_fft):
            avg_fft = fft
            avg_drive_fft = drive_fft

            ratio = fft[inds] / drive_fft[inds]
        else:
            avg_fft += fft
            avg_drive_fft += drive_fft

            ratio += fft[inds] / drive_fft[inds]

    fac = bu.fft_norm(df.nsamp, df.fsamp) * np.sqrt(freqs[1] - freqs[0])

    avg_fft *= (1.0 / nfiles)
    avg_drive_fft *= (1.0 / nfiles)

    # Use the file-averaged FFT accumulated above
    resp = avg_fft[inds] * (1064.0e-9 / 2.0) * (1.0 / (2.0 * np.pi))
    noise = avg_fft[ninds] * (1064.0e-9 / 2.0) * (1.0 / (2.0 * np.pi))

    drive_noise = np.abs(np.median(avg_drive_fft[ninds] * fac))

    #plt.loglog(freqs[inds], np.abs(resp))
    #plt.loglog(freqs[ninds], np.abs(noise))
    #plt.show()

    resp_sc = resp * 1e9  # put resp in nm so the fit parameters stay O(1)
    noise_sc = noise * 1e9

    def amp_sc(f, d_accel, f0, g):
        return np.abs(harmonic_osc(f, d_accel, f0, g)) * 1e9

    def phase_sc(f, d_accel, f0, g):
        return np.angle(harmonic_osc(f, d_accel, f0, g))

    #plt.loglog(freqs[inds], np.abs(resp_sc))
    #plt.loglog(freqs[inds], np.abs(harmonic_osc(freqs[inds], 1e-3, 160, 75e1))*1e9)
    #plt.show()

    #plt.loglog(freqs[inds], np.abs(resp_sc))
    #plt.loglog(freqs, amp_sc(freqs, 1e-3, 160, 750))
    #plt.show()

    popt, pcov = opti.curve_fit(amp_sc, freqs[inds], np.abs(resp_sc), sigma=np.abs(noise_sc), \
                                absolute_sigma=True, p0=[1e-3, 160, 750], maxfev=10000)
    #popt2, pcov2 = opti.curve_fit(phase_sc, freqs[inds], np.angle(resp_sc), p0=[1e-3, 160, 750])

    print(popt)
    print(pcov)

    plt.figure()
    plt.errorbar(freqs[inds],
                 np.abs(resp),
                 np.abs(noise),
                 fmt='.',
                 ms=10,
                 lw=2)
    #plt.loglog(freqs[inds], np.abs(noise))
    plt.loglog(freqs, np.abs(harmonic_osc(freqs, *popt)))
    plt.xlabel('Frequency [Hz]', fontsize=16)
    plt.ylabel('Z Amplitude [m]', fontsize=16)

    force = (drive_amp / (4.0e-3)) * q_bead

    mass = np.abs(popt[0]**(-1) * force) * 10**12
    fit_err = np.sqrt(pcov[0, 0]) / popt[0]  # relative error on the fitted acceleration
    charge_err = 0.1
    drive_err = drive_noise / drive_amp

    print(drive_err)

    mass_err = np.sqrt((fit_err)**2 + (charge_err)**2 + (drive_err)**2) * mass

    #print "IMPLIED MASS [ng]: ", mass

    print('%0.3f ng,  %0.2f e^-,  %0.1f V' % (mass, q_bead *
                                              (1.602e-19)**(-1), drive_amp))
    print('%0.6f ng' % (mass_err))
    plt.tight_layout()

    plt.show()
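
# The mass inference above in brief: the fitted d_accel is the drive
# acceleration amplitude, the drive force is F = (V_drive / d) * q_bead with
# d = 4.0e-3 presumably the electrode spacing in meters, and so
# m = F / d_accel, scaled to ng by the factor of 1e12.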
Exemplo n.º 19
0
def weigh_bead(files, colormap='jet', sort='time', file_inds=(0, 10000)):
    '''Loops over a list of file names, loads each file, fits the driven z
       response of each file to a harmonic oscillator, and tracks the implied
       mass and charge as functions of time and of mean z position.

       INPUTS: files, list of file names to extract data
               colormap, matplotlib colormap for file coloring
               sort, sorting scheme (files are sorted by creation time)
               file_inds, (start, stop) indices to subselect files

       OUTPUTS: none, plots stuff
    '''

    files = [(os.stat(path), path) for path in files]
    files = [(stat.st_ctime, path) for stat, path in files]
    files.sort(key=lambda x: (x[0]))
    files = [obj[1] for obj in files]

    #files = files[file_inds[0]:file_inds[1]]
    #files = [files[0], files[-1]]
    #files = files[::10]

    date = files[0].split('/')[2]
    charge_dat = np.load(
        open('/calibrations/charges/' + date + '.charge', 'rb'))
    #q_bead = -1.0 * charge_dat[0] * 1.602e-19
    q_bead = 25.0 * 1.602e-19

    nfiles = len(files)
    colors = bu.get_color_map(nfiles, cmap=colormap)

    avg_fft = []

    mass_arr = []
    times = []

    q_arr = []

    print("Processing %i files..." % nfiles)
    for fil_ind, fil in enumerate(files):

        date = fil.split('/')[2]
        charge_dat = np.load(
            open('/calibrations/charges/' + date + '.charge', 'rb'))
        q_bead = -1.0 * charge_dat[0] * 1.602e-19

        color = colors[fil_ind]

        bu.progress_bar(fil_ind, nfiles)

        # Load data
        df = bu.DataFile()
        try:
            df.load(fil)
        except:
            continue

        df.calibrate_stage_position()
        df.calibrate_phase()
        #df.diagonalize()

        if fil_ind == 0:
            init_phi = np.mean(df.zcal)

        #plt.hist( df.zcal / df.phase[4] )
        #plt.show()

        #print np.mean(df.zcal / df.phase[4]), np.std(df.zcal / df.phase[4])

        freqs = np.fft.rfftfreq(df.nsamp, d=1.0 / df.fsamp)
        fac = bu.fft_norm(df.nsamp, df.fsamp) * np.sqrt(freqs[1] - freqs[0])

        fft = np.fft.rfft(df.zcal) * fac
        fft2 = np.fft.rfft(df.phase[4]) * fac

        fftd = np.fft.rfft(df.zcal - np.pi * df.phase[4]) * fac

        #plt.plot(np.pi * df.phase[4])
        #plt.plot((df.zcal-np.mean(df.zcal))*(0.532 / (2*np.pi)))

        #plt.figure()
        #plt.loglog(freqs, np.abs(fft))
        #plt.loglog(freqs, np.pi * np.abs(fft2))
        #plt.loglog(freqs, np.abs(fftd))
        #plt.show()

        drive_fft = np.fft.rfft(df.electrode_data[1])

        #plt.figure()
        #plt.loglog(freqs, np.abs(drive_fft))
        #plt.show()

        inds = np.abs(drive_fft) > 1e4
        inds *= (freqs > 2.0) * (freqs < 300.0)
        inds = np.arange(len(inds))[inds]

        ninds = inds + 5

        drive_amp = np.abs(drive_fft[inds][0] * fac)

        resp = fft[inds] * (1064.0e-9 / 2.0) * (1.0 / (2.0 * np.pi))
        noise = fft[ninds] * (1064.0e-9 / 2.0) * (1.0 / (2.0 * np.pi))

        drive_noise = np.abs(np.median(drive_fft[ninds] * fac))

        #plt.loglog(freqs[inds], np.abs(resp))
        #plt.loglog(freqs[ninds], np.abs(noise))
        #plt.show()

        resp_sc = resp * 1e9  # put resp in units of nm
        noise_sc = noise * 1e9

        def amp_sc(f, d_accel, f0, g):
            return np.abs(harmonic_osc(f, d_accel, f0, g)) * 1e9

        def phase_sc(f, d_accel, f0, g):
            return np.angle(harmonic_osc(f, d_accel, f0, g))

        popt, pcov = opti.curve_fit(amp_sc, freqs[inds], np.abs(resp_sc), \
                                    sigma=np.abs(noise_sc), absolute_sigma=True,
                                    p0=[1e-3, 160, 750], maxfev=10000)

        #plt.figure()
        #plt.errorbar(freqs[inds], np.abs(resp), np.abs(noise), fmt='.', ms=10, lw=2)
        #plt.loglog(freqs[inds], np.abs(noise))
        #plt.loglog(freqs, np.abs(harmonic_osc(freqs, *popt)))
        #plt.xlabel('Frequency [Hz]', fontsize=16)
        #plt.ylabel('Z Amplitude [m]', fontsize=16)
        #plt.show()

        if fil_ind == 0:
            q_bead = 25.0 * 1.602e-19
            resps = [resp]
            N = 1
        elif fil_ind < 100:
            q_bead = 25.0 * 1.602e-19
            resps.append(resp)
        else:
            # Estimate the charge by projecting this file's response onto the
            # template averaged from the first 100 files, which are assumed
            # to carry 25 e; the projection rescales that assumed charge
            mean_resp = np.mean(np.array(resps), axis=0)
            inner_prod = np.abs(np.vdot(resp, mean_resp))

            proj = inner_prod / np.abs(np.vdot(mean_resp, mean_resp))

            q_bead = (proj * 25.0) * 1.602e-19

        q_arr.append(q_bead / (1.602e-19))

        force = (drive_amp / (4.0e-3)) * q_bead

        mass = np.abs(popt[0]**(-1) * force) * 10**12  # in ng

        #if mass > 0.2:
        #    continue

        #print mass
        #print df.xy_tf_res_freqs

        if fil_ind == 0:
            delta_phi = [0.0]
        else:
            delta_phi.append(np.mean(df.zcal) - init_phi)

        mass_arr.append(mass)
        times.append(df.time)

        #fit_err = np.sqrt(pcov[0,0] / popt[0])
        #charge_err = 0.1
        #drive_err = drive_noise / drive_amp
        #mass_err = np.sqrt( (fit_err)**2 + (charge_err)**2 + (drive_err)**2  ) * mass

    times = np.array(times)  # ns timestamps; cast so the subtractions broadcast
    plt.plot((times - times[0]) * 1e-9, q_arr)
    plt.grid(axis='y')
    plt.xlabel('Time')
    plt.ylabel('Charge [e]')

    err_bars = 0.002 * np.ones(len(delta_phi))

    fig, axarr = plt.subplots(2, 1, sharey=True)
    #plt.plot((times - times[0])*1e-9, mass_arr)
    axarr[0].errorbar((times - times[0]) * 1e-9,
                      mass_arr,
                      err_bars,
                      fmt='-o',
                      markersize=5)
    axarr[0].set_xlabel('Time [s]', fontsize=14)
    axarr[0].set_ylabel('Measured Mass [ng]', fontsize=14)

    plt.tight_layout()

    plt.figure(2)
    n, bin_edge, patch = plt.hist(mass_arr, bins=20, \
                                  color='w', edgecolor='k', linewidth=2)
    real_bins = bin_edge[:-1] + 0.5 * (bin_edge[1] - bin_edge[0])
    popt, pcov = opti.curve_fit(gauss,
                                real_bins,
                                n,
                                p0=[100, 0.08, 0.01],
                                maxfev=10000)
    lab = r'$\mu=%0.3f~\rm{ng}$, $\sigma=%0.3f~\rm{ng}$' % (popt[1], popt[2])

    test_vals = np.linspace(np.min(mass_arr), np.max(mass_arr), 100)
    plt.plot(test_vals, gauss(test_vals, *popt), color='r', linewidth=2, \
             label=lab)
    plt.legend()
    plt.xlabel('Measured Mass [ng]', fontsize=14)
    plt.ylabel('Arb', fontsize=14)

    plt.tight_layout()

    #plt.figure()
    #plt.scatter(np.array(delta_phi) * (1.0 / (2 * np.pi)) * (1064.0e-9 / 2) * 1e6, mass_arr)
    axarr[1].errorbar(np.array(delta_phi) * (1.0 / (2 * np.pi)) *
                      (1064.0e-9 / 2) * 1e6,
                      mass_arr,
                      err_bars,
                      fmt='o',
                      markersize=5)
    axarr[1].set_xlabel('Mean z-position (arb. offset) [um]', fontsize=14)
    axarr[1].set_ylabel('Measured Mass [ng]', fontsize=14)

    plt.tight_layout()

    plt.show()
def get_force_curve_dictionary(files, ax1='x', ax2='z', fullax1=True, fullax2=True, \
                               ax1val=0, ax2val=0, spacing=1e-6, diag=False):
    '''Loops over a list of file names, loads each file, diagonalizes,
       computes force vs. position, then closes and discards the
       raw data to avoid filling memory. Returns the result as a nested
       dictionary with the first level of keys the ax1 positions and the second
       level of keys the ax2 positions

       INPUTS: files, list of file names to extract data
               ax1, first axis in output array
               ax2, second axis in output array
               fullax1, boolean specifying to loop over all values of ax1
               fullax2, boolean specifying to loop over all values of ax2
               ax1val, if not fullax1 -> value to keep
               ax2val, if not fullax2 -> value to keep
               spacing, spacing around ax1val or ax2val to keep
               diag, boolean specifying whether to diagonalize

       OUTPUTS: outdic, output dictionary with the following indexing
                        outdic[ax1pos][ax2pos][resp(0,1,2)][bins(0) or dat(1)]
                        ax1pos and ax2pos are dictionary keys, resp and bins/dat
                        are array indices (native python lists)
                diagoutdic, if diag=True second dictionary with diagonalized data
    '''

    if len(files) == 0:
        print("No Files Found!!")
        return

    ### Do initial looping over files to concatenate data at the same
    ### heights and separations
    force_curves = {}
    if diag:
        diag_force_curves = {}
    old_per = 0
    print()
    print(os.path.dirname(files[0]))
    print("Processing %i files" % len(files))
    print("Percent complete: ")
    for fil_ind, fil in enumerate(files):

        bu.progress_bar(fil_ind, len(files))

        # Display percent completion
        #per = int(100. * float(fil_ind) / float(len(files)) )
        #if per > old_per:
        #    print old_per,
        #    sys.stdout.flush()
        #    old_per = per

        # Load data
        df = bu.DataFile()
        df.load(fil)

        df.calibrate_stage_position()

        # Pick out height and separation
        ax1pos = df.stage_settings[ax1 + ' DC']
        ax2pos = df.stage_settings[ax2 + ' DC']

        # If subselection is desired, do that now
        if not fullax1:
            dif1 = np.abs(ax1pos - ax1val)
            if dif1 > spacing:
                continue
        if not fullax2:
            dif2 = np.abs(ax2pos - ax2val)
            if dif2 > spacing:
                continue

        if diag:
            df.diagonalize(maxfreq=lpf)

        df.get_force_v_pos(verbose=False, nbins=nbins, nharmonics=nharmonics, \
                           width=width, fakedrive=fakedrive, fakefreq=fakefreq, fakeamp=fakeamp)

        # Add the current data to the output dictionary
        if ax1pos not in list(force_curves.keys()):
            force_curves[ax1pos] = {}
            if diag:
                diag_force_curves[ax1pos] = {}
        if ax2pos not in list(force_curves[ax1pos].keys()):
            # if height and sep not found, adds them to the dictionary
            force_curves[ax1pos][ax2pos] = [[], [], []]
            if diag:
                diag_force_curves[ax1pos][ax2pos] = [[], [], []]

            for resp in [0, 1, 2]:
                force_curves[ax1pos][ax2pos][resp] = \
                        [df.binned_data[resp][0], \
                         df.binned_data[resp][1] * df.conv_facs[resp]]
                if diag:
                    diag_force_curves[ax1pos][ax2pos][resp] = \
                           [df.diag_binned_data[resp][0], \
                            df.diag_binned_data[resp][1]]
        else:
            for resp in [0, 1, 2]:
                # if this combination of height and sep have already been recorded,
                # this correctly concatenates and sorts data from multiple files
                old_bins = force_curves[ax1pos][ax2pos][resp][0]
                old_dat = force_curves[ax1pos][ax2pos][resp][1]
                new_bins = np.hstack((old_bins, df.binned_data[resp][0]))
                new_dat = np.hstack(
                    (old_dat, df.binned_data[resp][1] * df.conv_facs[resp]))

                sort_inds = np.argsort(new_bins)

                force_curves[ax1pos][ax2pos][resp] = \
                            [new_bins[sort_inds], new_dat[sort_inds]]

                if diag:
                    old_diag_bins = diag_force_curves[ax1pos][ax2pos][resp][0]
                    old_diag_dat = diag_force_curves[ax1pos][ax2pos][resp][1]
                    new_diag_bins = np.hstack(
                        (old_diag_bins, df.diag_binned_data[resp][0]))
                    new_diag_dat = np.hstack(
                        (old_diag_dat, df.diag_binned_data[resp][1]))

                    diag_sort_inds = np.argsort(new_diag_bins)

                    diag_force_curves[ax1pos][ax2pos][resp] = \
                                [new_diag_bins[diag_sort_inds], new_diag_dat[diag_sort_inds]]

    ax1_keys = list(force_curves.keys())
    ax2_keys = list(force_curves[ax1_keys[0]].keys())

    print()
    print('Averaging files and building standard deviations')
    sys.stdout.flush()

    #max_ax1 = np.max( ax1_keys )
    test_ax1 = 38
    max_ax1 = ax1_keys[np.argmin(np.abs(test_ax1 - np.array(ax1_keys)))]
    ax2pos = ax2_keys[np.argmin(np.abs(ax2_toplot - np.array(ax2_keys)))]

    ax1_keys.sort()
    ax2_keys.sort()

    for ax1_k in ax1_keys:
        for ax2_k in ax2_keys:
            for resp in [0, 1, 2]:

                old_bins = force_curves[ax1_k][ax2_k][resp][0]
                old_dat = force_curves[ax1_k][ax2_k][resp][1]

                new_bins = np.linspace(
                    np.min(old_bins) + 1e-9,
                    np.max(old_bins) - 1e-9, nbins)

                bin_sp = new_bins[1] - new_bins[0]

                int_bins = []
                int_dat = []

                num_files = int(
                    np.sum(np.abs(old_bins - old_bins[0]) <= 0.2 * bin_sp))
                #num_files = 3
                #print num_files

                #for binval in old_bins[::num_files]:
                #    inds = np.abs(old_bins - binval) <= 0.2 * bin_sp
                #    avg_bin = np.mean(old_bins[inds])
                #    if avg_bin not in int_bins:
                #        int_bins.append(avg_bin)
                #        int_dat.append(np.mean(old_dat[inds]))

                #dat_func = interp.interp1d(old_bins, old_dat, kind='cubic', bounds_error=False,\
                #                           fill_value='extrapolate')

                #new_dat = dat_func(new_bins)
                #new_errs = np.zeros_like(new_dat)

                new_dat = np.zeros_like(new_bins)
                new_errs = np.zeros_like(new_bins)
                for binind, binval in enumerate(new_bins):
                    inds = np.abs(old_bins - binval) <= 0.5 * bin_sp
                    new_dat[binind] = np.mean(old_dat[inds])
                    new_errs[binind] = np.std(old_dat[inds])

                if ax1_k == max_ax1:
                    if ax2_k == ax2pos:
                        test_posvec[resp] = old_bins
                        test_posvec_int[resp] = int_bins
                        test_posvec_final[resp] = new_bins
                        test_arr[resp] = old_dat
                        test_arr_int[resp] = int_dat
                        test_arr_final[resp] = new_dat

                force_curves[ax1_k][ax2_k][resp] = [
                    new_bins, new_dat, new_errs
                ]

                if diag:
                    old_diag_bins = diag_force_curves[ax1_k][ax2_k][resp][0]
                    old_diag_dat = diag_force_curves[ax1_k][ax2_k][resp][1]

                    if ax1_k == max_ax1:
                        if ax2_k == ax2pos:
                            diag_test_posvec[resp] = old_diag_bins
                            diag_test_arr[resp] = old_diag_dat


                    new_diag_bins = np.linspace(np.min(old_diag_bins)+1e-9, \
                                                np.max(old_diag_bins)-1e-9, nbins)

                    diag_bin_sp = new_diag_bins[1] - new_diag_bins[0]

                    int_diag_bins = []
                    int_diag_dat = []

                    # num_files = int( np.sum( np.abs(old_diag_bins - old_diag_bins[0]) \
                    #                          <= 0.2 * diag_bin_sp ) )

                    # for binval in old_diag_bins[::num_files]:
                    #     inds = np.abs(old_diag_bins - binval) <= 0.2 * diag_bin_sp
                    #     int_diag_bins.append(np.mean(old_diag_bins[inds]))
                    #     int_diag_dat.append(np.mean(old_diag_dat[inds]))

                    # diag_dat_func = interp.interp1d(int_diag_bins, int_diag_dat, kind='cubic', \
                    #                                bounds_error=False, fill_value='extrapolate')

                    # new_diag_dat = diag_dat_func(new_diag_bins)
                    # new_diag_errs = np.zeros_like(new_diag_dat)

                    # diag_bin_sp = new_diag_bins[1] - new_diag_bins[0]
                    # for binind, binval in enumerate(new_diag_bins):
                    #     diaginds = np.abs( old_diag_bins - binval ) < diag_bin_sp
                    #     new_diag_errs[binind] = np.std( old_diag_dat[diaginds] )

                    new_diag_dat = np.zeros_like(new_diag_bins)
                    new_diag_errs = np.zeros_like(new_diag_bins)
                    for binind, binval in enumerate(new_diag_bins):
                        inds = np.abs(old_diag_bins -
                                      binval) <= 0.5 * diag_bin_sp
                        new_diag_dat[binind] = np.mean(old_diag_dat[inds])
                        new_diag_errs[binind] = np.std(old_diag_dat[inds])

                    diag_force_curves[ax1_k][ax2_k][resp] = \
                                            [new_diag_bins, new_diag_dat, new_diag_errs]

    if diag:
        return force_curves, diag_force_curves
    else:
        return force_curves
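
# A hedged usage sketch of the nested dictionary built above ('fnames' is
# hypothetical, and module-level settings such as nbins and ax2_toplot are
# assumed to exist as in the original script):
#
#     curves = get_force_curve_dictionary(fnames, ax1='x', ax2='z')
#     for ax1pos in curves:
#         for ax2pos in curves[ax1pos]:
#             bins, dat, errs = curves[ax1pos][ax2pos][0]   # x response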
    field_amp_errs = np.zeros(n_file)
    field_freqs = np.zeros(n_file)
    field_freq_errs = np.zeros(n_file)
    pressures = np.zeros((n_file, 3))
    pressures_mbar = np.zeros((n_file, 3))

    # Construct simple top-hat filters to pick out only the rotation
    # peak of interest: at f_rot for the drive and 2*f_rot for the
    # polarization rotation signal. The masks are True for bins more
    # than bw/2 away from each peak, i.e. the bins to be zeroed out.
    f_rot2 = 2.*f_rot
    finds = np.abs(freqs - f_rot) > bw/2.
    finds2 = np.abs(freqs - f_rot2) > bw/2.

    # Loop over files
    for i, f in enumerate(files[init_file:final_file]):
        bu.progress_bar(i, n_file)
        sys.stdout.flush()
        # Analysis nested in try/except block just in case there
        # is a corrupted file or something
        try:
            # Load data, compute FFTs and plot if requested
            obj = hsDat(f)
            dat_fft = np.fft.rfft(obj.dat[:, data_ax])

            elec_mon = obj.dat[:,drive_ax]
            drive_fft = np.fft.rfft(elec_mon)

            elec_filt = tabor_mon_fac * signal.filtfilt(b2, a2, elec_mon)

            zeros = np.zeros(nsamp)
            voltage = np.array([zeros, zeros, zeros, elec_filt, \
Exemplo n.º 22
0
def simulation(params):
    '''Simulation function taking one argument and returning one object,
       for use with joblib parallelization.'''

    ### Parse the parameters
    rbead, sep, height = params

    ### Build a filename from the parameters
    filename = 'rbead_' + str(rbead)
    filename += '_sep_' + str(sep)
    filename += '_height_' + str(height)
    filename += '.p'
    full_filename = os.path.join(results_path, filename)

    ### Instantiate a dictionary that will be populated with results
    results_dic = {}
    results_dic['order'] = 'Rbead, Sep, Height, Yuklambda'
    results_dic[rbead] = {}
    results_dic[rbead][sep] = {}
    results_dic[rbead][sep][height] = {}

    ### Some timing stuff
    all_start = time.time()
    calc_times = []

    ### A thing that needs to be in every term (POSSIBLE SIGN AMBIGUITY)
    Gterm = 2. * rbead**3

    ### Loop over the long array of bead positions and compute the force from
    ### only the central finger. This can be sampled and added up to build the
    ### force curve from the entire attractor
    Gforcecurves = [[], [], []]
    for ind, ypos in enumerate(beadposvec2):
        beadpos = [sep + rbead, ypos, height]

        ### These are used to compute projections and thus need to maintain sign.
        ### Use the xx2, yy2, and zz2 arrays which are a subselections of the full
        ### attractor covering only a single period of the fingers
        xsep, ysep, zsep = np.meshgrid(beadpos[0] - xx2, \
                                       beadpos[1] - yy2, \
                                       beadpos[2] - zz2, indexing='ij')

        ### Compute the separation between each point mass and the center
        ### of the microsphere
        full_sep = np.sqrt(xsep**2 + ysep**2 + zsep**2)

        ### Refer to a soon-to-exist document expanding on Alex R's
        prefac = -1.0 * ((2. * G * m2 * rhobead * np.pi) / (3. * full_sep**2))

        ### Append the computed values for the force from a single finger
        Gforcecurves[0].append(np.sum(prefac * Gterm * xsep / full_sep))
        Gforcecurves[1].append(np.sum(prefac * Gterm * ysep / full_sep))
        Gforcecurves[2].append(np.sum(prefac * Gterm * zsep / full_sep))

    Gforcecurves = np.array(Gforcecurves)

    ### Build interpolating functions from the long position vector
    ### and the force due to a single period of the fingers
    GX = interp.interp1d(beadposvec2, Gforcecurves[0], kind='cubic')
    GY = interp.interp1d(beadposvec2, Gforcecurves[1], kind='cubic')
    GZ = interp.interp1d(beadposvec2, Gforcecurves[2], kind='cubic')

    ### Loop over the actual array of desired bead positions, and compute the
    ### force from the full attractor at that position
    newGs = np.zeros((3, len(beadposvec)))
    for ind, ypos in enumerate(beadposvec):
        start = time.time()

        ### Compute the contribution from the points external to the
        ### periodicity, if desired
        if include_edge:

            ### sep parameter is assumed to be face to face
            beadpos = [sep + rbead, ypos, height]

            ### These are used to compute projections and thus need to maintain sign
            xsep, ysep, zsep = np.meshgrid(beadpos[0] - xx2, \
                                           beadpos[1] - yy3, \
                                           beadpos[2] - zz2, indexing='ij')
            full_sep = np.sqrt(xsep**2 + ysep**2 + zsep**2)

            prefac = -1.0 * ((2. * G * m3 * rhobead * np.pi) /
                             (3. * full_sep**2))

            newGs[0][ind] += np.sum(prefac * Gterm * xsep / full_sep)
            newGs[1][ind] += np.sum(prefac * Gterm * ysep / full_sep)
            newGs[2][ind] += np.sum(prefac * Gterm * zsep / full_sep)

        ### Find the finger we're in front of, and compute an
        ### equivalent position as if we're in front of the center finger
        finger_ind, newypos = find_ind(ypos)

        ### Sample the interpolating functions we built before, with one sample
        ### for each finger, properly displaced
        newGs[0][ind] += np.sum(
            GX(newypos + (finger_inds + finger_ind) * full_period))
        newGs[1][ind] += np.sum(
            GY(newypos + (finger_inds + finger_ind) * full_period))
        newGs[2][ind] += np.sum(
            GZ(newypos + (finger_inds + finger_ind) * full_period))
        stop = time.time()
        calc_times.append(stop - start)

    if verbose:
        print('Computed normal grav.')
        print('Processing Yukawa modifications...')
        sys.stdout.flush()

    ### Loop over the desired values of the Yukawa lambda parameter, simulating
    ### the force for each one
    nlambda = len(lambdas)
    for yukind, yuklambda in enumerate(lambdas):
        if verbose:
            bu.progress_bar(yukind, nlambda)

        ### Refer to the non-existent LaTeX document in ../documents/ to explain this.
        ### It's a term necessary for every position
        func = np.exp(-2. * rbead / yuklambda) * (
            1. + rbead / yuklambda) + rbead / yuklambda - 1.
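        ### (This factor appears to be the standard closed-form result for
        ### integrating a Yukawa interaction over a uniform sphere of radius
        ### rbead, written in exponential form.)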

        ### Loop over the long array of values computing the force from a single finger
        yukforcecurves = [[], [], []]
        for ind, ypos in enumerate(beadposvec2):

            ### sep parameter is assumed to be face to face
            beadpos = [sep + rbead, ypos, height]

            #### These are used to compute projections and thus need to maintain sign
            xsep, ysep, zsep = np.meshgrid(beadpos[0] - xx2, \
                                           beadpos[1] - yy2, \
                                           beadpos[2] - zz2, indexing='ij')

            ### This isn't the full sep this time, because the Yukawa term depends on
            ### the distance between the point mass and the surface of the MS
            s = np.sqrt(xsep**2 + ysep**2 + zsep**2) - rbead

            ### Refer to the non-existent LaTeX document in ../documents/ to explain this.
            ### Two position dependent terms
            prefac = -1.0 * ((2. * G * m2 * rhobead * np.pi) /
                             (3. * (s + rbead)**2))
            yukterm = 3 * yuklambda**2 * (
                s + rbead + yuklambda) * func * np.exp(-s / yuklambda)

            ### Build up the force curve at this point in the bead's position
            yukforcecurves[0].append(
                np.sum(prefac * yukterm * xsep / (s + rbead)))
            yukforcecurves[1].append(
                np.sum(prefac * yukterm * ysep / (s + rbead)))
            yukforcecurves[2].append(
                np.sum(prefac * yukterm * zsep / (s + rbead)))

        yukforcecurves = np.array(yukforcecurves)

        ### Construct interpolating functions for the yukawa modified force term
        ### from only the central finger
        yukX = interp.interp1d(beadposvec2, yukforcecurves[0], kind='cubic')
        yukY = interp.interp1d(beadposvec2, yukforcecurves[1], kind='cubic')
        yukZ = interp.interp1d(beadposvec2, yukforcecurves[2], kind='cubic')

        ### Loop over the actual array of desired bead positions, and compute the
        ### force from the full attractor at that position
        newyuks = np.zeros((3, len(beadposvec)))
        for ind, ypos in enumerate(beadposvec):
            start = time.time()

            ### Compute the contribution from the points external to the
            ### periodicity, if desired
            if include_edge:

                beadpos = [sep + rbead, ypos, height]

                #### These are used to compute projections and thus need to maintain sign
                xsep, ysep, zsep = np.meshgrid(beadpos[0] - xx2, \
                                               beadpos[1] - yy3, \
                                               beadpos[2] - zz2, indexing='ij')

                ### This isn't the full sep this time, because the Yukawa term depends on
                ### the distance between the point mass and the surface of the MS
                s = np.sqrt(xsep**2 + ysep**2 + zsep**2) - rbead

                ### Refer to the non-existent LaTeX document in ../documents/ to explain this.
                ### Two position dependent terms
                prefac = -1.0 * ((2. * G * m3 * rhobead * np.pi) /
                                 (3. * (rbead + s)**2))
                yukterm = 3 * yuklambda**2 * (
                    rbead + s + yuklambda) * func * np.exp(-s / yuklambda)

                newyuks[0][ind] += np.sum(prefac * yukterm * xsep /
                                          (s + rbead))
                newyuks[1][ind] += np.sum(prefac * yukterm * ysep /
                                          (s + rbead))
                newyuks[2][ind] += np.sum(prefac * yukterm * zsep /
                                          (s + rbead))

            ### Find the finger we're in front of, and compute an
            ### equivalent position as if we're in front of the center finger
            finger_ind, newypos = find_ind(ypos)

            ### Sample the interpolating functions we built before, with one sample
            ### for each finger, properly displaced
            newyuks[0][ind] += np.sum(
                yukX(newypos + (finger_inds + finger_ind) * full_period))
            newyuks[1][ind] += np.sum(
                yukY(newypos + (finger_inds + finger_ind) * full_period))
            newyuks[2][ind] += np.sum(
                yukZ(newypos + (finger_inds + finger_ind) * full_period))
            stop = time.time()
            calc_times.append(stop - start)

        results_dic[rbead][sep][height][yuklambda] = \
                    (newGs[0], newGs[1], newGs[2], newyuks[0], newyuks[1], newyuks[2])

    all_stop = time.time()

    if verbose:
        print("100% Done!")
        print( 'Mean: {:0.3g} ms, Std.: {:0.3g} ms    per bead-position'\
                .format(np.mean(calc_times)*1e3, np.std(calc_times)*1e3) )
        print()
        print('Total Computation Time: {:0.1f} s'.format(all_stop - all_start))

        # input()

    ### Save the results to a unique filename
    results_dic['posvec'] = beadposvec
    results_dic['rhobead'] = rhobead
    results_dic['attractor_params'] = density.attractor_params
    try:
        pickle.dump(results_dic, open(full_filename, 'wb'))
    except:
        print("Save didn't work! : ", full_filename)

    ### Return the file name to avoid building up too much shit when computing
    ### thousands of different parameters with a joblib implementation
    return full_filename
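
# A hedged sketch of the joblib-style driver the docstring anticipates (the
# parameter grids 'rbeads', 'seps', 'heights' and the core count 'n_core' are
# hypothetical):
#
#     from itertools import product
#     from joblib import Parallel, delayed
#
#     param_list = list(product(rbeads, seps, heights))
#     outnames = Parallel(n_jobs=n_core)(
#         delayed(simulation)(params) for params in param_list)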
Exemplo n.º 23
0
nstep_files = len(step_cal_files)

# nstep_files = np.min([max_file, len(step_cal_files)])
# Do the step calibration
if not fake_step_cal:
    step_file_objs = []
    step_cal_vec_inphase = []
    step_cal_vec_max = []
    step_cal_vec_userphase = []
    pow_vec = []
    zpos_vec = []
    time_vec = []
    #for fileind, filname in enumerate(step_cal_files[:max_file]):
    print('Processing discharge files...')
    for fileind, filname in enumerate(step_cal_files):
        bu.progress_bar(fileind, nstep_files)
        df = bu.DataFile()
        try:
            if new_trap:
                df.load_new(filname)
                if not df.electrode_settings['driven'][elec_channel_select]:
                    continue
            else:
                df.load(filname, plot_raw_dat=plot_raw_dat)
        except Exception:
            traceback.print_exc()
            continue

        if using_tabor and not new_trap:
            df.load_other_data()
def weigh_bead_efield(files, colormap='jet', sort='time', file_inds=(0,10000), \
                      pos=False):
    '''Loops over a list of file names, loads each file, computes the
       electrostatic force on the bead from the electrode monitors, and
       fits the levitation power against that force to infer the mass.

       INPUTS: files, list of file names to extract data
               colormap, matplotlib colormap for file coloring
               sort, sorting scheme (files are sorted by creation time)
               file_inds, (start, stop) indices to subselect files
               pos, boolean selecting the recharge (positive) charge file

       OUTPUTS: none, plots stuff
    '''

    files = [(os.stat(path), path) for path in files]
    files = [(stat.st_ctime, path) for stat, path in files]
    files.sort(key=lambda x: (x[0]))
    files = [obj[1] for obj in files]

    files = files[file_inds[0]:file_inds[1]]
    #files = files[::10]

    date = files[0].split('/')[2]

    charge_file = '/calibrations/charges/' + date
    if pos:
        charge_file += '_recharge.charge'
    else:
        charge_file += '.charge'

    q_bead = np.load(charge_file)[0] * constants.elementary_charge
    print(q_bead / constants.elementary_charge)

    run_index = 0

    masses = []

    nfiles = len(files)
    print("Processing %i files..." % nfiles)

    eforce = []
    power = []

    for fil_ind, fil in enumerate(files):  #files[56*(i):56*(i+1)]):

        bu.progress_bar(fil_ind, nfiles)

        # Load data
        df = bu.DataFile()
        try:
            df.load(fil, load_other=True)
        except:
            continue

        df.calibrate_stage_position()

        df.calibrate_phase()

        if fil_ind == 0:
            init_phi = np.mean(df.zcal)

        top_elec = mon_fac * np.mean(df.other_data[6])
        bot_elec = mon_fac * np.mean(df.other_data[7])

        # Synth plugged in negative so just adding instead of subtracting
        # negative. Note the successive overwrites below: only the last
        # definition of Vdiff (top_elec - bot_elec) is actually used
        Vdiff = V2 + amp_gain * df.synth_settings[0]

        Vdiff = np.mean(df.electrode_data[2]) - np.mean(df.electrode_data[1])

        Vdiff = top_elec - bot_elec

        force = -(Vdiff / (4.0e-3)) * q_bead
        force2 = (top_elec * e_top_func(0.0) +
                  bot_elec * e_bot_func(0.0)) * q_bead

        try:
            mean_fb = np.mean(df.pos_fb[2])
            mean_pow = bits_to_power(mean_fb)
        except:
            continue

        #eforce.append(force)
        eforce.append(force2)
        power.append(mean_pow)

    eforce = np.array(eforce)
    power = np.array(power)

    power = power / np.mean(power)

    inds = np.abs(eforce) < 2e-13
    eforce = eforce[inds]
    power = power[inds]

    popt, pcov = opti.curve_fit(line, eforce*1e13, power, \
                                absolute_sigma=False, maxfev=10000)
    test_vals = np.linspace(np.min(eforce * 1e13), np.max(eforce * 1e13), 100)

    fit = line(test_vals, *popt)
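
    # Reading of the fit: the levitation power tracks the total force the
    # trap must supply, so the electrostatic force at which the fitted line
    # extrapolates to zero power supports the bead's full weight; dividing
    # that force by g then gives the mass.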

    lev_force = -popt[1] / (popt[0] * 1e13)

    mass = lev_force / (9.806)

    mass_err = np.sqrt( pcov[0,0] / popt[0]**2 + \
                        pcov[1,1] / popt[1]**2 + \
                        np.abs(pcov[0,1]) / np.abs(popt[0]*popt[1]) ) * mass

    #masses.append(mass)

    print(mass * 1e12)
    print(mass_err * 1e12)

    plt.figure()

    plt.plot(eforce, power, 'o')
    plt.xlabel('Elec. Force [N]', fontsize=14)
    plt.ylabel('Levitation Power [arb]', fontsize=14)

    plt.tight_layout()

    plt.plot(test_vals*1e-13, fit, lw=2, color='r', \
             label='Implied mass: %0.3f ng' % (mass*1e12))
    plt.legend()

    plt.show()
    datfiles, lengths = bu.find_all_fnames(cdir, ext=ext)
    nfiles = lengths[0]

    gammas = []
    longdat = []
    nsamp = 0
    lib_freqs.append([])

    lib_calc = np.sqrt(drive_amp * p0 / Ibead) / (2.0 * np.pi)   # expected libration frequency [Hz]

    for fileind, file in enumerate(datfiles[::-1]):
        if fileind > 5:
            break

        bu.progress_bar(fileind,
                        nfiles,
                        suffix='{:d}/{:d}'.format(i + 1, n_mc))

        if hdf5:
            fobj = h5py.File(file, 'r')
            dat = np.copy(fobj['sim_data'])
            fobj.close()
        else:
            dat = np.load(file)

        tvec = dat[0]
        theta = dat[1]
        phi = dat[2]

        px = p0 * np.cos(phi) * np.sin(theta)
def get_force_curve_dictionary(files, cantind=0, ax1='z', fullax1=True, \
                               ax1val=0,  spacing=1e-6, diag=False, fit_xdat=False, \
                               fit_zdat=False, plottf=False):
    '''Loops over a list of file names, loads each file, diagonalizes,
       computes force vs. position, then closes and discards the
       raw data to avoid filling memory. Returns the result as a nested
       dictionary with the first level of keys the cantilever biases and
       the second level of keys the height

       INPUTS: files, list of file names to extract data
               cantind, cantilever electrode index
               ax1, axis with different DC positions, usually the height
               fullax1, boolean specifying to loop over all values of ax1
               ax1val, if not fullax1 -> value to keep

       OUTPUTS: outdic, output dictionary with the following indexing
                        outdic[cantbias][ax1pos][resp(0,1,2)][bins(0) or dat(1)]
                        cantbias and ax1pos are dictionary keys, resp and bins/dat
                        are array indices (native python lists)
                diagoutdic, if diag=True second dictionary with diagonalized data
                '''

    force_curves = {}
    if diag:
        diag_force_curves = {}
    old_per = 0
    for fil_ind, fil in enumerate(files):
        # Display percent completion
        bu.progress_bar(fil_ind, len(files))

        # Load data
        df = bu.DataFile()
        df.load(fil)

        df.calibrate_stage_position()

        cantbias = df.electrode_settings['dc_settings'][0]
        ax1pos = df.stage_settings[ax1 + ' DC']

        # If subselection is desired, do that now
        if not fullax1:
            dif1 = np.abs(ax1pos - ax1val)
            if dif1 > spacing:
                continue

        if diag:
            if fil_ind == 0 and plottf:
                df.diagonalize(date=tfdate, maxfreq=tophatf, plot=True)
            else:
                df.diagonalize(date=tfdate, maxfreq=tophatf)

        df.get_force_v_pos(verbose=False, nbins=nbins)

        # Add the current data to the output dictionary
        if cantbias not in list(force_curves.keys()):
            force_curves[cantbias] = {}
            if diag:
                diag_force_curves[cantbias] = {}
        if ax1pos not in list(force_curves[cantbias].keys()):
            # if height and sep not found, adds them to the dictionary
            force_curves[cantbias][ax1pos] = [[], [], []]
            if diag:
                diag_force_curves[cantbias][ax1pos] = [[], [], []]

            for resp in [0, 1, 2]:
                force_curves[cantbias][ax1pos][resp] = \
                        [df.binned_data[resp][0], \
                         df.binned_data[resp][1] * df.conv_facs[resp]]
                if diag:
                    diag_force_curves[cantbias][ax1pos][resp] = \
                           [df.diag_binned_data[resp][0], \
                            df.diag_binned_data[resp][1]]
        else:
            for resp in [0, 1, 2]:
                # if this combination of height and sep have already been recorded,
                # this correctly concatenates and sorts data from multiple files
                old_bins = force_curves[cantbias][ax1pos][resp][0]
                old_dat = force_curves[cantbias][ax1pos][resp][1]
                new_bins = np.hstack((old_bins, df.binned_data[resp][0]))
                new_dat = np.hstack(
                    (old_dat, df.binned_data[resp][1] * df.conv_facs[resp]))

                sort_inds = np.argsort(new_bins)

                #plt.plot(new_bins[sort_inds], new_dat[sort_inds])
                #plt.show()

                force_curves[cantbias][ax1pos][resp] = \
                            [new_bins[sort_inds], new_dat[sort_inds]]

                if diag:
                    old_diag_bins = diag_force_curves[cantbias][ax1pos][resp][
                        0]
                    old_diag_dat = diag_force_curves[cantbias][ax1pos][resp][1]
                    new_diag_bins = np.hstack(
                        (old_diag_bins, df.diag_binned_data[resp][0]))
                    new_diag_dat = np.hstack(
                        (old_diag_dat, df.diag_binned_data[resp][1]))

                    diag_sort_inds = np.argsort(new_diag_bins)

                    diag_force_curves[cantbias][ax1pos][resp] = \
                                [new_diag_bins[diag_sort_inds], new_diag_dat[diag_sort_inds]]

    cantV_keys = list(force_curves.keys())
    ax1_keys = list(force_curves[cantV_keys[0]].keys())

    print()
    print('Averaging files and building standard deviations')
    sys.stdout.flush()

    if fit_xdat:
        xdat = {'fit': dipole_force}
        diag_xdat = {'fit': dipole_force}
    if fit_zdat:
        zdat = {'fit': dipole_force}
        diag_zdat = {'fit': dipole_force}

    for cantV_k in cantV_keys:
        if fit_zdat:
            if cantV_k not in zdat:
                zdat[cantV_k] = {}
                if diag:
                    diag_zdat[cantV_k] = {}
        if fit_xdat:
            if cantV_k not in xdat:
                xdat[cantV_k] = {}
                if diag:
                    diag_xdat[cantV_k] = {}

        for ax1_k in ax1_keys:
            for resp in [0, 1, 2]:

                old_bins = force_curves[cantV_k][ax1_k][resp][0]
                old_dat = force_curves[cantV_k][ax1_k][resp][1]

                #dat_func = interp.interp1d(old_bins, old_dat, kind='cubic')

                new_bins = np.linspace(
                    np.min(old_bins) + 1e-9,
                    np.max(old_bins) - 1e-9, nbins)
                new_dat = np.zeros_like(new_bins)
                new_errs = np.zeros_like(new_bins)

                bin_sp = new_bins[1] - new_bins[0]
                for binind, binval in enumerate(new_bins):
                    inds = np.abs(old_bins - binval) < bin_sp
                    new_dat[binind] = np.mean(old_dat[inds])
                    new_errs[binind] = np.std(old_dat[inds])

                force_curves[cantV_k][ax1_k][resp] = [
                    new_bins, new_dat, new_errs
                ]

                if fit_xdat and resp == 0:
                    x0 = np.max(new_bins) + closest_sep
                    p0 = [np.max(new_dat) / closest_sep**2, 0, 0]
                    fitfun = lambda x, a, b, c: xdat['fit'](x, a, b, c, x0=x0)
                    popt, pcov = opti.curve_fit(fitfun, new_bins, new_dat)
                    val = fitfun(np.max(new_bins), popt[0], popt[1], 0)

                    #print resp
                    #print fitfun(-200, *popt) - popt[2]
                    #print fitfun(50, *popt) - popt[2]
                    #plt.plot(new_bins, new_dat, label='Dat')
                    #plt.plot(new_bins, fitfun(new_bins, *popt), label='Fit')
                    #plt.legend()
                    #plt.show()

                    xdat[cantV_k][ax1_k] = (popt, val)

                if fit_zdat and resp == 2:
                    x0 = np.max(new_bins) + closest_sep
                    p0 = [np.max(new_dat) / closest_sep**2, 0, 0]
                    fitfun = lambda x, a, b, c: zdat['fit'](x, a, b, c, x0=x0)
                    popt, pcov = opti.curve_fit(fitfun, new_bins, new_dat)
                    val = fitfun(np.max(new_bins), popt[0], popt[1], 0)

                    zdat[cantV_k][ax1_k] = (popt, val)

                if diag:
                    old_diag_bins = diag_force_curves[cantV_k][ax1_k][resp][0]
                    old_diag_dat = diag_force_curves[cantV_k][ax1_k][resp][1]

                    #diag_dat_func = interp.interp1d(old_diag_bins, old_diag_dat, kind='cubic')

                    new_diag_bins = np.linspace(np.min(old_diag_bins)+1e-9, \
                                                np.max(old_diag_bins)-1e-9, nbins)
                    new_diag_dat = np.zeros_like(new_diag_bins)
                    new_diag_errs = np.zeros_like(new_diag_bins)

                    diag_bin_sp = new_diag_bins[1] - new_diag_bins[0]
                    for binind, binval in enumerate(new_diag_bins):
                        diaginds = np.abs(old_diag_bins - binval) < diag_bin_sp
                        new_diag_errs[binind] = np.std(old_diag_dat[diaginds])
                        new_diag_dat[binind] = np.mean(old_diag_dat[diaginds])

                    diag_force_curves[cantV_k][ax1_k][resp] = \
                                        [new_diag_bins, new_diag_dat, new_diag_errs]

                    if fit_xdat and resp == 0:
                        x0 = np.max(new_diag_bins) + closest_sep
                        p0 = [np.max(new_diag_dat) / closest_sep**2, 0, 0]
                        fitfun = lambda x, a, b, c: diag_xdat['fit'](
                            x, a, b, c, x0=x0)
                        popt, pcov = opti.curve_fit(fitfun, new_diag_bins,
                                                    new_diag_dat)
                        val = fitfun(np.max(new_diag_bins), popt[0], popt[1],
                                     0)

                        diag_xdat[cantV_k][ax1_k] = (popt, val)

                    if fit_zdat and resp == 2:
                        x0 = np.max(new_diag_bins) + closest_sep
                        p0 = [np.max(new_diag_dat) / closest_sep**2, 0, 0]
                        fitfun = lambda x, a, b, c: diag_zdat['fit'](
                            x, a, b, c, x0=x0)
                        popt, pcov = opti.curve_fit(fitfun, new_diag_bins,
                                                    new_diag_dat)
                        val = fitfun(np.max(new_diag_bins), popt[0], popt[1],
                                     0)

                        diag_zdat[cantV_k][ax1_k] = (popt, val)

    fits = {}
    if fit_xdat:
        if diag:
            fits['x'] = (xdat, diag_xdat)
        else:
            fits['x'] = (xdat)
    if fit_zdat:
        if diag:
            fits['z'] = (zdat, diag_zdat)
        else:
            fits['z'] = (zdat)

    if diag:
        return force_curves, diag_force_curves, fits
    else:
        return force_curves, fits
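
# A hedged usage sketch ('fnames' hypothetical; module-level settings such as
# nbins, tfdate, tophatf and closest_sep are assumed, as in the original
# script):
#
#     curves, fits = get_force_curve_dictionary(fnames, ax1='z', fit_zdat=True)
#     for cantbias in curves:
#         for height in curves[cantbias]:
#             bins, dat, errs = curves[cantbias][height][2]   # z response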
Exemplo n.º 27
0
def weigh_bead_efield(files, elec_ind, pow_ind, colormap='plasma', sort='time',\
                      file_inds=(0,10000), plot=True, print_res=False, pos=False, \
                      save_mass=False, new_trap=False, correct_phase_shift=False):
    '''Loops over a list of file names, loads each file, reconstructs the
       electrostatic force on the bead from the electrode monitors, and
       compares it to the levitation power to extract the bead mass.

       INPUTS: files, list of file names to extract data
               elec_ind, other_data index of the top electrode monitor
                   (elec_ind + 1 is taken as the bottom monitor)
               pow_ind, other_data index of the power monitor
               colormap, matplotlib colormap for file coloring
               sort, sorting scheme for the file list
               file_inds, (start, stop) indices to subselect files
               plot, boolean to generate plots
               print_res, boolean to print the results
               pos, boolean selecting the recharge (positive) charge file
               save_mass, boolean to save the extracted mass
               new_trap, boolean selecting new-trap data loading
               correct_phase_shift, boolean to correct an anomalous
                   phase shift during analysis

       OUTPUTS: none, plots stuff
    '''
    date = re.search(r"\d{8,}", files[0])[0]
    suffix = files[0].split('/')[-2]

    if new_trap:
        trap_str = 'new_trap'
    else:
        trap_str = 'old_trap'

    charge_file = '/data/{:s}_processed/calibrations/charges/'.format(
        trap_str) + date
    save_filename = '/data/{:s}_processed/calibrations/masses/'.format(trap_str) \
                            + date + '_' + suffix + '.mass'
    bu.make_all_pardirs(save_filename)

    if pos:
        charge_file += '_recharge.charge'
    else:
        charge_file += '.charge'

    try:
        nq = np.load(charge_file)[0]
        found_charge = True
    except:
        found_charge = False

    if not found_charge or manual_charge:
        user_nq = input('No charge file or manual requested. Guess q: ')
        nq = int(user_nq)

    if correct_phase_shift:
        print('Correcting anomalous phase-shift during analysis.')

    # nq = -16
    print('qbead: {:d} e'.format(int(nq)))
    q_bead = nq * constants.elementary_charge

    run_index = 0

    masses = []

    nfiles = len(files)
    if not print_res:
        print("Processing %i files..." % nfiles)

    all_eforce = []
    all_power = []

    all_param = []

    mass_vec = []

    p_ac = []
    p_dc = []

    e_ac = []
    e_dc = []

    pressure_vec = []

    zamp_avg = 0
    zphase_avg = 0
    zamp_N = 0
    zfb_avg = 0
    zfb_N = 0
    power_avg = 0
    power_N = 0

    Nbad = 0

    powpsd = []

    for fil_ind, fil in enumerate(files):

        # if fil_ind == 16 or fil_ind == 4:
        #     continue

        bu.progress_bar(fil_ind, nfiles)

        # Load data
        df = bu.DataFile()
        try:
            if new_trap:
                df.load_new(fil)
            else:
                df.load(fil, load_other=True)
        except Exception:
            traceback.print_exc()
            Nbad += 1
            continue

        try:
            # df.calibrate_stage_position()
            df.calibrate_phase()
        except Exception:
            traceback.print_exc()
            Nbad += 1
            continue

        if ('20181129' in fil) and ('high' in fil):
            pressure_vec.append(1.5)
        else:
            try:
                pressure_vec.append(df.pressures['pirani'])
            except Exception:
                pressure_vec.append(0.0)

        ### Extract electrode data
        if new_trap:
            top_elec = df.electrode_data[1]
            bot_elec = df.electrode_data[2]
        else:
            ### 'mon_fac' is a module-level electrode monitor gain
            ### defined outside this snippet
            top_elec = mon_fac * df.other_data[elec_ind]
            bot_elec = mon_fac * df.other_data[elec_ind + 1]

        fac = 1.0
        if np.std(top_elec) < 0.5 * np.std(bot_elec) \
                or np.std(bot_elec) < 0.5 * np.std(top_elec):
            print(
                'Adjusting electric field since only one electrode was digitized.'
            )
            fac = 2.0

        nsamp = len(top_elec)
        zeros = np.zeros(nsamp)

        voltages = [zeros, top_elec, bot_elec, zeros, \
                    zeros, zeros, zeros, zeros]
        efield = bu.trap_efield(voltages, new_trap=new_trap)
        eforce2 = fac * sign * efield[2] * q_bead

        tarr = np.arange(0, df.nsamp / df.fsamp, 1.0 / df.fsamp)

        # fig, axarr = plt.subplots(2,1,sharex=True,figsize=(10,8))

        # axarr[0].plot(tarr, top_elec, label='Top elec.')
        # axarr[0].plot(tarr, bot_elec, label='Bottom elec.')
        # axarr[0].set_ylabel('Apparent Voltages [V]')
        # axarr[0].legend(fontsize=12, loc='upper right')

        # axarr[1].plot(tarr, efield[2])
        # axarr[1].set_xlabel('Time [s]')
        # axarr[1].set_ylabel('Apparent Electric Field [V/m]')

        # fig.tight_layout()

        # plt.show()
        # input()

        freqs = np.fft.rfftfreq(df.nsamp, d=1.0 / df.fsamp)
        drive_ind = np.argmax(np.abs(np.fft.rfft(eforce2)))
        drive_freq = freqs[drive_ind]

        zamp = np.abs( np.fft.rfft(df.zcal) * bu.fft_norm(df.nsamp, df.fsamp) * \
                       np.sqrt(freqs[1] - freqs[0]) )
        ### Convert the measured z phase to displacement: half the 1064 nm
        ### wavelength per (2.9 * pi) radians of phase (calibration factor)
        zamp *= (1064.0e-9 / 2.0) * (1.0 / (2.9 * np.pi))
        zphase = np.angle(np.fft.rfft(df.zcal))
        zamp_avg += zamp[drive_ind]
        zamp_N += 1

        #plt.loglog(freqs, zamp)
        #plt.scatter(freqs[drive_ind], zamp[drive_ind], s=10, color='r')
        #plt.show()


        zfb = np.abs(np.fft.rfft(df.pos_fb[2]) * bu.fft_norm(df.nsamp, df.fsamp) * \
                      np.sqrt(freqs[1] - freqs[0]) )
        zfb_avg += zfb[drive_ind]
        zfb_N += 1

        #eforce2 = (top_elec * e_top_func(0.0) + bot_elec * e_bot_func(0.0)) * q_bead
        if noise:
            e_dc.append(np.mean(eforce2))
            e_ac_val = np.abs(np.fft.rfft(eforce2))[drive_ind]
            e_ac.append(e_ac_val * bu.fft_norm(df.nsamp, df.fsamp) \
                        * np.sqrt(freqs[1] - freqs[0]) )

        zphase_avg += (zphase[drive_ind] - np.angle(eforce2)[drive_ind])

        if np.sum(df.power) == 0.0:
            current = np.abs(df.other_data[pow_ind]) / trans_gain
        else:
            fac = 1e-6
            current = fac * df.power / trans_gain

        power = current / pd_gain
        power = power / line_filter_trans
        power = power / bs_fac

        power_avg += np.mean(power)
        power_N += 1
        if noise:
            p_dc.append(np.mean(power))
            p_ac_val = np.abs(np.fft.rfft(power))[drive_ind]
            p_ac.append(p_ac_val * bu.fft_norm(df.nsamp, df.fsamp) \
                        * np.sqrt(freqs[1] - freqs[0]) )

        fft1 = np.fft.rfft(power)

        if not len(powpsd):
            powpsd = np.abs(fft1)
            Npsd = 1
        else:
            powpsd += np.abs(fft1)
            Npsd += 1

        # freqs = np.fft.rfftfreq(df.nsamp, d=1.0/df.fsamp)
        # plt.loglog(freqs, np.abs(np.fft.rfft(eforce2)))
        # plt.loglog(freqs, np.abs(np.fft.rfft(power)))
        # plt.show()
        # input()

        # fig, axarr = plt.subplots(2,1,sharex=True,figsize=(10,8))

        # axarr[0].plot(tarr, power)
        # axarr[0].set_ylabel('Measured Power [Arb.]')

        # axarr[1].plot(tarr, power)
        # axarr[1].set_xlabel('Time [s]')
        # axarr[1].set_ylabel('Measured Power [Arb.]')

        # bot, top = axarr[1].get_ylim()
        # axarr[1].set_ylim(1.05*bot, 0)

        # fig.tight_layout()

        # plt.show()
        # input()


        bins, dat, errs = bu.spatial_bin(eforce2, power, nbins=200, width=0.0, #width=0.05, \
                                         dt=1.0/df.fsamp, harms=[1], \
                                         add_mean=True, verbose=False, \
                                         correct_phase_shift=correct_phase_shift, \
                                         grad_sign=0)

        ### Normalize the binned power to its mean so the fit intercept is O(1)
        dat = dat / np.mean(dat)

        #plt.plot(bins, dat, 'o')
        #plt.show()

        ### Fit normalized power vs. force (force scaled by 1e13 for numerical
        ### conditioning of the fit)
        popt, pcov = opti.curve_fit(line, bins*1.0e13, dat, \
                                    absolute_sigma=False, maxfev=10000)
        test_vals = np.linspace(np.min(eforce2 * 1.0e13),
                                np.max(eforce2 * 1.0e13), 100)

        fit = line(test_vals, *popt)

        ### The force at which the fitted power extrapolates to zero is the
        ### levitation force, so the mass follows from F_lev / g
        lev_force = -popt[1] / (popt[0] * 1.0e13)
        mass = lev_force / (9.806)

        #umass = ulev_force / 9.806
        #lmass = llev_force / 9.806

        if mass > upper_outlier or mass < lower_outlier:
            print('Crazy mass: {:0.2f} pg.... ignoring'.format(mass * 1e15))
            # fig, axarr = plt.subplots(3,1,sharex=True)
            # axarr[0].plot(eforce2)
            # axarr[1].plot(power)
            # axarr[2].plot(df.pos_data[2])
            # ylims = axarr[1].get_ylim()
            # axarr[1].set_ylim(ylims[0], 0)
            # plt.show()
            continue

        all_param.append(popt)

        all_eforce.append(bins)
        all_power.append(dat)

        mass_vec.append(mass)

    if noise:
        print('DC power: ', np.mean(p_dc), np.std(p_dc))
        print('AC power: ', np.mean(p_ac), np.std(p_ac))
        print('DC field: ', np.mean(e_dc), np.std(e_dc))
        print('AC field: ', np.mean(e_ac), np.std(e_ac))
        return

    #plt.plot(mass_vec)

    mean_popt = np.mean(all_param, axis=0)

    mean_lev = np.mean(mass_vec) * 9.806
    plot_vec = np.linspace(np.min(all_eforce), mean_lev, 100)

    if plot:
        fig = plt.figure(dpi=200, figsize=(6, 4))
        ax = fig.add_subplot(111)
        ### Plot force (in pN / g = pg) vs power
        plt.plot(np.array(all_eforce).flatten()[::5]*1e15*(1.0/9.806), \
                 np.array(all_power).flatten()[::5], \
                 'o', alpha = 0.5)
        #for params in all_param:
        #    plt.plot(plot_vec, line(plot_vec, params[0]*1e13, params[1]), \
        #             '--', color='r', lw=1, alpha=0.05)
        plt.plot(plot_vec*1e12*(1.0/9.806)*1e3, \
                 line(plot_vec, mean_popt[0]*1e13, mean_popt[1]), \
                 '--', color='k', lw=2, \
                 label='Implied mass: %0.1f pg' % (np.mean(mass_vec)*1e15))
        left, right = ax.get_xlim()
        # ax.set_xlim((left, 500))
        ax.set_xlim(*xlim)

        bot, top = ax.get_ylim()
        ax.set_ylim((0, top))

        plt.legend()
        plt.xlabel('Applied electrostatic force/$g$ (pg)')
        plt.ylabel('Optical power (arb. units)')
        plt.grid()
        plt.tight_layout()
        if save_example:
            fig.savefig(example_filename)
            fig.savefig(example_filename[:-4] + '.pdf')
            fig.savefig(example_filename[:-4] + '.svg')

        x_plotvec = np.array(all_eforce).flatten()
        y_plotvec = np.array(all_power).flatten()

        yresid = (y_plotvec - line(x_plotvec, mean_popt[0] * 1e13,
                                   mean_popt[1])) / y_plotvec

        plt.figure(dpi=200, figsize=(3, 2))
        plt.hist(yresid * 100, bins=30)
        plt.xlabel('Resid. Power [%]')
        plt.ylabel('Counts')
        plt.grid()
        plt.tight_layout()

        plt.figure(dpi=200, figsize=(3, 2))
        plt.plot(x_plotvec * 1e15, yresid * 100, 'o')
        plt.xlabel('E-Force [pN]')
        plt.ylabel('Resid. Pow. [%]')
        plt.grid()
        plt.tight_layout()

        derpfig = plt.figure(dpi=200, figsize=(3, 2))
        #derpfig.patch.set_alpha(0.0)
        plt.hist(np.array(mass_vec) * 1e15, bins=10)
        plt.xlabel('Mass (pg)')
        plt.ylabel('Count')
        plt.grid()
        #plt.title('Implied Masses, Each from 50s Integration')
        #plt.xlim(0.125, 0.131)
        plt.tight_layout()
        if save_example:
            derpfig.savefig(example_filename[:-4] + '_hist.png')
            derpfig.savefig(example_filename[:-4] + '_hist.pdf')
            derpfig.savefig(example_filename[:-4] + '_hist.svg')

        plt.show()

    final_mass = np.mean(mass_vec)
    final_err_stat = 0.5 * np.std(mass_vec)  #/ np.sqrt(len(mass_vec))
    final_err_sys = np.sqrt((0.015**2 + 0.01**2) * final_mass**2)
    final_pressure = np.mean(pressure_vec)

    if save_mass:
        save_arr = [final_mass, final_err_stat, final_err_sys]
        np.save(open(save_filename, 'wb'), save_arr)

    print('Bad Files: %i / %i' % (Nbad, nfiles))
    if print_res:
        gresid_fac = (2.0 * np.pi * freqs[drive_ind])**2 / 9.8

        print('      mass    [pg]: {:0.1f}'.format(final_mass * 1e15))
        print('      st.err  [pg]: {:0.2f}'.format(final_err_stat * 1e15))
        print('      sys.err [pg]: {:0.2f}'.format(final_err_sys * 1e15))
        print('      qbead    [e]: {:d}'.format(
            int(round(q_bead / constants.elementary_charge))))
        print('      P     [mbar]: {:0.2e}'.format(final_pressure))
        print('      <P>    [arb]: {:0.2e}'.format(power_avg / power_N))
        print('      zresid   [g]: {:0.3e}'.format(
            (zamp_avg / zamp_N) * gresid_fac))
        print('      zphase [rad]: {:0.3e}'.format(zphase_avg / zamp_N))
        print('      zfb    [arb]: {:0.3e}'.format(zfb_avg / zfb_N))
        outarr = [ final_mass*1e15, final_err_stat*1e15, final_err_sys*1e15, \
                   q_bead/constants.elementary_charge, \
                   final_pressure, power_avg / power_N, \
                   (zamp_avg / zamp_N) * gresid_fac, \
                   zphase_avg / zamp_N, zfb_avg / zfb_N ]
        return outarr
    else:
        scaled_params = np.array(all_param)
        scaled_params[:, 0] *= 1e13

        outdic = {'eforce': all_eforce, 'power': all_power, \
                  'linear_fit_params': scaled_params, \
                  'ext_masses': mass_vec}

        return outdic
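A hypothetical driver for the function above. The module-level names it relies on are not defined in this snippet, so every value below is a placeholder, not the authors' calibration:

import bead_util as bu   # assumed local analysis package, as elsewhere in this listing

### Placeholder definitions for the globals weigh_bead_efield references
mon_fac = 100.0            # electrode monitor gain (assumed)
sign = 1.0                 # sign convention for the z field (assumed)
noise = False              # skip the noise-characterization branch
manual_charge = False      # don't prompt for a charge guess
trans_gain = 100.0e3       # transimpedance gain [V/A] (assumed)
pd_gain = 0.25             # photodiode responsivity [A/W] (assumed)
line_filter_trans = 0.45   # line-filter transmission (assumed)
bs_fac = 0.01              # beam-splitter pickoff fraction (assumed)
lower_outlier = 50.0e-15   # mass window [kg] for rejecting bad fits (assumed)
upper_outlier = 150.0e-15
xlim = (-250, 250)         # x-limits for the summary plot (assumed)
save_example = False
example_filename = ''

def line(x, a, b):
    '''Linear model assumed by the power-vs-force fit.'''
    return a * x + b

### Hypothetical data path and channel indices
files, _ = bu.find_all_fnames('/data/old_trap/20200320/bead1/mass_meas/')
res = weigh_bead_efield(files, elec_ind=3, pow_ind=4, \
                        print_res=True, save_mass=False)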
test_filename = os.path.join(out_path, 'test.p')
bu.make_all_pardirs(test_filename)

raw_filenames, _ = bu.find_all_fnames(raw_path,
                                      ext='.p',
                                      skip_subdirectories=True)

### Loop over all the simulation output files and extract the
### simulation parameters used in that file (rbead, sep, height, etc.)
seps = []
heights = []
posvec = []
nfiles = len(raw_filenames)
for fil_ind, fil in enumerate(raw_filenames):
    ### Display percent completion
    bu.progress_bar(fil_ind, nfiles, suffix='finding seps/heights')

    sim_out = pickle.load(open(fil, 'rb'))
    keys = list(sim_out.keys())

    ### Skip the dictionary key that's a string; the remaining
    ### (numeric) key is the bead radius
    for key in keys:
        if isinstance(key, str):
            continue
        else:
            rbead_key = key

    if not rbead_cond(rbead_key):
        continue
    else:
        rbead = rbead_key
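The predicate rbead_cond used above is not defined in this excerpt; a plausible stand-in that keeps only simulation outputs for a single bead radius (the target and tolerance are assumptions):

def rbead_cond(rbead_key, target=2.35e-6, tol=1.0e-9):
    '''Hypothetical filter: accept a simulation output only if its bead
       radius key matches the target radius to within tol (both assumed).'''
    return abs(rbead_key - target) < tol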
    all_freq = []
    all_freq_err = []

    all_phase = []
    all_phase_err = []

    all_time = []

    nfiles = len(files)
    suffix = '%i / %i' % (pathind + 1, npaths)

    dfdt = 0
    spindown = False
    first = False
    for fileind, file in enumerate(files):
        bu.progress_bar(fileind, nfiles, suffix=suffix)

        fobj = hsDat(file)
        t = fobj.attribs["time"]

        vperp = fobj.dat[:, 0]

        if not spindown:
            vperp_filt = signal.filtfilt(b1, a1, vperp)

            vperp_filt_fft = np.fft.rfft(vperp_filt)
            vperp_filt_asd = np.abs(vperp_filt_fft)

            # if plot_raw_dat:
            #     plt.loglog(freqs, vperp_filt_asd)
            #     plt.loglog(freqs, np.abs(np.fft.rfft(vperp)))
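The filter coefficients b1, a1 applied above are not defined in this excerpt; a plausible construction, assuming a Butterworth bandpass around the signal of interest (all frequencies below are placeholders):

import scipy.signal as signal

fsamp = 500000.0       # sampling rate [Hz] (assumed)
center = 50000.0       # passband center [Hz] (assumed)
width = 2000.0         # passband width [Hz] (assumed)

### Third-order Butterworth bandpass; cutoffs are normalized to Nyquist
b1, a1 = signal.butter(3, [2.0*(center - 0.5*width)/fsamp, \
                           2.0*(center + 0.5*width)/fsamp], btype='bandpass')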
Example No. 30
0
    def AnalyzeData(self, br_temps = [], single_lambda = True, fit_beta = False, \
            lambda_value = 25E-6, fake_lambda = 25E-6, fake_alpha = 0, noise_data = False,
            same_noise = False, n_fake = 0, same_noise_level = 1E-12):
        """Analyzes the data with a fake signal injected. If white noise is True, replaces measured
           signal with white noise drawn from a distribution consistent with the errors"""

        if not self.grav_loaded:
            print("Must load theory data first...")
            return

        Nobj = len(self.file_data_objs)
        dft = pd.DataFrame()
        lambind_inj = np.argmin((fake_lambda - self.lambdas)**2)

        lambda_inds = np.arange(len(self.lambdas))
        if single_lambda:
            lind = np.argmin((lambda_value - self.lambdas)**2)
            lambda_inds = [lambda_inds[lind]]
            n_lam = 1
        else:
            n_lam = len(self.lambdas)

        # Handle MC drawn from data vs. the fully fake case
        if n_fake:
            objinds = list(range(n_fake))
        else:
            objinds = list(range(len(self.file_data_objs)))

        # First, loop over files
        for objind in objinds:

            if n_fake:
                file_data_obj = self.file_data_objs[0]
            else:
                file_data_obj = self.file_data_objs[objind]
            # Get file-specific information
            bu.progress_bar(objind, Nobj, suffix='Fitting Alpha vs. Time')
            t = file_data_obj.time
            phi = file_data_obj.phi_cm

            # Get sep and height from the axis positions
            full_pts = file_data_obj.generate_pts(self.p0_bead)
            yukfft_inj = yukfft_template(self.yukfuncs, file_data_obj.ginds,
                                         lambind_inj, full_pts)

            # Now loop over lambdas
            for i, lambind in enumerate(lambda_inds):
                if same_noise:
                    file_data_obj.daterrs = np.ones_like(
                        file_data_obj.daterrs) * same_noise_level
                yukfft = yukfft_template(self.yukfuncs, file_data_obj.ginds,
                                         lambind, full_pts)
                ### Beware: this generates a new noise realization for each
                ### lambda, which is not realistic and needs to be fixed.
                ### NOTE: both branches below are identical, so fit_beta
                ### currently has no effect here.
                if fit_beta:
                    temps = np.array([np.conj(yukfft)] + br_temps)
                else:
                    temps = np.array([np.conj(yukfft)] + br_temps)

                dfl = file_data_obj.fit_alpha_xyz(temps, \
                        inject = fake_alpha*yukfft_inj, fake_signal = noise_data)
                # Figure out which information to keep in the data frame
                dfl["lambda"] = self.lambdas[lambind]
                dfl["ax1pos"] = file_data_obj.ax1pos
                dfl["ax2pos"] = file_data_obj.ax2pos
                index = [[objind], [lambind]]
                dfl.index = index
                dft = pd.concat([dft, dfl])

        return dft
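A hypothetical sketch of invoking the method above; 'agg' stands for an instance of the owning class, already populated so that agg.grav_loaded is True and its lambdas, yukfuncs, and file_data_objs are loaded by whatever setup step the class provides:

# Hypothetical usage; the instance name and setup are assumptions
dft = agg.AnalyzeData(single_lambda=True, lambda_value=25e-6, \
                      fake_lambda=25e-6, fake_alpha=0, n_fake=0)

# Rows are indexed by (file index, lambda index); 'lambda' and the attractor
# positions ax1pos/ax2pos are attached per the method body above
print(dft[['lambda', 'ax1pos', 'ax2pos']])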