def save(self, suffix='', path='', subdir=''):
        '''Saves the background object as a pickle file with a standard
           name and path, both of which can be overridden.

           INPUTS: suffix, modifier appended to the standard file name
                   path, base directory for the output file
                   subdir, optional subdirectory appended to the path

           OUTPUTS: none, writes the pickled object to disk
        '''

        name = self.bead + '_' + self.parent_dir
        if len(suffix):
            name += '_' + suffix + '.p'
        else:
            name += '.p'

        if not len(path):
            path = '/backgrounds/background_classes/' + self.bead + '/'

        if len(subdir):
            if subdir[0] == '/':
                subdir = subdir[1:]

        path += subdir

        if path[-1] != '/':
            path += '/'

        bu.make_all_pardirs(path + name)

        pickle.dump(self, open(path + name, 'wb'))
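
Every example on this page calls bu.make_all_pardirs() on an output path before writing to it. The helper is defined in bead_util; a minimal sketch of such a function, assuming it simply creates any missing parent directories of the supplied path, is:

import os

def make_all_pardirs(path):
    ### Assumed behavior: make sure every parent directory of `path` exists,
    ### so a subsequent open()/pickle.dump()/np.save() call doesn't fail.
    parent = os.path.dirname(os.path.abspath(path))
    os.makedirs(parent, exist_ok=True)
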
def mc(ind):

    xi_0 = xi_init

    seed = seed_init * (ind + 1)
    np.random.seed(seed)

    base = '/data/old_trap_processed/spinsim_data/'
    base = os.path.join(base, savedir)
    base_filename = os.path.join(base, 'mc_{:d}/'.format(ind))

    bu.make_all_pardirs(os.path.join(base_filename, 'derp.txt'))

    for i in range(nfiles):
        #bu.progress_bar(i, nfiles)

        t0 = i * out_file_length
        tf = (i + 1) * out_file_length

        tvec, soln = stepper(xi_0, t0, tf, dt_sim, upsamp, rhs, rk4)

        xi_0 = soln[:, -1]

        tvec = tvec[:-1]
        soln = soln[:, :-1]
        out_arr = np.concatenate((tvec.reshape((1, len(tvec))), soln))

        filename = os.path.join(base_filename, 'outdat_{:d}.npy'.format(i))
        np.save(open(filename, 'wb'), out_arr)

    return seed
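
The stepper() and rk4() helpers used above are defined elsewhere in the simulation package (note that the argument order differs between this call and the one in the later run_mc() example). A minimal fixed-step sketch, assuming the signature used in this snippet, stepper(xi_0, t0, tf, dt, upsamp, system, method), with a classical fourth-order Runge-Kutta step:

import numpy as np

def rk4(xi, t, dt, system):
    ### One classical RK4 step for d(xi)/dt = system(t, xi) (assumed signature)
    k1 = system(t, xi)
    k2 = system(t + 0.5*dt, xi + 0.5*dt*k1)
    k3 = system(t + 0.5*dt, xi + 0.5*dt*k2)
    k4 = system(t + dt, xi + dt*k3)
    return xi + (dt / 6.0) * (k1 + 2.0*k2 + 2.0*k3 + k4)

def stepper(xi_0, t0, tf, dt, upsamp, system, method):
    ### Integrate from t0 to tf with a fixed step dt, then keep every
    ### `upsamp`-th sample so the output matches the data-acquisition rate
    nsteps = int(round((tf - t0) / dt))
    tvec = t0 + dt * np.arange(nsteps + 1)
    soln = np.zeros((len(xi_0), nsteps + 1))
    soln[:, 0] = xi_0
    xi = np.copy(xi_0)
    for i in range(nsteps):
        xi = method(xi, tvec[i], dt, system)
        soln[:, i+1] = xi
    return tvec[::int(upsamp)], soln[:, ::int(upsamp)]
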
    lab_str = stuff[i][2]
    #color = stuff[i][3]
    color = colors[2*i+1]

    if np.max(dat[0]) > maxp:
        maxp = np.max(dat[0])

    fitp = np.linspace(0, np.max(dat[0]), 100)
    fit = np.array(phi_ffun(fitp, pmax, 0))
    ax.scatter(dat[0], dat[1] / np.pi, edgecolors=color, facecolors='none', alpha=0.5)
    ax.plot(fitp, fit / np.pi, '-', color=color, lw=3, label=lab_str)

ax.set_xlim(-0.05*maxp, 1.05*maxp)
#ax.set_xlabel('Pressure, $p$ [mbar]')
ax.set_xlabel('$p$ [mbar]')
ax.set_ylabel('$\\phi_{\\mathrm{eq}}$ [$\\pi$ rad]')
ax.legend(fontsize=10)
plt.tight_layout()
# fig.suptitle(title_str, fontsize=16)
# fig.subplots_adjust(top=0.91)

fig_path = base_plot_path + '.png'
fig_path2 = base_plot_path + '.pdf'
fig_path3 = base_plot_path + '.svg'
bu.make_all_pardirs(fig_path)
fig.savefig(fig_path)
fig.savefig(fig_path2)
fig.savefig(fig_path3)
plt.show()

Example #4
ext = config.extensions['trans_fun']

# Generate automatic paths for saving
savepath = '/data/old_trap_processed/calibrations/transfer_funcs/' + tf_date + ext

if save_charge:
    prefix = '/data/old_trap_processed/calibrations/charges/'
    if recharge:
        charge_path = prefix + step_date + '_recharge.charge'
    else:
        charge_path = prefix + step_date + '.charge'

    if new_trap:
        charge_path = charge_path.replace('old_trap', 'new_trap')
    bu.make_all_pardirs(charge_path)

if new_trap:
    savepath = savepath.replace('old_trap', 'new_trap')
bu.make_all_pardirs(savepath)

use_origin_timestamp = False
# if new_trap:
#     use_origin_timestamp = True

# Find all the relevant files
step_cal_files, lengths = bu.find_all_fnames(step_cal_dir, sort_by_index=sort_by_index, \
                                             sort_time=sort_time, \
                                             use_origin_timestamp=use_origin_timestamp, \
                                             skip_subdirectories=skip_subdirectories)
# for name in step_cal_files:
Example #5
        save_base = '/data/old_trap_processed/spinning/pramp_data/{:s}/{:s}/'.format(
            date, gas)
        save_file_before = save_base + '{:s}_pramp_{:d}_rga_before.p'.format(
            gas, pramp_index + 0)
        save_file_flush = save_base + '{:s}_pramp_{:d}_rga_flush.p'.format(
            gas, pramp_index + 0)

        fig_filename_base = '/home/cblakemore/plots/{:s}/pramp/{:s}/flush{:d}_'.format(
            date, gas, pramp_index + 0)

        measurements.append([gas, rga_data_file1, rga_data_file2, \
                             save_file_before, save_file_flush, \
                             save_base, fig_filename_base, pramp_index])

        bu.make_all_pardirs(save_file_before)
        bu.make_all_pardirs(save_base)
        bu.make_all_pardirs(fig_filename_base)

#rga_data_file1 = base + 'He_20190607_measurement_2/meas2_He-leak_2_000001.txt'
#rga_data_file2 = base + 'He_20190607_measurement_2/meas2_He-leak_3_000001.txt'

# base1 = '/daq2/20190514/bead1/spinning/pramp3/He/rga_scans/'
# rga_data_file1 = base1 + 'He_20190607_measurement_1/meas1_pre-He-leak_2_000001.txt'

# base2 = '/daq2/20190625/rga_scans/'
# rga_data_file2 = base2 + 'reseat_with-grease_000002.txt'

plot_scan = False

plot_many_scans = False

for ddir in data_dirs:
    # Skip the ones I've already calculated
    #if ddir == data_dirs[0]:
    #    continue
    print()

    paths = gu.build_paths(ddir, opt_ext=opt_ext, new_trap=new_trap)
    p0_bead = p0_bead_dict[paths['date']]

    agg_dat = gu.AggregateData([], p0_bead=p0_bead, harms=harms)
    if load_agg:
        agg_dat.load(paths['agg_path'])
        agg_dat.gfuncs_class.reload_grav_funcs()
        bu.make_all_pardirs(paths['alpha_dict_path'])
        agg_dat.save_alpha_dict(paths['alpha_dict_path'])
        #agg_dat.save_alpha_arr(alpha_arr_path)
    elif load_alpha_arr:
        agg_dat.load_alpha_dict(paths['alpha_dict_path'])

    if load_agg and plot:
        agg_dat.plot_force_plane(resp=0, fig_ind=1, show=False)
        agg_dat.plot_force_plane(resp=1, fig_ind=2, show=False)
        agg_dat.plot_force_plane(resp=2, fig_ind=3, show=True)

    #print(agg_dat.alpha_xyz_dict[0.0][370.0].keys())#.keys())
    #input()

    keys0 = list(agg_dat.alpha_xyz_dict.keys())
    keys0.sort()
def run_mc(params):

    ind = params[0]
    pressure = params[1]
    drive_freq = params[2]
    drive_voltage = params[3]
    drive_voltage_noise = params[4]
    drive_phase_noise = params[5]
    init_angle = params[6]

    beta_rot = pressure * np.sqrt(m0) / kappa
    drive_amp = np.abs(bu.trap_efield([0, 0, 0, drive_voltage, -1.0*drive_voltage, \
                                        0, 0, 0], nsamp=1)[0])
    drive_amp_noise = drive_voltage_noise * (drive_amp / drive_voltage)

    lib_freq = np.sqrt(drive_amp * p0 * dipole_units / Ibead) / (2.0 * np.pi)

    xi_0 = np.array([p0*np.cos(init_angle), p0*np.sin(init_angle), 0.0, \
                        0.0, 0.0, 2.0 * np.pi * drive_freq])

    seed = seed_init * (ind + 1)

    np.random.seed(seed)

    values_to_save = {}
    values_to_save['mbead'] = mbead
    values_to_save['Ibead'] = Ibead
    values_to_save['p0'] = p0
    values_to_save['fsamp'] = fsamp
    values_to_save['seed'] = seed
    values_to_save['xi_0'] = xi_0
    values_to_save['pressure'] = pressure
    values_to_save['drive_freq'] = drive_freq
    values_to_save['drive_amp'] = drive_amp
    values_to_save['drive_amp_noise'] = drive_amp_noise
    values_to_save['drive_phase_noise'] = drive_phase_noise

    base_filename = os.path.join(base, 'mc_{:d}/'.format(ind))

    bu.make_all_pardirs(os.path.join(base_filename, 'derp.txt'))

    param_path = os.path.join(base_filename, 'params.p')
    pickle.dump(values_to_save, open(param_path, 'wb'))

    @jit()
    def rhs(t, xi):
        '''This function represents the right-hand side of the differential equation
           d(xi)/dt = rhs(t, xi), where xi is a 6-dimensional vector representing the 
           system of a rotating microsphere: {px, py, pz, omegax, omegay, omegaz}, 
           with p the dipole moment and omega the angular velocity. The system is 
           solved in Cartesian coordinates to avoid the branch cuts inherent to 
           integrating phase angles.

           The function computes the following torques:
                drag torque, computed as (- beta * omega)
                drive torque, computed as {px, py, pz} (cross) {Ex, Ey, Ez}
                optical torque, constant torque about the z axis
           The stochastic thermal torque is handled separately in the
           rhs_stochastic() function below.
        '''
        drag_torque = -1.0 * beta_rot * xi[3:]

        #### Construct the rotating Efield drive
        Efield = np.array([drive_amp * np.cos(2.0 * np.pi * drive_freq * t), \
                           drive_amp * np.sin(2.0 * np.pi * drive_freq * t), \
                           0.0])

        drive_torque = np.cross(xi[:3] * dipole_units, Efield)
        optical_torque = np.array([0.0, 0.0, N_opt])

        total_torque = drive_torque + drag_torque + optical_torque

        return np.concatenate(
            (-1.0 * np.cross(xi[:3], xi[3:]), total_torque / Ibead))

    @jit()
    def rhs_stochastic(t, xi):
        '''Basically the same as the rhs() function above, but including only the
           stochastic forcing terms. It doesn't update the dipole moment
           projections; it just adds extra angular-velocity kicks (Delta omega).

           The function computes the following torques:
                thermal torque, white noise with power set by the global
                                    parameters via the fluctuation-dissipation theorem
                drive torque, computed as {px, py, pz} (cross) {Ex, Ey, Ez},
                                where the Efield includes only the noise terms
        '''
        thermal_torque = np.sqrt(
            4.0 * kb * T * beta_rot * fsim) * np.random.randn(3)

        ### Amplitude noise for all three axes
        an = drive_amp_noise * np.random.randn(3)

        ### Phase noise for the two drive axes
        pn = drive_phase_noise * np.random.randn(2)

        #### Construct the rotating Efield drive
        Efield1 = np.array([drive_amp * np.cos(2.0 * np.pi * drive_freq * t), \
                            drive_amp * np.sin(2.0 * np.pi * drive_freq * t), \
                            0.0])
        Efield2 = np.array([drive_amp * np.cos(2.0 * np.pi * drive_freq * t + pn[0]), \
                            drive_amp * np.sin(2.0 * np.pi * drive_freq * t + pn[1]), \
                            0.0])
        Efield = Efield2 - Efield1 + an

        drive_torque = np.cross(xi[:3] * dipole_units, Efield)

        total_torque = drive_torque + thermal_torque

        return np.concatenate((np.zeros(3), total_torque / Ibead))

    for i in range(nfiles):
        #bu.progress_bar(i, nfiles)

        t0 = i * out_file_length
        tf = (i + 1) * out_file_length

        tvec, soln = stepper(xi_0, t0, tf, dt_sim, upsamp, rk4, rhs, \
                             system_stochastic=rhs_stochastic)

        xi_0 = soln[:, -1]

        tvec = tvec[:-1]
        soln = soln[:, :-1]
        out_arr = np.concatenate((tvec.reshape((1, len(tvec))), soln))

        filename = os.path.join(base_filename, 'outdat_{:d}.h5'.format(i))
        fobj = h5py.File(filename, 'w')
        # group = fobj.create_group('sim_data')
        fobj.create_dataset('sim_data', data=out_arr, compression='gzip', \
                            compression_opts=9)
        fobj.close()

        # filename = os.path.join(base_filename, 'outdat_{:d}.npy'.format(i))
        # np.save(open(filename, 'wb'), out_arr)

    return seed
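
run_mc() writes each chunk of the solution to an HDF5 file containing a single 'sim_data' dataset whose first row is the time vector. A short sketch of reading one of these files back (the filename below is illustrative):

import h5py

filename = 'mc_0/outdat_0.h5'   # hypothetical output chunk
with h5py.File(filename, 'r') as fobj:
    out_arr = fobj['sim_data'][()]

tvec = out_arr[0]    # time vector
soln = out_arr[1:]   # state: {px, py, pz, omegax, omegay, omegaz}
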
    else:
        print('Loading aggregate data from:')
        print('     {:s}'.format(agg_load_path))

    print('----------------------------------')
    print('Will save to:')
    print('     {:s}'.format(agg_path))

    print('----------------------------------')
    print('Will save plots to:')
    print('     {:s}'.format(plot_dir))
    print('----------------------------------')
    print()

    if save:
        bu.make_all_pardirs(agg_path)


    if reprocess:

        datafiles, lengths = bu.find_all_fnames(ddir, ext=config.extensions['data'], \
                                                substr=substr, sort_by_index=True, \
                                                sort_time=False)
        datafiles = datafiles[:Nfiles]

        agg_dat = gu.AggregateData(datafiles, p0_bead=p0_bead, harms=harms, \
                                   plot_harm_extraction=plot_harms, new_trap=new_trap, \
                                   step_cal_drive_freq=71.0, ncore=ncore, noisebins=10, \
                                   aux_data=aux_data, suppress_off_diag=suppress_off_diag, \
                                   fake_attractor_data=fake_attractor_data, \
                                   fake_attractor_data_amp=fake_attractor_data_amp, \
yticks = [-np.pi / 2.0, 0.0, np.pi / 2.0]
yticklabels = ['$-\\pi/2$', '0', '$\\pi/2$']

fit_ringdown = True
initial_offset = 0.0

plot_rebin = False
plot_ringdown_fit = True
close_xlim = True
show = False

########################################################################
########################################################################
########################################################################

bu.make_all_pardirs(ringdown_data_path)


def gauss(x, A, mu, sigma, c):
    return A * np.exp(-1.0 * (x - mu)**2 / (2.0 * sigma**2)) + c


def ngauss(x, A, mu, sigma, c, n):
    return A * np.exp(-1.0 * np.abs(x - mu)**n / (2.0 * sigma**n)) + c
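
gauss() and ngauss() are used below as fit models. A quick, self-contained sketch of fitting the Gaussian model to synthetic data with scipy.optimize.curve_fit (the noise level and initial guesses are arbitrary):

import numpy as np
import scipy.optimize as opti

xdat = np.linspace(-5.0, 5.0, 200)
ydat = gauss(xdat, 1.0, 0.5, 1.2, 0.1) + 0.02 * np.random.randn(len(xdat))

guess = [1.0, 0.0, 1.0, 0.0]   # A, mu, sigma, c
popt, pcov = opti.curve_fit(gauss, xdat, ydat, p0=guess)
print('Fitted (A, mu, sigma, c):', popt)
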


if plot_carrier_demod or plot_libration_demod or plot_downsample:
    ncore = 1

files, _ = bu.find_all_fnames(dir_name, ext='.h5', sort_time=True)
files = files[file_inds[0]:file_inds[1]:file_step]
    pickle.dump(spectra_dict, open(spectra_save_path, 'wb'))

if make_image_sequence:

    if plot_features:
        phase_feature_lists = pickle.load(open(phase_feature_savepath, 'rb'))

    if plot_drive_features:
        drive_feature_lists = pickle.load(open(drive_feature_savepath, 'rb'))

    for i, fft in enumerate(phase_results):

        figname = os.path.join(sideband_basepath, 'image_{:04d}.png'.format(i))
        if i == 0:
            bu.make_all_pardirs(figname)

        fig, axarr = plt.subplots(2, 1, figsize=figsize)

        axarr[0].loglog(full_freqs[out_inds], np.abs(fft), \
                  label='{:d} s'.format(int(times[i])))
        axarr[1].loglog(full_freqs[out_inds], np.abs(fft), \
                  label='{:d} s'.format(int(times[i])))

        # axarr[1].set_title('Zoom on Libration Feature')

        axarr[0].set_xlabel('Frequency [Hz]')
        axarr[1].set_xlabel('Frequency [Hz]')
        axarr[0].set_ylabel('Sideband ASD\n[rad / $\\sqrt{\\rm Hz}$]')
        axarr[1].set_ylabel('Sideband ASD\n[rad / $\\sqrt{\\rm Hz}$]')
import bead_util as bu
import calib_util as cal
import transfer_func_util as tf
import configuration as config

### Specify path with data, or folder with many picomotor subfolders

dir1 = '/data/20180220/bead1/gravity_data/grav_data_noshield/'
#dir1 = '/data/20180215/bead1/grav_data_withshield/'

parts = dir1.split('/')
save_path1 = '/force_v_pos/%s/%s/%s_force_v_pos_dic.p' % (parts[2], parts[3],
                                                          parts[-2])
save_path2 = '/force_v_pos/%s/%s/%s_diagforce_v_pos_dic.p' % (
    parts[2], parts[3], parts[-2])
bu.make_all_pardirs(save_path1)
bu.make_all_pardirs(save_path2)

save = False  #True
load = True
load_path = '/force_v_pos/%s/%s/%s_force_v_pos_dic.p' % (parts[2], parts[3],
                                                         parts[-2])

# If True, this will trigger the script to use the following inputs
# and process many directories simultaneously
picomotors = False
picodir = '/data/20171106/bead1/grav_data_10picopos/'
date = picodir.split('/')[2]
numdirs = bu.count_dirs(picodir)
parent_savepath = '/force_v_pos/' + date + '_' + str(numdirs) + 'picopos_2/'
if not os.path.exists(parent_savepath):
    os.makedirs(parent_savepath)   ### assumed completion: create the parent save directory
results = Parallel(n_jobs=ncore)(delayed(proc_file)(file) for file in tqdm(files))


phase_peaks_all = []
drive_peaks_all = []
for phase_peaks, drive_peaks in results:
    phase_peaks_all.append(phase_peaks)
    drive_peaks_all.append(drive_peaks)




phase_feature_lists = \
    bu.track_spectral_feature(phase_peaks_all, first_fft=first_fft, \
                              init_features=init_features, \
                              allowed_jumps=allowed_jumps)

bu.make_all_pardirs(phase_feature_savepath)
pickle.dump(phase_feature_lists, open(phase_feature_savepath, 'wb'))

if track_drive_features:
    drive_feature_lists = \
        bu.track_spectral_feature(drive_peaks_all, first_fft=first_drive_fft, \
                                  init_features=[fspin, 2.0*fspin], \
                                  allowed_jumps=0.01)

    pickle.dump(drive_feature_lists, open(drive_feature_savepath, 'wb'))



Example #13
dx = np.abs(xx[1] - xx[0])
dy = np.abs(yy[1] - yy[0])
dz = np.abs(zz[1] - zz[0])

### Assuming rectangular volume elements, convert the density grid
### to a grid of point masses
cell_volume = dx * dy * dz
m = rho * cell_volume
m2 = rho2 * cell_volume
m3 = rho3 * cell_volume

### Establish a path to save the data, and create the directory if it
### isn't already there
results_path = os.path.abspath('../raw_results/')
test_filename = os.path.join(results_path, 'test.p')
bu.make_all_pardirs(test_filename)

### Assuming n_goldfinger is an odd integer, this just sets up some indices
### of the fingers for use in the periodic sampling part
finger_inds = np.linspace(-1.0 * int( 0.5*n_goldfinger ), \
                           1.0 * int( 0.5*n_goldfinger ), \
                           n_goldfinger)
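### For example, n_goldfinger = 5 gives finger_inds = [-2., -1., 0., 1., 2.]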


### Function to determine which finger you're in front of, and then
### compute the equivalent coordinate assuming you're in front of the
### central finger. Part of the periodicity.
def find_ind(ypos):
    if np.abs(ypos) <= 0.5 * full_period:
        ind = 0
    else:
for pathind, path in enumerate(paths):

    parts = path.split('/')
    name = parts[-1][:-4]

    if name[-4:] == 'DATA':
        name = name[:-5]

    xx_path = '/processed_data/comsol_data/patch_potentials/' + name + '.xx'
    yy_path = '/processed_data/comsol_data/patch_potentials/' + name + '.yy'
    zz_path = '/processed_data/comsol_data/patch_potentials/' + name + '.zz'
    field_path = '/processed_data/comsol_data/patch_potentials/' + name + '.field'
    pot_path = '/processed_data/comsol_data/patch_potentials/' + name + '.potential'

    bu.make_all_pardirs(pot_path)

    # Load a regularly-gridded dataset
    fil = open(path, 'r')
    lines = fil.readlines()
    fil.close()


    ### CONVERT STUPID COMSOL OUTPUT TO SENSIBLE FORM
    # Load grid points
    linenum = 0
    for line in lines:
        linenum += 1

        if line[0] == '%':
            continue
Example #15
#gases = ['He', 'N2']
gases = ['He', 'N2']
inds = [1, 2, 3]

# date = '20190905'
# gases = ['He', 'N2']
# inds = [1, 2, 3]



# base_path = '/processed_data/spinning/wobble/20190626/'
# base_path = '/processed_data/spinning/wobble/20190626/long_wobble/'
base_path = '/data/old_trap_processed/spinning/wobble/{:s}/'.format(date)

base_plot_path = '/home/cblakemore/plots/{:s}/pramp/'.format(date)
bu.make_all_pardirs(base_plot_path)
savefig = False

baselen = len(base_path)

# gas = 'N2'
# paths = [base_path + '%s_pramp_1/' % gas, \
#          base_path + '%s_pramp_2/' % gas, \
#          base_path + '%s_pramp_3/' % gas, \
#         ]

path_dict = {}
for meas in itertools.product(gases, inds):
    gas, pramp_ind = meas
    if gas not in list(path_dict.keys()):
        path_dict[gas] = []
Example #16
paths, save_paths = (list(t) for t in zip(*sorted(zip(paths, save_paths))))

path_dict['XX'] = {}
path_dict['XX'][1] = (paths, save_paths)
gases = ['XX']
inds = [1]

save = True
load = False

#####################################################

if plot_raw_dat or plot_demod or plot_phase or plot_sideband_fit:
    ncore = 1

bu.make_all_pardirs(save_paths[0])

Ibead = bu.get_Ibead(date=date)
# Ibead = bu.get_Ibead(date=date, rhobead={'val': 1850.0, 'sterr': 0.0, 'syserr': 0.0})


def sqrt(x, A, x0, b):
    return A * np.sqrt(x - x0)  #+ b


def gauss(x, A, mu, sigma, c):
    return A * np.exp(-1.0 * (x - mu)**2 / (2.0 * sigma**2)) + c


def lorentzian(x, A, mu, gamma, c):
    return (A / np.pi) * (gamma**2 / ((x - mu)**2 + gamma**2)) + c
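
### Note: lorentzian() peaks at A/pi + c at x = mu; gamma is the half-width
### at half-maximum of the peak above the constant offset c.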
Example #17
def run_mc(params):

    ind = params[0]
    pressure = params[1]
    drive_freq = params[2]
    drive_voltage = params[3]
    drive_voltage_noise = params[4]
    drive_phase_noise = params[5]
    init_angle = params[6]
    discretized_phase = params[7]

    beta_rot = pressure * np.sqrt(m0) / kappa
    drive_amp = np.abs(bu.trap_efield([0, 0, 0, drive_voltage, -1.0*drive_voltage, \
                                       0, 0, 0], nsamp=1)[0])
    drive_amp_noise = drive_voltage_noise * (drive_amp / drive_voltage)

    seed = seed_init * (ind + 1)

    xi_0 = np.array([np.pi/2.0, 0.0, 0.0, \
                     0.0, 2.0*np.pi*drive_freq, 0.0])

    time_constant = Ibead / beta_rot

    np.random.seed(seed)

    ### If desired, set a thermalization time equal to 10x the time constant
    ### for this particular pressure and Ibead combination, capped at 300 s
    if variable_thermalization:
        t_therm = np.min([10.0 * time_constant, 300.0])
        nthermfiles = int(t_therm / out_file_length) + 1
    else:
        t_therm = user_t_therm
        nthermfiles = user_nthermfiles

    values_to_save = {}
    values_to_save['mbead'] = mbead
    values_to_save['Ibead'] = Ibead
    values_to_save['kappa'] = kappa
    values_to_save['beta_rot'] = beta_rot
    values_to_save['p0'] = p0
    values_to_save['fsamp'] = fsamp
    values_to_save['fsim'] = fsim
    values_to_save['seed'] = seed
    values_to_save['xi_0'] = xi_0
    values_to_save['init_angle'] = init_angle
    values_to_save['pressure'] = pressure
    values_to_save['m0'] = m0
    values_to_save['drive_freq'] = drive_freq
    values_to_save['drive_amp'] = drive_amp
    values_to_save['drive_amp_noise'] = drive_amp_noise
    values_to_save['drive_phase_noise'] = drive_phase_noise
    values_to_save['discretized_phase'] = discretized_phase
    values_to_save['t_therm'] = t_therm

    if not TEST:
        base_filename = os.path.join(base, 'mc_{:d}/'.format(ind))

        bu.make_all_pardirs(os.path.join(base_filename, 'derp.txt'))

        param_path = os.path.join(base_filename, 'params.p')
        pickle.dump(values_to_save, open(param_path, 'wb'))


    def E_phi_func(t, t_therm=0.0, init_angle=0.0):
        raw_val = 2.0 * np.pi * drive_freq * (t + t_therm) + init_angle
        if discretized_phase:
            n_disc = int(raw_val / discretized_phase)
            return n_disc * discretized_phase
        else:
            return raw_val
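
    ### Illustration (assumed values): with discretized_phase = 2.0*np.pi/512,
    ### E_phi_func() quantizes the drive phase to 512 steps per cycle,
    ### mimicking a digitally synthesized drive.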

    ### Matrix for the stochastic driving processes
    torque_noise = np.sqrt(4.0 * kb * T * beta_rot)
    # B = np.array([[0, 0,   0,   0],
    #               [0, 0,   0,   0],
    #               [0, 0, 1.0,   0],
    #               [0, 0,   0, 1.0]])
    B = np.array([[0, 0, 0,   0,   0,   0],
                  [0, 0, 0,   0,   0,   0],
                  [0, 0, 0,   0,   0,   0],
                  [0, 0, 0, 1.0,   0,   0],
                  [0, 0, 0,   0, 1.0,   0],
                  [0, 0, 0,   0,   0, 1.0]])
    B *= torque_noise / Ibead
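    ### Only the angular-velocity components of the state receive stochastic
    ### kicks; the white-noise amplitude sqrt(4 kB T beta_rot) comes from the
    ### fluctuation-dissipation theorem.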

    ### Define the system such that d(xi) = f(xi, t) * dt
    # @jit()
    def f(x, t):
        torque_theta = drive_amp * p0 * np.sin(0.5 * np.pi - x[0]) \
                            - 1.0 * beta_rot * x[3]

        c_amp = drive_amp
        E_phi = E_phi_func(t)
        if fterm_noise:
            c_amp += drive_amp_noise * np.random.randn()
            E_phi += drive_phase_noise * np.random.randn()

        torque_phi = c_amp * p0 * np.sin(E_phi - x[1]) * np.sin(x[0]) \
                            - 1.0 * beta_rot * x[4]

        torque_psi = -1.0 * beta_rot * x[5]

        return np.array([x[3], x[4], x[5], \
                         torque_theta / Ibead, \
                         torque_phi / Ibead, \
                         torque_psi / Ibead])

    ### Define the stochastic portion of the system
    # @jit()
    def G(x, t):
        newB = np.zeros((6,6))

        if gterm_noise:
            E_phi = E_phi_func(t)
            amp_noise_term = drive_amp_noise * p0 * np.sin(E_phi - x[1]) * np.sin(x[0])

            E_phi_rand = drive_phase_noise * np.random.randn()
            phase_noise_term = drive_amp * p0  * np.sin(E_phi_rand) * np.sin(x[0])
            newB[4,4] += amp_noise_term + phase_noise_term

        return B + newB


    ### Thermalize
    xi_init = np.copy(xi_0)
    for i in range(nthermfiles):
        t0 = i*out_file_length
        tf = (i+1)*out_file_length

        nsim = int(out_file_length * fsim)
        tvec = np.linspace(t0, tf, nsim+1)

        result = sdeint.itoint(f, G, xi_init, tvec).T
        xi_init = np.copy(result[:,-1])


    ### Redefine the system taking into account the thermalization time
    ### and the desired phase offset
    # @jit()
    def f(x, t):
        torque_theta = drive_amp * p0 * np.sin(0.5 * np.pi - x[0]) \
                            - 1.0 * beta_rot * x[3]

        c_amp = drive_amp
        E_phi = E_phi_func(t, t_therm=t_therm, init_angle=init_angle)
        if fterm_noise:
            c_amp += drive_amp_noise * np.random.randn()
            E_phi += drive_phase_noise * np.random.randn()

        torque_phi = c_amp * p0 * np.sin(E_phi - x[1]) * np.sin(x[0]) \
                            - 1.0 * beta_rot * x[4]

        torque_psi = -1.0 * beta_rot * x[5]

        return np.array([x[3], x[4], x[5], \
                         torque_theta / Ibead, \
                         torque_phi / Ibead, \
                         torque_psi / Ibead])


    # @jit()
    # def f(x, t):
    #     torque_theta = - 1.0 * beta_rot * x[2]
    #     torque_phi = - 1.0 * beta_rot * x[3]

    #     return np.array([x[2], x[3], torque_theta / Ibead, torque_phi / Ibead])

    ### Define the stochastic portion of the system
    def G(x, t):
        newB = np.zeros((6,6))
        if gterm_noise:
            E_phi = E_phi_func(t, t_therm=t_therm, init_angle=init_angle)
            amp_noise_term = drive_amp_noise * p0 * np.sin(E_phi - x[1]) * np.sin(x[0])

            E_phi_rand = drive_phase_noise * np.random.randn()
            phase_noise_term = drive_amp * p0  * np.sin(E_phi_rand) * np.sin(x[0])

            newB[4,4] += amp_noise_term + phase_noise_term

        return B + newB




    ### Run the simulation with the thermalized solution
    for i in range(nfiles):
        # start = time.time()
        t0 = i*out_file_length
        tf = (i+1)*out_file_length

        nsim = int(out_file_length * fsim)
        tvec = np.linspace(t0, tf, nsim+1)

        ### Solve!
        # print('RUNNING SIM')
        result = sdeint.itoint(f, G, xi_init, tvec).T
        xi_init = np.copy(result[:,-1])

        tvec = tvec[:-1]
        soln = result[:,:-1]

        # print('DOWNSAMPLING')
        nsamp = int(out_file_length * fsamp)
        # soln_ds, tvec_ds = signal.resample(soln, t=tvec, \
        #                                    num=nsamp, axis=-1)
        # soln_ds = signal.decimate(soln, int(upsamp))

        tvec_ds = tvec[::int(upsamp)]
        soln_ds = soln[:,::int(upsamp)]

        # plt.plot(tvec, soln[1])
        # plt.plot(tvec_ds, soln_ds[1])
        # plt.plot(tvec_ds, soln_ds_2[1])

        # plt.show()

        if not TEST:
            out_arr = np.concatenate( (tvec_ds.reshape((1, len(tvec_ds))), soln_ds) )

            filename = os.path.join(base_filename, 'outdat_{:d}.h5'.format(i)) 

            fobj = h5py.File(filename, 'w')
            fobj.create_dataset('sim_data', data=out_arr, compression='gzip', \
                                compression_opts=9)
            fobj.close()

        # stop = time.time()
        # print('Time for one file: {:0.1f}'.format(stop-start))

    return seed
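
run_mc() takes a flat parameter tuple: (ind, pressure, drive_freq, drive_voltage, drive_voltage_noise, drive_phase_noise, init_angle, discretized_phase). A sketch of dispatching it over a list of pressures with joblib, in the same Parallel/delayed style used elsewhere on this page (the parameter values and core count are placeholders):

import numpy as np
from joblib import Parallel, delayed
from tqdm import tqdm

ncore = 4                                # placeholder core count
pressures = [1.0e-3, 1.0e-2, 1.0e-1]     # placeholder pressures

param_list = []
for ind, pressure in enumerate(pressures):
    ### (ind, pressure, drive_freq, drive_voltage, drive_voltage_noise,
    ###  drive_phase_noise, init_angle, discretized_phase)
    param_list.append((ind, pressure, 25000.0, 400.0, 0.0, 0.0, np.pi/2.0, 0.0))

seeds = Parallel(n_jobs=ncore)( delayed(run_mc)(params) for params in tqdm(param_list) )
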
# raw_input()

outdict = {}
for pathind, path in enumerate(paths):

    fc = 2.0 * f_rot
    wc = 2.0 * np.pi * fc

    strs = path.split('/')
    if len(strs[-1]) == 0:
        dirname = strs[-2]
    else:
        dirname = strs[-1]

    out_f = save_base + dirname
    bu.make_all_pardirs(out_f)

    if load:
        outdict[out_f] = pickle.load(open(out_f + '_all.p', 'rb'))
        all_time = outdict[out_f]['all_time']
        all_freq = outdict[out_f]['all_freq']
        all_freq_err = outdict[out_f]['all_freq_err']
        plt.errorbar(all_time.flatten(),
                     all_freq.flatten(),
                     yerr=all_freq_err.flatten())
        plt.show()
        continue

    files, lengths = bu.find_all_fnames(path, sort_time=True)

    files = files[:1000]
Example #19
def weigh_bead_efield(files, elec_ind, pow_ind, colormap='plasma', sort='time',\
                      file_inds=(0,10000), plot=True, print_res=False, pos=False, \
                      save_mass=False, new_trap=False, correct_phase_shift=False):
    '''Loops over a list of file names, loads each file, and uses the applied
       electrostatic force together with the measured optical levitation power
       to infer the mass of the trapped microsphere from a linear fit.

       INPUTS: files, list of file names from which to extract data
               elec_ind, other_data index of the top electrode monitor
               pow_ind, other_data index of the transmitted power monitor
               file_inds, (first, last) file indices to consider
               plot, boolean specifying whether to plot the fit and residuals
               pos, boolean selecting the '_recharge' charge calibration file
               save_mass, boolean specifying whether to save the extracted mass
               new_trap, boolean selecting the new-trap data format

       OUTPUTS: a summary array if print_res is True, otherwise a dictionary
                with the binned force/power data, linear fit parameters, and
                the extracted masses
    '''
    date = re.search(r"\d{8,}", files[0])[0]
    suffix = files[0].split('/')[-2]

    if new_trap:
        trap_str = 'new_trap'
    else:
        trap_str = 'old_trap'

    charge_file = '/data/{:s}_processed/calibrations/charges/'.format(
        trap_str) + date
    save_filename = '/data/{:s}_processed/calibrations/masses/'.format(trap_str) \
                            + date + '_' + suffix + '.mass'
    bu.make_all_pardirs(save_filename)

    if pos:
        charge_file += '_recharge.charge'
    else:
        charge_file += '.charge'

    try:
        nq = np.load(charge_file)[0]
        found_charge = True
    except:
        found_charge = False

    if not found_charge or manual_charge:
        user_nq = input('No charge file or manual requested. Guess q: ')
        nq = int(user_nq)

    if correct_phase_shift:
        print('Correcting anomalous phase-shift during analysis.')

    # nq = -16
    print('qbead: {:d} e'.format(int(nq)))
    q_bead = nq * constants.elementary_charge

    run_index = 0

    masses = []

    nfiles = len(files)
    if not print_res:
        print("Processing %i files..." % nfiles)

    all_eforce = []
    all_power = []

    all_param = []

    mass_vec = []

    p_ac = []
    p_dc = []

    e_ac = []
    e_dc = []

    pressure_vec = []

    zamp_avg = 0
    zphase_avg = 0
    zamp_N = 0
    zfb_avg = 0
    zfb_N = 0
    power_avg = 0
    power_N = 0

    Nbad = 0

    powpsd = []

    for fil_ind, fil in enumerate(files):  # 15-65

        # 4
        # if fil_ind == 16 or fil_ind == 4:
        #     continue

        bu.progress_bar(fil_ind, nfiles)

        # Load data
        df = bu.DataFile()
        try:
            if new_trap:
                df.load_new(fil)
            else:
                df.load(fil, load_other=True)
        except Exception:
            traceback.print_exc()
            continue

        try:
            # df.calibrate_stage_position()
            df.calibrate_phase()
        except Exception:
            traceback.print_exc()
            continue

        if ('20181129' in fil) and ('high' in fil):
            pressure_vec.append(1.5)
        else:
            try:
                pressure_vec.append(df.pressures['pirani'])
            except Exception:
                pressure_vec.append(0.0)

        ### Extract electrode data
        if new_trap:
            top_elec = df.electrode_data[1]
            bot_elec = df.electrode_data[2]
        else:
            top_elec = mon_fac * df.other_data[elec_ind]
            bot_elec = mon_fac * df.other_data[elec_ind + 1]

        fac = 1.0
        if np.std(top_elec) < 0.5 * np.std(bot_elec) \
                or np.std(bot_elec) < 0.5 * np.std(top_elec):
            print(
                'Adjusting electric field since only one electrode was digitized.'
            )
            fac = 2.0

        nsamp = len(top_elec)
        zeros = np.zeros(nsamp)

        voltages = [zeros, top_elec, bot_elec, zeros, \
                    zeros, zeros, zeros, zeros]
        efield = bu.trap_efield(voltages, new_trap=new_trap)
        eforce2 = fac * sign * efield[2] * q_bead

        tarr = np.arange(0, df.nsamp / df.fsamp, 1.0 / df.fsamp)

        # fig, axarr = plt.subplots(2,1,sharex=True,figsize=(10,8))

        # axarr[0].plot(tarr, top_elec, label='Top elec.')
        # axarr[0].plot(tarr, bot_elec, label='Bottom elec.')
        # axarr[0].set_ylabel('Apparent Voltages [V]')
        # axarr[0].legend(fontsize=12, loc='upper right')

        # axarr[1].plot(tarr, efield[2])
        # axarr[1].set_xlabel('Time [s]')
        # axarr[1].set_ylabel('Apparent Electric Field [V/m]')

        # fig.tight_layout()

        # plt.show()
        # input()

        freqs = np.fft.rfftfreq(df.nsamp, d=1.0 / df.fsamp)
        drive_ind = np.argmax(np.abs(np.fft.rfft(eforce2)))
        drive_freq = freqs[drive_ind]

        zamp = np.abs( np.fft.rfft(df.zcal) * bu.fft_norm(df.nsamp, df.fsamp) * \
                       np.sqrt(freqs[1] - freqs[0]) )
        zamp *= (1064.0e-9 / 2.0) * (1.0 / (2.9 * np.pi))
        zphase = np.angle(np.fft.rfft(df.zcal))
        zamp_avg += zamp[drive_ind]
        zamp_N += 1

        #plt.loglog(freqs, zamp)
        #plt.scatter(freqs[drive_ind], zamp[drive_ind], s=10, color='r')
        #plt.show()


        zfb = np.abs(np.fft.rfft(df.pos_fb[2]) * bu.fft_norm(df.nsamp, df.fsamp) * \
                      np.sqrt(freqs[1] - freqs[0]) )
        zfb_avg += zfb[drive_ind]
        zfb_N += 1

        #eforce2 = (top_elec * e_top_func(0.0) + bot_elec * e_bot_func(0.0)) * q_bead
        if noise:
            e_dc.append(np.mean(eforce2))
            e_ac_val = np.abs(np.fft.rfft(eforce2))[drive_ind]
            e_ac.append(e_ac_val * bu.fft_norm(df.nsamp, df.fsamp) \
                        * np.sqrt(freqs[1] - freqs[0]) )

        zphase_avg += (zphase[drive_ind] - np.angle(eforce2)[drive_ind])

        if np.sum(df.power) == 0.0:
            current = np.abs(df.other_data[pow_ind]) / trans_gain
        else:
            fac = 1e-6
            current = fac * df.power / trans_gain

        power = current / pd_gain
        power = power / line_filter_trans
        power = power / bs_fac

        power_avg += np.mean(power)
        power_N += 1
        if noise:
            p_dc.append(np.mean(power))
            p_ac_val = np.abs(np.fft.rfft(power))[drive_ind]
            p_ac.append(p_ac_val * bu.fft_norm(df.nsamp, df.fsamp) \
                        * np.sqrt(freqs[1] - freqs[0]) )

        fft1 = np.fft.rfft(power)
        fft2 = np.fft.rfft(df.pos_fb[2])

        if not len(powpsd):
            powpsd = np.abs(fft1)
            Npsd = 1
        else:
            powpsd += np.abs(fft1)
            Npsd += 1

        # freqs = np.fft.rfftfreq(df.nsamp, d=1.0/df.fsamp)
        # plt.loglog(freqs, np.abs(np.fft.rfft(eforce2)))
        # plt.loglog(freqs, np.abs(np.fft.rfft(power)))
        # plt.show()
        # input()

        # fig, axarr = plt.subplots(2,1,sharex=True,figsize=(10,8))

        # axarr[0].plot(tarr, power)
        # axarr[0].set_ylabel('Measured Power [Arb.]')

        # axarr[1].plot(tarr, power)
        # axarr[1].set_xlabel('Time [s]')
        # axarr[1].set_ylabel('Measured Power [Arb.]')

        # bot, top = axarr[1].get_ylim()
        # axarr[1].set_ylim(1.05*bot, 0)

        # fig.tight_layout()

        # plt.show()
        # input()


        bins, dat, errs = bu.spatial_bin(eforce2, power, nbins=200, width=0.0, #width=0.05, \
                                         dt=1.0/df.fsamp, harms=[1], \
                                         add_mean=True, verbose=False, \
                                         correct_phase_shift=correct_phase_shift, \
                                         grad_sign=0)

        dat = dat / np.mean(dat)

        #plt.plot(bins, dat, 'o')
        #plt.show()

        popt, pcov = opti.curve_fit(line, bins*1.0e13, dat, \
                                    absolute_sigma=False, maxfev=10000)
        test_vals = np.linspace(np.min(eforce2 * 1.0e13),
                                np.max(eforce2 * 1.0e13), 100)

        fit = line(test_vals, *popt)

        lev_force = -popt[1] / (popt[0] * 1.0e13)
        mass = lev_force / (9.806)

        #umass = ulev_force / 9.806
        #lmass = llev_force / 9.806

        if mass > upper_outlier or mass < lower_outlier:
            print('Crazy mass: {:0.2f} pg.... ignoring'.format(mass * 1e15))
            # fig, axarr = plt.subplots(3,1,sharex=True)
            # axarr[0].plot(eforce2)
            # axarr[1].plot(power)
            # axarr[2].plot(df.pos_data[2])
            # ylims = axarr[1].get_ylim()
            # axarr[1].set_ylim(ylims[0], 0)
            # plt.show()
            continue

        all_param.append(popt)

        all_eforce.append(bins)
        all_power.append(dat)

        mass_vec.append(mass)

    if noise:
        print('DC power: ', np.mean(p_dc), np.std(p_dc))
        print('AC power: ', np.mean(p_ac), np.std(p_ac))
        print('DC field: ', np.mean(e_dc), np.std(e_dc))
        print('AC field: ', np.mean(e_ac), np.std(e_ac))
        return

    #plt.plot(mass_vec)

    mean_popt = np.mean(all_param, axis=0)

    mean_lev = np.mean(mass_vec) * 9.806
    plot_vec = np.linspace(np.min(all_eforce), mean_lev, 100)

    if plot:
        fig = plt.figure(dpi=200, figsize=(6, 4))
        ax = fig.add_subplot(111)
        ### Plot force (in pN / g = pg) vs power
        plt.plot(np.array(all_eforce).flatten()[::5]*1e15*(1.0/9.806), \
                 np.array(all_power).flatten()[::5], \
                 'o', alpha = 0.5)
        #for params in all_param:
        #    plt.plot(plot_vec, line(plot_vec, params[0]*1e13, params[1]), \
        #             '--', color='r', lw=1, alpha=0.05)
        plt.plot(plot_vec*1e12*(1.0/9.806)*1e3, \
                 line(plot_vec, mean_popt[0]*1e13, mean_popt[1]), \
                 '--', color='k', lw=2, \
                 label='Implied mass: %0.1f pg' % (np.mean(mass_vec)*1e15))
        left, right = ax.get_xlim()
        # ax.set_xlim((left, 500))
        ax.set_xlim(*xlim)

        bot, top = ax.get_ylim()
        ax.set_ylim((0, top))

        plt.legend()
        plt.xlabel('Applied electrostatic force/$g$ (pg)')
        plt.ylabel('Optical power (arb. units)')
        plt.grid()
        plt.tight_layout()
        if save_example:
            fig.savefig(example_filename)
            fig.savefig(example_filename[:-4] + '.pdf')
            fig.savefig(example_filename[:-4] + '.svg')

        x_plotvec = np.array(all_eforce).flatten()
        y_plotvec = np.array(all_power).flatten()

        yresid = (y_plotvec - line(x_plotvec, mean_popt[0] * 1e13,
                                   mean_popt[1])) / y_plotvec

        plt.figure(dpi=200, figsize=(3, 2))
        plt.hist(yresid * 100, bins=30)
        plt.legend()
        plt.xlabel('Resid. Power [%]')
        plt.ylabel('Counts')
        plt.grid()
        plt.tight_layout()

        plt.figure(dpi=200, figsize=(3, 2))
        plt.plot(x_plotvec * 1e15, yresid * 100, 'o')
        plt.legend()
        plt.xlabel('E-Force [pN]')
        plt.ylabel('Resid. Pow. [%]')
        plt.grid()
        plt.tight_layout()

        derpfig = plt.figure(dpi=200, figsize=(3, 2))
        #derpfig.patch.set_alpha(0.0)
        plt.hist(np.array(mass_vec) * 1e15, bins=10)
        plt.xlabel('Mass (pg)')
        plt.ylabel('Count')
        plt.grid()
        #plt.title('Implied Masses, Each from 50s Integration')
        #plt.xlim(0.125, 0.131)
        plt.tight_layout()
        if save_example:
            derpfig.savefig(example_filename[:-4] + '_hist.png')
            derpfig.savefig(example_filename[:-4] + '_hist.pdf')
            derpfig.savefig(example_filename[:-4] + '_hist.svg')

        plt.show()

    final_mass = np.mean(mass_vec)
    final_err_stat = 0.5 * np.std(mass_vec)  #/ np.sqrt(len(mass_vec))
    final_err_sys = np.sqrt((0.015**2 + 0.01**2) * final_mass**2)
    final_pressure = np.mean(pressure_vec)

    if save_mass:
        save_arr = [final_mass, final_err_stat, final_err_sys]
        np.save(open(save_filename, 'wb'), save_arr)

    print('Bad Files: %i / %i' % (Nbad, nfiles))
    if print_res:
        gresid_fac = (2.0 * np.pi * freqs[drive_ind])**2 / 9.8

        print('      mass    [pg]: {:0.1f}'.format(final_mass * 1e15))
        print('      st.err  [pg]: {:0.2f}'.format(final_err_stat * 1e15))
        print('      sys.err [pg]: {:0.2f}'.format(final_err_sys * 1e15))
        print('      qbead    [e]: {:d}'.format(
            int(round(q_bead / constants.elementary_charge))))
        print('      P     [mbar]: {:0.2e}'.format(final_pressure))
        print('      <P>    [arb]: {:0.2e}'.format(power_avg / power_N))
        print('      zresid   [g]: {:0.3e}'.format(
            (zamp_avg / zamp_N) * gresid_fac))
        print('      zphase [rad]: {:0.3e}'.format(zphase_avg / zamp_N))
        print('      zfb    [arb]: {:0.3e}'.format(zfb_avg / zfb_N))
        outarr = [ final_mass*1e15, final_err_stat*1e15, final_err_sys*1e15, \
                   q_bead/constants.elementary_charge, \
                   final_pressure, power_avg / power_N, \
                   (zamp_avg / zamp_N) * gresid_fac, \
                   zphase_avg / zamp_N, zfb_avg / zfb_N ]
        return outarr
    else:
        scaled_params = np.array(all_param)
        scaled_params[:, 0] *= 1e13

        outdic = {'eforce': all_eforce, 'power': all_power, \
                  'linear_fit_params': scaled_params, \
                  'ext_masses': mass_vec}

        return outdic
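
A sketch of how weigh_bead_efield() might be invoked, assuming the module-level constants it relies on (monitor gains, outlier cuts, the line() fit function, etc.) are already defined in the enclosing script; the directory and channel indices below are placeholders:

mass_dir = '/data/old_trap/20200101/bead1/mass_meas/'   # hypothetical directory
mass_files, _ = bu.find_all_fnames(mass_dir, ext='.h5', sort_time=True)

outdic = weigh_bead_efield(mass_files, elec_ind=3, pow_ind=0, plot=True, \
                           print_res=False, save_mass=False, new_trap=False)
print('Mean implied mass [pg]: {:0.1f}'.format(np.mean(outdic['ext_masses']) * 1e15))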