elif g == 'own': # Take each experiment on its own experiments_groups.append([[i] for i in range(nexperiments[i])]) del i, g #%% LOAD DATA # Define variables and begin loop all_groups = [] all_data = [] for i, pi in enumerate(paths): these_groups = [] this_data = [] for j, pij in enumerate(pi): # Load data results, header, footer = ivs.loadTxt(os.path.join(pij, 'Results.txt')) # Export data outside loop these_groups.append(footer['experiments_groups']) this_data.append(results) all_groups.append(these_groups) all_data.append(this_data) del i, pi, footer, results, these_groups, this_data ''' # Make list of failed groups too all_failed_groups = [] for groups, egroups in zip(all_groups, experiments_groups): failed_groups = [] for g in groups: for eg in egroups: if g not in groups: print(str(g) + '\n')
# Reorder the parameters table so its rows follow the order of `filenames`
index = [params_filenames.index(f) for f in filenames]
params = params[index, :]
params_header = ['Amplitud (mVpp)',
                 'Potencia Pump post-MOA (muW)',
                 'Longitud de onda (nm)',
                 'Ancho medio de la campana (nm)']
del params_filenames, index, amplitude, power, wavelength, spectral_width

# Now create a list of folders for each filename
fits_filenames = [ivs.filenameToFitsFilename(file, home) for file in filenames]

# Load data from each fit
fits_data = []
fits_footer = []
for file in fits_filenames:
    data, fits_header, footer = ivs.loadTxt(file)
    fits_data.append(data)
    fits_footer.append(footer)
del file, data, footer, fits_filenames

# Keep only the fit term that has the closest frequency to the desired one
# NOTE(review): zip() truncates to the shorter of (rods, fits_data);
# assumed intentional — one fit table per rod.
fits_new_data = []
for rod, fit in zip(rods, fits_data):
    try:
        # Row whose frequency column (0) lies closest to desired_frequency
        i = np.argmin(abs(fit[:, 0]
                          - desired_frequency * np.ones(fit.shape[0])))
        fits_new_data.append([*fit[i, :]])
    except IndexError:
        # 1D array: the fit has a single term, so keep it whole
        fits_new_data.append([*fit])
fits_data = np.array(fits_new_data)

frequency = fits_data[:, 0] * 1e9  # Hz
Created on Wed Oct 23 15:26:46 2019 @author: Valeria """ import iv_save_module as ivs import iv_utilities_module as ivu import matplotlib.pyplot as plt #%% Parameters this_filename = 'C:\\Users\\Valeria\\OneDrive\\Labo 6 y 7\\Análisis\\Potencia_M_20191018_10\\Resultados.txt' #%% Load data this_data, this_header, this_footer = ivs.loadTxt(this_filename) #%% Plot # Plot results for the different rods fig, ax1 = plt.subplots() # Frequency plot, right axis ax1.set_xlabel('Repetición') ax1.set_ylabel('Frecuencia (GHz)', color='tab:red') ax1.plot(this_data[:, 1], 'ro') ax1.tick_params(axis='y', labelcolor='tab:red') # Quality factor, left axis ax2 = ax1.twinx() # Second axes that shares the same x-axis ax2.set_ylabel('Tiempo de decaimiento (ps)', color='tab:blue')
elif groups_mode == 'own': # Take each experiment on its own experiments_groups = [[i] for i in range(nexperiments)] #%% LOAD DATA # Define variables and begin loop all_groups = [] all_results = [] all_other_results = [] all_fit_params = [] #all_tables = [] for file in filenames: # Load data from a base fit made by hand results, header, footer = ivs.loadTxt(file) # Reorganize data other_results_keys = ['Nsingular_values', 'chi_squared'] other_results = {k: footer[k] for k in other_results_keys} fit_params = dict(footer) for k in other_results_keys: fit_params.pop(k) del k, other_results_keys fit_params = ivu.InstancesDict(fit_params) del footer # # Generate fit tables # tables = iva.linearPredictionTables(fit_params, # results, # other_results))
params_header = ['Amplitud (mVpp)',
                 'Potencia Pump post-MOA (muW)',
                 'Longitud de onda (nm)',
                 'Ancho medio de la campana (nm)']
del params_filenames, index, amplitude, power, wavelength, spectral_width

# Now create a list of folders for each filename
fits_filenames = [ivs.filenameToFitsFilename(file, home)
                  for file in sfilenames]

# Load data from each fit
sfits_data = []
sfits_footer = []
for file in fits_filenames:
    data, fits_header, footer = ivs.loadTxt(file)
    sfits_data.append(data)
    sfits_footer.append(footer)
del file, data, footer, fits_filenames

# Keep only the fit term that has the closest frequency to the desired one
fits_new_data = []
for rod, fit in zip(srods, sfits_data):
    try:
        # Row whose frequency column (0) lies closest to f
        i = np.argmin(abs(fit[:, 0] - f * np.ones(fit.shape[0])))
        fits_new_data.append([*fit[i, :]])
    except IndexError:
        # 1D array: single-term fit, keep it whole
        fits_new_data.append([*fit])
sfits_data = np.array(fits_new_data)

sfrequency = sfits_data[:, 0] * 1e9       # Hz
sdamping_time = sfits_data[:, 1] * 1e-12  # s
#%% LOAD DATA

# Load data from files: one array per file with time as the first column
data = []
for f in filenames:
    t, V, details = ivs.loadNicePumpProbe(f)
    data.append(np.array([t, *V.T]).T)
del t, V, details, f

# Load data from fit filenames
other_results_keys = ['Nsingular_values', 'chi_squared']
fit_params = []
for f in fit_filenames:

    # Load data from a base fit made by hand
    r, fit_header, ft = ivs.loadTxt(f)

    # Reorganize data: split goodness-of-fit entries from the fit parameters
    # NOTE(review): `others` is built each iteration but never stored —
    # confirm it isn't needed downstream.
    others = {k: ft[k] for k in other_results_keys}
    fp = dict(ft)
    for k in other_results_keys:
        fp.pop(k)
    del k
    fp = ivu.InstancesDict(fp)
    del ft

    # Add data to external variables
    fit_params.append(fp)
del r, others, fp, f
#%% DATA

home = r'C:\Users\Valeria\OneDrive\Labo 6 y 7'
# Raw strings: the originals were plain literals relying on '\F' / '\D' not
# being recognized escapes, which raises SyntaxWarning on modern Python.
# r'...' yields byte-identical text and is future-proof.
figs_folder = r'Informe L7\Figuras\Figuras análisis\Modelos (G, E, etc)'
data_folder = r'Informe L7\Datos Iván'

file = os.path.join(home, data_folder,
                    'Resultados_Comparados_LIGO1 sin outl.txt')
file2 = os.path.join(home, data_folder,
                     'Resultados_Comparados_LIGO1_PostUSA sin outl.txt')
file3 = os.path.join(home, data_folder,
                     'Resultados_Comparados_LIGO5bis.txt')

# Load data
data, header, footer = ivs.loadTxt(file)     # Fused Silica + Air
data2, header, footer2 = ivs.loadTxt(file2)  # Fused Silica + Ta2O5
data3, header, footer3 = ivs.loadTxt(file3)  # Ta2O5 + Air
# NOTE(review): `header` is rebound by each load, so only the last file's
# header survives — confirm that is intended before relying on it.

# Parameters
rhoAu = 19300   # kg/m3
rhoTa = 8180    # kg/m3
gammaAu = 2e-3  # Pa/s
cLTa = 4920     # m/s

f0 = data[:, 6] * 1e9  # from GHz to Hz
d = data[:, 0] * 1e-9
L = data[:, 2] * 1e-9  # from nm to m
f = data2[:, 6] * 1e9
d2 = data2[:, 0] * 1e-9
make_boxplot_of = [[0], [1], [0, 1]] overwrite = True #%% LOAD DATA # Organize paths paths = [os.path.join(home, r'Muestras\SEM', f) for f in folders] filenames = [ os.path.join(home, r'Muestras\SEM', f, 'Resultados_SEM_{}.txt'.format(s)) for f, s in zip(folders, series) ] data = [] rods = [] for f in filenames: d, header, ft = ivs.loadTxt(f) data.append(d) rods.append(ft['rods']) del d, ft if filter_ta2o5_outliers: index = np.argsort(data[1][:, 2])[:-1] rods[1] = [rods[1][i] for i in index] data[1] = data[1][index, :] #%% VALUES variables = ['Longitud L', 'Diámetro d', 'Relación de aspecto', 'Ángulo'] variables_units = ['nm', 'nm', '', 'º'] variables_data = lambda i: [ iva.getValueError(data[i][:, 2], data[i][:, 3]),
# Plot parameters
plot_params = dict(plot=True,
                   interactive=False,
                   autoclose=True,
                   extension='.png')
plot_params = ivu.InstancesDict(plot_params)

#%% LOAD DATA

# Make filenames routs
filename = ivs.filenameToMeasureFilename(name, home=home)
fit_filename = ivs.filenameToFitsFilename(name, home=home)
other_fit_filename = os.path.join(path, name + '.txt')

# Load data from a base fit made by hand
results, header, footer = ivs.loadTxt(fit_filename)

# Reorganize data: keep goodness-of-fit entries apart from the fit parameters
other_results_keys = ['Nsingular_values', 'chi_squared']
other_results = {k: footer[k] for k in other_results_keys}
fit_params = {k: v for k, v in footer.items()
              if k not in other_results_keys}
fit_params = ivu.InstancesDict(fit_params)
del footer

# New parameters
fit_params.use_full_mean = False
fit_params.choose_t0 = False
fit_params.choose_tf = False

t0 = fit_params.time_range[0]
@author: Lec """ import numpy as np import iv_save_module as ivs import matplotlib.pyplot as plt from scipy.optimize import curve_fit #%% DATA file = r'C:\Users\Usuario\OneDrive\Labo 6 y 7\OneDrive\Labo 6 y 7\Análisis\ComparedAnalysis_FusedSilica\Resultados_Comparados_LIGO1.txt' file2 = r'C:\Users\Usuario\OneDrive\Labo 6 y 7\OneDrive\Labo 6 y 7\Análisis\ComparedAnalysis_FusedSilica\Resultados_Comparados_LIGO1_PostUSA.txt' # Load data data, header, footer = ivs.loadTxt(file) # In air data2, header, footer2 = ivs.loadTxt(file2) # In Ta2O5 filter_outliers = False filter_notcommon = False # Parameters rhoAu = 19300 # kg/m3 rhoTa = 8180 # kg/m3 gammaAu = 2e-3 # Pa/s r = data[:, 0] * 1e-9 / 2 A = np.pi * (r**2) L = data[:, 2] * 1e-9 # from nm to m L2 = data2[:, 2] * 1e-9 # from nm to m
chi = [] # Chi Squared meanqdiff = [] # Mean Squared Difference nterms = [] # Number of fit terms fit_params = [] # Now, begin iteration on files for n in names: print("---> File {}/{}".format(names.index(n) + 1, len(names))) # Load data t_n, V, details = ivs.loadNicePumpProbe( ivs.filenameToMeasureFilename(n, home)) # Load fit parameters results, header, fit_params_n = ivs.loadTxt( ivs.filenameToFitsFilename(n, home)) fit_params_n = ivu.InstancesDict(fit_params_n) del results, header # Choose data to fit if fit_params_n.use_full_mean: data_n = np.mean(V, axis=1) else: data_n = np.mean(V[:, fit_params_n.use_experiments], axis=1) # Make a vertical shift data_n = data_n - fit_params_n.voltage_zero # Choose time interval to fit t0_n = fit_params_n.time_range[0] # Initial time assumed to optimize it i = np.argmin(np.abs(t_n -