def get_labeled_collection(meas_file_base: str, iter_count: Iterable[int], samp_file: str, filepath: Union[str, None] = DATA_DIR) -> Iterable[LabeledPeakCollection]:
    """Yield one LabeledPeakCollection per requested measurement iteration.

    Iterations whose measurement files cannot be found are silently skipped.
    """
    file_fetcher = get_file_fetch_func(file_base_name=meas_file_base, filepath=filepath)
    for iteration_index in iter_count:
        try:
            fetched_file = file_fetcher(iteration=iteration_index)
            container = FileToMeasData(meas_file=fetched_file, samp_file=samp_file, filepath=filepath)
            # Voltage-to-length conversion before peak identification
            container = get_converted_measurement_data(meas_class=container, q_offset=Q_OFFSET)
            # normalized_collection = NormalizedPeakCollection(transmission_peak_collection=identify_peaks(meas_data=container), q_offset=Q_OFFSET)
            yield LabeledPeakCollection(transmission_peak_collection=identify_peaks(meas_data=container), q_offset=Q_OFFSET)
        except FileNotFoundError:
            # Missing data for this iteration: move on to the next one
            continue
def single_source_analysis(meas_file: str, samp_file: str, filepath: Union[str, None] = DATA_DIR):
    """Run the full analysis chain on a single measurement: load, convert, identify peaks, label modes, and plot each stage."""
    # Build the measurement container from the raw files
    container = FileToMeasData(meas_file=meas_file, samp_file=samp_file, filepath=filepath)
    # (Report specific) classification-process plot, disabled by default:
    # plot_mode_classification(meas_data=container)  # Plot
    # Convert the piezo-voltage axis to cavity length
    container = get_converted_measurement_data(meas_class=container, q_offset=Q_OFFSET, verbose=False)
    # Locate transmission peaks and visualize them
    peaks = identify_peaks(meas_data=container)
    identification_ax = plot_peak_identification(collection=peaks, meas_class=container)  # Plot
    # Cluster peaks into labeled longitudinal/transverse modes and visualize the relation
    labeled = LabeledPeakCollection(transmission_peak_collection=peaks, q_offset=Q_OFFSET)
    relation_ax = plot_peak_relation(collection=labeled, meas_class=container)  # Plot
# NOTE(review): this chunk opens mid-function. The statements below belong to the
# body of a plotting routine whose `def` line (which would define xs, ys, _ax, i, j)
# is not visible in this chunk; the original indentation was lost, so the nesting
# (presumably inner j-loop / outer i-loop over q-modes) is reconstructed — confirm
# against the full file.
xs[j] = float('nan')
ys[j] = float('nan')
_ax.plot(xs, ys, ls='-', label=f'q={i}')
_ax.set_title(f'linearity of transverse mode distances')
_ax.grid(True)
plt.legend()


if __name__ == '__main__':
    # Define file name to retrieve from predefined data path
    file_meas = 'transrefl_hene_1s_10V_PMT5_rate1300000.0itteration0'
    file_samp = 'samples_1s_10V_rate1300000.0'
    # Store plot figure and axis
    fig, ax = plt.subplots()
    # Optional, define data_slice
    # NOTE(review): `slice` shadows the `slice` builtin; renaming to `data_slice`
    # would be safer (documentation-only review — code left unchanged).
    slice = (1050000, 1150000)
    # Construct measurement class
    measurement_class = FileToMeasData(meas_file=file_meas, samp_file=file_samp)
    # Apply axis draw/modification
    # ax = plot_npy(axis=ax, measurement_file=file_meas, sample_file=file_samp, data_slice=None)
    ax = plot_class(axis=ax, measurement_class=measurement_class)
    # Second figure: same measurement restricted to the slice via the `slicer` attribute
    fig2, ax2 = plt.subplots()
    measurement_class.slicer = slice
    # ax2 = plot_npy(axis=ax2, measurement_file=file_meas, sample_file=file_samp, data_slice=data_slice)
    ax2 = plot_class(axis=ax2, measurement_class=measurement_class)
    # Show figure plot
    plt.show()
# data_class = FileToMeasData(meas_file=file_meas, samp_file=file_samp, filepath='data/Trans/20210104') # collection_class = LabeledPeakCollection(identify_peaks(meas_data=data_class)) # # cluster_collection = collection_class.get_q_clusters # collection_class.get_clusters # piezo_response = fit_piezo_response(cluster_collection=cluster_collection, sample_wavelength=SAMPLE_WAVELENGTH) # piezo_response = fit_collection() # fit_variables = fit_calibration(voltage_array=data_class.samp_array, reference_transmission_array=import_npy(filename_base)[0], response_func=piezo_response) # print(f'TiSaph transmission: T = {1 - fit_variables[1]} (R = {fit_variables[1]})') # print(f'Cavity length delta between HeNe and TiSaph measurement: {fit_variables[2]} [nm]') for i in range(5): _filename = 'transrefl_hene_1s_10V_PMT4_rate1300000.0itteration{}'.format( i) data_class = FileToMeasData(meas_file=_filename, samp_file=file_samp, filepath='data/Trans/20210104') identified_peaks = identify_peaks(meas_data=data_class) collection_class = LabeledPeakCollection(identified_peaks) cluster_collection = collection_class.get_q_clusters # collection_class.get_clusters piezo_response = fit_piezo_response( cluster_collection=cluster_collection, sample_wavelength=SAMPLE_WAVELENGTH, verbose=True) # # Obtain mean and root-mean-square # y_values = [value for line in ax2.lines for value in line.get_ydata()] # y_mean = np.mean(y_values) # y_rms = np.sqrt(np.sum((y_values - y_mean)**2) / len(y_values)) # ax2.axhline(y=y_mean, ls='--', color='darkorange') # ax2.axhline(y=y_mean+y_rms, ls='--', color='orange', label=r'$\mu + \sigma$' + f': {round(y_mean+y_rms, 2)} [nm]')
def get_polarized_comparison(filename_func: Callable[[int, int], Union[str, FileNotFoundError]], sample_file: str, long_mode: int, trans_mode: Union[int, None]):
    """Sweep over polarization settings, average peak heights/distances per polarization,
    repair gaps from undetected peaks, and plot both quantities versus polarization angle.

    :param filename_func: maps (iteration, polarization) to a measurement file name;
        raises FileNotFoundError when the combination does not exist.
    :param sample_file: sample (voltage) file shared by all measurements.
    :param long_mode: longitudinal mode (q) under study.
    :param trans_mode: transverse mode (m+n) under study (None for unspecified).

    NOTE(review): the original source had its line structure collapsed; indentation
    below is reconstructed from the statement order — confirm against the full file.
    """
    warnings.filterwarnings(action='once')
    height_data = []  # per-polarization list of (mean, std) peak heights
    diff_data = []  # per-polarization list of (mean, std) peak-to-peak distances
    # Collect data
    polarization_iterator = range(0, 19)
    for polarization in tqdm(polarization_iterator, desc=f'Collecting data over polarization sweep'):
        _filenames = []
        for iteration in range(0, 10):
            try:
                _filename = filename_func(iteration, polarization)
            except FileNotFoundError:
                break  # Breaks out of the inner for-loop
            # Successfully retrieved file name
            _filenames.append(_filename)

        # Time for processing into normalized collection
        _meas_iterations = [get_converted_measurement_data(FileToMeasData(meas_file=file_meas, samp_file=sample_file)) for file_meas in _filenames]  # get_converted_measurement_data
        _identified_peaks = [identify_peaks(meas_data=data) for data in _meas_iterations]
        # Temp solution
        # _norm_peaks = [NormalizedPeakCollection(optical_mode_collection=collection) for collection in _identified_peaks]
        _norm_peaks = []
        for collection in _identified_peaks:
            try:
                normalaized_data = NormalizedPeakCollection(transmission_peak_collection=collection)
            except IndexError:
                # Sneaky catch for improper normalization (self._get_data_class)
                print(f'Skipped data')
                continue
            _norm_peaks.append(normalaized_data)

        # Process peak data
        # expected_number_of_peaks = 4  # Temp
        _mean_std_height_array = list(get_peak_height(collection_classes=_norm_peaks, long_mode=long_mode, trans_mode=trans_mode))
        _mean_std_diff_array = list(get_peak_differences(collection_classes=_norm_peaks, long_mode=long_mode, trans_mode=trans_mode))
        for i, (mean, std) in enumerate(_mean_std_height_array):
            print(f'\nPeak height {i}: mean = {round(mean, 4)}[Transmission], std = {round(std, 4)}[Transmission]')
        for i, (mean, std) in enumerate(_mean_std_diff_array):
            print(f'Peak difference {i}-{i + 1}: mean = {round(mean, 4)}[nm], std = {round(std, 4)}[nm]')

        # Collect data
        height_data.append(_mean_std_height_array)
        diff_data.append(_mean_std_diff_array)

    def get_match_index(value_array: List[float]) -> Callable[[float], List[int]]:
        """Build a lookup of (summed) reference means and return a function that maps a
        measured mean to the indices of its nearest reference entry."""
        _look_up = []
        for i in range(len(value_array)):
            for j in range(1, 2):  # len(value_array) - i + 1
                _look_up.append((np.sum(np.nan_to_num(value_array[i:(i + j)])), list(range(i, i + j))))

        def match_func(value_input: float) -> List[int]:
            look_up_array = np.array([value for value, indices in _look_up])
            index = min(range(len(look_up_array)), key=lambda k: abs(look_up_array[k] - value_input))  # find_nearest_index(array=look_up_array, value=value_input)
            # print(f'Compare {value_input} to:\n{_look_up}\nFinds index {index}, corresponding to {_look_up[index]}')
            return _look_up[index][1]

        return match_func

    def get_iterator_on_max_elements(data: List[Any], correct_len: int) -> List[int]:
        """Return the index order rotated so iteration starts at the first complete
        (correct_len) entry; entries before it wrap to the end."""
        _iterator = list(range(len(data)))
        for i in _iterator:
            if len(data[i]) == correct_len:
                start = _iterator[i:]
                end = _iterator[0:i]
                _iterator = start
                _iterator.extend(end)
                break
        return _iterator

    def estimate_missing_height_data(height_data: List[List[Tuple[float, float]]], diff_data: List[List[Tuple[float, float]]]) -> Tuple[List[List[Tuple[float, float]]], List[List[Tuple[float, float]]]]:
        """
        Handles undetected peaks throughout data.
        Expects data format:
        List[
            Polarization(0):
            List[
                Peak(0):
                Tuple[ height_mean, height_std ]
                Peak(1): ...
            ]
            Polarization(1): ...
        ]
        The number of peaks per polarization (averaged over multiple iterations) can vary
        from polarization to polarization. Either through peak degeneracy or undetectability.
        Solution approach:
        - establish the max frequency number of peaks
        - use peak-difference data to establish missing peak indices
        """
        # Step 1
        max_peak_freq = int(max_frequent([len(polar_group) for polar_group in height_data]))  # Max number of consistent peaks found
        # NOTE(review): max_peak_freq is computed but never used below — presumably a leftover.
        max_diff_freq = int(max_frequent([len(polar_group) for polar_group in diff_data]))  # Max number of consistent peaks differences found
        # Step 2
        missing_diff_data = []  # (polarization index, inserted-peak index) pairs
        # Define data iterator to correct missing data
        diff_iterator = get_iterator_on_max_elements(data=diff_data, correct_len=max_diff_freq)  # list(range(len(diff_data)))
        for i, iter_index in enumerate(diff_iterator):
            if len(diff_data[iter_index]) != max_diff_freq:
                if len(diff_data[diff_iterator[i - 1]]) == max_diff_freq:
                    # Normal execution logic: use the previous (complete) polarization
                    # group as reference to locate which peak indices are missing.
                    corresponding_index_func = get_match_index(value_array=[mean for mean, std in diff_data[diff_iterator[i - 1]]])
                    for j, (curr_mean, curr_std) in enumerate(diff_data[iter_index]):
                        index_array = corresponding_index_func(curr_mean)  # Retrieves the reference mean-indices
                        for k in range(j, index_array[0]):
                            # Pad with NaN so this group lines up with the reference
                            diff_data[iter_index].insert(k, (np.nan, np.nan))
                            missing_diff_data.append((iter_index, k))
                    # if len(index_array) > 1:  # One or both bounding peaks are missing
                    #     for k in index_array[1:]:
                    #         diff_data[i].insert(k, (np.nan, np.nan))
                    #         missing_diff_data.append((i, k))
                else:
                    # Problem
                    # raise NotImplementedError?
                    continue
        print(missing_diff_data)
        # Mirror the NaN-padding of diff_data into height_data (height 0 with NaN std)
        for (pol_index, list_index) in missing_diff_data:
            height_data[pol_index].insert(list_index, (0, np.nan))
        return height_data, diff_data

    height_data, diff_data = estimate_missing_height_data(height_data=height_data, diff_data=diff_data)

    # Display data
    x = [i * 10 for i in polarization_iterator]  # polarization step assumed to be 10 degrees — TODO confirm
    height_array_mean_std = [list(map(list, zip(*mean_std_list))) for mean_std_list in height_data]  # Polar[ Peaks[] ]
    y_height = list(map(list, zip(*[mean for (mean, std) in height_array_mean_std])))
    y_height_err = list(map(list, zip(*[std for (mean, std) in height_array_mean_std])))
    diff_array_mean_std = [list(map(list, zip(*mean_std_list))) for mean_std_list in diff_data]  # Polar[ Peaks[] ]
    y_diff = list(map(list, zip(*[mean for (mean, std) in diff_array_mean_std])))
    y_diff_err = list(map(list, zip(*[std for (mean, std) in diff_array_mean_std])))
    # Define plot
    fig, (ax0, ax1) = plt.subplots(2, 1)
    for i in range(len(y_height)):
        ax0.errorbar(x=x, y=y_height[i], yerr=y_height_err[i], fmt='', label=f'Peak ({i})')
    for i in range(len(y_diff)):
        ax1.errorbar(x=x, y=y_diff[i], yerr=y_diff_err[i], fmt='', label=f'Peak ({i})-to-({i+1})')
    # Set labels
    fig.suptitle(f'Peak relations focused on (q={long_mode}, m+n={trans_mode})\n(peak labeling: min-max resonance distance)')
    ax0.set_xlabel('Polarization [Degrees]')
    ax0.set_ylabel('Transmission [a.u.]')
    # ax0.set_yscale('log')
    ax0.grid(True)
    ax0.legend(bbox_to_anchor=(1.01, 1), loc='upper left')
    ax1.set_xlabel('Polarization [Degrees]')
    ax1.set_ylabel('Peak Distance [nm]')
    # ax1.set_yscale('log')
    ax1.grid(True)
    ax1.legend(bbox_to_anchor=(1.01, 1), loc='upper left')