def get_labeled_collection(meas_file_base: str, iter_count: Iterable[int], samp_file: str,
                           filepath: Union[str, None] = DATA_DIR) -> Iterable[LabeledPeakCollection]:
    """Yields LabeledPeakCollection data for each available measurement iteration."""
    fetch_func = get_file_fetch_func(file_base_name=meas_file_base, filepath=filepath)
    for i in iter_count:
        try:
            meas_file = fetch_func(iteration=i)
            # Construct measurement container and apply voltage-to-length conversion
            measurement_container = FileToMeasData(meas_file=meas_file, samp_file=samp_file, filepath=filepath)
            measurement_container = get_converted_measurement_data(meas_class=measurement_container, q_offset=Q_OFFSET)
            # Identify peaks and label them with longitudinal/transverse mode indices
            labeled_collection = LabeledPeakCollection(
                transmission_peak_collection=identify_peaks(meas_data=measurement_container),
                q_offset=Q_OFFSET)
            # normalized_collection = NormalizedPeakCollection(transmission_peak_collection=identify_peaks(meas_data=measurement_container), q_offset=Q_OFFSET)
            yield labeled_collection
        except FileNotFoundError:
            continue  # Skip missing iterations
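# Usage sketch for get_labeled_collection (illustrative only): the measurement base name below is
# a placeholder; the exact format expected by get_file_fetch_func is project-specific. The helper
# is defined but never called, so it has no side effects on import.
def _example_get_labeled_collection():
    """Hedged example: iterate over labeled collections and report the cluster count per iteration."""
    labeled_iter = get_labeled_collection(
        meas_file_base='transrefl_hene_0_3s_10V_PMT4_rate1300000.0',  # hypothetical base name
        iter_count=range(10),
        samp_file='samples_0_3s_10V_rate1300000.0')
    for labeled in labeled_iter:
        print(sum(1 for _ in labeled.get_clusters))  # Number of labeled peak clusters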
def plot_cross_sections():  # Test
    index = 0
    ax_full, measurement_class = prepare_measurement_plot(filenames[index])
    measurement_class = get_converted_measurement_data(measurement_class)
    ax_full = plot_class(axis=ax_full, measurement_class=measurement_class)

    fig, ax_array = plt.subplots(3, 3)
    ax_array = np.ndarray.flatten(ax_array)
    for index in range(len(ax_array)):
        # Plot specific longitudinal mode
        ax_array[index] = plot_isolated_long_mode(axis=ax_array[index],
                                                  data_class=meas_iterations[index],
                                                  collection=labeled_peaks[index],
                                                  long_mode=long_mode,
                                                  trans_mode=trans_mode)
        if index == 2:
            ax_array[index].legend(bbox_to_anchor=(1.05, 1), loc='upper left')

    # Plot mode separation
    cluster_array, value_slice = labeled_peaks[index].get_mode_sequence(long_mode=long_mode)
    # Temporary coloring
    ax_full.axvline(x=value_slice[0], color=f'C{long_mode+1}')
    ax_full.axvline(x=value_slice[1], color=f'C{long_mode+1}', label=f'Long. Mode={long_mode}')
    ax_full.legend()
def single_source_analysis(meas_file: str, samp_file: str, filepath: Union[str, None] = DATA_DIR):
    # Create measurement container instance
    measurement_container = FileToMeasData(meas_file=meas_file, samp_file=samp_file, filepath=filepath)

    # # (Report specific) Plot classification process
    # plot_mode_classification(meas_data=measurement_container)  # Plot

    # Apply voltage to length conversion
    measurement_container = get_converted_measurement_data(meas_class=measurement_container,
                                                           q_offset=Q_OFFSET, verbose=False)

    # Peak identification
    peak_collection = identify_peaks(meas_data=measurement_container)
    peak_ax = plot_peak_identification(collection=peak_collection, meas_class=measurement_container)  # Plot

    # Peak clustering and mode labeling
    labeled_collection = LabeledPeakCollection(transmission_peak_collection=peak_collection, q_offset=Q_OFFSET)
    rela_ax = plot_peak_relation(collection=labeled_collection, meas_class=measurement_container)  # Plot
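# Usage sketch for single_source_analysis (illustrative only): the measurement and sample file
# names follow the pattern used in the __main__ block of this project; whether these exact files
# exist depends on the local DATA_DIR. Defined but not called.
def _example_single_source_analysis():
    """Hedged example: run identification, conversion and labeling on a single measurement."""
    single_source_analysis(
        meas_file='transrefl_hene_0_3s_10V_PMT4_rate1300000.0itteration1_pol000',
        samp_file='samples_0_3s_10V_rate1300000.0')
    plt.show()  # Display the peak-identification and peak-relation plots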
def __init__(self, meas_file: str, samp_file: str, collection: Optional[NormalizedPeakCollection] = None):
    self._meas_file = meas_file
    self._samp_file = samp_file
    if collection is None:
        # Construct synchronized measurement object and normalized peak collection from file
        self._meas_data = get_converted_measurement_data(
            FileToMeasData(meas_file=meas_file, samp_file=samp_file))
        self._peak_collection = NormalizedPeakCollection(identify_peaks(meas_data=self._meas_data))
    else:
        # Reuse an existing collection and its underlying measurement data
        self._peak_collection = collection
        self._meas_data = collection._get_data_class
if __name__ == '__main__':
    from src.plot_functions import prepare_measurement_plot

    # Reference files
    file_samp = 'samples_0_3s_10V_rate1300000.0'  # 'samples_1s_10V_rate1300000.0'

    # Measurement files
    # filenames = ['transrefl_hene_1s_10V_PMT5_rate1300000.0itteration{}'.format(i) for i in range(10)]
    filenames = ['transrefl_hene_0_3s_10V_PMT4_rate1300000.0itteration1_pol{:0=2d}0'.format(i)
                 for i in range(19)]

    meas_iterations = [get_converted_measurement_data(FileToMeasData(meas_file=file_meas, samp_file=file_samp))
                       for file_meas in filenames]
    identified_peaks = [identify_peaks(meas_data=data) for data in meas_iterations]
    labeled_peaks = [LabeledPeakCollection(transmission_peak_collection=collection)
                     for collection in identified_peaks]

    trans_mode = 2
    long_mode = 0
    plot_3d_sequence(data_classes=meas_iterations, long_mode=long_mode, trans_mode=trans_mode)
def plot_mode_classification(meas_data: SyncMeasData) -> plt.axes:
    """Plots the report-paper figure for the entire classification process."""
    from src.peak_identifier import identify_peaks
    from src.peak_relation import LabeledPeakCollection, get_converted_measurement_data
    from src.main import Q_OFFSET

    _fig, ((_ax00, _ax01), (_ax10, _ax11)) = plt.subplots(2, 2, sharey='all')
    colors = plt.cm.jet(np.linspace(0, 1, 10))

    # Plot raw data
    _ax00.text(1.05, 1., '(a)', horizontalalignment='center', verticalalignment='top',
               transform=_ax00.transAxes)
    _ax00 = plot_class(axis=_ax00, measurement_class=meas_data)
    _ax00.set_xlabel('Voltage [V]')

    # Plot peak locations
    _ax01.text(1.05, 1., '(b)', horizontalalignment='center', verticalalignment='top',
               transform=_ax01.transAxes)
    peak_collection = identify_peaks(meas_data=meas_data)
    _ax01 = plot_class(axis=_ax01, measurement_class=meas_data, alpha=0.2)
    for i, peak_data in enumerate(peak_collection):
        if peak_data.relevant:
            _ax01.plot(peak_data.get_x, peak_data.get_y, 'x', color='r', alpha=1)
    _ax01.set_xlabel('Voltage [V]')

    # Plot q mode separation and mode ordering
    _ax10.text(1.05, 1., '(c)', horizontalalignment='center', verticalalignment='top',
               transform=_ax10.transAxes)
    labeled_collection = LabeledPeakCollection(peak_collection)
    _ax10 = get_standard_axis(axis=_ax10)
    min_q = min(labeled_collection.q_dict.keys())
    mode_sequence_range = range(min_q, max(labeled_collection.q_dict.keys()) + 2)
    for i in mode_sequence_range:
        try:
            cluster_array, value_slice = labeled_collection.get_mode_sequence(long_mode=i)
            # Get normalized measurement
            x_sample, y_measure = labeled_collection.get_measurement_data_slice(union_slice=value_slice)
            _ax10.plot(x_sample, y_measure, alpha=1,
                       color=colors[(cluster_array[0].get_longitudinal_mode_id - min_q) % len(colors)])
        except AttributeError:
            break
    for i, peak_data in enumerate(labeled_collection):
        if peak_data.relevant:
            _ax10.plot(peak_data.get_x, peak_data.get_y, 'x',
                       color=colors[(peak_data.get_transverse_mode_id - min_q) % len(colors)], alpha=1)
    _ax10.set_xlabel('Voltage [V]')

    # Plot finalized labeled peaks
    _ax11.text(1.05, 1., '(d)', horizontalalignment='center', verticalalignment='top',
               transform=_ax11.transAxes)
    meas_data = get_converted_measurement_data(meas_class=meas_data, q_offset=Q_OFFSET, verbose=False)
    labeled_collection = LabeledPeakCollection(identify_peaks(meas_data=meas_data), q_offset=Q_OFFSET)
    # _ax11 = plot_class(axis=_ax11, measurement_class=meas_data, alpha=0.2)
    # _ax11 = plot_cluster_collection(axis=_ax11, data=labeled_collection)
    min_q = min(labeled_collection.q_dict.keys())
    mode_sequence_range = range(min_q, max(labeled_collection.q_dict.keys()) + 2)
    for i in mode_sequence_range:
        try:
            cluster_array, value_slice = labeled_collection.get_mode_sequence(long_mode=i)
            # Get normalized measurement
            x_sample, y_measure = labeled_collection.get_measurement_data_slice(union_slice=value_slice)
            _ax11.plot(x_sample, y_measure, alpha=.2,
                       color=colors[(cluster_array[0].get_longitudinal_mode_id - min_q) % len(colors)])
        except AttributeError:
            print(i, 'break out of mode sequence')
            break
    for cluster in labeled_collection.get_clusters:
        if cluster.get_transverse_mode_id == 0:
            plt.gca().set_prop_cycle(None)  # Reset the color cycle at each fundamental mode
        for peak_data in cluster:
            if peak_data.relevant:
                _ax11.plot(peak_data.get_x, peak_data.get_y, 'x',
                           color=colors[(peak_data.get_transverse_mode_id - min_q) % len(colors)], alpha=1)
        _ax11.text(x=cluster.get_avg_x, y=cluster.get_max_y,
                   s=f'({cluster.get_longitudinal_mode_id}, {cluster.get_transverse_mode_id})',
                   fontsize=10, horizontalalignment='center', verticalalignment='bottom')
    _ax11 = get_standard_axis(axis=_ax11)
    _ax11.set_xlabel('Cavity Length [nm]')
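# Usage sketch for plot_mode_classification (illustrative only): mirrors the commented-out call in
# single_source_analysis, passing an unconverted measurement container. FileToMeasData and the file
# names below are assumed to be available/importable here, as they are elsewhere in the project.
def _example_plot_mode_classification():
    """Hedged example: build the four-panel classification figure for one measurement."""
    _data = FileToMeasData(
        meas_file='transrefl_hene_0_3s_10V_PMT4_rate1300000.0itteration1_pol000',
        samp_file='samples_0_3s_10V_rate1300000.0')
    plot_mode_classification(meas_data=_data)
    plt.show()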
def get_polarized_comparison(filename_func: Callable[[int, int], Union[str, FileNotFoundError]],
                             sample_file: str, long_mode: int, trans_mode: Union[int, None]):
    warnings.filterwarnings(action='once')
    height_data = []
    diff_data = []

    # Collect data
    polarization_iterator = range(0, 19)
    for polarization in tqdm(polarization_iterator, desc='Collecting data over polarization sweep'):
        _filenames = []
        for iteration in range(0, 10):
            try:
                _filename = filename_func(iteration, polarization)
            except FileNotFoundError:
                break  # Breaks out of the inner for-loop
            # Successfully retrieved file name
            _filenames.append(_filename)

        # Time for processing into normalized collection
        _meas_iterations = [get_converted_measurement_data(FileToMeasData(meas_file=file_meas, samp_file=sample_file))
                            for file_meas in _filenames]
        _identified_peaks = [identify_peaks(meas_data=data) for data in _meas_iterations]

        # Temp solution
        # _norm_peaks = [NormalizedPeakCollection(optical_mode_collection=collection) for collection in _identified_peaks]
        _norm_peaks = []
        for collection in _identified_peaks:
            try:
                normalized_data = NormalizedPeakCollection(transmission_peak_collection=collection)
            except IndexError:
                # Sneaky catch for improper normalization (self._get_data_class)
                print('Skipped data')
                continue
            _norm_peaks.append(normalized_data)

        # Process peak data
        # expected_number_of_peaks = 4  # Temp
        _mean_std_height_array = list(get_peak_height(collection_classes=_norm_peaks,
                                                      long_mode=long_mode, trans_mode=trans_mode))
        _mean_std_diff_array = list(get_peak_differences(collection_classes=_norm_peaks,
                                                         long_mode=long_mode, trans_mode=trans_mode))
        for i, (mean, std) in enumerate(_mean_std_height_array):
            print(f'\nPeak height {i}: mean = {round(mean, 4)}[Transmission], std = {round(std, 4)}[Transmission]')
        for i, (mean, std) in enumerate(_mean_std_diff_array):
            print(f'Peak difference {i}-{i + 1}: mean = {round(mean, 4)}[nm], std = {round(std, 4)}[nm]')

        # Collect data
        height_data.append(_mean_std_height_array)
        diff_data.append(_mean_std_diff_array)

    def get_match_index(value_array: List[float]) -> Callable[[float], List[int]]:
        _look_up = []
        for i in range(len(value_array)):
            for j in range(1, 2):  # len(value_array) - i + 1
                _look_up.append((np.sum(np.nan_to_num(value_array[i:(i + j)])), list(range(i, i + j))))

        def match_func(value_input: float) -> List[int]:
            look_up_array = np.array([value for value, indices in _look_up])
            # find_nearest_index(array=look_up_array, value=value_input)
            index = min(range(len(look_up_array)), key=lambda k: abs(look_up_array[k] - value_input))
            # print(f'Compare {value_input} to:\n{_look_up}\nFinds index {index}, corresponding to {_look_up[index]}')
            return _look_up[index][1]

        return match_func

    def get_iterator_on_max_elements(data: List[Any], correct_len: int) -> List[int]:
        _iterator = list(range(len(data)))
        for i in _iterator:
            if len(data[i]) == correct_len:
                start = _iterator[i:]
                end = _iterator[0:i]
                _iterator = start
                _iterator.extend(end)
                break
        return _iterator

    def estimate_missing_height_data(
            height_data: List[List[Tuple[float, float]]],
            diff_data: List[List[Tuple[float, float]]]
    ) -> Tuple[List[List[Tuple[float, float]]], List[List[Tuple[float, float]]]]:
        """
        Handles undetected peaks throughout the data.

        Expects data format:
        List[
            Polarization(0): List[
                Peak(0): Tuple[height_mean, height_std]
                Peak(1): ...
            ]
            Polarization(1): ...
        ]

        The number of peaks per polarization (averaged over multiple iterations) can vary from
        polarization to polarization, either through peak degeneracy or undetectability.
        Solution approach:
        - establish the most frequent number of peaks
        - use peak-difference data to establish missing peak indices
        """
        # Step 1
        max_peak_freq = int(max_frequent([len(polar_group) for polar_group in height_data]))  # Max number of consistent peaks found
        max_diff_freq = int(max_frequent([len(polar_group) for polar_group in diff_data]))  # Max number of consistent peak differences found

        # Step 2
        missing_diff_data = []
        # Define data iterator to correct missing data
        diff_iterator = get_iterator_on_max_elements(data=diff_data, correct_len=max_diff_freq)  # list(range(len(diff_data)))
        for i, iter_index in enumerate(diff_iterator):
            if len(diff_data[iter_index]) != max_diff_freq:
                if len(diff_data[diff_iterator[i - 1]]) == max_diff_freq:
                    # Normal execution logic
                    corresponding_index_func = get_match_index(
                        value_array=[mean for mean, std in diff_data[diff_iterator[i - 1]]])
                    for j, (curr_mean, curr_std) in enumerate(diff_data[iter_index]):
                        index_array = corresponding_index_func(curr_mean)  # Retrieves the reference mean-indices
                        for k in range(j, index_array[0]):
                            diff_data[iter_index].insert(k, (np.nan, np.nan))
                            missing_diff_data.append((iter_index, k))
                        # if len(index_array) > 1:  # One or both bounding peaks are missing
                        #     for k in index_array[1:]:
                        #         diff_data[i].insert(k, (np.nan, np.nan))
                        #         missing_diff_data.append((i, k))
                else:
                    # Problem  # raise NotImplementedError?
                    continue
        print(missing_diff_data)

        for (pol_index, list_index) in missing_diff_data:
            height_data[pol_index].insert(list_index, (0, np.nan))
        return height_data, diff_data

    height_data, diff_data = estimate_missing_height_data(height_data=height_data, diff_data=diff_data)

    # Display data
    x = [i * 10 for i in polarization_iterator]
    height_array_mean_std = [list(map(list, zip(*mean_std_list))) for mean_std_list in height_data]  # Polar[ Peaks[] ]
    y_height = list(map(list, zip(*[mean for (mean, std) in height_array_mean_std])))
    y_height_err = list(map(list, zip(*[std for (mean, std) in height_array_mean_std])))
    diff_array_mean_std = [list(map(list, zip(*mean_std_list))) for mean_std_list in diff_data]  # Polar[ Peaks[] ]
    y_diff = list(map(list, zip(*[mean for (mean, std) in diff_array_mean_std])))
    y_diff_err = list(map(list, zip(*[std for (mean, std) in diff_array_mean_std])))

    # Define plot
    fig, (ax0, ax1) = plt.subplots(2, 1)
    for i in range(len(y_height)):
        ax0.errorbar(x=x, y=y_height[i], yerr=y_height_err[i], fmt='', label=f'Peak ({i})')
    for i in range(len(y_diff)):
        ax1.errorbar(x=x, y=y_diff[i], yerr=y_diff_err[i], fmt='', label=f'Peak ({i})-to-({i+1})')

    # Set labels
    fig.suptitle(f'Peak relations focused on (q={long_mode}, m+n={trans_mode})\n'
                 f'(peak labeling: min-max resonance distance)')
    ax0.set_xlabel('Polarization [Degrees]')
    ax0.set_ylabel('Transmission [a.u.]')
    # ax0.set_yscale('log')
    ax0.grid(True)
    ax0.legend(bbox_to_anchor=(1.01, 1), loc='upper left')
    ax1.set_xlabel('Polarization [Degrees]')
    ax1.set_ylabel('Peak Distance [nm]')
    # ax1.set_yscale('log')
    ax1.grid(True)
    ax1.legend(bbox_to_anchor=(1.01, 1), loc='upper left')
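# Usage sketch for get_polarized_comparison (illustrative only): filename_func maps
# (iteration, polarization) to a measurement file name and is expected to raise FileNotFoundError
# for missing files. The template below combines the iteration and polarization naming patterns
# seen in the __main__ block and may need adjusting for other measurement sets.
def _example_get_polarized_comparison():
    """Hedged example: compare peak heights and distances over a polarization sweep."""
    def _filename_func(iteration: int, polarization: int) -> str:
        # Hypothetical file-name template; actual availability of files is not guaranteed
        return 'transrefl_hene_0_3s_10V_PMT4_rate1300000.0itteration{}_pol{:0=2d}0'.format(
            iteration, polarization)

    get_polarized_comparison(filename_func=_filename_func,
                             sample_file='samples_0_3s_10V_rate1300000.0',
                             long_mode=0,
                             trans_mode=2)
    plt.show()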