Code Example #1
def plot_peak_normalization_overlap(
        collection: NormalizedPeakCollection) -> plt.Axes:
    def peak_inclusion(peak: NormalizedPeak) -> bool:
        # Keep only peaks with a defined normalized position inside [0, 1]
        return peak.get_norm_x is not None and 0 <= peak.get_norm_x <= 1

    alpha = 0.5
    _, _ax = plt.subplots()  # Store plot figure and axis
    _ax = get_standard_axis(axis=_ax)
    for i in range(max(collection.q_dict.keys())):
        try:
            cluster_array, value_slice = collection.get_mode_sequence(
                long_mode=i)
            # Get normalized measurement
            x_sample, y_measure = collection.get_normalized_meas_data_slice(
                union_slice=value_slice)
            _ax.plot(x_sample, y_measure, alpha=alpha)
            # Get normalized peaks
            peak_array = flatten_clusters(data=cluster_array)
            y = [peak.get_y for peak in peak_array if peak_inclusion(peak)]
            x = [peak.get_norm_x for peak in peak_array if peak_inclusion(peak)]
            _ax.plot(x, y, 'x', alpha=alpha)
        except AttributeError:
            break  # Stop once the next mode sequence is unavailable
    _ax.set_xlabel('Normalized distance ' + r'$[\lambda / 2]$')
    return _ax
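A minimal usage sketch for this plot. Hedged: `meas_data` is a placeholder `SyncMeasData` instance, and constructing the collection via `identify_peaks` follows the pattern in Code Example #6; import paths are project-specific assumptions.

import matplotlib.pyplot as plt

# Hypothetical usage; project-internal names assumed in scope
collection = NormalizedPeakCollection(identify_peaks(meas_data))
ax = plot_peak_normalization_overlap(collection=collection)
plt.show()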
Code Example #2
 def __init__(self, transmission_peak_collection: PeakCollection, **kwargs):
     super().__init__(transmission_peak_collection,
                      **kwargs)  # Constructs q_dict
     # Update internals to represent normalized peak data
     self._mode_clusters = self._set_norm_peaks(self._list)
     self._list = flatten_clusters(
         data=self._mode_clusters
     )  # Update internal collection with normalized data
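The constructor first lets the parent class build its longitudinal-mode bookkeeping (`q_dict`), then swaps the internal peak list for its normalized counterpart. A hedged instantiation sketch (the input names are assumptions based on the other snippets):

# Hypothetical usage: wrap raw transmission peaks in a normalized collection
raw_peaks = identify_peaks(meas_data)  # PeakCollection from a SyncMeasData scan (see Code Example #6)
norm_collection = NormalizedPeakCollection(raw_peaks)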
Code Example #3
 def get_peak_info(self) -> str:
     result = 'Average peaks detected per mode (m + n):'
     for _trans_mode in range(1, self.get_max_transverse_mode + 1):
         _trans_cluster = self._peak_collection.get_labeled_clusters(
             long_mode=None, trans_mode=_trans_mode)
         _average_peaks_per_trans_mode = len(
             flatten_clusters(data=_trans_cluster)) / len(_trans_cluster)
         result += f'\n(m + n = {_trans_mode}) Average peaks: {round(_average_peaks_per_trans_mode, 2)}'
     return result
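Driving this report method is straightforward. Hedged: `analyser` stands in for an instance of whatever class defines `get_peak_info` and `get_max_transverse_mode`; the snippet does not show the class itself.

# Hypothetical instance of the defining class
print(analyser.get_peak_info())  # One 'Average peaks' line per transverse mode order (m + n)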
Code Example #4
 def filter_collection(iterator: Iterable[LabeledPeakCollection]) -> Iterable[LabeledPeakCollection]:
     """Includes filter for undetermined value slice and uncommon peak count"""
     result_tuple = []
     peak_count = []
     for _collection in tqdm(iterator, desc='Pre-Processing'):
         try:
             # Pin value ('pin' is a (long_mode, trans_mode) tuple from the enclosing scope)
             pin_cluster_array, _ = _collection.get_mode_sequence(long_mode=pin[0], trans_mode=pin[1])
         except IndexError:
             # specific mode could not be found
             continue
         count = len(flatten_clusters(data=pin_cluster_array))
         peak_count.append(count)
         result_tuple.append((count, _collection))
     # Filter count entries
     _most_freq_nr = most_frequent(peak_count)
     for (count, _collection) in result_tuple:
         if count == _most_freq_nr:
             yield _collection
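Note that `filter_collection` is a generator and closes over `pin` (and relies on `most_frequent`) from its enclosing scope; compare the slimmed-down variant nested in Code Example #7. A hedged driving sketch, with `pin` and the collection list as assumed inputs:

pin = (5, 1)  # Hypothetical (long_mode, trans_mode) pair normally supplied by the enclosing scope
consistent_collections = list(filter_collection(all_collections))  # all_collections: Iterable[LabeledPeakCollection]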
Code Example #5
def get_peak_most_frequent_peaks(
        collection_classes: List[NormalizedPeakCollection],
        long_mode: int,
        trans_mode: Union[int, None],
        force_number: Union[int, None] = None) -> List[List[LabeledPeak]]:
    peak_clusters = []
    for norm_collection in collection_classes:
        cluster_array = norm_collection.get_labeled_clusters(
            long_mode=long_mode, trans_mode=trans_mode)
        peak_array = flatten_clusters(data=cluster_array)
        peak_clusters.append(peak_array)

    # Most frequently found number of peaks
    max_peaks = int(most_frequent([len(cluster) for cluster in peak_clusters]))
    if force_number is not None:  # Temporarily force a specific peak count
        max_peaks = force_number
    peak_clusters = [cluster for cluster in peak_clusters if len(cluster) == max_peaks]
    # Safe: these originate from a normalized collection, so they are labeled peaks
    return peak_clusters
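A hedged usage sketch (the collection list and mode indices are placeholders):

# Hypothetical inputs: norm_collections is a List[NormalizedPeakCollection]
clusters = get_peak_most_frequent_peaks(
    collection_classes=norm_collections, long_mode=5, trans_mode=1)
# Every returned cluster now contains the same (most frequent) number of labeled peaks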
Code Example #6
# Third-party imports needed by this snippet (project-internal helpers such as
# LabeledPeakCollection and identify_peaks are assumed to be in scope)
import logging
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection
from typing import List, Union


def plot_3d_sequence(data_classes: List[SyncMeasData], long_mode: int,
                     trans_mode: Union[int, None]) -> plt.Axes:
    # Set up 3D plot (fig.gca(projection='3d') is deprecated)
    fig = plt.figure()
    axis = fig.add_subplot(projection='3d')

    # Store slices to ensure equally sized arrays
    cluster_arrays = []
    q_mode_peaks = []
    data_slices = []
    data_slices_range = []
    for data_class in data_classes:
        collection = LabeledPeakCollection(identify_peaks(data_class))
        try:
            cluster_array, value_slice = collection.get_mode_sequence(
                long_mode=long_mode, trans_mode=trans_mode)
            cluster_arrays.append(cluster_array)
            q_mode_peaks.append(collection.q_dict[long_mode])
        except ValueError:
            logging.warning(f'Longitudinal mode {long_mode} not well defined')
            return axis
        data_slice = get_value_to_data_slice(data_class=data_class,
                                             value_slice=value_slice)
        data_slices.append(data_slice)
        data_slices_range.append(get_slice_range(data_slice))  # Store range

    # Prepare plot data
    leading_index = data_slices_range.index(max(data_slices_range))
    leading_slice = data_slices[leading_index]
    for i, data_slice in enumerate(data_slices):
        range_diff = get_slice_range(leading_slice) - get_slice_range(data_slice)
        padding = whole_integer_divider(num=range_diff, div=2)
        data_slices[i] = (data_slice[0] - padding[0], data_slice[1] + padding[1])

    sliced_xs = data_classes[leading_index].x_boundless_data[
        leading_slice[0]:leading_slice[1]]
    xs = np.arange(get_slice_range(leading_slice))  # Alternative: sliced_xs
    zs = np.arange(len(data_slices))
    verts = []
    peaks = []
    for i, data_slice in enumerate(data_slices):
        data_class = data_classes[i]
        data_class.slicer = data_slice
        ys = data_class.y_data
        verts.append(list(zip(xs, ys)))
        # Peak scatter plot
        peak_list = flatten_clusters(data=cluster_arrays[i])
        peaks.append(peak_list)  # Collect peaks for polarisation cross section
        yp = [peak.get_y for peak in peak_list]
        xp = [peak.get_relative_index for peak in peak_list]
        zp = [zs[i]] * len(peak_list)
        axis.scatter(xp, zp, yp, marker='o')

    # Draw individual measurement polygons
    poly = PolyCollection(verts)
    poly.set_alpha(.7)
    axis.add_collection3d(poly, zs=zs, zdir='y')

    # Draw polarisation cross section
    cross_section_count = len(peaks[0])
    # Able to build consistent cross sections only if every measurement has the same peak count
    if all(len(peak_array) == cross_section_count for peak_array in peaks):
        # Transpose the peaks-list to allow for cross-section ordering
        cross_peaks = list(map(list, zip(*peaks)))
        xc = []
        # Insert 0 bound values
        zc = list(zs)
        zc.insert(0, zc[0])
        zc.append(zc[-1])
        peak_verts = []
        face_colors = [[v, .3, .3]
                       for v in np.linspace(.5, 1., len(cross_peaks))]
        for i, cross_section in enumerate(cross_peaks):
            yc = [peak.get_y for peak in cross_section]
            # Insert 0 bound values
            yc.insert(0, 0)
            yc.append(0)
            # Position the cross section at the mean relative peak index
            xc.append(int(np.mean([peak.get_relative_index for peak in cross_section])))  # Alternative: peak.get_x
            peak_verts.append(list(zip(zc, yc)))

            poly = PolyCollection([list(zip(zc, yc))])
            poly.set_alpha(1)
            poly.set_facecolor(face_colors[i])
            axis.add_collection3d(poly, zs=xc[-1], zdir='x')

        # Alternative: draw all cross sections as a single collection
        # poly = PolyCollection(peak_verts)
        # poly.set_alpha(1)
        # axis.add_collection3d(poly, zs=xc, zdir='x')
    else:
        logging.warning('Cross section (peak) count is not consistent')

    axis.set_xlabel('Relative cavity length [nm]')
    axis.set_xlim3d(0, len(xs))
    # axis.set_xticks(xs)
    axis.set_ylabel('Polarisation [10 Degree]')  # Alternative label: 'Measurement iterations'
    axis.set_ylim3d(-1, len(zs) + 1)
    # axis.set_yticks([str(10 * angle) for angle in zs])
    axis.set_zlabel('Transmission [a.u.]')
    axis.set_zlim3d(0, 1)
    # Set viewport
    axis.view_init(elev=22, azim=-15)
    return axis
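A hedged usage sketch; that each `SyncMeasData` scan corresponds to one 10-degree polarisation step is an assumption implied by the y-axis label, and `scan_list` is a placeholder name.

# Hypothetical usage: scan_list holds one SyncMeasData per polarisation step
ax3d = plot_3d_sequence(data_classes=scan_list, long_mode=5, trans_mode=1)
plt.show()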
Code Example #7
# Third-party imports needed by this snippet (project-internal helpers are assumed in scope)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from tqdm import tqdm
from typing import Iterable, List, Tuple


def plot_pinned_focus_top(collection_iterator: Iterable[LabeledPeakCollection], pin: Tuple[int, int], focus: List[Tuple[int, int]]):
    """Create color plot which pins on a single mode and displays one or more reference modes"""

    # Filter consistent measurement peaks
    def filter_collection(iterator: Iterable[LabeledPeakCollection]) -> Iterable[LabeledPeakCollection]:
        """Skips collections with an undetermined (pinned) value slice.
        For the uncommon-peak-count filter variant, see Code Example #4."""
        for _collection in tqdm(iterator, desc='Pre-Processing'):
            try:
                # Pin value: only verify that the pinned mode sequence exists
                _collection.get_mode_sequence(long_mode=pin[0], trans_mode=pin[1])
            except IndexError:
                continue  # Specific mode could not be found
            yield _collection

    # Prepare parameters
    iter_count = 0
    peak_dict = {}  # Dict[Tuple[long, trans, index], Tuple[List[pos_value], List[pos_index]]]
    total_value_slice_array = []
    total_data_slice_array = []
    trans_array = []
    value_focus_array = []
    data_focus_array = []
    data_class_array = []
    total_pin_peak_index = []
    for collection in tqdm(filter_collection(collection_iterator), desc='Process collections'):
        iter_count += 1  # Count processed collections
        data_class = collection._get_data_class  # Access SyncMeasData class corresponding to peak data
        # Pin value
        pin_cluster_array, pin_value_slice = collection.get_mode_sequence(long_mode=pin[0], trans_mode=pin[1])
        # Store peak data
        peak_dict = store_peak_data(dictionary=peak_dict, cluster_array=pin_cluster_array, data_class=data_class)

        # Get corresponding data_slice (index slice) and pin index
        # Focus value
        for j, sub_focus in enumerate(focus):
            try:
                foc_cluster_array, foc_value_slice = collection.get_mode_sequence(long_mode=sub_focus[0], trans_mode=sub_focus[1])
            except IndexError:
                # specific mode could not be found
                continue
            # Store peak data
            peak_dict = store_peak_data(dictionary=peak_dict, cluster_array=foc_cluster_array, data_class=data_class)
            # Get data slice
            foc_data_slice = get_value_to_data_slice(data_class=data_class, value_slice=foc_value_slice)

            # Store value slice
            if len(value_focus_array) <= j:
                value_focus_array.insert(j, foc_value_slice)
                data_focus_array.insert(j, foc_data_slice)
            else:
                value_focus_array[j] = pad_slice(original=value_focus_array[j], additional=foc_value_slice)
                data_focus_array[j] = pad_slice(original=data_focus_array[j], additional=foc_data_slice)

        # total min-max value slice based on pin and focus slices
        total_value_slice = pin_value_slice
        for value_bound in value_focus_array:
            total_value_slice = pad_slice(original=total_value_slice, additional=value_bound)
        # Value to data slice
        total_data_slice = get_value_to_data_slice(data_class=data_class, value_slice=total_value_slice)
        # Define focus peak index offset
        # TODO: Get largest peak position
        key_height_tuple = [((pin[0], pin[1], k), peak.get_y) for k, peak in enumerate(flatten_clusters(pin_cluster_array))]
        sorted_key_height_tuple = sorted(key_height_tuple, key=lambda pair: pair[1])
        # Index offset of the highest pinned peak relative to the total slice start
        first_peak_offset = peak_dict[sorted_key_height_tuple[-1][0]][1][-1] - total_data_slice[0]
        total_pin_peak_index.append(first_peak_offset)

        total_value_slice_array.append(total_value_slice)  # Store value slice
        total_data_slice_array.append(total_data_slice)  # Store data slice
        trans_array.append(np.asarray(collection.get_measurement_data_slice(union_slice=total_data_slice)[1]))  # Store y-data
        # Store data class of largest array len
        data_class_array.append(data_class)

    # Find the data class corresponding to the largest value slice length
    value_slice_length_array = [value_slice[1] - value_slice[0] for value_slice in total_value_slice_array]
    sorted_data_class_array = [data for _, data in sorted(zip(value_slice_length_array, data_class_array), key=lambda pair: pair[0])]  # Rearrange
    data_class_index = find_nearest_index(array=value_slice_length_array, value=np.max(value_slice_length_array))
    lead_data_class = sorted_data_class_array[data_class_index]

    # Clear pin shift: per-trace skip counts that align each trace to the latest pin peak
    total_pin_peak_index = np.asarray(total_pin_peak_index)
    pre_skip = np.max(total_pin_peak_index) - total_pin_peak_index

    # Format data
    lead_index = trans_array.index(max(trans_array, key=len))
    index_offset = total_data_slice_array[lead_index][0]
    trans_array, array_shape = pad_to_pinned(nested_array=trans_array, pre_skip=pre_skip)
    trans_array = np.transpose(trans_array)

    # Define font
    font_size = 22
    plt.rcParams.update({'font.size': font_size})

    # Total slice
    x, y = np.mgrid[0:array_shape[0], 0:array_shape[1]]
    plt.pcolormesh(x, y, trans_array, norm=colors.LogNorm())

    locs, labels = plt.xticks()
    xticks = [int(lead_data_class.x_boundless_data[int(index_offset + index)]) for index in locs[0:-1]]
    plt.xticks(locs[0:-1], xticks)
    plt.title(f'Pinned plot over {iter_count} sample iterations')
    plt.ylabel('Different Iterations [a.u.]', fontsize=font_size)
    plt.xlabel('Cavity length (based on average) [nm]', fontsize=font_size)
    plt.grid(True)
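Finally, a hedged call sketch for the pinned color plot; the mode tuples and `all_collections` are placeholders, and since the function draws via the pyplot state API, the figure is shown afterwards.

# Hypothetical usage: pin on (q=5, m+n=1) and focus on two neighbouring mode sequences
plot_pinned_focus_top(collection_iterator=iter(all_collections),
                      pin=(5, 1), focus=[(5, 2), (5, 3)])
plt.show()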