def get_outlier_based_variance(
    ref_array: Iterable[Tuple[List[float], List[float]]]
) -> Tuple[List[float], List[float]]:
    """Group (distance, delta-time) samples into time clusters and return the
    per-cluster mean delta time (x) and distance variance (y)."""
    # Flatten the (sqrt_diff, dt) pairs, ordered by their delta-time arrays
    flattened = []
    for sqrt_diff, dt in sorted(ref_array, key=lambda pair: pair[1]):
        flattened.extend((sqrt_diff[k], dt[k]) for k in range(len(dt)))

    sorted_distances = [distance for distance, _ in flattened]
    sorted_delta_time = [delta for _, delta in flattened]

    # Gaps between consecutive sorted delta times mark potential cluster breaks
    time_distances = [later - earlier
                      for earlier, later in zip(sorted_delta_time,
                                                sorted_delta_time[1:])]
    cut_off = np.mean(time_distances) + 1.5 * np.std(time_distances)  # TODO: Hardcoded time interval separation
    # Detect statistical outliers (unusually large time gaps)
    outlier_indices = LabeledPeakCollection._get_outlier_indices(
        values=time_distances, cut_off=cut_off)
    # Cluster boundaries: start, one past each outlier gap, and the array end
    split_indices = ((0, )
                     + tuple(gap_index + 1 for gap_index in outlier_indices)
                     + (len(time_distances) + 1, ))
    boundaries = list(zip(split_indices, split_indices[1:]))
    y_array = [np.var(sorted_distances[lo:hi]) for lo, hi in boundaries]
    x_array = [np.mean(sorted_delta_time[lo:hi]) for lo, hi in boundaries]
    return x_array, y_array
# Esempio n. 2
def plot_mode_distances(collection: LabeledPeakCollection) -> plt.axes:
    """Plot, for each longitudinal mode q, the distance of every transverse
    mode cluster to the fundamental (T=0) cluster versus transverse mode id.

    :param collection: labeled peak collection providing q_dict and clusters.
    :return: the matplotlib axes the curves were drawn on.
    """
    _, _ax = plt.subplots()
    # Cycle longitudinal modes
    min_q = min(collection.q_dict.keys())
    mode_sequence_range = range(min_q + 1, max(collection.q_dict.keys()) + 2)
    for i in mode_sequence_range:
        cluster_array = collection.get_labeled_clusters(long_mode=i,
                                                        trans_mode=None)
        cluster_dict = {
            cluster.get_transverse_mode_id: cluster
            for cluster in cluster_array
        }
        xs = list(range(max(cluster_dict.keys()) + 1))
        ys = list(range(len(xs)))
        for j in range(len(xs)):
            if j in cluster_dict:
                xs[j] = j
                # Distance relative to the fundamental (T=0) cluster.
                # NOTE(review): assumes transverse mode 0 is always present —
                # a KeyError would be raised otherwise; confirm upstream.
                ys[j] = cluster_dict[j].get_avg_x - cluster_dict[0].get_avg_x
            else:
                # Missing transverse mode: insert NaN to break the plotted line
                xs[j] = float('nan')
                ys[j] = float('nan')
        _ax.plot(xs, ys, ls='-', label=f'q={i}')
    _ax.set_title('linearity of transverse mode distances')
    _ax.grid(True)
    plt.legend()
    # Fix: the function is annotated -> plt.axes but previously returned None
    return _ax
# Esempio n. 3
def plot_peak_relation(collection: LabeledPeakCollection,
                       meas_class: SyncMeasData) -> plt.axes:
    """Plot the measurement with its labeled clusters and mark the value
    slice (start/end) of the lowest longitudinal mode, when defined."""
    # Store plot figure and axis
    _, result_ax = plt.subplots()
    result_ax = plot_class(axis=result_ax, measurement_class=meas_class)

    # Determine mode sequence corresponding to first FSR
    try:
        cluster_array, value_slice = collection.get_mode_sequence(
            long_mode=collection.get_min_q_id)
        # Mark the slice boundaries: start in red, end in green
        result_ax.axvline(x=value_slice[0], color='r', alpha=1)
        result_ax.axvline(x=value_slice[1], color='g', alpha=1)
    except ValueError:
        pass  # First FSR not well defined; skip the boundary markers
    result_ax = plot_cluster_collection(axis=result_ax, data=collection)
    # temp: scaffolding for a predicted-mode overlay (drawing itself disabled)
    from src.generate_luk_predictions import get_mode_groups, get_predicted_cavity_length_function
    from src.peak_relation import find_nearest_index
    radius = 69477  # nm
    data_class = collection._get_data_class
    for cluster in collection.get_clusters:
        # Select only clusters whose q-reference exists and whose predecessor
        # longitudinal mode is absent from q_dict
        try:
            q_cluster = collection.q_dict[cluster.get_longitudinal_mode_id]
        except KeyError:
            continue
        if q_cluster is None or cluster.get_longitudinal_mode_id - 1 in collection.q_dict:
            continue
    return result_ax
# Esempio n. 4
def get_labeled_collection(
        meas_file_base: str,
        iter_count: Iterable[int],
        samp_file: str,
        filepath: Union[str,
                        None] = DATA_DIR) -> Iterable[LabeledPeakCollection]:
    """Returns Iterable for LabeledPeakCluster data."""
    fetch_func = get_file_fetch_func(file_base_name=meas_file_base,
                                     filepath=filepath)
    for iteration in iter_count:
        try:
            # Load, convert, and label one measurement iteration
            container = FileToMeasData(meas_file=fetch_func(iteration=iteration),
                                       samp_file=samp_file,
                                       filepath=filepath)
            container = get_converted_measurement_data(meas_class=container,
                                                       q_offset=Q_OFFSET)
            yield LabeledPeakCollection(
                transmission_peak_collection=identify_peaks(
                    meas_data=container),
                q_offset=Q_OFFSET)
        except FileNotFoundError:
            # Missing iterations are skipped silently
            continue
# Esempio n. 5
def single_source_analysis(meas_file: str,
                           samp_file: str,
                           filepath: Union[str, None] = DATA_DIR):
    """Run the single-measurement pipeline: load, convert voltage to length,
    identify peaks, label clusters, and produce the two diagnostic plots."""
    # Create measurement container instance
    container = FileToMeasData(meas_file=meas_file,
                               samp_file=samp_file,
                               filepath=filepath)

    # # # (Report specific) Plot classification process
    # plot_mode_classification(meas_data=container)  # Plot

    # Apply voltage to length conversion
    container = get_converted_measurement_data(meas_class=container,
                                               q_offset=Q_OFFSET,
                                               verbose=False)

    # Peak Identification
    peaks = identify_peaks(meas_data=container)
    peak_ax = plot_peak_identification(collection=peaks,
                                       meas_class=container)  # Plot

    # Peak clustering and mode labeling
    labeled = LabeledPeakCollection(transmission_peak_collection=peaks,
                                    q_offset=Q_OFFSET)
    rela_ax = plot_peak_relation(collection=labeled,
                                 meas_class=container)  # Plot
# Esempio n. 6
def plot_specific_peaks(axis: plt.axes, data: LabeledPeakCollection,
                        long_mode: Union[int, None],
                        trans_mode: Union[int, None]) -> plt.axes:
    """Plot only the peaks matching the requested mode selection.

    None for either mode id selects all modes along that dimension.
    """
    long_label = "all" if long_mode is None else long_mode
    trans_label = "all" if trans_mode is None else trans_mode
    selection = data.get_labeled_peaks(long_mode=long_mode,
                                       trans_mode=trans_mode)
    return plot_peak_collection(axis=axis,
                                data=selection,
                                label=f'Mode(L:{long_label}, T:{trans_label})')
# Esempio n. 7
def plot_isolated_long_mode(axis: plt.axes, data_class: SyncMeasData,
                            collection: LabeledPeakCollection, long_mode: int,
                            trans_mode: Union[int,
                                              None], **kwargs) -> plt.axis:
    """Restrict the measurement to one longitudinal mode sequence and plot
    the sliced data together with its labeled clusters."""
    try:
        sequence, value_slice = collection.get_mode_sequence(
            long_mode=long_mode, trans_mode=trans_mode)
    except ValueError:
        # Mode sequence could not be resolved; leave the axis untouched
        logging.warning(f'Longitudinal mode {long_mode} not well defined')
        return axis

    # Narrow the data class to the value slice of this mode sequence
    data_class.slicer = get_value_to_data_slice(data_class=data_class,
                                                value_slice=value_slice)
    # Plot sliced data, then overlay the cluster collection
    axis = plot_class(axis=axis, measurement_class=data_class, **kwargs)
    axis = plot_cluster_collection(axis=axis, data=sequence)
    return get_standard_axis(axis=axis)
# Esempio n. 8
def plot_3d_sequence(data_classes: List[SyncMeasData], long_mode: int,
                     trans_mode: Union[int, None]) -> plt.axis:
    """Plot a 3D waterfall of one mode sequence across several measurements.

    Each entry of data_classes contributes one polygon slice along the z
    (polarisation/measurement) axis; detected peaks are scattered on top.
    When the peak count is consistent across measurements, polarisation
    cross sections are additionally drawn as filled polygons.

    :param data_classes: measurement containers, one per polarisation step.
    :param long_mode: longitudinal mode id to isolate.
    :param trans_mode: transverse mode id, or None for all transverse modes.
    :return: the 3D axes (possibly empty when the mode is not well defined).
    """
    # Set 3D plot. Fix: fig.gca(projection='3d') was removed in matplotlib 3.6;
    # add_subplot(projection='3d') is the supported equivalent.
    fig = plt.figure()
    axis = fig.add_subplot(projection='3d')

    # Store slices to ensure equally sized arrays
    cluster_arrays = []
    q_mode_peaks = []
    data_slices = []
    data_slices_range = []
    for data_class in data_classes:
        collection = LabeledPeakCollection(identify_peaks(data_class))
        try:
            cluster_array, value_slice = collection.get_mode_sequence(
                long_mode=long_mode, trans_mode=trans_mode)
            cluster_arrays.append(cluster_array)
            q_mode_peaks.append(collection.q_dict[long_mode])
        except ValueError:
            logging.warning(f'Longitudinal mode {long_mode} not well defined')
            return axis
        data_slice = get_value_to_data_slice(data_class=data_class,
                                             value_slice=value_slice)
        data_slices.append(data_slice)
        data_slices_range.append(get_slice_range(data_slice))  # Store range

    # Prepare plot data: pad every slice symmetrically up to the widest one.
    # Fix: renamed loop variable 'slice' which shadowed the builtin.
    leading_index = data_slices_range.index(max(data_slices_range))
    leading_slice = data_slices[leading_index]
    for i, current_slice in enumerate(data_slices):
        range_diff = get_slice_range(leading_slice) - get_slice_range(current_slice)
        padding = whole_integer_divider(num=range_diff, div=2)
        data_slices[i] = (current_slice[0] - padding[0],
                          current_slice[1] + padding[1])

    sliced_xs = data_classes[leading_index].x_boundless_data[
        leading_slice[0]:leading_slice[1]]
    xs = np.arange(get_slice_range(leading_slice))  # sliced_xs  #
    zs = np.arange(len(data_slices))
    verts = []
    peaks = []
    for i, current_slice in enumerate(data_slices):
        data_class = data_classes[i]
        data_class.slicer = current_slice
        ys = data_class.y_data
        verts.append(list(zip(xs, ys)))
        # Peak scatter plot
        peak_list = flatten_clusters(data=cluster_arrays[i])
        peaks.append(peak_list)  # Collect peaks for polarisation cross section
        yp = [peak.get_y for peak in peak_list]
        xp = [peak.get_relative_index for peak in peak_list]
        zp = [zs[i]] * len(peak_list)  # constant z per measurement slice
        axis.scatter(xp, zp, yp, marker='o')

    # Draw individual measurement polygons
    poly = PolyCollection(verts)
    poly.set_alpha(.7)
    axis.add_collection3d(poly, zs=zs, zdir='y')

    # Draw polarisation cross section
    cross_section_count = len(peaks[0])
    if all(len(peak_array) == cross_section_count
           for peak_array in peaks):  # Able to build consistent cross sections
        cross_peaks = list(
            map(list, zip(*peaks)
                ))  # Transposes peaks-list to allow for cross section ordering
        xc = []
        # Insert 0 bound values
        zc = list(zs)
        zc.insert(0, zc[0])
        zc.append(zc[-1])
        peak_verts = []
        face_colors = [[v, .3, .3]
                       for v in np.linspace(.5, 1., len(cross_peaks))]
        for i, cross_section in enumerate(cross_peaks):
            yc = [peak.get_y for peak in cross_section]
            # Insert 0 bound values so each cross-section polygon is closed
            yc.insert(0, 0)
            yc.append(0)
            xc.append(
                int(
                    np.mean([
                        peak.get_relative_index for peak in cross_section
                    ])))  # np.mean([peak.get_x for peak in cross_section]))  #
            peak_verts.append(list(zip(zc, yc)))

            poly = PolyCollection([list(zip(zc, yc))])  # peak_verts
            poly.set_alpha(1)
            poly.set_facecolor(face_colors[i])
            axis.add_collection3d(poly, zs=xc[-1], zdir='x')

        print('plotting')
    else:
        # Fix: dropped spurious f-prefix (no placeholders in the message)
        logging.warning('Cross section (peak) count is not consistent')

    axis.set_xlabel('Relative cavity length [nm]')
    axis.set_xlim3d(0, len(xs))
    # axis.set_xticks(xs)
    axis.set_ylabel('Polarisation [10 Degree]')  # 'Measurement iterations')
    axis.set_ylim3d(-1, len(zs) + 1)
    # axis.set_yticks([str(10 * angle) for angle in zs])
    axis.set_zlabel('Transmission [a.u.]')
    axis.set_zlim3d(0, 1)
    # Set viewport
    axis.view_init(elev=22, azim=-15)
    return axis
# Esempio n. 9
    # filenames = ['transrefl_hene_1s_10V_PMT5_rate1300000.0itteration{}'.format(i) for i in range(10)]
    filenames = [
        'transrefl_hene_0_3s_10V_PMT4_rate1300000.0itteration1_pol{:0=2d}0'.
        format(i) for i in range(19)
    ]

    meas_iterations = [
        get_converted_measurement_data(
            FileToMeasData(meas_file=file_meas, samp_file=file_samp))
        for file_meas in filenames
    ]
    identified_peaks = [
        identify_peaks(meas_data=data) for data in meas_iterations
    ]
    labeled_peaks = [
        LabeledPeakCollection(transmission_peak_collection=collection)
        for collection in identified_peaks
    ]

    trans_mode = 2
    long_mode = 0
    plot_3d_sequence(data_classes=meas_iterations,
                     long_mode=long_mode,
                     trans_mode=trans_mode)

    def plot_cross_sections():
        # Test
        index = 0

        ax_full, measurement_class = prepare_measurement_plot(filenames[index])
        measurement_class = get_converted_measurement_data(measurement_class)
# Esempio n. 10
def plot_mode_classification(meas_data: SyncMeasData) -> plt.axes:
    """Plots report paper figure for entire classification process.

    Produces a 2x2 panel: (a) raw data, (b) identified peak locations,
    (c) q-mode separation and mode ordering, (d) labeled peaks after the
    voltage-to-length conversion.

    NOTE(review): despite the plt.axes annotation, no value is ever
    returned — confirm whether a `return` was lost.
    """
    from src.peak_identifier import identify_peaks
    from src.peak_relation import LabeledPeakCollection, get_converted_measurement_data
    from src.main import Q_OFFSET
    _fig, ((_ax00, _ax01), (_ax10, _ax11)) = plt.subplots(2, 2, sharey='all')
    # Shared 10-entry color cycle used to distinguish longitudinal modes
    colors = plt.cm.jet(np.linspace(0, 1, 10))

    # Plot raw data
    _ax00.text(1.05,
               1.,
               '(a)',
               horizontalalignment='center',
               verticalalignment='top',
               transform=_ax00.transAxes)
    _ax00 = plot_class(axis=_ax00, measurement_class=meas_data)
    _ax00.set_xlabel('Voltage [V]')

    # Plot peak locations
    _ax01.text(1.05,
               1.,
               '(b)',
               horizontalalignment='center',
               verticalalignment='top',
               transform=_ax01.transAxes)
    peak_collection = identify_peaks(meas_data=meas_data)
    _ax01 = plot_class(axis=_ax01, measurement_class=meas_data, alpha=0.2)
    for i, peak_data in enumerate(peak_collection):
        if peak_data.relevant:
            _ax01.plot(peak_data.get_x,
                       peak_data.get_y,
                       'x',
                       color='r',
                       alpha=1)
    _ax01.set_xlabel('Voltage [V]')

    # Plot q mode separation and mode ordering
    _ax10.text(1.05,
               1.,
               '(c)',
               horizontalalignment='center',
               verticalalignment='top',
               transform=_ax10.transAxes)
    labeled_collection = LabeledPeakCollection(peak_collection)
    _ax10 = get_standard_axis(axis=_ax10)
    min_q = min(labeled_collection.q_dict.keys())
    mode_sequence_range = range(min_q,
                                max(labeled_collection.q_dict.keys()) + 2)
    for i in mode_sequence_range:
        try:
            cluster_array, value_slice = labeled_collection.get_mode_sequence(
                long_mode=i)
            # Get normalized measurement
            x_sample, y_measure = labeled_collection.get_measurement_data_slice(
                union_slice=value_slice)
            _ax10.plot(
                x_sample,
                y_measure,
                alpha=1,
                color=colors[(cluster_array[0].get_longitudinal_mode_id -
                              min_q) % len(colors)])
        except AttributeError:
            # Mode sequence exhausted; stop iterating higher q values
            break
    for i, peak_data in enumerate(labeled_collection):
        if peak_data.relevant:
            _ax10.plot(
                peak_data.get_x,
                peak_data.get_y,
                'x',
                color=colors[(peak_data.get_transverse_mode_id - min_q) %
                             len(colors)],
                alpha=1)
    _ax10.set_xlabel('Voltage [V]')

    # Plot finalized labeled peaks
    _ax11.text(1.05,
               1.,
               '(d)',
               horizontalalignment='center',
               verticalalignment='top',
               transform=_ax11.transAxes)
    # Re-derive the collection after converting voltage to cavity length
    meas_data = get_converted_measurement_data(meas_class=meas_data,
                                               q_offset=Q_OFFSET,
                                               verbose=False)
    labeled_collection = LabeledPeakCollection(
        identify_peaks(meas_data=meas_data), q_offset=Q_OFFSET)
    # _ax11 = plot_class(axis=_ax11, measurement_class=meas_data, alpha=0.2)
    # _ax11 = plot_cluster_collection(axis=_ax11, data=labeled_collection)
    min_q = min(labeled_collection.q_dict.keys())
    mode_sequence_range = range(min_q,
                                max(labeled_collection.q_dict.keys()) + 2)
    for i in mode_sequence_range:
        try:
            cluster_array, value_slice = labeled_collection.get_mode_sequence(
                long_mode=i)
            # Get normalized measurement
            x_sample, y_measure = labeled_collection.get_measurement_data_slice(
                union_slice=value_slice)
            _ax11.plot(
                x_sample,
                y_measure,
                alpha=.2,
                color=colors[(cluster_array[0].get_longitudinal_mode_id -
                              min_q) % len(colors)])
        except AttributeError:
            print(i, f'break out of mode sequence')
            break

    for cluster in labeled_collection.get_clusters:
        if cluster.get_transverse_mode_id == 0:
            plt.gca().set_prop_cycle(None)
        for peak_data in cluster:
            if peak_data.relevant:
                _ax11.plot(
                    peak_data.get_x,
                    peak_data.get_y,
                    'x',
                    color=colors[(peak_data.get_transverse_mode_id - min_q) %
                                 len(colors)],
                    alpha=1)
        # Annotate each cluster with its (longitudinal, transverse) mode ids
        _ax11.text(
            x=cluster.get_avg_x,
            y=cluster.get_max_y,
            s=
            f'({cluster.get_longitudinal_mode_id}, {cluster.get_transverse_mode_id})',
            fontsize=10,
            horizontalalignment='center',
            verticalalignment='bottom')
    _ax11 = get_standard_axis(axis=_ax11)
    _ax11.set_xlabel('Cavity Length [nm]')
    #
    # cluster_collection = collection_class.get_q_clusters  # collection_class.get_clusters
    # piezo_response = fit_piezo_response(cluster_collection=cluster_collection, sample_wavelength=SAMPLE_WAVELENGTH)
    # piezo_response = fit_collection()
    # fit_variables = fit_calibration(voltage_array=data_class.samp_array, reference_transmission_array=import_npy(filename_base)[0], response_func=piezo_response)
    # print(f'TiSaph transmission: T = {1 - fit_variables[1]} (R = {fit_variables[1]})')
    # print(f'Cavity length delta between HeNe and TiSaph measurement: {fit_variables[2]} [nm]')

    # NOTE(review): the loop below references names not defined in this scope
    # (file_samp, fit_piezo_response, SAMPLE_WAVELENGTH) — it appears to be
    # fused in from a different script and will raise NameError if executed.
    # TODO confirm and remove or relocate.
    for i in range(5):
        _filename = 'transrefl_hene_1s_10V_PMT4_rate1300000.0itteration{}'.format(
            i)
        data_class = FileToMeasData(meas_file=_filename,
                                    samp_file=file_samp,
                                    filepath='data/Trans/20210104')
        identified_peaks = identify_peaks(meas_data=data_class)
        collection_class = LabeledPeakCollection(identified_peaks)

        cluster_collection = collection_class.get_q_clusters  # collection_class.get_clusters
        piezo_response = fit_piezo_response(
            cluster_collection=cluster_collection,
            sample_wavelength=SAMPLE_WAVELENGTH,
            verbose=True)
    # # Obtain mean and root-mean-square
    # y_values = [value for line in ax2.lines for value in line.get_ydata()]
    # y_mean = np.mean(y_values)
    # y_rms = np.sqrt(np.sum((y_values - y_mean)**2) / len(y_values))
    # ax2.axhline(y=y_mean, ls='--', color='darkorange')
    # ax2.axhline(y=y_mean+y_rms, ls='--', color='orange', label=r'$\mu + \sigma$' + f': {round(y_mean+y_rms, 2)} [nm]')
    # ax2.axhline(y=y_mean-y_rms, ls='--', color='orange', label=r'$\mu - \sigma$' + f': {round(y_mean-y_rms, 2)} [nm]')

    # plt.tight_layout(pad=.01)