Example No. 1
def one_mmt_set(times, theta, torque, b, b_prime, k, k_prime, i):
    """Measure one set of frequency, amplitude and phase values, given the 
    values of the relevant parameters."""
    w_res = theory.w_res_gamma(b - b_prime, k - k_prime, i)[0]
    times, theta, torque = h.check_types_lengths(times, theta, torque)
    if b - b_prime >= 0:
        if b - b_prime == 0 and np.isreal(w_res):
            # filter out the transient frequency if it will never
            # decay on its own.
            theta = remove_one_frequency(times, theta, w_res)

        # Will only reach steady state if b - b' >=0, otherwise no
        # point making a response curve. b - b' = 0 has two steady state
        # frequencies, the transient and PI. Eliminate the transient first.
        ss_times = identify_ss(times, theta)

        if ss_times is not False:
            frq, fft_theta = calc_fft(
                times[(times >= ss_times[0]) * (times <= ss_times[1])],
                theta[(times >= ss_times[0]) * (times <= ss_times[1])])
            # For low frequencies, the signal must also span a sufficiently
            # long time for the peak position to be measured properly.

            # Half-amplitude of peak used to calculate bandwidth.
            freq = calc_freqs(np.absolute(fft_theta), frq, n_peaks=1)
            amp = calc_one_amplitude(
                theta[(times >= ss_times[0]) * (times <= ss_times[1])])
            phase = calc_phase(theta, torque)
            return np.array([freq, amp, phase])
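
The helpers used above (identify_ss, calc_fft, calc_freqs, calc_one_amplitude, calc_phase, theory.w_res_gamma) are project-specific and not shown here. A minimal standalone sketch of the same peak-frequency and amplitude measurement, assuming the steady-state window is already known, using NumPy's FFT directly on an illustrative signal:

import numpy as np

# Illustrative steady-state signal: 0.05 rad amplitude at 1.5 Hz.
times = np.linspace(0, 100, 10000)
theta = 0.05 * np.sin(2 * np.pi * 1.5 * times + 0.3)

dt = times[1] - times[0]
fft_theta = np.fft.rfft(theta)
frq = np.fft.rfftfreq(times.size, d=dt)

freq = frq[np.argmax(np.absolute(fft_theta))]   # position of the spectral peak
amp = np.sqrt(2) * np.std(theta)                # amplitude of a pure sinusoid
print(freq, amp)                                # roughly 1.5 Hz and 0.05 rad
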
Example No. 2
def calculate_sine_pi(t, b, k, i, g_0_mag, w_d, phi):
    """Calculate the particular integral contributions to theta and omega for a 
    sinusoidal forcing torque.
    :param t: Array or single value of time values.
    :param b: Damping coefficient
    :param k: Elastic coefficient.
    :param i: Moment of inertia.
    :param g_0_mag: Amplitude of driving torque.
    :param w_d: Angular frequency.
    :param phi: Phase in radians.
    :return: Theta and omega arrays at the times in t for the PI solution 
    part."""
    # Check all parameters are of correct format.
    g_0_mag, w_d, phi = h.check_types_lengths(g_0_mag, w_d, phi)
    num_times = t.size

    # Calculate the particular integral coefficients a_0 and b_0.
    a_0 = g_0_mag * (k - i * w_d ** 2) / (
        b ** 2 * w_d ** 2 + (k - i * w_d ** 2) ** 2)
    b_0 = -g_0_mag * b * w_d / (b ** 2 * w_d ** 2 + (k - i * w_d ** 2) ** 2)
    a_0, b_0, w_d = np.array([a_0]).T, np.array([b_0]).T, np.array([w_d]).T

    theta_pi = a_0 * np.sin(np.outer(w_d, t) + np.outer(phi, np.ones(
        num_times))) + b_0 * np.cos(np.outer(w_d, t) +
                                    np.outer(phi, np.ones(num_times)))
    theta_pi = np.sum(theta_pi, axis=0)
    omega_pi = a_0 * w_d * np.cos(np.outer(w_d, t) + np.outer(phi, np.ones(
        num_times))) - b_0 * w_d * np.sin(np.outer(w_d, t) +
                                          np.outer(phi, np.ones(num_times)))
    omega_pi = np.sum(omega_pi, axis=0)
    return np.array([theta_pi, omega_pi])
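
As a quick sanity check on the coefficients above: for a single driving term, the amplitude sqrt(a_0**2 + b_0**2) should reduce to the standard driven, damped oscillator result g_0 / sqrt((k - i*w_d**2)**2 + (b*w_d)**2). A standalone sketch with arbitrary parameter values:

import numpy as np

b, k, i = 0.5, 2.0, 1.0     # damping, elastic coefficient, moment of inertia
g_0, w_d = 1.0, 1.3         # driving amplitude and angular frequency

denom = b**2 * w_d**2 + (k - i * w_d**2)**2
a_0 = g_0 * (k - i * w_d**2) / denom
b_0 = -g_0 * b * w_d / denom

amp_from_coeffs = np.hypot(a_0, b_0)
amp_expected = g_0 / np.sqrt((k - i * w_d**2)**2 + (b * w_d)**2)
print(np.isclose(amp_from_coeffs, amp_expected))   # True
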
Example No. 3
def real_norm_correlations(x_axis, y_axis, n_per_segment):
    """Get normalised correlations of consecutive y segments with n_per_segment 
    points per segment using REAL SPACE measurements."""
    x_axis, y_axis = h.check_types_lengths(x_axis.squeeze(), y_axis.squeeze())
    len_y = y_axis.size  # x and y must be 1D and of the same length.

    # Use as many whole segments of n_per_segment points as fit in the data;
    # any leftover points at the end are dropped so np.split divides evenly.
    split_into = len_y // n_per_segment
    go_up_to = n_per_segment * split_into

    # Calculate correlations of y values and corresponding midpoint of x window.
    combined = np.array([x_axis, y_axis]).T
    splitted = np.array(np.split(combined[:go_up_to, :], split_into, axis=0))

    x_split = splitted[:, :, 0]
    y_split = splitted[:, :, 1]
    x_means = np.mean(x_split, axis=1)
    y_split = np.array(y_split) - np.mean(y_axis)    # subtract mean.
    y_roll = np.roll(y_split, 1, axis=0)
    corr = real_corr(y_split, y_roll)
    norm_corr = corr / np.sqrt(real_corr(y_split, y_split) * real_corr(
        y_roll, y_roll))
    return x_means, norm_corr
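
The correlation helper real_corr is not shown above; the sketch below assumes it returns the mean elementwise product of its two arguments over each segment (axis 1), which is one common choice, and reproduces the segment pairing with plain NumPy on illustrative noise data:

import numpy as np

rng = np.random.default_rng(0)
y = rng.normal(size=1000)
n_per_segment = 100

split_into = y.size // n_per_segment
y_split = y[:split_into * n_per_segment].reshape(split_into, n_per_segment)
y_split = y_split - y.mean()            # subtract the overall mean
y_roll = np.roll(y_split, 1, axis=0)    # pair each segment with the previous one

def seg_corr(a, b):
    # Assumed form of real_corr: mean elementwise product per segment.
    return np.mean(a * b, axis=1)

corr = seg_corr(y_split, y_roll)
norm_corr = corr / np.sqrt(seg_corr(y_split, y_split) * seg_corr(y_roll, y_roll))
print(norm_corr)                        # near zero for uncorrelated noise
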
Example No. 4
def get_torque(t, indices, mags, phis, ang_freq):
    """Calculate the torque in Nm given the Fourier components of the torque."""
    indices, mags, phis = h.check_types_lengths(indices, mags, phis)
    num_times = t.size
    torque_parts = np.array([mags]).T * np.sin(np.outer(
        ang_freq * indices, t) + np.outer(phis, np.ones(num_times)))
    torque = np.sum(torque_parts, axis=0)
    return torque
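
A standalone sketch of the same Fourier-sum torque without the h helper, using arbitrary illustrative values: each harmonic indices[n] * ang_freq contributes mags[n] * sin(...) at phase phis[n]:

import numpy as np

t = np.linspace(0, 2, 500)
indices = np.array([1, 3])
mags = np.array([1.0, 0.2])
phis = np.array([0.0, np.pi / 4])
ang_freq = 2 * np.pi * 1.0                   # 1 Hz fundamental

torque_parts = mags[:, None] * np.sin(np.outer(ang_freq * indices, t)
                                      + phis[:, None])
torque = torque_parts.sum(axis=0)            # sum over harmonics
print(torque.shape)                          # (500,)
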
Example No. 5
def analytic_torque(t, omega_ds, amplitudes, phases):
    """Return the value of the analytic driving torque at time t.
    :param t: Time in seconds - a single value.
    :param omega_ds: Angular frequency of the sinusoid.
    :param amplitudes: Amplitude of sinusoid.
    :param phases: Phase of sinusoid in radians."""
    amplitudes, omega_ds, phases = h.check_types_lengths(
        amplitudes, omega_ds, phases)
    torque = 0
    for i in range(len(amplitudes)):
        torque += amplitudes[i] * np.sin(omega_ds[i] * t + phases[i])
    return torque
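
The loop above can also be written as a single vectorised sum. A minimal equivalent sketch, assuming the h helper simply broadcasts its inputs to matching 1D arrays (np.atleast_1d is used here in its place):

import numpy as np

def analytic_torque_vec(t, omega_ds, amplitudes, phases):
    # Broadcast the component parameters and sum their sinusoids at time t.
    omega_ds = np.atleast_1d(omega_ds)
    amplitudes = np.atleast_1d(amplitudes)
    phases = np.atleast_1d(phases)
    return np.sum(amplitudes * np.sin(omega_ds * t + phases))

print(analytic_torque_vec(0.5, [1.0, 2.0], [0.3, 0.1], [0.0, np.pi / 2]))
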
Example No. 6
def fourier_transfer(mags, phis, w_d, g_0, k, i, b, k_pr, b_pr):
    """Calculate the response function for a fourier series input torque. 
    Works for a single driving frequency."""
    mags, phis = h.check_types_lengths(mags, phis)
    w_d = h.convert_to_array(w_d)
    n_terms = len(mags)
    num_freqs = len(w_d)
    term_num = np.arange(1, n_terms + 1, 1)
    k_eff = k - k_pr
    b_eff = b - b_pr
    resps = np.sum(np.outer(mags * np.cos(phis), np.ones(num_freqs)) /
                   ((k_eff / i - np.outer(term_num ** 2, w_d ** 2)) + 1j * (
                       b_eff / i) * np.outer(term_num, w_d)),
                   axis=0) / g_0
    return resps
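
For a single harmonic n at driving frequency w_d, each term of the sum above is proportional to 1 / ((k_eff / i - n**2 * w_d**2) + 1j * (b_eff / i) * n * w_d). A standalone sketch of that one term, with arbitrary parameter values:

import numpy as np

k_eff, b_eff, i = 2.0, 0.3, 1.0
n, w_d = 1, 1.2

# Complex response of a single harmonic term.
resp = 1.0 / ((k_eff / i - n**2 * w_d**2) + 1j * (b_eff / i) * n * w_d)
print(np.abs(resp), np.angle(resp))   # gain and phase lag of this harmonic
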
Example No. 7
def calc_norm_errs(*datasets):
    """Calculate the normalised error for each dataset in datasets, 
    and return those. *datasets is a deconstructed list of lists, i.e.
    datasets = [[exp_1, theory_1], [exp_2, theory_2], ...]. Each set is 
    normalised by the value of theory_n at that point. Both data series in 
    the dataset must have the same dimensions. Also returns the absolute 
    errors."""
    norm_errs = []
    errs = []
    for dataset in datasets:
        assert len(dataset) == 2, "There can only be two data series per " \
                                  "dataset!"
        exp, theory = h.check_types_lengths(dataset[0], dataset[1])

        err = exp - theory
        errs.append(err)

        # For arrays, Inf values are ignored when plotting.
        norm_err = err / theory
        norm_errs.append(norm_err)

    return norm_errs, errs
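
A minimal standalone illustration of the per-dataset error calculation, with arbitrary example arrays and the h type/length check omitted:

import numpy as np

exp = np.array([1.02, 1.98, 3.10])
theory = np.array([1.0, 2.0, 3.0])

err = exp - theory
norm_err = err / theory                 # Inf where theory is zero
print(err, norm_err)
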
Example No. 8
def g_s(t, angular_freq, delta_t, g_0):
    """Return the value of the digitised analytic torque at time t.
    G_s(t) = g_0 * {0                                 for 0 <= t < delta_t,
                    sin(angular_freq * delta_t)       for delta_t <= t < 2 * delta_t,
                    ...,
                    sin(angular_freq * (T - delta_t)) for T - delta_t <= t < T},
    where T is the driving period.
    :param t: Time array in seconds.
    :param angular_freq: Angular frequency of the driving torque.
    :param delta_t: The measurement time interval. Advised to be an integer 
    fraction of T.
    :param g_0: Magnitude of the torque."""
    # Format checks
    t = h.convert_to_array(t)
    g_0, delta_t, angular_freq = h.check_types_lengths(g_0, delta_t,
                                                       angular_freq)
    period = 2 * np.pi / angular_freq
    assert len(g_0) == 1

    # Find the position in the cycle by converting everything to 1st period.
    # Then, convert to the value at the beginning of appropriate ranges.
    t = np.fmod(t, period)
    t_floored = np.floor(t / delta_t) * delta_t
    return g_0 * np.sin(angular_freq * t_floored)
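
A standalone sketch of the same digitisation: the driving sinusoid is sampled and held at intervals of delta_t within each period, with delta_t chosen as an integer fraction of the period as advised, and illustrative parameter values:

import numpy as np

g_0 = 1.0
angular_freq = 2 * np.pi          # period T = 1 s
delta_t = 0.1                     # T / 10

t = np.linspace(0, 2, 1000)
period = 2 * np.pi / angular_freq
t_mod = np.fmod(t, period)                          # position within the period
t_floored = np.floor(t_mod / delta_t) * delta_t     # start of the current interval
torque = g_0 * np.sin(angular_freq * t_floored)
print(torque[:5])
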
Example No. 9
def two_by_n_plotter(datasets,
                     start,
                     params_dict,
                     savepath=None,
                     show=False,
                     tag=None,
                     x_axes_labels=None,
                     y_top_labels=None,
                     y_bottom_labels=None,
                     **kwargs):
    """Plots a 2 x N series of subplots in a single figure. The format for 
    datasets is:
        [
            [
                [[x1_top1, y1_top1],       [x2_top1, y2_top1],       ...],
                [[x1_bottom1, y1_bottom1], [x2_bottom1, y2_bottom1], ...],
            ], 
            ...
        ].
    x1, y1 is one data series, plotted in either the top or bottom plot of 
    set n of N. Error bars may be added by making x or y a 2-column array, 
    with the second column specifying the errors. The x axes of the top and 
    bottom plot are shared in each pair. A third entry, as in [x1, y1, label], 
    is a string specifying the label of the data series, if one is desired 
    for a legend. A fourth entry after that can be used as the fmt (marker 
    style) for that series.
    The axes labels are lists with the appropriate string labels. Axes are 
    labelled from left to right in the figure.
    params_dict specifies the parameters to be written to the text file 
    accompanying the plot, to record the control variables.
    kwargs specifies extra plotting options applied to each subplot just 
    before plotting, such as legend formats. The 'legend' key takes a 
    dictionary of keyword arguments as its value; these are passed into the 
    axis legend method."""

    # create subplots figure
    fig, ax = plt.subplots(ncols=len(datasets),
                           nrows=2,
                           sharex='col',
                           figsize=(21, 10.5))
    x_axis, y_axis_top, y_axis_bottom = False, False, False
    if x_axes_labels is not None:
        x_axis = True
        assert len(datasets) == len(x_axes_labels)
    if y_top_labels is not None:
        y_axis_top = True
        assert len(datasets) == len(y_top_labels)
    if y_bottom_labels is not None:
        y_axis_bottom = True
        assert len(datasets) == len(y_bottom_labels)

    # Perform length of array and format checks.
    for k in range(len(datasets)):
        for j in range(len(datasets[k])):
            for one_series in datasets[k][j]:
                axes = h.check_types_lengths(*one_series[0:2])
                if len(one_series) >= 3 and type(one_series[2]) is str:
                    # Data label used for the third entry.
                    label = one_series[2]
                else:
                    label = None

                if len(one_series) == 4 and type(one_series[3]) is str:
                    # fmt used for the 4th entry.
                    fmt = one_series[3]
                else:
                    fmt = ':'

                for i in range(len(axes)):
                    if not (axes[i].ndim == 2 and axes[i].shape[-1] == 2):
                        # Data has no error bars - create error bars of zero.
                        axes[i] = np.vstack(
                            (axes[i], np.zeros(axes[i].size))).T
                if len(datasets) == 1:
                    axis = ax[j]
                else:
                    axis = ax[j, k]
                axis.errorbar(axes[0][:, 0],
                              axes[1][:, 0],
                              xerr=axes[0][:, 1],
                              yerr=axes[1][:, 1],
                              fmt=fmt,
                              label=label,
                              markersize=2)
                axis.tick_params(direction='out')
                axis.grid(True)
                if x_axis:
                    if x_axes_labels[k] is not None and j == 1:
                        axis.set_xlabel(x_axes_labels[k],
                                        fontweight='bold',
                                        fontsize=16)
                if j == 0:
                    if y_axis_top:
                        if y_top_labels[k] is not None:
                            axis.set_ylabel(y_top_labels[k],
                                            fontweight='bold',
                                            fontsize=16)
                elif j == 1:
                    if y_axis_bottom:
                        if y_bottom_labels[k] is not None:
                            axis.set_ylabel(y_bottom_labels[k],
                                            fontweight='bold',
                                            fontsize=16)
                axis.ticklabel_format(axis='both', useOffset=False)
                axis.ticklabel_format(axis='y',
                                      style='sci',
                                      scilimits=(0, 0))
                t = axis.yaxis.get_offset_text()
                t.set_size(14)
                axis.tick_params(labelsize=14)

                if 'legend' in kwargs and label is not None:
                    legend_params = kwargs['legend']
                    axis.legend(**legend_params)

    plot_name, descrip_name = _create_names(start, tag, filetype='png')
    if savepath is not None:
        # Save only if save path is not none.
        fig.savefig(savepath + plot_name, dpi=300)
        with open(savepath + descrip_name, 'w') as f:
            for key in params_dict:
                if type(params_dict[key]) is np.ndarray:
                    # .item() replaces the deprecated np.asscalar.
                    f.write('{}: {}\r\n'.format(key,
                                                params_dict[key].item()))
                else:
                    f.write('{}: {}\r\n'.format(key, params_dict[key]))
    if show:
        # fig_manager = plt.get_current_fig_manager()
        # fig_manager.window.showMaximized()
        plt.show()
    plt.close(fig)
    return
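
two_by_n_plotter depends on project helpers (h, _create_names) that are not shown here, so a full usage example cannot be run as-is. The sketch below is a minimal standalone illustration of the datasets layout it expects, for a single column (N = 1) with shared x axes, using plain matplotlib and arbitrary data:

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 10, 200)
datasets = [
    [
        [[x, np.sin(x), 'top series']],     # top plot of column 1
        [[x, np.cos(x), 'bottom series']],  # bottom plot of column 1
    ],
]

fig, ax = plt.subplots(ncols=len(datasets), nrows=2, sharex='col',
                       squeeze=False)
for k, (top, bottom) in enumerate(datasets):
    for j, row in enumerate((top, bottom)):
        for series in row:
            ax[j, k].plot(series[0], series[1], label=series[2])
            ax[j, k].legend()
plt.close(fig)
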