Example 1
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sb

# lorentzian and centered_lorentzian_mixture are project helpers; a sketch
# of their assumed form follows Example 1.

def plot_hanle_curve(hanle_curve_data, hanle_curve_fit_params, plot_data=True, plot_mean=True, plot_residual=True):
    f2, ax2 = plt.subplots()

    # Plot the data
    if plot_data:
        ax2.plot(
            hanle_curve_data.Field,
            hanle_curve_data.FR,
            "o",
            color=sb.xkcd_rgb["black"],
            rasterized=True,
            alpha=0.3,
            label="Raw Data",
        )

    # Group the data by field so repeated sweeps can be averaged
    dm = hanle_curve_data.groupby("Field")

    # Plot the curve mean
    if plot_mean:
        if plot_data:
            color = sb.xkcd_rgb["mango"]
            label = "Average"
        else:
            color = sb.xkcd_rgb["black"]
            label = None

        ax2.plot(dm.Field.mean(), dm.FR.mean(), "o", color=color, alpha=0.5, markersize=4, label=label)

    # Dense grid of field values for plotting smooth curves
    field_grid = np.linspace(np.min(hanle_curve_data.Field), np.max(hanle_curve_data.Field), 1000)

    # Build the fitted Lorentzian mixture. Parameters alternate
    # (amplitude, inv_hwhm) per component, with the constant offset last.
    count = len(hanle_curve_fit_params["amplitude"])
    model = centered_lorentzian_mixture(count)
    params = np.zeros(2 * count + 1)
    params[:-1:2] = hanle_curve_fit_params["amplitude"]
    params[1:-1:2] = hanle_curve_fit_params["inv_hwhm"]
    params[-1] = hanle_curve_fit_params["offset"]

    # Plot the residual between fit and averaged data, magnified 10x
    if plot_residual:
        ax2.plot(
            dm.Field.mean(),
            10 * (np.array([model(x, *params) for x in dm.Field.mean()]).flatten() - dm.FR.mean()),
            color=sb.xkcd_rgb["dark yellow"],
            linewidth=3,
            label="Residual (10x)",
        )

    # Plot the full fit evaluated on the dense field grid
    ax2.plot(
        field_grid, [model(x, *params) for x in field_grid], color=sb.xkcd_rgb["tomato red"], linewidth=3, label="Fit"
    )

    # Plot each Lorentzian component separately (centered at zero, no offset)
    colors = [sb.xkcd_rgb["cobalt"], sb.xkcd_rgb["azure"]] + list(sb.xkcd_rgb.values())
    for i in range(count):
        ax2.plot(
            field_grid,
            [lorentzian(x, params[2 * i], params[2 * i + 1], 0, 0) for x in field_grid],
            color=colors[i],
            linewidth=2,
            label=("peak %d" % (i + 1)),
        )

    # Relabel the y-axis ticks in microradians
    ax2.set_yticklabels(ax2.get_yticks() / 1e-6)

    ax2.legend()

    ax2.set_ylabel(r"Faraday Rotation ($\mu rad$)")
    ax2.set_xlabel("Field (Gauss)")
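
Both examples call lorentzian and centered_lorentzian_mixture, which are defined elsewhere in the project. The sketch below is a hypothetical reconstruction inferred only from how they are called above; the HWHM parameterization and the (amplitude, inv_hwhm) parameter packing are assumptions, not the project's actual definitions:

import numpy as np

def lorentzian(x, amplitude, inv_hwhm, x0, offset):
    # Assumed form: peak height `amplitude`, half-width at half-maximum
    # equal to 1 / inv_hwhm, centered at x0, plus a constant offset.
    return amplitude / (1 + ((x - x0) * inv_hwhm) ** 2) + offset

def centered_lorentzian_mixture(lorentzian_count, constant_offset=None):
    # Returns model(x, *params) where params alternate (amplitude, inv_hwhm)
    # per component. A trailing free offset parameter is expected unless
    # constant_offset fixes it here.
    def model(x, *params):
        total = sum(
            lorentzian(x, params[2 * i], params[2 * i + 1], 0, 0)
            for i in range(lorentzian_count)
        )
        return total + (params[-1] if constant_offset is None else constant_offset)

    return model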
Example 2
import numpy as np
from sklearn.preprocessing import StandardScaler

# centered_lorentzian_mixture and global_curve_fit are project helpers.

def global_hanle_curve_fit(
    field_data,
    faraday_rotation_data,
    lorentzian_count,
    niter=100,
    T=100,
    stepsize=500,
    threads_for_repeats=8,
    constant_offset=None,
    penalize_offset=False,  # setting this to True precludes regularization
    regularization=None,
    measured_offset=None,
):

    # Construct initial conditions that spread out the widths. The source data
    # typically contains a distribution of linewidths, so evenly spaced initial
    # widths let the fitter search over a range of them. linspace with
    # endpoint=False always yields exactly lorentzian_count values, whereas
    # np.arange could drop one for larger counts.
    inv_hwhm_init = 1 / np.linspace(0.1, 1, lorentzian_count, endpoint=False)
    amplitudes_init = np.random.rand(lorentzian_count)

    if constant_offset is None:
        # Construct a model with the background constant as a free parameter
        model = centered_lorentzian_mixture(lorentzian_count=lorentzian_count)
        init_p = np.zeros(2 * lorentzian_count + 1)
        init_p[:-1:2] = amplitudes_init
    else:
        # Construct a model with the background as a fixed constant (not available to be optimized)
        model = centered_lorentzian_mixture(
            lorentzian_count=lorentzian_count, constant_offset=constant_offset
        )  # TODO: this needs to be properly scaled or it's essentially zero; this feature is useless right now
        init_p = np.zeros(2 * lorentzian_count)
        init_p[::2] = amplitudes_init

    init_p[1::2] = inv_hwhm_init

    # Create scalers that give the data zero mean and unit standard deviation
    scaler_Field = StandardScaler().fit(field_data.reshape(-1, 1))
    scaler_FR = StandardScaler().fit(faraday_rotation_data.reshape(-1, 1))

    field = scaler_Field.transform(field_data.reshape(-1, 1))
    fr = scaler_FR.transform(faraday_rotation_data.reshape(-1, 1))

    cost_func_kwargs = {}
    if measured_offset is not None:
        measured_offset = scaler_FR.transform(np.array([measured_offset]).reshape(-1, 1))[0, 0]
        print("using a measured offset penalty for scaled offset %f" % measured_offset)
        cost_func_kwargs["measured_offset"] = measured_offset
    if regularization is not None:
        cost_func_kwargs["regularization"] = regularization

    p = global_curve_fit(
        model,
        field.flatten(),
        fr.flatten(),
        init_p,
        basinhopping_kwargs={"niter": niter, "stepsize": stepsize, "T": T},
        cost_func_kwargs=cost_func_kwargs,
    )

    # Extract the parameters from the solution, and rescale the background
    if constant_offset is None:
        amplitudes_opt = p.x[:-1:2]
        offset_opt = p.x[-1] * scaler_FR.scale_[0] + scaler_FR.mean_[0]
    else:
        amplitudes_opt = p.x[::2]
        offset_opt = constant_offset
    inv_hwhm_opt = np.abs(p.x[1::2])

    # Sort the results with the narrow Lorentzians appearing at the lowest indices
    permute = inv_hwhm_opt.argsort()
    inv_hwhm_opt = inv_hwhm_opt[permute]
    amplitudes_opt = amplitudes_opt[permute]

    # Remove the scaling from the parameters so they are in the units of the source data
    amplitudes_opt = amplitudes_opt * scaler_FR.scale_[0]
    inv_hwhm_opt = inv_hwhm_opt / scaler_Field.scale_[0]

    return {"amplitude": amplitudes_opt, "inv_hwhm": inv_hwhm_opt, "offset": offset_opt, "rms_error": p.fun}
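
A hypothetical end-to-end usage tying the two examples together. Here hanle_df is an assumed pandas DataFrame with a "Field" column (Gauss) and an "FR" column (Faraday rotation, rad), matching the column names plot_hanle_curve reads:

# Fit a two-component Hanle curve, then plot data, fit, components, and residual
fit_params = global_hanle_curve_fit(
    hanle_df.Field.values,
    hanle_df.FR.values,
    lorentzian_count=2,
    niter=200,
)
plot_hanle_curve(hanle_df, fit_params)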