Code example #1
File: test_bsplines.py  Project: Brucechen13/scipy
    def test_basis_element_quadratic(self):
        xx = np.linspace(-1, 4, 20)
        b = BSpline.basis_element(t=[0, 1, 2, 3])
        assert_allclose(b(xx),
                        splev(xx, (b.t, b.c, b.k)), atol=1e-14)
        assert_allclose(b(xx),
                        B_0123(xx), atol=1e-14)

        b = BSpline.basis_element(t=[0, 1, 1, 2])
        xx = np.linspace(0, 2, 10)
        assert_allclose(b(xx),
                np.where(xx < 1, xx*xx, (2.-xx)**2), atol=1e-14)
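
# Added for illustration (not part of the test file): a quick hand check of the
# quadratic element on knots [0, 1, 2, 3] against its standard piecewise form
#   B(x) = x^2/2 on [0, 1],  (-2x^2 + 6x - 3)/2 on [1, 2],  (3 - x)^2/2 on [2, 3].
import numpy as np
from scipy.interpolate import BSpline

b = BSpline.basis_element([0, 1, 2, 3])
x = np.array([0.5, 1.5, 2.5])
np.testing.assert_allclose(b(x),
                           [0.5**2 / 2, (-2 * 1.5**2 + 6 * 1.5 - 3) / 2, (3 - 2.5)**2 / 2])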
Code example #2
File: test_bsplines.py  Project: Brucechen13/scipy
    def test_antiderivative_method(self):
        b = _make_random_spline()
        t, c, k = b.tck
        xx = np.linspace(t[k], t[-k-1], 20)
        assert_allclose(b.antiderivative().derivative()(xx),
                        b(xx), atol=1e-14, rtol=1e-14)

        # repeat with n-D array for c
        c = np.c_[c, c, c]
        c = np.dstack((c, c))
        b = BSpline(t, c, k)
        assert_allclose(b.antiderivative().derivative()(xx),
                        b(xx), atol=1e-14, rtol=1e-14)
Code example #3
File: test_bsplines.py  Project: Brucechen13/scipy
def _sum_basis_elements(x, t, c, k):
    n = len(t) - (k+1)
    assert n >= k+1
    assert len(c) >= n
    s = 0.
    for i in range(n):
        b = BSpline.basis_element(t[i:i+k+2], extrapolate=False)(x)
        s += c[i] * np.nan_to_num(b)   # zero out out-of-bounds elements
    return s
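
# A small hedged check (not part of the original test file) that the helper above
# reproduces a full BSpline evaluation on a clamped knot vector:
import numpy as np
from scipy.interpolate import BSpline

k = 3
t = np.r_[[0.] * (k + 1), [1., 2., 3.], [4.] * (k + 1)]   # clamped cubic knot vector
c = np.arange(len(t) - k - 1, dtype=float)
xx = np.linspace(0, 4, 17)
np.testing.assert_allclose(BSpline(t, c, k)(xx),
                           _sum_basis_elements(xx, t, c, k), atol=1e-12)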
Code example #4
File: test_bsplines.py  Project: alchemyst/scipy
    def test_integral(self):
        b = BSpline.basis_element([0, 1, 2])  # x for x < 1 else 2 - x
        assert_allclose(b.integrate(0, 1), 0.5)
        assert_allclose(b.integrate(1, 0), -0.5)

        # extrapolate or zeros outside of [0, 2]; default is yes
        assert_allclose(b.integrate(-1, 1), 0)
        assert_allclose(b.integrate(-1, 1, extrapolate=True), 0)
        assert_allclose(b.integrate(-1, 1, extrapolate=False), 0.5)
Code example #5
File: test_bsplines.py  Project: Brucechen13/scipy
    def test_integral(self):
        b = BSpline.basis_element([0, 1, 2])  # x for x < 1 else 2 - x
        assert_allclose(b.integrate(0, 1), 0.5)
        assert_allclose(b.integrate(1, 0), -1 * 0.5)
        assert_allclose(b.integrate(1, 0), -0.5)

        # extrapolate or zeros outside of [0, 2]; default is yes
        assert_allclose(b.integrate(-1, 1), 0)
        assert_allclose(b.integrate(-1, 1, extrapolate=True), 0)
        assert_allclose(b.integrate(-1, 1, extrapolate=False), 0.5)
        assert_allclose(b.integrate(1, -1, extrapolate=False), -1 * 0.5)

        # Test ``_fitpack._splint()``
        t, c, k = b.tck
        assert_allclose(b.integrate(1, -1, extrapolate=False),
                        _splint(t, c, k, 1, -1)[0])

        # Test ``extrapolate='periodic'``.
        b.extrapolate = 'periodic'
        i = b.antiderivative()
        period_int = i(2) - i(0)
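        # note: with extrapolate='periodic', integrate() folds the limits back into
        # the base interval [0, 2], so period_int is the integral over one full period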

        assert_allclose(b.integrate(0, 2), period_int)
        assert_allclose(b.integrate(2, 0), -1 * period_int)
        assert_allclose(b.integrate(-9, -7), period_int)
        assert_allclose(b.integrate(-8, -4), 2 * period_int)

        assert_allclose(b.integrate(0.5, 1.5), i(1.5) - i(0.5))
        assert_allclose(b.integrate(1.5, 3), i(1) - i(0) + i(2) - i(1.5))
        assert_allclose(b.integrate(1.5 + 12, 3 + 12),
                        i(1) - i(0) + i(2) - i(1.5))
        assert_allclose(b.integrate(1.5, 3 + 12),
                        i(1) - i(0) + i(2) - i(1.5) + 6 * period_int)

        assert_allclose(b.integrate(0, -1), i(0) - i(1))
        assert_allclose(b.integrate(-9, -10), i(0) - i(1))
        assert_allclose(b.integrate(0, -9), i(1) - i(2) - 4 * period_int)
Code example #6
File: spline_fxns.py  Project: neurodata/brainlit
def torsion(
    x: np.ndarray,
    t: np.ndarray,
    c: np.ndarray,
    k: np.integer,
    aux_outputs: bool = False,
) -> np.ndarray:
    r"""Compute the torsion of a B-Spline.

    The torsion measures the failure of a curve, `r(u)`, to be planar.
    If the curvature `k` of a curve is not zero, then the torsion is defined as

    .. math::

        \tau = -n \cdot b',

    where `n` is the principal normal vector, and `b'` the derivative w.r.t. the
    arc length `s` of the binormal vector.

    The torsion can also be computed as

    .. math::
        \tau = \lvert r'(t), r''(t), r'''(t) \rvert / \lVert r'(t) \times r''(t) \rVert^2,

    where `r(u)` is the position vector as a function of time.

    Arguments:
        x: A `1xL` array of parameter values where to evaluate the curve.
            It contains the parameter values where the torsion of the B-Spline will
            be evaluated. It is required to be non-empty, one-dimensional, and
            real-valued.
        t: A `1xm` array representing the knots of the B-spline.
            It is required to be a non-empty, non-decreasing, and one-dimensional
            sequence of real-valued elements. For a B-Spline of degree `k`, at least
            `2k + 1` knots are required.
        c: A `dxn` array representing the coefficients/control points of the B-spline.
            Given `n` real-valued, `d`-dimensional points ::math::`x_k = (x_k(1),...,x_k(d))`,
            `c` is the non-empty matrix whose columns are ::math::`x_1^T,...,x_n^T`. For a
            B-Spline of degree `k`, `n` cannot be less than `m-k-1`.
        k: A non-negative integer representing the degree of the B-spline.

    Returns:
        torsion: A `1xL` array containing the torsion of the B-Spline evaluated at `x`

    References:
    .. [1] Máté Attila, The Frenet–Serret formulas.
        http://www.sci.brooklyn.cuny.edu/~mate/misc/frenet_serret.pdf
    """

    # convert arguments to desired type
    x = np.ascontiguousarray(x)
    t = np.ascontiguousarray(t)
    c = np.ascontiguousarray(c)
    k = operator.index(k)

    if k < 0:
        raise ValueError("The order of the spline must be non-negative")

    check_type(t, np.ndarray)
    t_dim = t.ndim
    if t_dim != 1:
        raise ValueError("t must be one-dimensional")
    if len(t) == 0:
        raise ValueError("t must be non-empty")
    check_iterable_type(t, (np.integer, np.floating))
    if (np.diff(t) < 0).any():
        raise ValueError("t must be a non-decreasing sequence")

    check_type(c, np.ndarray)
    c_dim = c.ndim
    if c_dim > 2:
        raise ValueError("c must be 2D max")
    if len(c.flatten()) == 0:
        raise ValueError("c must be non-empty")
    if c_dim == 1:
        check_iterable_type(c, (np.integer, np.floating))
        # expand dims so that we can cycle through a single dimension
        c = np.expand_dims(c, axis=0)
    if c_dim == 2:
        for d in c:
            check_iterable_type(d, (np.integer, np.floating))
    n_dim = len(c)

    check_type(x, np.ndarray)
    x_dim = x.ndim
    if x_dim != 1:
        raise ValueError("x must be one-dimensional")
    if len(x) == 0:
        raise ValueError("x must be non-empty")
    check_iterable_type(x, (np.integer, np.floating))
    L = len(x)

    # evaluate first, second, and third derivatives
    # deriv, dderiv, ddderiv are (d, L) arrays
    deriv = np.empty((n_dim, L))
    dderiv = np.empty((n_dim, L))
    ddderiv = np.empty((n_dim, L))
    for i, dim in enumerate(c):
        spl = BSpline(t, dim, k)
        deriv[i, :] = spl.derivative(nu=1)(x) if k - 1 >= 0 else np.zeros(L)
        dderiv[i, :] = spl.derivative(nu=2)(x) if k - 2 >= 0 else np.zeros(L)
        ddderiv[i, :] = spl.derivative(nu=3)(x) if k - 3 >= 0 else np.zeros(L)
    # transpose derivs
    deriv = deriv.T
    dderiv = dderiv.T
    ddderiv = ddderiv.T

    cross = np.cross(deriv, dderiv)

    # Could be more efficient by only computing dot products of corresponding rows
    num = np.diag((cross @ ddderiv.T))
    denom = np.linalg.norm(cross, axis=1)**2

    torsion = np.nan_to_num(num / denom)

    if aux_outputs == True:
        return torsion, deriv, dderiv, ddderiv
    else:
        return torsion
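
# A hypothetical usage sketch (not part of the original file): it assumes the
# torsion() function above is importable and builds the (t, c, k) spline
# representation of a 3-D curve with scipy's splprep.
import numpy as np
from scipy.interpolate import splprep

u = np.linspace(0, 4 * np.pi, 100)
pts = np.vstack([np.cos(u), np.sin(u), 0.1 * u])      # points on a helix
(t, c, k), _ = splprep(pts, s=0, k=3)                  # interpolating cubic spline

uu = np.linspace(t[k], t[-k - 1], 50)                  # interior of the parameter range
tau = torsion(uu, t, np.asarray(c), k)                 # torsion evaluated at uu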
Code example #7
class SmoothingSpline(object):
    """ Main cubic smoothing spline class. """
    def __init__(self, x, y, lam=10., max_knots=250, order=3):
        """ Initialize and fit the cubic smoothing spline, using the Bspline basis from
        scipy.

        x: A numpy array of univariate independent variables.
        y: A numpy array of univariate response variables.
        lam: smoothing parameter
        max_knots: max number of knots (i.e. max number of spline basis functions)."""

        ## Start by storing the data
        assert len(x) == len(
            y
        ), "Independent and response variable vectors must have the same length."
        self.x = x
        self.y = y
        self.order = order
        self.lam = lam

        ## Then compute the knots, refactoring to evenly
        ## spaced knots if the number of unique values is too
        ## large.
        self.knots = np.sort(np.unique(self.x))
        if len(self.knots) > max_knots:
            self.knots = np.linspace(self.knots[0], self.knots[-1], max_knots)

        ## Construct the spline basis given the knots
        self._aug_knots = np.hstack([
            self.knots[0] * np.ones((order, )), self.knots,
            self.knots[-1] * np.ones((order, ))
        ])
        self.splines = [
            BSpline(self._aug_knots, coeffs, order)
            for coeffs in np.eye(len(self.knots) + order - 1)
        ]

        ## Finally, with the basis functions, we can fit the
        ## smoother.
        self._fit()

    def _fit(self):
        """ Subroutine for fitting, called by init. """

        ## Start the fit procedure by constructing the matrix B
        B = np.array([sp(self.x) for sp in self.splines]).T

        ## Then, construct the penalty matrix by first computing second derivatives
        ## on the knots and then approximating the integral of second derivative products
        ## with the trapezoid rule.
        d2B = np.array([sp.derivative(2)(self.knots) for sp in self.splines])
        weights = np.ones(self.knots.shape)
        weights[1:-1] = 2.
        Omega = np.dot(d2B, np.dot(np.diag(weights), d2B.T))

        ## Finally, invert the matrices to construct values of interest.
        self.ridge_op = np.linalg.inv(np.dot(B.T, B) + self.lam * Omega)

        ## From there, we can compute the coefficient matrix H and the
        ## S matrix (i.e. the hat matrix).
        H = np.dot(self.ridge_op, B.T)
        self.S = np.dot(B, H)
        self.edof = np.trace(self.S)
        self.gamma = np.dot(H, self.y)

        ## Finally, construct the smoothing spline
        self.smoother = BSpline(self._aug_knots, self.gamma, self.order)

        ## And compute the covariance matrix of the coefficients
        y_hat = self.smoother(self.x)
        self.rss = np.sum((self.y - y_hat)**2)
        self.var = self.rss / (len(self.y) - self.edof)
        self.cov = self.var * self.ridge_op

        return

    def __call__(self, x, cov=False):
        """ Evaluate it """

        ## if you want the covariance matrix
        if cov:

            ## Set up the matrix of splines evaluations and similarity transform the
            ## covariance in the coefficients
            F = np.array([BSpline(self._aug_knots, g, self.order)(x) \
                          for g in np.eye(len(self.knots) + self.order - 1)]).T
            covariance_matrix = np.dot(F, np.dot(self.cov, F.T))

            ## Evaluate the mean using the point estimate
            ## of the coefficients
            point_estimate = np.dot(F, self.gamma)

            return point_estimate, covariance_matrix

        else:
            return self.smoother(x)

    def derivative(self, x, degree=1):
        """ Evaluate the derivative of degree = degree. """

        return self.smoother.derivative(degree)(x)

    def correlation_time(self):
        """ Use the inferred covariance matrix to compute an estimate of the correlation time
        by approximating the width of correlation for a central knot with its neighbors. """

        ## Select a central knot
        i_mid = int(len(self.knots) / 2)

        ## Compute a normalized distribution
        distribution = np.abs(self.cov[i_mid][1:-1])
        distribution = distribution / trapz(distribution, x=self.knots)

        ## Compute the mean and variance
        avg = self.knots[i_mid - 1]
        var = trapz(distribution * (self.knots**2), x=self.knots) - avg**2

        return np.sqrt(var)
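
# A hypothetical usage sketch (not part of the original module); it assumes the
# module-level imports the class relies on (numpy as np, scipy.interpolate.BSpline)
# and uses made-up noisy data:
import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(0., 10., 200)
y = np.sin(x) + 0.1 * rng.standard_normal(x.shape)

spl = SmoothingSpline(x, y, lam=1.0)
y_smooth = spl(x)                       # smoothed values at the data points
mean, cov = spl(x, cov=True)            # point estimate plus covariance of the fit
slope = spl.derivative(x, degree=1)     # first derivative of the smoother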
Code example #8
# Construct the linear spline ``x if x < 1 else 2 - x`` on the base
# interval :math:`[0, 2]`, and integrate it

from scipy.interpolate import BSpline
b = BSpline.basis_element([0, 1, 2])
b.integrate(0, 1)
# array(0.5)

# If the integration limits are outside of the base interval, the result
# is controlled by the `extrapolate` parameter

b.integrate(-1, 1)
# array(0.0)
b.integrate(-1, 1, extrapolate=False)
# array(0.5)

import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.grid(True)
ax.axvline(0, c='r', lw=5, alpha=0.5)  # base interval
ax.axvline(2, c='r', lw=5, alpha=0.5)
xx = [-1, 1, 2]
ax.plot(xx, b(xx))
plt.show()
Code example #9
def plotDLP(self, order=2, cutoff=0.05, tof=False, DBDplot=False):
    if DBDplot == False:
        raw_dlp = lplt.get_data(self.fname)
        lowpass_dlp = lplt.butter_filter(raw_dlp, order, cutoff)
        average_dlp = lplt.butter_avg(lowpass_dlp)
        time, density = lplt.density(average_dlp)

        ax, fig = plt.subplots()
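        # note: plt.subplots() returns (Figure, Axes), so with this unpacking
        # `ax` is the Figure and `fig` is the Axes; the fig.plot()/fig.legend()
        # calls below work because of that swap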


        for key in density.keys():
            fig.plot(time, density[key], label=key)
            fig.legend(prop={'size': 7})

            if tof:     # if time of flight is checked
                xmaxPos = np.argmax(density[key], axis=0) / 10
                yminPos = np.min(density[key])
                fig.axvline(x=xmaxPos, color='r', linestyle='--')
                textstr = str(xmaxPos)
                props = dict(boxstyle='square', facecolor='white', alpha=0.0)
                fig.text(
                    xmaxPos,
                    yminPos,
                    textstr,
                    fontsize=9,
                    verticalalignment='center',
                    bbox=props)

        plt.xlabel(r'Time ($\mu$s)')
        plt.ylabel('$n_{e}$ ($m^{-3}$)')
        plt.title('Plasma Density')
        plt.minorticks_on()
        plt.grid(which='major', alpha=0.5)
        plt.grid(which='minor', alpha=0.2)
        plt.show()
    else:

        [raw_I_vals, raw_bias_vals] = dlplt.get_data(self.fname)
        peak_I_vals_dic = dlplt.get_peak_vals(raw_I_vals, raw_bias_vals)
        avg_peak_I_vals = dlplt.peak_avg(peak_I_vals_dic, raw_bias_vals)
        data = dlplt.format_data(raw_bias_vals, avg_peak_I_vals)
        sectioned_data = dlplt.split_data(data)
        regression_data = dlplt.calculate_linear_regressions(sectioned_data)

        tol = 1E-8

        sat_vals, outside_tols =  dlplt.calculate_saturation_values(
                regression_data, tol, 2)

        V_sat = sat_vals['V sat']
        I_sat = sat_vals['I sat']

        electron_temp = dlplt.temperature(V_sat)
        electron_number_density = dlplt.density(V_sat, I_sat)

        # Warning handling
        if outside_tols['sat_V_diff'] == True:
            message = ('Average saturated voltage value '
                    + 'was used because the difference between left '
                    + 'and right saturated voltage values was outside the '
                    + 'tolerance of %.2E V.' % tol)
            warnings.warn(message, RuntimeWarning)
        if outside_tols['sat_I_diff'] == True:
            message = ('Average saturated current value '
                    + 'was used because the difference between left '
                    + 'and right saturated voltage values was outside the '
                    + 'tolerance of %.2E uA.' % tol)
            warnings.warn(message, RuntimeWarning)

        ax, fig = plt.subplots()
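        # note: as above, plt.subplots() returns (Figure, Axes), so `fig` here is the Axes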
        # The section below still needs to be cleaned, but it will work for now
###############################################################################
        x_ion = np.linspace(data[0,0], data[-1,0], num=50)
        x_e_ret = np.linspace(data[0,0], data[-1,0], num=50)
        x_e_sat = np.linspace(data[0,0], data[-1,0], num=50)
        y_ion = np.zeros_like(x_ion)
        y_e_ret = np.zeros_like(x_e_ret)
        y_e_sat = np.zeros_like(x_e_sat)

        i_sat = regression_data['i_sat']
        e_ret = regression_data['e_ret']
        e_sat = regression_data['e_sat']

        index = 0
        for val in x_ion:
            y_ion[index] = i_sat['slope']*val + i_sat['intercept']
            index += 1
        index = 0
        for val in x_e_ret:
            y_e_ret[index] = e_ret['slope']*val + e_ret['intercept']
            index += 1
        index = 0
        for val in x_e_sat:
            y_e_sat[index] = e_sat['slope']*val + e_sat['intercept']
            index += 1

        y_ion = np.array(y_ion)
        y_e_ret = np.array(y_e_ret)
        y_e_sat = np.array(y_e_sat)

        v_fine = np.linspace(data[0,0], data[-1,0], 300)
        t, c, k = splrep(
                data[:,0], data[:,1], s=0, k=3)
        I_func = BSpline(t, c, k, extrapolate=False)
        fig.plot(v_fine, I_func(v_fine), color='black',
                linestyle='dashed', linewidth=2)
        fig.scatter(
                data[:,0], data[:,1], color='black', s=10*(2**2))
        fig.plot(x_ion, y_ion, color='red', linewidth=2.0)
        fig.plot(x_e_ret, y_e_ret, color='magenta', linewidth=2.0)
        fig.plot(x_e_sat, y_e_sat, color='green', linewidth=2.0)

        # Construct linear regression equations

        if i_sat['intercept'] < 0:
            i_sat_intercept_negative = True
            i_sat_intercept_zero = False
            i_sat['intercept'] = np.absolute(i_sat['intercept'])
        else:
            i_sat_intercept_negative = False
            if round(i_sat['intercept'], 4) == 0.0000:
                i_sat_intercept_zero = True
            else:
                i_sat_intercept_zero = False


        if e_ret['intercept'] < 0:
            e_ret_intercept_negative = True
            e_ret['intercept'] = np.absolute(e_ret['intercept'])
        else:
            e_ret_intercept_negative = False
            if round(e_ret['intercept'], 4) == 0.0000:
                e_ret_intercept_zero = True
            else:
                e_ret_intercept_zero = False

        if e_sat['intercept'] < 0:
            e_sat_intercept_negative = True
            e_sat['intercept'] = np.absolute(e_sat['intercept'])
        else:
            e_sat_intercept_negative = False
            if round(e_sat['intercept'], 4) == 0.0000:
                e_sat_intercept_zero = True
            else:
                e_sat_intercept_zero = False


        s_ion_str = str("%.4f" % round(i_sat['slope'], 4))
        inter_ion_str = str("%.4f" % round(i_sat['intercept'], 4))

        s_e_ret_str = str("%.4f" % round(e_ret['slope'], 4))
        inter_e_ret_str = str("%.4f" % round(e_ret['intercept'], 4))

        s_e_sat_str = str("%.4f" % round(e_sat['slope'], 4))
        inter_e_sat_str = str("%.4f" % round(e_sat['intercept'], 4))


        str_ion = r'$I_{ion \,\,\, sat} = $' + s_ion_str
        if i_sat_intercept_negative == False:
            if i_sat_intercept_zero == False:
                str_ion += r'$\cdot V +$' + inter_ion_str
            else:
                str_ion += r'$\cdot V$'
        else:
            str_ion += r'$\cdot V -$' + inter_ion_str

        str_e_ret = r'$I_{e \,\,\, ret} = $' + s_e_ret_str
        if e_ret_intercept_negative == False:
            if e_ret_intercept_zero == False:
                str_e_ret += r'$\cdot V +$' + inter_e_ret_str
            else:
                str_e_ret += r'$\cdot V$'
        else:
            str_e_ret += r'$\cdot V -$' + inter_e_ret_str

        str_e_sat = r'$I_{e \,\,\, sat} = $' + s_e_sat_str
        if e_sat_intercept_negative == False:
            if e_sat_intercept_zero == False:
                str_e_sat += r'$\cdot V +$' + inter_e_sat_str
            else:
                str_e_sat += r'$\cdot V$'
        else:
            str_e_sat += r'$\cdot V -$' + inter_e_sat_str

        # Construct electron temp and number density output

        T_str = (r'$T_e$ $\approx$ '
                + str('%.2f' % round(electron_temp, 2)) + ' eV')
        n_e_str =  (r'$n_e$ $\approx$ '
                + '%.2E' % Decimal(str(electron_number_density))
                + r' $\mathrm{m}^{-3}$')

        # Construct legend
        h = []

        plot_labels = ['Measured Data',
                'Ion Saturation Regression',
                'Electron Retarding Regression',
                'Electron Saturation Regression']

        h.append(mpatches.Patch(color='black', label=plot_labels[0]))
        h.append(mpatches.Patch(color='red', label=plot_labels[1]))
        h.append(mpatches.Patch(color='magenta', label=plot_labels[2]))
        h.append(mpatches.Patch(color='green', label=plot_labels[3]))

        fig.legend(loc=2, borderaxespad=0, handles=h, prop={'size': 18})

  #############################################################################
        ax = plt.gca()

        # fig.text(0.55, 0.30, str_ion, transform=ax.transAxes)
        # fig.text(0.55, 0.25, str_e_ret, transform=ax.transAxes)
        # fig.text(0.55, 0.20, str_e_sat, transform=ax.transAxes)
        # fig.text(0.55, 0.10, T_str, transform=ax.transAxes)
        # fig.text(0.55, 0.5, n_e_str, transform=ax.transAxes)

        txt = (str_ion + "\n" + str_e_ret + "\n" + str_e_sat
            + "\n" + T_str + "\n" + n_e_str)

        props = dict(boxstyle='round', facecolor='white', alpha=0.5)

        fig.text(0.55, 0.25, txt, size=18,
            verticalalignment="top", horizontalalignment="left",
            multialignment="left", bbox=props,  transform=ax.transAxes)

        plt.rcParams.update({'font.size': 22})

        for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                ax.get_xticklabels() + ax.get_yticklabels()):
             item.set_fontsize(20)

        plt.xlabel('Voltage (V)')
        plt.ylabel(r'Peak Current ($\mu$A)')
        plt.title('Bias Voltage vs Peak Current')
        plt.minorticks_on()
        plt.grid(which='major', alpha=0.5)
        plt.grid(which='minor', alpha=0.2)
        plt.show()
Code example #10
class Fitting:
    """
    Fitting of the curve in the initial days to obtain the estimated values
    for day day march, 16th, 2020, of the pandemic.
    p: (tau, sigma, rho, delta, gamma1, gamma2).
    tyme_varying: definitions about beta and mu bspline (knots, number of parameters and order)
    hmax: max value Runge Kutta integration method. 
    """
    def __init__(
        self,
        p,
        time_varying,
        initial_day='2020-03-16',
        final_day='2020-07-15',
        hmax=0.15,
        init_cond={
            'x0': [0.8, 0.3, 0.00001, 0.00001, 0.00001],
            'bounds': [(0, 1), (0, 1), (0, 0.0001), (0, 0.0001), (0, 0.0001)]
        }):

        # parameters pre-determined
        self.p = np.array(p, dtype=np.float64)
        self.hmax = hmax
        self.init_cond = init_cond
        self.initial_day = initial_day
        self.final_day = final_day

        # Reading data
        ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(ROOT_DIR, "../data/covid_data_organized.csv")
        df = pd.read_csv(filename, index_col=0)
        self.T = df['confirmed'].loc[initial_day:final_day].to_numpy()
        self.D = df['deaths'].loc[initial_day:final_day].to_numpy()
        self.tf = len(self.T)

        # time-varying hyperparameters
        self.sbeta = time_varying['beta']['coefficients']
        self.order_beta = time_varying['beta']['bspline_order']
        self.smu = time_varying['mu']['coefficients']
        self.order_mu = time_varying['mu']['bspline_order']
        self.knots_beta = np.linspace(0, self.tf,
                                      self.sbeta + self.order_beta + 1)
        self.knots_mu = np.linspace(0, self.tf, self.smu + self.order_mu + 1)
        # define the time-varying parameters
        self.beta = BSpline(self.knots_beta, np.zeros(self.sbeta),
                            self.order_beta)
        self.mu = BSpline(self.knots_mu, np.zeros(self.smu), self.order_mu)

        # Calculate initial conditions
        print('Model SEIAQR for Covid-19')
        print('-------------------------')
        print('Estimating initial Conditions...')
        self.initial_conditions()
        print('Initiation done!')

    def derivative(self, x, t, alpha, beta_, mu_, tau, sigma, rho, delta,
                   gamma1, gamma2):
        """
        System of derivatives simplified.
        """

        beta = max(beta_(t), 0)
        mu = max(mu_(t), 0)

        dx = np.zeros(shape=(len(x), ))
        dx[4] = -beta * x[4] * (x[1] + x[2])
        dx[0] = -dx[4] - (rho * delta + tau) * x[0]
        dx[1] = tau * x[0] - (sigma + rho) * x[1]
        dx[2] = sigma * alpha * x[1] - (gamma1 + rho) * x[2]
        dx[5] = gamma1 * x[2] + gamma2 * x[3]
        dx[6] = mu * x[3]
        dx[7] = sigma * (1 - alpha) * x[1] + rho * (delta * x[0] + x[1] + x[2])
        dx[3] = dx[7] - gamma2 * x[3] - dx[6]

        return dx

    def integrate(self, theta, p, time=[]):
        """
        Integrate the system given a tuple of parameters.
        p holds the parameters estimated in the literature. time is a list that
        always starts with 0 and indicates the times at which to integrate.
        """
        if len(time) == 0:
            time = range(self.tf)
        self.beta = self.beta.construct_fast(self.knots_beta,
                                             theta[1:1 + self.sbeta],
                                             self.order_beta)
        self.mu = self.mu.construct_fast(self.knots_mu, theta[-self.smu:],
                                         self.order_mu)
        self.states = odeint(func=self.derivative,
                             y0=self.y0,
                             t=time,
                             args=(theta[0], self.beta, self.mu, *p),
                             hmax=self.hmax)
        return self.states

    def rt_calculation(self, theta):
        """
        Calculate the reproduction number based on the model.
        """
        S = self.states[:, 4]
        self.repro_number = np.zeros(shape=(2, self.tf))

        beta_ = self.beta.construct_fast(self.knots_beta,
                                         theta[1:1 + self.sbeta],
                                         self.order_beta)
        mu_ = self.mu.construct_fast(self.knots_mu, theta[-self.smu:],
                                     self.order_mu)
        alpha = theta[0]
        tau, sigma, rho, delta, gamma1, _ = self.p

        for t in range(self.tf):
            beta = max(beta_(t), 0)
            mu = max(mu_(t), 0)
            varphi = np.array([beta * tau, beta * tau * S[t]
                               ])  # difference between R0 and Rt
            varphi /= ((rho * delta + tau) * (sigma + rho))
            r0_rt = 1 / 2 * (varphi + np.sqrt(varphi**2 + varphi *
                                              (4 * sigma * alpha) /
                                              (rho + gamma1)))
            self.repro_number[:, t] = r0_rt

    def initial_conditions(self):
        """
        Estimate Initial Conditions
        """
        parameters = self.p[[0, 1, 4, 5]]
        model = FittingInitial(parameters, self.initial_day, self.hmax)
        E0, I0, A0, _, Q0, R0 = model.get_initial_values(
            self.init_cond['x0'], self.init_cond['bounds'])
        self.initial_phase = model.y
        T0 = self.T[0]
        D0 = self.D[0]
        S0 = 1 - E0 - I0 - A0 - Q0 - R0
        self.y0 = [E0, I0, A0, Q0 - D0, S0, R0, D0, T0]

    def objective(self, theta, psi):
        # theta = (alpha, beta_1, ..., beta_s, mu_1, ..., mu_r)
        integrate = self.integrate(theta, self.p)
        obj1 = (self.T - integrate[:, 7]) @ self.weights @ (self.T -
                                                            integrate[:, 7])
        obj2 = (self.D - integrate[:, 6]) @ self.weights @ (self.D -
                                                            integrate[:, 6])

        obj = 100 * (obj1 + psi * obj2)

        return obj

    def fit(self, psi, x0, bounds, algorithm='L-BFGS-B'):
        """
        Fits the model to the data and recovers the estimated parameters.
        """
        self.weights = np.array([[min(i, j) + 15 for i in range(self.tf)]
                                 for j in range(self.tf)])
        self.weights = np.linalg.inv(self.weights)

        print('Starting estimation!')
        t0 = time.time()
        res = minimize(fun=self.objective,
                       x0=x0,
                       method=algorithm,
                       bounds=bounds,
                       args=(psi, ))
        self.counter = time.time() - t0
        print('Estimation finished. It took {} seconds'.format(self.counter))

        curve = self.integrate(res.x, self.p)

        # Rt calculation
        self.rt_calculation(res.x)

        # Store important values
        self.obj = res.fun
        self.res = res
        self.theta = res.x
        self.psi = psi
        self.x0 = x0
        self.bounds = bounds
        self.algorithm = algorithm

        n = self.tf
        K = len(self.theta)

        # Estimate variances
        self.sigma2_1 = (self.T - curve[:, 7]) @ self.weights @ (
            self.T - curve[:, 7]) / (n - K)
        self.sigma2_2 = (self.D - curve[:, 6]) @ self.weights @ (
            self.D - curve[:, 6]) / (n - K)

        # Information Criterion
        common = n * np.log(self.obj / n)
        self.aic = common + 2 * K
        self.bic = common + np.log(n) * K
        self.aicc = common + 2 * K * n / (n - K - 1)

        return res.x

    def check_residuals(self):
        """
        Simple residual analysis for the fitting. It must be called after the
        function fit. 
        """
        diary_curves = self.integrate(self.theta, self.p)

        T = diary_curves[:, 7]
        D = diary_curves[:, 6]

        errorT = np.diff(self.T - T)
        errorD = np.diff(self.D - D)

        return errorT, errorD

    def correlation_matrix(self):
        """
        Calculate the correlation matrix of the estimated parameters. It must be called after the
        function fit.
        """
        def f(parameters, time, curve):
            theta = parameters[0:len(self.theta)]
            #p = parameters[len(self.theta):]
            return self.integrate(theta, self.p, [0, time])[1, curve]

        K = len(self.theta)  #+ len(self.p)
        J1 = np.zeros((self.tf, K))
        J2 = np.zeros((self.tf, K))
        parameters = self.theta  #np.concatenate([self.theta, self.p])
        for i in range(self.tf):
            J1[i, :] = approx_fprime(parameters, f,
                                     np.ones_like(parameters) * 1e-5, i, 7)
            J2[i, :] = approx_fprime(parameters, f,
                                     np.ones_like(parameters) * 1e-5, i, 6)

        # Fisher Information matrix
        FIM = J1.transpose() @ self.weights @ J1 / self.sigma2_1 + J2.transpose(
        ) @ self.weights @ J2 / self.sigma2_2
        # Covariance matrix
        C = np.linalg.inv(FIM)
        # Correlation matrix
        R = [[C[i, j] / np.sqrt(C[i, i] * C[j, j]) for i in range(K)]
             for j in range(K)]

        return np.array(R)

    def _get_exp(self, pathname):

        with open(pathname, 'r') as f:
            line = f.readline()
            while line != '':
                lineold = line
                line = f.readline()
        exp = lineold[:lineold.find(';')]
        exp = 1 if exp == 'exp' else int(exp) + 1

        return exp

    def save_experiment(self, objective_function):
        """
        Save information about the experiment. 
        objective_function: name given to compare, like quadratic and divided.
        """
        pathname = '../experiments/' + objective_function + '.csv'
        if not os.path.exists(pathname):
            with open(pathname, 'w') as f:
                f.write(
                    'exp;tau;sigma;rho;delta;gamma1;gamma2;sbeta;order_beta;smu;order_mu;'
                )
                f.write('initial_day;final_day;hmax;psi;x0;bounds;algorithm;')
                f.write('E0;I0;A0;Q0;R0;D0;T0;alpha;beta;mu;obj;time')
                f.write('\n')
        else:
            with open(pathname, 'a') as f:
                exp = self._get_exp(pathname)
                tau, sigma, rho, delta, gamma1, gamma2 = self.p

                info = [
                    exp, tau, sigma, rho, delta, gamma1, gamma2, self.sbeta,
                    self.order_beta, self.smu, self.order_mu
                ]
                info2 = [self.initial_day, self.final_day, self.hmax, self.psi]

                f.write(';'.join(map(str, info)))
                f.write(';')
                f.write(';'.join(map(str, info2)))
                f.write(';')
                f.write(str(self.x0))
                f.write(';')
                f.write(str(self.bounds))
                f.write(';')
                f.write(self.algorithm + ';')
                f.write(';'.join(map(str, self.y0)))
                f.write(str(self.theta[0]) + ';')
                f.write(str(self.theta[1:1 + self.sbeta]))
                f.write(';')
                f.write(str(self.theta[-self.smu:]))
                f.write(';')
                f.write(str(self.obj) + ';')
                f.write(str(self.counter))
                f.write('\n')


# if __name__ == '__main__':

#     p = [0.3125, 0.5, 2e-5, 1, 1/9.5, 1/18]
#     beta = {'sbeta': 4, 'bspline_order': 3}
#     model = Fitting(p, beta)

#     psi = 0
#     bounds = [(0.7,0.95), (0.05,0.3), (0.05,0.3), (0.05,0.3), (0.05,0.3), (0, 0.2)] # bound the parameters
#     x0 = [0.9, 0.1, 0.1, 0.1, 0.1, 0.12/14]  # initial guess
#     theta = model.fit(psi, x0, bounds)
Code example #11
#take log of V and S
v = [math.log(i[0]) for i in glmv]
s = [math.log(i[1]) for i in glmv]

#reverse order for Bspline()
v.reverse()
s.reverse() 

#convert to array for splrep
x = np.array(s)
y = np.array(v)

#find vector knots for Bspline
t, c, k = interpolate.splrep(x=x, y=y, s= 0, k=4)
spl = BSpline(t, c, k)

#plot the Bspline
print('''\
    t: {}
    c: {}
    k: {}
    '''.format(t, c, k))
N = 100
xmin, xmax = x.min(), x.max()
xx = np.linspace(xmin, xmax, N)
spline = interpolate.BSpline(t, c, k, extrapolate=False)

plt.plot(x, y, 'bo', label='Original points')
plt.plot(xx, spline(xx), 'r', label='BSpline')
plt.grid()
Code example #12
def interpolate(x, y, n=3):
    '''Return an order-n spline interpolation function fit to array x and array y.'''
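    # Note: BSpline(t, c, k) interprets x as the knot vector and y as the
    # coefficients, not as data points; to interpolate (x, y) samples, build
    # the (t, c, k) triple with scipy.interpolate.splrep first.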
    return BSpline(x, y, n, extrapolate=True)
Code example #13
# x=[i for i in range(1,len(score)+1)]
#
# # fig = plt.figure()
# # set the X-axis label
# # plt.xlabel('X')
# # set the Y-axis label
# plt.ylabel('Y')
# plt.scatter(x,score,c='orange',marker = 'o',alpha=0.6,label='theta = 0.3')
# # set the legend
# plt.legend('theta')
# # show the plot
# # plt.show()

from datetime import datetime
import numpy as np
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
from scipy.interpolate import BSpline, splrep

# generate the x- and y-axis data
xs = [datetime.strptime(d, '%Y/%m').date() for d in times]
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y/%m'))
# plt.gca().xaxis.set_major_locator(mdates.DayLocator())

xs_num = mdates.date2num(xs)
xnew = np.linspace(xs_num.min(), xs_num.max(), 300)  # 300 is the number of points to make between the min and max

# build an interpolating cubic B-spline through (xs, score3) and evaluate it on the fine grid
t, c, k = splrep(xs_num, score3, k=3)
p3 = BSpline(t, c, k)

plt.plot(mdates.num2date(xnew), p3(xnew), label='0.3')
plt.plot(xs, score5)
plt.plot(xs, score7)
plt.gcf().autofmt_xdate()  # auto-rotate the date tick labels
plt.show()
Code example #14
def calc_gamma(mp, shotdat, imp0, spdeg, knots):
    """
	Calculate correction value "gamma" in the observation eqs.

	Parameters
	----------
	mp : ndarray
		complete model parameter vector.
	shotdat : DataFrame
		GNSS-A shot dataset.
	imp0 : ndarray (len=5)
		Indices where the type of model parameters change.
	spdeg : int
		spline degree (=3).
	knots : list of ndarray (len=5)
		B-spline knots for each component in "gamma".

	Returns
	-------
	gamma : ndarray
		Values of "gamma". Note that scale facter is not applied.
	a : 2-d list of ndarray
		[a0[<alpha>], a1[<alpha>]] :: a[<alpha>] at transmit/receive time.
		<alpha> corresponds to <0>, <1E>, <1N>, <2E>, <2N>.
	"""

    a0 = []
    a1 = []
    for k, kn in enumerate(knots):
        if len(kn) == 0:
            a0.append(0.)
            a1.append(0.)
            continue
        ct = mp[imp0[k]:imp0[k + 1]]
        bs = BSpline(kn, ct, spdeg, extrapolate=False)
        a0.append(bs(shotdat.ST.values))
        a1.append(bs(shotdat.RT.values))

    ls = 1000.  # m/s/m to m/s/km order for gradient

    de0 = shotdat.de0.values
    de1 = shotdat.de1.values
    dn0 = shotdat.dn0.values
    dn1 = shotdat.dn1.values
    mte = shotdat.mtde.values
    mtn = shotdat.mtdn.values

    gamma0_0 = a0[0]
    gamma0_1 = (a0[1] * de0 + a0[2] * dn0) / ls
    gamma0_2 = (a0[3] * mte + a0[4] * mtn) / ls

    gamma1_0 = a1[0]
    gamma1_1 = (a1[1] * de1 + a1[2] * dn1) / ls
    gamma1_2 = (a1[3] * mte + a1[4] * mtn) / ls

    gamma0 = gamma0_0 + gamma0_1 + gamma0_2
    gamma1 = gamma1_0 + gamma1_1 + gamma1_2

    gamma = (gamma0 + gamma1) / 2.
    a = [a0, a1]

    return gamma, a
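
# For reference, the correction assembled above is, per shot,
#   gamma_j = a_j[0] + (a_j[1]*dE_j + a_j[2]*dN_j)/ls + (a_j[3]*mtE + a_j[4]*mtN)/ls
# for j = 0 (transmit) and j = 1 (receive), and the returned gamma is the average
# of the transmit- and receive-side values.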
Code example #15
y=y[inz]


# iframe1 = np.where(np.diff(y)>0)[0][0]
# iframe2 = iframe+iframe1-2
# x, y = data[iframe2:, 0], data[iframe2:, 1]

iframe
order = 1
dx = 0.001
# dx = np.diff(dx).min()/2

x0, xf = x.min(), x.max()
xx = np.arange(x0, xf, dx)
# scipy.interpolate.interp1d(x, y, kind='linear', axis=-1, copy=True, bounds_error=None, fill_value=nan, assume_sorted=False)
interp = BSpline(x, y, order)
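# note: BSpline(t, c, k) takes a knot vector and coefficients, so x is used here as
# the knot vector and y as the coefficients, not as data points to interpolate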
yy = interp(xx)
# dy = interp(xx,1)
# ddy=interp(xx,2)



""" iymax, iymin = np.where(dy==dy.max())[0], np.where(dy==dy.min())[0]
plt.figure()
plt.plot(xx, np.sign(dy))
plt.show()
 """
inz = yy.nonzero()
x0=xx[inz]
y0=yy[inz]
Code example #16
def emisFcn(X, tck):
    sp = BSpline(tck[0], tck[1], tck[2])
    return np.exp(-np.abs(sp(X)))
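
# A hypothetical call sketch (names and data made up); the tck triple passed to
# emisFcn is the (t, c, k) output of scipy.interpolate.splrep:
import numpy as np
from scipy.interpolate import splrep

xs = np.linspace(0., 5., 30)
tck = splrep(xs, np.log1p(xs), k=3)            # spline representation of a smooth sample curve
em = emisFcn(np.linspace(0.5, 4.5, 10), tck)   # exp(-|spline|) evaluated at 10 points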
Code example #17
                    args=(10, knot_vector1),
                    order=3)
 tmp12 = derivative(func=Ni3,
                    x0=u1,
                    dx=h,
                    n=2,
                    args=(11, knot_vector1),
                    order=3)
 tmp13 = derivative(func=Ni3,
                    x0=u1,
                    dx=h,
                    n=2,
                    args=(12, knot_vector1),
                    order=3)
 matrix[-1, -3] = tmp11
 matrix[-1, -2] = tmp12
 matrix[-1, -1] = tmp13
 D = np.array(points)
 D = np.concatenate([[(0, 0)], D, [(0, 0)]], axis=0)
 P = np.linalg.solve(a=matrix, b=D)
 P = np.round(a=P, decimals=8)
 print(P)
 spl = BSpline(t=knot_vector1, c=P, k=k)
 fig, ax = plt.subplots()
 xx = np.linspace(1.5, 4.5, 50)
 ax.plot(xx, spl(xx), 'b-', lw=4, alpha=0.7, label='BSpline')
 ax.grid(True)
 ax.legend(loc='best')
 plt.show()
 a = 1
 ###########################################
Code example #18
import matplotlib.pyplot as plt
from scipy.interpolate import splprep, BSpline

if __name__ == '__main__':
    # Initialize the spline
    c = np.array([100, 100])
    R = 50
    n = 10
    phi = np.linspace(0, 2 * np.pi, n)
    x_init = c[0] + R * np.cos(phi)
    y_init = c[1] + R * np.sin(phi)
    x_init[n - 1] = x_init[0]
    y_init[n - 1] = y_init[0]
    dat = np.array([x_init, y_init])
    tck, _ = splprep(dat, s=0, per=0, k=3)
    knots = tck[0]
    ctrl_pts = np.transpose(tck[1])
    deg = tck[2]
    spl = BSpline(knots, ctrl_pts, deg)
    fig, ax = plt.subplots()
    # Compute the value of each basis function
    for knot_ind in np.arange(len(knots)):
        knots_basis = spl.t[knot_ind:knot_ind + deg + 2]
        b = spl.basis_element(knots_basis, extrapolate=False)
        x = np.linspace(knots_basis[0], knots_basis[-1], 50)
        ax.plot(x, b(x), lw=3)
        ax.plot([knots_basis[0]] * 2, [0, 1], 'k--')
    plt.xlabel('Noeud')
    plt.title('Valeur de fonction de base en fonction de u')
    plt.show()
Code example #19
        dx = dx[:nx]
        # x = np.cumsum(dx)
        print(nx, x.shape)
        # x0=x[0]
        # ymax=y.max()
        # y /=ymax
        # x-=x[0]

        # inz=y.nonzero()[0]
        # x=x[inz]
        # y=y[inz]
        # y /=y.max()
        # x-=x[0]

        # plt.plot(xx,yy);plt.show()
        interp = BSpline(x, y, splorder)
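        # note: as in BSpline(t, c, k), x is treated here as the knot vector and y as
        # the coefficients, not as data points to be interpolated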
        nx = x.shape[0]
        icycle = y.shape[0] // npeaks
        icycle
        icyclef = y.shape[0] / npeaks
        print(icycle, '\n', icyclef)
        nx2 = nx * dnx
        dx2 = dx.min() / dnx

        # nx3 = int(nx*icycle / np.gcd(nx, icycle))
        # print(nx2,nx3)
        # nx2= nx3
        x[0]
        xx = np.linspace(x[0], dx2 * nx2, nx2, endpoint=True)
        print('xx\n', xx)
        yy = interp(xx)
Code example #20
def roc_paint(roc_path = '.', roc_files = None, filter = True, output_file = None):
    if roc_path is not None and roc_files is not None:
        for rf in range(len(roc_files)):
            roc_files[rf] = roc_path + '/' + roc_files[rf]
    elif roc_path is not None:
        roc_files = glob.glob(roc_path+"/*.npy")
    lines = []
    names = []
    for rf in range(len(roc_files)):
        roc_file = roc_files[rf]
        name = os.path.splitext(os.path.basename(roc_file))[0]
        roc = np.load(roc_file)
        '''
        start = len(roc[0][roc[1]==roc[1][0]])
        TPs = [roc[0][0], roc[0][start-1]]
        FPs = [roc[1][0], roc[1][start-1]]
        for r in range(start, roc.shape[1]-1):
            if roc[0][r] != TPs[-1] and roc[1][r] != FPs[-1]:
                TPs.append(roc[0][r])
                FPs.append(roc[1][r])
        TPs.append(roc[0][-1])
        FPs.append(roc[1][-1])
        xrange = [i for i in range(len(FPs))]
        '''
        filtering = filter
        while filtering:
            filtering = False
            r = 1
            while r < roc.shape[1]-1:
                if roc[1][r]!=roc[1][r-1] and (roc[0][r]-roc[0][r-1])/(roc[1][r]-roc[1][r-1])<(roc[0][r+1]-roc[0][r])/(roc[1][r+1]-roc[1][r]):
                    roc = np.delete(roc, r, axis=1)
                    filtering = True
                else:
                    r += 1
        xstart = len(roc[0][roc[1] == roc[1][0]]) - 1
        xnew1 = np.linspace(roc[1][0], roc[1][-1], 80)
        xnew2 = np.linspace(roc[1][0], roc[1][-1], 20)
        ynew = np.linspace(roc[0][0], roc[0][-1], 40)
        #ysmooth = interp1d(roc[1][xstart:], roc[0][xstart:], 'cubic')(xnew)
        ysmooth1 = interp1d(roc[1][xstart:], roc[0][xstart:], 'linear')(xnew1)
        ysmooth2 = BSpline(xnew1, ysmooth1, 4)(xnew2)
        xsmooth = interp1d(roc[0], roc[1], 'linear')(ynew)
        xfusion = xsmooth.copy()
        yfusion = ynew.copy()
        xsidx = 0
        xi = 0
        #while xi<len(xfusion) and xsidx<len(xnew):
        #    if xfusion[xi]>=xnew[xsidx] and yfusion[xi]>=ysmooth[xsidx]:
        #        xfusion = np.insert(xfusion, xi, xnew[xsidx])
        #        yfusion = np.insert(yfusion, xi, ysmooth[xsidx])
        #        xsidx += 1
        #    xi += 1
        #xfusion = np.concatenate((xfusion, xnew[xsidx:]), axis=0)
        #yfusion = np.concatenate((yfusion, ysmooth[xsidx:]), axis=0)
        line, =plt.plot(roc[1], roc[0], alpha=0.8)
        #line, = plt.plot(xfusion, yfusion)
        #line, = plt.plot(xnew1, ysmooth1)
        #line, = plt.plot(xnew2, ysmooth2)
        #plt.plot(ynew, xsmooth)
        lines.append(line)
        names.append(name)
    plt.legend(lines, names, loc='lower right')
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.grid(False)
    if output_file is None:
        plt.show()
    else:
        plt.savefig(output_file, format="pdf")
        plt.close()
Code example #21
 def actualise_splines(self):
     """
     :action: Update the splines re-creating them from the new coefficients
     """
     for i in range(self.attitude_splines.shape[0]):
         self.attitude_splines[i] = BSpline(self.att_knots, self.att_coeffs[i], k=self.k)
Code example #22
    def fit(self, X, y=None):
        """Compute knot positions of splines.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data.

        y : None
            Ignored.

        Returns
        -------
        self : object
            Fitted transformer.
        """
        X = self._validate_data(
            X,
            reset=True,
            accept_sparse=False,
            ensure_min_samples=2,
            ensure_2d=True,
        )
        n_samples, n_features = X.shape

        if not (isinstance(self.degree, numbers.Integral)
                and self.degree >= 0):
            raise ValueError(
                f"degree must be a non-negative integer, got {self.degree}.")

        if isinstance(self.knots, str) and self.knots in [
                "uniform",
                "quantile",
        ]:
            if not (isinstance(self.n_knots, numbers.Integral)
                    and self.n_knots >= 2):
                raise ValueError(
                    f"n_knots must be a positive integer >= 2, got: {self.n_knots}"
                )

            base_knots = self._get_base_knot_positions(X,
                                                       n_knots=self.n_knots,
                                                       knots=self.knots)
        else:
            base_knots = check_array(self.knots, dtype=np.float64)
            if base_knots.shape[0] < 2:
                raise ValueError(
                    "Number of knots, knots.shape[0], must be >= 2.")
            elif base_knots.shape[1] != n_features:
                raise ValueError("knots.shape[1] == n_features is violated.")
            elif not np.all(np.diff(base_knots, axis=0) > 0):
                raise ValueError("knots must be sorted without duplicates.")

        if self.extrapolation not in (
                "error",
                "constant",
                "linear",
                "continue",
                "periodic",
        ):
            raise ValueError("extrapolation must be one of 'error', "
                             "'constant', 'linear', 'continue' or 'periodic'.")

        if not isinstance(self.include_bias, (bool, np.bool_)):
            raise ValueError("include_bias must be bool.")

        # number of knots for base interval
        n_knots = base_knots.shape[0]

        if self.extrapolation == "periodic" and n_knots <= self.degree:
            raise ValueError(
                "Periodic splines require degree < n_knots. Got n_knots="
                f"{n_knots} and degree={self.degree}.")

        # number of splines basis functions
        if self.extrapolation != "periodic":
            n_splines = n_knots + self.degree - 1
        else:
            # periodic splines have self.degree less degrees of freedom
            n_splines = n_knots - 1

        degree = self.degree
        n_out = n_features * n_splines
        # We have to add degree number of knots below, and degree number of
        # knots above the base knots in order to make the spline basis complete.
        if self.extrapolation == "periodic":
            # For periodic splines the spacing of the first / last degree knots
            # needs to be a continuation of the spacing of the last / first
            # base knots.
            period = base_knots[-1] - base_knots[0]
            knots = np.r_[base_knots[-(degree + 1):-1] - period, base_knots,
                          base_knots[1:(degree + 1)] + period, ]

        else:
            # Eilers & Marx in "Flexible smoothing with B-splines and
            # penalties" https://doi.org/10.1214/ss/1038425655 advice
            # against repeating first and last knot several times, which
            # would have inferior behaviour at boundaries if combined with
            # a penalty (hence P-Spline). We follow this advice even if our
            # splines are unpenalized. Meaning we do not:
            # knots = np.r_[
            #     np.tile(base_knots.min(axis=0), reps=[degree, 1]),
            #     base_knots,
            #     np.tile(base_knots.max(axis=0), reps=[degree, 1])
            # ]
            # Instead, we reuse the distance of the 2 first/last knots.
            dist_min = base_knots[1] - base_knots[0]
            dist_max = base_knots[-1] - base_knots[-2]

            knots = np.r_[np.linspace(
                base_knots[0] - degree * dist_min,
                base_knots[0] - dist_min,
                num=degree,
            ), base_knots,
                          np.linspace(
                              base_knots[-1] + dist_max,
                              base_knots[-1] + degree * dist_max,
                              num=degree,
                          ), ]

        # With a diagonal coefficient matrix, we get back the spline basis
        # elements, i.e. the design matrix of the spline.
        # Note, BSpline appreciates C-contiguous float64 arrays as c=coef.
        coef = np.eye(n_splines, dtype=np.float64)
        if self.extrapolation == "periodic":
            coef = np.concatenate((coef, coef[:degree, :]))

        extrapolate = self.extrapolation in ["periodic", "continue"]

        bsplines = [
            BSpline.construct_fast(knots[:, i],
                                   coef,
                                   self.degree,
                                   extrapolate=extrapolate)
            for i in range(n_features)
        ]
        self.bsplines_ = bsplines

        self.n_features_out_ = n_out - n_features * (1 - self.include_bias)
        return self
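
# For context, a minimal usage sketch assuming this fit() belongs to scikit-learn's
# SplineTransformer (the data below are made up):
import numpy as np
from sklearn.preprocessing import SplineTransformer

X = np.linspace(0., 10., 50).reshape(-1, 1)
st = SplineTransformer(n_knots=5, degree=3, knots="uniform", extrapolation="constant")
Xt = st.fit_transform(X)
# Xt has n_knots + degree - 1 = 7 columns per feature (include_bias=True by default)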
Code example #23
    def __new__(cls, x, y, order=None, s=None, w=None, bbox=[None]*2, k=3,
                ext=0, check_finite=False, outlier_func=sigma_clip,
                niter=3, grow=0, debug=False, **outlier_kwargs):

        # Decide what sort of spline object we're making
        spline_kwargs = {'bbox': bbox, 'k': k, 'ext': ext,
                         'check_finite': check_finite}
        if order is None:
            cls_ = UnivariateSpline
            spline_args = ()
            spline_kwargs['s'] = s
        elif s is None:
            cls_ = LSQUnivariateSpline
        else:
            raise ValueError("Both t and s have been specified")

        # Both spline classes require sorted x, so do that here. We also
        # require unique x values, so we're going to deal with duplicates by
        # making duplicated values slightly larger. But we have to do this
        # iteratively in case of a basket-case scenario like (1, 1, 1, 1+eps, 2)
        # which would become (1, 1+eps, 1+2*eps, 1+eps, 2), which still has
        # duplicates and isn't sorted!
        # I can't think of any better way to cope with this, other than write
        # least-squares spline-fitting code that handles duplicates from scratch
        epsf = np.finfo(float).eps

        orig_mask = np.zeros(y.shape, dtype=bool)
        if isinstance(y, np.ma.masked_array):
            if y.mask is not np.ma.nomask:
                orig_mask = y.mask.astype(bool)
            y = y.data

        if w is not None:
            orig_mask |= (w == 0)

        if debug:
            print(y)
            print(orig_mask)

        iter = 0
        full_mask = orig_mask  # Will include pixels masked because of "grow"
        while iter < niter+1:
            last_mask = full_mask
            x_to_fit = x.astype(float)

            if order is not None:
                # Determine actual order to apply based on fraction of unmasked
                # pixels, and unmask everything if there are too few good pixels
                this_order = int(order * (1 - np.sum(full_mask) / len(full_mask)) + 0.5)
                if this_order == 0:
                    full_mask = np.zeros(x.shape, dtype=bool)
                    if w is not None and not all(w == 0):
                        full_mask |= (w == 0)
                    this_order = int(order * (1 - np.sum(full_mask) / len(full_mask)) + 0.5)
                    if debug:
                        print("FULL MASK", full_mask)

            xgood = x_to_fit[~full_mask]
            while True:
                xunique, indices = np.unique(xgood, return_index=True)
                if len(indices) == len(xgood):
                    # All unique x values so continue
                    break
                if order is None:
                    raise ValueError("Must specify spline order when there are "
                                     "duplicate x values")
                for i in range(len(xgood)):
                    if not (last_mask[i] or i in indices):
                        xgood[i] *= (1.0 + epsf)

            # Space knots equally based on density of unique x values
            if order is not None:
                knots = [xunique[int(xx+0.5)]
                         for xx in np.linspace(0, len(xunique)-1, this_order+1)[1:-1]]
                spline_args = (knots,)
                if debug:
                    print ("KNOTS", knots)

            sort_indices = np.argsort(xgood)
            # Create appropriate spline object using current mask
            try:
                spline = cls_(xgood[sort_indices], y[~full_mask][sort_indices],
                              *spline_args, w=None if w is None else w[~full_mask][sort_indices],
                              **spline_kwargs)
            except ValueError as e:
                if this_order == 0:
                    avg_y = np.average(y[~full_mask],
                                       weights=None if w is None else w[~full_mask])
                    spline = lambda xx: avg_y
                else:
                    raise e
            spline_y = spline(x)
            #masked_residuals = outlier_func(spline_y - masked_y, **outlier_kwargs)
            #mask = masked_residuals.mask

            # When sigma-clipping, only remove the originally-masked points.
            # Note that this differs from the astropy.modeling code because
            # the sigma-clipping and spline-fitting are done independently here.
            d, mask, v = NDStacker.sigclip(spline_y-y, mask=orig_mask, variance=None,
                                           **outlier_kwargs)
            if grow > 0:
                maskarray = np.zeros((grow * 2 + 1, len(y)), dtype=bool)
                for i in range(-grow, grow + 1):
                    mx1 = max(i, 0)
                    mx2 = min(len(y), len(y) + i)
                    maskarray[grow + i, mx1:mx2] = mask[:mx2 - mx1]
                grow_mask = np.logical_or.reduce(maskarray, axis=0)
                full_mask = np.logical_or(mask, grow_mask)
            else:
                full_mask = mask.astype(bool)

            # Check if the mask is unchanged
            if not np.logical_or.reduce(last_mask ^ full_mask):
                break
            iter += 1

        # Create a standard BSpline object
        try:
            bspline = BSpline(*spline._eval_args)
        except AttributeError:
            # Create a spline object that's just a constant
            bspline = BSpline(np.r_[(x[0],)*4, (x[-1],)*4],
                              np.r_[(spline(0),)*4, (0.,)*4], 3)
        # Attach the mask and model (may be useful)
        bspline.mask = full_mask
        bspline.data = spline_y
        return bspline
Code example #24
    def get_price(self, num_dt: int, num_dx: int, center: float,
                  width: float) -> float:
        """
        :param num_dt: represents number of discrete time steps
        :param num_dx: represents number of discrete state-space steps
        (on each side of the center)
        :param center: represents the center of the state space grid. For
        the case of lognormal == True, it should be Mean[log(x_{expiry})].
        For the case of lognormal == False, it should be Mean[x_{expiry}].
        :param width: represents the width of the state space grid. For the
        case of lognormal == True, it should be a multiple of
        Stdev[log(x_{expiry})].
        :return: the price of the American option (this is the discounted
        expected payoff at time 0 at the current stock price).
        """
        dt = self.expiry / num_dt
        x_pts = 2 * num_dx + 1
        lsp = np.linspace(center - width, center + width, x_pts)
        prices = np.exp(lsp) if self.lognormal else lsp
        res = np.empty([num_dt, x_pts])
        res[-1, :] = [max(self.payoff(self.expiry, p), 0.) for p in prices]
        sample_points = 201

        final = [(p, max(self.payoff(self.expiry, p), 0.)) for p in prices]
        ex_boundary = [max(p for p, e in final if e > 0)]

        for i in range(num_dt - 2, -1, -1):
            t = (i + 1) * dt
            knots, coeffs, order = splrep(prices, res[i + 1, :], k=3)
            spline_func = BSpline(knots, coeffs, order)
            disc = np.exp(self.ir(t) - self.ir(t + dt))
            stprcs = []
            cp = []
            ep = []
            for j in range(x_pts):
                m, v = get_future_price_mean_var(prices[j], t, dt,
                                                 self.lognormal, self.ir,
                                                 self.isig)
                stdev = np.sqrt(v)
                norm_dist = norm(loc=m, scale=stdev)

                # noinspection PyShadowingNames
                def integr_func(x: float,
                                spline_func=spline_func,
                                norm_dist=norm_dist) -> float:
                    val = np.exp(x) if self.lognormal else x
                    return max(spline_func(val), 0.) * norm_dist.pdf(x)

                low, high = (m - 4 * stdev, m + 4 * stdev)
                disc_exp_payoff = disc * trapz(
                    np.vectorize(integr_func)(np.linspace(
                        low, high, sample_points)),
                    dx=(high - low) /
                    (sample_points - 1)) / (norm_dist.cdf(high) -
                                            norm_dist.cdf(low))
                if prices[j] < 100:
                    stprcs.append(prices[j])
                    cp.append(disc_exp_payoff)
                    ep.append(max(self.payoff(t, prices[j]), 0.))
                res[i, j] = max(self.payoff(t, prices[j]), disc_exp_payoff)

            ex_boundary.append(
                max(p for p, c, e in zip(stprcs, cp, ep) if e > c))

            # if i == int(num_dt / 10) or i == num_dt - int(num_dt / 10) \
            #         or i == int(num_dt / 2):
            #     # print(list(zip(stprcs, cp, ep)))
            #     plt.title("Grid Time = %.3f" % t)
            #     plt.plot(stprcs, cp, 'r', stprcs, ep, 'b')
            #     plt.show()

        # plt.plot([t * dt for t in range(1, num_dt + 1)], ex_boundary[::-1])
        # plt.title("Grid Boundary")
        # plt.savefig(str(Path.home()) + "/Downloads/GridBoundary.png")

        knots, coeffs, order = splrep(prices, res[0, :], k=3)
        spline_func = BSpline(knots, coeffs, order)
        disc = np.exp(-self.ir(dt))
        m, v = get_future_price_mean_var(self.spot_price, 0., dt,
                                         self.lognormal, self.ir, self.isig)
        stdev = np.sqrt(v)
        norm_dist = norm(loc=m, scale=stdev)

        # noinspection PyShadowingNames
        def integr_func0(x: float,
                         spline_func=spline_func,
                         norm_dist=norm_dist) -> float:
            val = np.exp(x) if self.lognormal else x
            return max(spline_func(val), 0.) * norm_dist.pdf(x)

        low, high = (m - 4 * stdev, m + 4 * stdev)
        disc_exp_payoff = disc * trapz(
            np.vectorize(integr_func0)(np.linspace(low, high, sample_points)),
            dx=(high - low) /
            (sample_points - 1)) / (norm_dist.cdf(high) - norm_dist.cdf(low))
        return max(self.payoff(0., self.spot_price), disc_exp_payoff)
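
The core of the backward induction in get_price() is: interpolate the next step's option values with a cubic spline, then take the discounted expectation of the floored continuation value under a normal transition density, renormalised over the truncated integration range. Below is a stripped-down sketch of that single step for the non-lognormal case; continuation_value and its arguments are illustrative names, not part of the class above.

import numpy as np
from scipy.integrate import trapezoid
from scipy.interpolate import BSpline, splrep
from scipy.stats import norm

def continuation_value(prices, next_values, mean, stdev, discount, n_pts=201):
    """Discounted expectation of the next-step value under a normal density."""
    spline = BSpline(*splrep(prices, next_values, k=3))    # interpolate V(t + dt, .)
    grid = np.linspace(mean - 4 * stdev, mean + 4 * stdev, n_pts)
    integrand = np.maximum(spline(grid), 0.0) * norm.pdf(grid, mean, stdev)
    mass = norm.cdf(grid[-1], mean, stdev) - norm.cdf(grid[0], mean, stdev)
    return discount * trapezoid(integrand, grid) / mass    # renormalise the truncation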
Code example #25
# Construct a cubic b-spline:

from scipy.interpolate import BSpline
b = BSpline.basis_element([0, 1, 2, 3, 4])
k = b.k
b.t[k:-k]
# array([ 0.,  1.,  2.,  3.,  4.])
k
# 3

# Construct a second order b-spline on ``[0, 1, 1, 2]``, and compare
# to its explicit form:

t = [-1, 0, 1, 1, 2]
b = BSpline.basis_element(t[1:])


def f(x):
    return np.where(x < 1, x * x, (2. - x)**2)


import matplotlib.pyplot as plt
fig, ax = plt.subplots()
x = np.linspace(0, 2, 51)
ax.plot(x, b(x), 'g', lw=3)
ax.plot(x, f(x), 'r', lw=8, alpha=0.4)
ax.grid(True)
plt.show()
Code example #26
    def evaluate(self, x_values_seq: Iterable[X]) -> np.ndarray:
        spline_func: Callable[[Sequence[float]], np.ndarray] = BSpline(
            self.knots, self.coeffs, self.degree
        )
        return spline_func(self.get_feature_values(x_values_seq))
Code example #27
    return sigma_bar

def Delta_Sigma_model(m, c, r_mpc, Lz):
    return (Sigma_nfw_bar(m, c, r_mpc) - Sigma_nfw(m, c, r_mpc)) + (Sigma_nfw_bar(m, c, r_mpc) - Sigma_nfw(m, c, r_mpc)) * Sigma_nfw(m, c, r_mpc) * Lz


################################ off-centered term ##################################
perfil_off_sigma = []
Profs = Table.read("Profiles_lookup_table.fits")
xi = np.logspace(-1,1,len(Profs))
radial_x_bins = np.logspace(-2,2,500)
for i in range(len(Profs)):
    perfil_off_sigma.append(list(Profs[i]))
perfil_off_sigma = np.array(perfil_off_sigma)
print("Lookup table loaded!")
Delta_Sigma_NFW_off_x = BSpline(xi,radial_x_bins,perfil_off_sigma,s=0)
del perfil_off_sigma, xi, radial_x_bins


def sigma_off(m, c, s_off, r_mpc):
    r = np.power(((m * 3) / (800 * np.pi * rho_c_cl_mpc)), (1 / 3))
    r_s = r/c
    fact = 2*r_s*delta_c(c)*rho_c_cl_mpc
    xi = s_off/r_s
    X = r_mpc/r_s

    return fact*Delta_Sigma_NFW_off_x(xi, X)[0]


############################### likelihood ################################
if stack == True:
Code example #28
def _make_random_spline(n=35, k=3):
    np.random.seed(123)
    t = np.sort(np.random.random(n + k + 1))
    c = np.random.random(n)
    return BSpline.construct_fast(t, c, k)
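
BSpline.construct_fast builds the spline without validating its inputs, so the helper above relies on t already being sorted and correctly sized. A quick check (arbitrary values) that the result evaluates like a normally constructed BSpline:

import numpy as np
from scipy.interpolate import BSpline

b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k - 1], 7)        # stay on the base interval
assert np.allclose(b(xx), BSpline(t, c, k)(xx))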
Code example #29
File: astromodels.py Project: DBerke/DRAGONS
def table_to_model(table):
    """
    Convert a Table instance, as created by model_to_table(), back into a
    callable function. Some backward compatibility has been introduced, so
    the domain can be specified in the Table, rather than the meta, and a
    Chebyshev1D model will be assumed if not found in the meta.

    Parameters
    ----------
    table : `~astropy.table.Table` or `~astropy.table.Row`
        Table describing the model

    Returns
    -------
    callable : either a `~astropy.modeling.core.Model` or a
               `~scipy.interpolate.BSpline` instance
    """
    meta = table.meta["header"]
    model_class = meta.get("MODEL", "Chebyshev1D")
    try:
        cls = getattr(models, model_class)
    except AttributeError:  # it's a spline
        k = int(model_class[-1])
        knots, coeffs = table["knots"], table["coefficients"]
        model = BSpline(knots.data, coeffs.data, k)
        setattr(model, "meta", {"xunit": knots.unit, "yunit": coeffs.unit})
    else:
        if isinstance(table, Table):
            if len(table) != 1:
                raise ValueError(
                    "Can only convert single-row Tables to a model")
            else:
                table = table[0]  # now a Row
        ndim = int(model_class[-2])
        table_dict = dict(zip(table.colnames, table))
        if ndim == 1:
            r = re.compile("c([0-9]+)")
            param_names = list(filter(r.match, table.colnames))
            # Handle cases (e.g., APERTURE tables) where the number of
            # columns must be the same for all rows but the degree of
            # polynomial might be different
            degree = max([
                int(r.match(p).groups()[0]) for p in param_names
                if table[p] is not np.ma.masked
            ])
            domain = [
                table_dict.get("domain_start", meta.get("DOMAIN_START", 0)),
                table_dict.get("domain_end", meta.get("DOMAIN_END", 1))
            ]
            model = cls(degree=degree, domain=domain)
        elif ndim == 2:
            r = re.compile("c([0-9]+)_([0-9]+)")
            param_names = list(filter(r.match, table.colnames))
            xdegree = max([int(r.match(p).groups()[0]) for p in param_names])
            ydegree = max([int(r.match(p).groups()[1]) for p in param_names])
            xdomain = [
                table_dict.get("xdomain_start", meta.get("XDOMAIN_START", 0)),
                table_dict.get("xdomain_end", meta.get("XDOMAIN_END", 1))
            ]
            ydomain = [
                table_dict.get("ydomain_start", meta.get("YDOMAIN_START", 0)),
                table_dict.get("ydomain_end", meta.get("YDOMAIN_END", 1))
            ]
            model = cls(x_degree=xdegree,
                        y_degree=ydegree,
                        x_domain=xdomain,
                        y_domain=ydomain)
        else:
            raise ValueError(
                f"Invalid dimensionality of model '{model_class}'")

        for k, v in table_dict.items():
            if k in param_names:
                setattr(model, k, v)
            elif not ("domain" in k or k in ("ndim", "degree")):
                # other columns go in the meta
                model.meta[k] = v
        for unit in ("xunit", "yunit", "zunit"):
            value = meta.get(unit.upper())
            if value:
                model.meta[unit] = u.Unit(value)

    return model
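
For the spline branch, a hypothetical round trip: the knots/coefficients column names and the "spline3" MODEL keyword follow the code above, and the trailing zero coefficients mimic splrep-style padding, which BSpline ignores.

import numpy as np
from astropy.table import Table

knots = np.r_[(0.,) * 4, 1., 2., (3.,) * 4]               # clamped cubic knot vector
coeffs = np.r_[np.arange(6, dtype=float), np.zeros(4)]    # 6 real coefficients + padding
tbl = Table([knots, coeffs], names=("knots", "coefficients"))
tbl.meta["header"] = {"MODEL": "spline3"}

spline = table_to_model(tbl)    # getattr() fails on "spline3", so the BSpline branch runs
print(spline(1.5))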
Code example #30
    def test_nan(self):
        # nan in, nan out.
        b = BSpline.basis_element([0, 1, 1, 2])
        assert_(np.isnan(b(np.nan)))
Code example #31
def bump_cbs(x):
    y = np.zeros_like(x)
    index = (-2 < x) & (x < 2)
    y[index] = BSpline.basis_element([-2, -1, 0, 1, 2],
                                     extrapolate=False)(x[index])
    return y
Code example #32
File: test_bsplines.py Project: Brucechen13/scipy
    def test_nan(self):
        # nan in, nan out.
        b = BSpline.basis_element([0, 1, 1, 2])
        assert_(np.isnan(b(np.nan)))
Code example #33
def d_bump_cbs(x):
    dy = np.zeros_like(x)
    index = (-2 < x) & (x < 2)
    dy[index] = BSpline.basis_element([-2, -1, 0, 1, 2],
                                      extrapolate=False).derivative()(x[index])
    return dy
Code example #34
def CrystalBragg_plot_data_vs_fit(xi,
                                  xj,
                                  bragg,
                                  lamb,
                                  phi,
                                  data,
                                  mask=None,
                                  lambfit=None,
                                  phifit=None,
                                  spect1d=None,
                                  dfit1d=None,
                                  dfit2d=None,
                                  lambfitbins=None,
                                  cmap=None,
                                  vmin=None,
                                  vmax=None,
                                  fs=None,
                                  dmargin=None,
                                  angunits='deg',
                                  dmoments=None):

    # Check inputs
    # ------------

    if fs is None:
        fs = (16, 9)
    if cmap is None:
        cmap = plt.cm.viridis
    if dmargin is None:
        dmargin = {
            'left': 0.03,
            'right': 0.99,
            'bottom': 0.05,
            'top': 0.92,
            'wspace': None,
            'hspace': 0.4
        }
    assert angunits in ['deg', 'rad']
    if angunits == 'deg':
        bragg = bragg * 180. / np.pi
        phi = phi * 180. / np.pi
        phifit = phifit * 180. / np.pi

    # pre-compute
    # ------------

    # extent
    extent = (xi.min(), xi.max(), xj.min(), xj.max())
    extent2 = (lambfit.min(), lambfit.max(), phifit.min(), phifit.max())

    ind = np.digitize(lamb[mask].ravel(), lambfitbins)
    spect2dmean = np.zeros((lambfitbins.size + 1, ))
    for ii in range(lambfitbins.size + 1):
        indi = ind == ii
        if np.any(indi):
            spect2dmean[ii] = np.nanmean(dfit2d['fit'][indi])

    # Plot
    # ------------

    fig = plt.figure(figsize=fs)
    gs = gridspec.GridSpec(4, 6, **dmargin)
    ax0 = fig.add_subplot(gs[:3, 0], aspect='equal', adjustable='datalim')
    ax1 = fig.add_subplot(gs[:3, 1],
                          aspect='equal',
                          adjustable='datalim',
                          sharex=ax0,
                          sharey=ax0)
    axs1 = fig.add_subplot(gs[3, 1], sharex=ax0)
    ax2 = fig.add_subplot(gs[:3, 2])
    axs2 = fig.add_subplot(gs[3, 2], sharex=ax2, sharey=axs1)
    ax3 = fig.add_subplot(gs[:3, 3], sharex=ax2, sharey=ax2)
    axs3 = fig.add_subplot(gs[3, 3], sharex=ax2)  #, sharey=axs1)
    ax4 = fig.add_subplot(gs[:3, 4], sharex=ax2, sharey=ax2)
    axs4 = fig.add_subplot(gs[3, 4], sharex=ax2)  #, sharey=axs1)
    ax5 = fig.add_subplot(gs[:3, 5], sharey=ax2)

    ax0.set_title('Coordinates transform')
    ax1.set_title('Camera image')
    ax2.set_title('Camera image transformed')
    ax3.set_title('2d spectral fit')
    ax4.set_title('2d error')
    ax5.set_title('Moments')

    ax4.set_xlabel('%s' % angunits)
    ax0.set_ylabel(r'incidence angle ($deg$)')

    ax0.contour(xi, xj, bragg, 10, cmap=cmap)
    ax0.contour(xi, xj, phi, 10, cmap=cmap, ls='--')
    ax1.imshow(data,
               extent=extent,
               aspect='equal',
               origin='lower',
               vmin=vmin,
               vmax=vmax)
    axs1.plot(xi, np.nanmean(data, axis=0), c='k', ls='-')
    ax2.scatter(lamb.ravel(),
                phi.ravel(),
                c=data.ravel(),
                s=1,
                marker='s',
                edgecolors='None',
                cmap=cmap,
                vmin=vmin,
                vmax=vmax)
    axs2.plot(lambfit, spect1d, c='k', ls='None', marker='.', ms=4)
    axs2.plot(lambfit, dfit1d['fit'].ravel(), c='r', ls='-', label='fit')
    for ll in dfit1d['lamb0']:
        axs2.axvline(ll, c='k', ls='--')

    # dfit2d
    ax3.scatter(lamb[mask].ravel(),
                phi[mask].ravel(),
                c=dfit2d['fit'],
                s=1,
                marker='s',
                edgecolors='None',
                cmap=cmap,
                vmin=vmin,
                vmax=vmax)
    axs3.plot(lambfit, spect1d, c='k', ls='None', marker='.')
    axs3.plot(lambfit, spect2dmean, c='b', ls='-')
    err = dfit2d['fit'] - data[mask].ravel()
    errmax = np.max(np.abs(err))
    ax4.scatter(lamb[mask].ravel(),
                phi[mask].ravel(),
                c=err,
                s=1,
                marker='s',
                edgecolors='None',
                cmap=plt.cm.seismic,
                vmin=-errmax,
                vmax=errmax)

    # Moments
    if dmoments is not None:
        if dmoments.get('ratio') is not None:
            ind = dmoments['ratio'].get('ind')
            if ind is None:
                ind = [
                    np.argmin(np.abs(dfit2d['lamb0'] - ll))
                    for ll in dmoments['ratio']['lamb']
                ]
            for indi in ind:
                axs3.axvline(dfit2d['lamb0'][indi], c='k', ls='--')
            amp0 = BSpline(dfit2d['knots'], dfit2d['camp'][ind[0], :],
                           dfit2d['deg'])(phifit)
            amp1 = BSpline(dfit2d['knots'], dfit2d['camp'][ind[1], :],
                           dfit2d['deg'])(phifit)
            lab = dmoments['ratio']['name'] + '{} / {}'
            ratio = (amp0 / amp1) / np.nanmax(amp0 / amp1)
            ax5.plot(amp0 / amp1, phifit, ls='-', c='k', label=lab)
        if dmoments.get('sigma') is not None:
            ind = dmoments['sigma'].get('ind')
            if ind is None:
                ind = np.argmin(
                    np.abs(dfit2d['lamb0'] - dmoments['sigma']['lamb']))
            axs3.axvline(dfit2d['lamb0'][ind], c='b', ls='--')
            sigma = BSpline(dfit2d['knots'], dfit2d['csigma'][ind, :],
                            dfit2d['deg'])(phifit)
            lab = r'$\sigma({} A)$'.format(
                np.round(dfit2d['lamb0'][ind] * 1.e10, 4))
            ax5.plot(sigma / np.nanmax(sigma),
                     phifit,
                     ls='-',
                     c='b',
                     label=lab)

    ax2.set_xlim(extent2[0], extent2[1])
    ax2.set_ylim(extent2[2], extent2[3])
    return [ax0, ax1]
Code example #35
File: spline_fxns.py Project: neurodata/brainlit
def speed(
    x: np.ndarray,
    t: np.ndarray,
    c: np.ndarray,
    k: np.integer,
    aux_outputs: bool = False,
) -> np.ndarray:
    r"""Compute the speed of a B-Spline.

    The speed is the norm of the first derivative of the B-Spline.

    Arguments:
        x: A `1xL` array of parameter values where to evaluate the curve.
            It contains the parameter values where the speed of the B-Spline will
            be evaluated. It is required to be non-empty, one-dimensional, and
            real-valued.
        t: A `1xm` array representing the knots of the B-spline.
            It is required to be a non-empty, non-decreasing, and one-dimensional
            sequence of real-valued elements. For a B-Spline of degree `k`, at least
            `2k + 1` knots are required.
        c: A `dxn` array representing the coefficients/control points of the B-spline.
            Given `n` real-valued, `d`-dimensional points ::math::`x_k = (x_k(1),...,x_k(d))`,
            `c` is the non-empty matrix whose columns are ::math::`x_1^T,...,x_n^T`. For a
            B-Spline of order `k`, `n` cannot be less than `m-k-1`.
        k: A non-negative integer representing the degree of the B-spline.

    Returns:
        speed: A `1xL` array containing the speed of the B-Spline evaluated at `x`

    References:
    .. [1] Kouba, Parametric Equations.
        https://www.math.ucdavis.edu/~kouba/Math21BHWDIRECTORY/ArcLength.pdf
    """

    # convert arguments to desired type
    x = np.ascontiguousarray(x)
    t = np.ascontiguousarray(t)
    c = np.ascontiguousarray(c)
    k = operator.index(k)

    if k < 0:
        raise ValueError("The order of the spline must be non-negative")

    check_type(t, np.ndarray)
    t_dim = t.ndim
    if t_dim != 1:
        raise ValueError("t must be one-dimensional")
    if len(t) == 0:
        raise ValueError("t must be non-empty")
    check_iterable_type(t, (np.integer, np.floating))
    if (np.diff(t) < 0).any():
        raise ValueError("t must be a non-decreasing sequence")

    check_type(c, np.ndarray)
    c_dim = c.ndim
    if c_dim > 2:
        raise ValueError("c must be 2D max")
    if len(c.flatten()) == 0:
        raise ValueError("c must be non-empty")
    if c_dim == 1:
        check_iterable_type(c, (np.integer, np.floating))
        # expand dims so that we can cycle through a single dimension
        c = np.expand_dims(c, axis=0)
    if c_dim == 2:
        for d in c:
            check_iterable_type(d, (np.integer, np.floating))
    n_dim = len(c)

    check_type(x, np.ndarray)
    x_dim = x.ndim
    if x_dim != 1:
        raise ValueError("x must be one-dimensional")
    if len(x) == 0:
        raise ValueError("x must be non-empty")
    check_iterable_type(x, (np.integer, np.floating))
    L = len(x)

    # evaluate the first derivative along each coordinate
    # deriv is a (d, L) array (transposed to (L, d) below)
    deriv = np.empty((n_dim, L))
    for i, dim in enumerate(c):
        spl = BSpline(t, dim, k)
        deriv[i, :] = spl.derivative(nu=1)(x) if k - 1 >= 0 else np.zeros(L)
    # transpose deriv
    deriv = deriv.T

    speed = np.linalg.norm(deriv, axis=1)
    if aux_outputs == False:
        return speed
    else:
        return speed, deriv
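
A minimal usage sketch for speed() above, on a random 3-D cubic B-spline; the knot and coefficient shapes follow the docstring's conventions, and the module's check_type/check_iterable_type helpers are assumed to be importable.

import numpy as np

k = 3
t = np.r_[(0.,) * k, np.linspace(0., 1., 8), (1.,) * k]    # clamped knot vector
c = np.random.default_rng(0).random((3, len(t) - k - 1))   # d x n control points
x = np.linspace(0., 1., 50)

s = speed(x, t, c, k)    # Euclidean norm of the first derivative at each x
print(s.shape)           # (50,)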
Code example #36
File: test_bsplines.py Project: Brucechen13/scipy
def _make_random_spline(n=35, k=3):
    np.random.seed(123)
    t = np.sort(np.random.random(n+k+1))
    c = np.random.random(n)
    return BSpline.construct_fast(t, c, k)
Code example #37
import numpy as np
import scipy.special
import pickle
from scipy.interpolate import BSpline
import atmosphere as atm
import os

dir_path = os.path.dirname(os.path.realpath(__file__))

with open(os.path.join(dir_path, "pickle/geo_rcut_b_splines.pickle"),
          "rb") as fin:
    spl_rcut_geo_params, spl_b_geo_params = pickle.load(fin)
    t, c, k = spl_rcut_geo_params
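    # The stored knot vector apparently lacks the repeated boundary knots, so pad
    # k copies of each end knot to give BSpline a full clamped knot vector.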
    t = np.append(np.append(np.ones(k) * t[0], t), np.ones(k) * t[-1])
    spl_rcut_geo = BSpline(t, c, k)

    t, c, k = spl_b_geo_params
    t = np.append(np.append(np.ones(k) * t[0], t), np.ones(k) * t[-1])
    spl_b_geo = BSpline(t, c, k)

with open(os.path.join(dir_path, "pickle/geo_sigmaR_spl.pickle"), "rb") as fin:
    data = pickle.load(fin)
    t, c, k = data['geo_R_0m']
    t = np.append(np.append(np.ones(k) * t[0], t), np.ones(k) * t[-1])
    spl_geo_R_0m = BSpline(t, c, k)

    t, c, k = data['geo_R_1564m']
    t = np.append(np.append(np.ones(k) * t[0], t), np.ones(k) * t[-1])
    spl_geo_R_1564m = BSpline(t, c, k)

    t, c, k = data['geo_sigma_0m']