Example #1
def fit_polynomial_nplstsq(X, Y, degree, x_pad=10, X_unknown=None):
    """
    Fit a polynomial of the given degree to Y as a function of X.

    :param X: predictor variables; n-dimensional array with row-wise instances
    :param Y: values to fit to; n-dimensional array with row-wise instances
    :param degree: degree of the polynomial; for n-dimensional X, a list with
        len(degree) == dim(X)
    :param x_pad: padding around X when generating data from the fitted model
    :param X_unknown: unknown X for which y is to be predicted, if not None
    :return: W, the coefficients
    :return: (x, y), values generated from the fitted model
    :return: (X_unknown, y_pred), predictions for X_unknown, when given
    """
    # TODO: tests for truly n-dimensional X
    # construct the appropriate Vandermonde matrix
    V = pol.polyvander(X, degree)

    # get coefficients by least squares
    coeff = la.lstsq(V, Y)[0]  # discarding other information

    # generate data from the fitted model (for plotting)
    x = np.linspace(X.min() - x_pad, X.max() + x_pad, 100)
    x_ = pol.polyvander(x, degree)

    y = np.dot(x_, coeff)

    # calculate y for unknown X, if provided
    if X_unknown is not None:
        V_unknown = pol.polyvander(X_unknown, degree)
        y_pred = np.dot(V_unknown, coeff)

        return coeff, (x, y), (X_unknown, y_pred)
    else:
        return coeff, (x, y)
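The helper above assumes numpy imported as np, numpy.polynomial.polynomial as pol, and numpy.linalg as la. A minimal usage sketch under those assumptions, with synthetic data:

import numpy as np

rng = np.random.default_rng(0)
X = np.linspace(0, 10, 40)
Y = 1.0 + 0.5 * X + rng.normal(0, 0.2, X.size)

coeff, (x, y) = fit_polynomial_nplstsq(X, Y, degree=1)
# coeff[0] ~ 1.0 (intercept), coeff[1] ~ 0.5 (slope)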
Example #2
def fitting(x, y, deg):
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0
    deg = np.asarray(deg)

    if deg.ndim == 0:
        # scalar degree: fit all terms 0..deg
        lm = deg
        order = lm + 1
        van = poly.polyvander(x, lm)
    else:
        # list of degrees: fit only the selected terms
        deg = np.sort(deg)
        lm = deg[-1]
        order = len(deg)
        van = poly.polyvander(x, lm)[..., deg]

    # scale the columns of the design matrix to improve conditioning
    ls = van.T
    rs = y.T
    scl = np.sqrt(np.square(ls).sum(1))
    scl[scl == 0] = 1

    coeff, res, rank, s = la.lstsq(ls.T / scl, rs.T,
                                   rcond=len(x) * np.finfo(x.dtype).eps)
    coeff = (coeff.T / scl).T
    
    if deg.ndim > 0:
        # expand back to full length, zeros at the skipped degrees
        if coeff.ndim == 2:
            exp = np.zeros((lm + 1, coeff.shape[1]), dtype=coeff.dtype)
        else:
            exp = np.zeros(lm + 1, dtype=coeff.dtype)

        exp[deg] = coeff
        coeff = exp

    return coeff
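A sketch of the degree-list behavior (assumes numpy as np, numpy.polynomial.polynomial as poly, and numpy.linalg as la): passing deg=[0, 2] fits only the constant and quadratic terms, and the skipped degree comes back as a zero coefficient.

import numpy as np

x = np.linspace(-1, 1, 50)
y = 2.0 + 3.0 * x**2                   # no linear term

c = fitting(x, y, [0, 2])              # fit only degrees 0 and 2
# c ~ [2.0, 0.0, 3.0]; the degree-1 slot stays zero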
Example #3
File: Maps.py Project: sdevriese/simpeg
    def deriv(self, m, v=None):
        alpha = self.slope
        sig1, sig2, c = m[0], m[1], m[2:]
        if self.logSigma:
            sig1, sig2 = np.exp(sig1), np.exp(sig2)

        # 2D
        if self.mesh.dim == 2:
            X = self.mesh.gridCC[self.actInd, 0]
            Y = self.mesh.gridCC[self.actInd, 1]

            if self.normal == 'X':
                f = polynomial.polyval(Y, c) - X
                V = polynomial.polyvander(Y, len(c) - 1)
            elif self.normal == 'Y':
                f = polynomial.polyval(X, c) - Y
                V = polynomial.polyvander(X, len(c) - 1)
            else:
                raise Exception("Input for normal = X or Y or Z")

        # 3D
        elif self.mesh.dim == 3:
            X = self.mesh.gridCC[self.actInd, 0]
            Y = self.mesh.gridCC[self.actInd, 1]
            Z = self.mesh.gridCC[self.actInd, 2]

            if self.normal == 'X':
                f = (polynomial.polyval2d(
                    Y, Z, c.reshape(
                        (self.order[0] + 1, self.order[1] + 1))) - X)
                V = polynomial.polyvander2d(Y, Z, self.order)
            elif self.normal == 'Y':
                f = (polynomial.polyval2d(
                    X, Z, c.reshape(
                        (self.order[0] + 1, self.order[1] + 1))) - Y)
                V = polynomial.polyvander2d(X, Z, self.order)
            elif self.normal == 'Z':
                f = (polynomial.polyval2d(
                    X, Y, c.reshape(
                        (self.order[0] + 1, self.order[1] + 1))) - Z)
                V = polynomial.polyvander2d(X, Y, self.order)
            else:
                raise Exception("Input for normal = X or Y or Z")

        if self.logSigma:
            g1 = -(np.arctan(alpha * f) / np.pi + 0.5) * sig1 + sig1
            g2 = (np.arctan(alpha * f) / np.pi + 0.5) * sig2
        else:
            g1 = -(np.arctan(alpha * f) / np.pi + 0.5) + 1.0
            g2 = (np.arctan(alpha * f) / np.pi + 0.5)

        g3 = Utils.sdiag(alpha * (sig2 - sig1) / (1. +
                                                  (alpha * f)**2) / np.pi) * V

        if v is not None:
            return sp.csr_matrix(np.c_[g1, g2, g3]) * v
        return sp.csr_matrix(np.c_[g1, g2, g3])
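The arctan terms implement a smoothed step between the two conductivities sig1 and sig2. A standalone numpy sketch of that ingredient, with hypothetical alpha and f:

import numpy as np

f = np.linspace(-5, 5, 201)            # signed misfit to the polynomial interface
alpha = 10.0                           # slope of the smoothed step
step = np.arctan(alpha * f) / np.pi + 0.5
# step -> 0 for f << 0 and -> 1 for f >> 0; its derivative,
# alpha / (1 + (alpha * f)**2) / np.pi, is exactly the factor inside g3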
Example #5
 def test_polyvander(self):
     # check for 1d x
     x = np.arange(3)
     v = poly.polyvander(x, 3)
     assert_(v.shape == (3, 4))
     for i in range(4):
         coef = [0]*i + [1]
         assert_almost_equal(v[..., i], poly.polyval(x, coef))
     # check for 2d x
     x = np.array([[1, 2], [3, 4], [5, 6]])
     v = poly.polyvander(x, 3)
     assert_(v.shape == (3, 2, 4))
     for i in range(4):
         coef = [0]*i + [1]
         assert_almost_equal(v[..., i], poly.polyval(x, coef))
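The identity the test checks can be stated more directly: column i of polyvander(x, deg) is x**i, the monomial with coefficient vector [0]*i + [1]. A minimal sketch:

import numpy as np
from numpy.polynomial import polynomial as poly

x = np.arange(3)
v = poly.polyvander(x, 3)
for i in range(4):
    assert np.allclose(v[..., i], x**i)    # column i holds x**i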
Example #7
 def shoot(self, J_c, rho):
     '''
     Estimate the harmonic frequencies using the supplied a priori information.
     -----
     Arguments:
         J_c: int
             Number of harmonics in the designated band.
         rho: float number or 1D float array
             Power spectral density of the noise in the designated band.
     -----
     Returns:
         fhat: float 1D array
             Estimate of the harmonic frequencies in the designated band in ascending order.
         val: float 1D array
             Eigenvalues of the estimate of the harmonic term in descending order.
     '''
     Z = polyvander(
         np.exp(2j * np.pi * self.freq[self.idx_l:self.idx_u + 1]),
         J_c + 1).T[1:]
     Q = null_space(Z)
     D = np.diag(self._N * rho * np.sum(np.abs(Q)**2, axis=1))
     YQ = self.y_k[:, self.idx_l:self.idx_u + 1,
                   np.newaxis] * Q[np.newaxis, :, :]
     YPYh = np.mean(YQ @ YQ.conj().transpose(0, 2, 1), axis=0)
     val, Vec = eigh(
         YPYh - D
     )  # eigenvalues in ascending order and the corresponding eigenvectors
     C_c = Z @ Vec[:, :-J_c - 1:-1]
     A_c = self._tls(C_c[:-1], C_c[1:])
     fhat = np.angle(eigvals(A_c)) / (2 * np.pi)
     return np.sort(fhat), val[::-1]
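A self-contained sketch of the null-space construction at the top of shoot, with a hypothetical in-band frequency grid; Z spans the candidate harmonic subspace and Q is an orthonormal basis of its orthogonal complement:

import numpy as np
from numpy.polynomial.polynomial import polyvander
from scipy.linalg import null_space

freq = np.linspace(0.10, 0.20, 8)      # hypothetical frequency grid
J_c = 2                                # hypothetical number of harmonics
Z = polyvander(np.exp(2j * np.pi * freq), J_c + 1).T[1:]
Q = null_space(Z)                      # shape (8, 8 - (J_c + 1))
assert np.allclose(Z @ Q, 0)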
Example #8
def fit_polynomial_bayesian_skl(X, Y, degree,
                                lambda_shape=1.e-6, lambda_invscale=1.e-6,
                                padding=10, n=100,
                                X_unknown=None):
    X_v = pol.polyvander(X, degree)

    clf = BayesianRidge(lambda_1=lambda_shape, lambda_2=lambda_invscale)
    clf.fit(X_v, Y)

    coeff = np.copy(clf.coef_)

    # BayesianRidge fits a separate intercept; since the Vandermonde
    # matrix already starts with a column of ones, fold the intercept
    # into the first coefficient
    coeff[0] += clf.intercept_

    ret_ = [coeff]

    # generate the line
    x = np.linspace(X.min()-padding, X.max()+padding, n)
    x_v = pol.polyvander(x, degree)

    # using the provided predict method
    y_1 = clf.predict(x_v)

    # using np.dot() with coeff
    y_2 = np.dot(x_v, coeff)

    ret_.append(((x, y_1), (x, y_2)))

    if X_unknown is not None:
        xu_v = pol.polyvander(X_unknown, degree)

        # using the predict method
        yu_1 = clf.predict(xu_v)

        # using np.dot() with coeff
        yu_2 = np.dot(xu_v, coeff)

        ret_.append(((X_unknown, yu_1), (X_unknown, yu_2)))

    return ret_
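A hypothetical usage sketch with synthetic data (assumes numpy as np, numpy.polynomial.polynomial as pol, and sklearn.linear_model.BayesianRidge imported as above):

import numpy as np

rng = np.random.default_rng(1)
X = np.linspace(0, 5, 40)
Y = 1.0 + 2.0 * X - 0.3 * X**2 + rng.normal(0, 0.1, X.size)

coeff, ((x, y_pred), (_, y_dot)) = fit_polynomial_bayesian_skl(X, Y, degree=2)
# y_pred (clf.predict) and y_dot (np.dot with coeff) should agree closely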
Example #9
def newtVander5d(locations, deg):
    # stolen straight from legvander3d and modified
    n = locations.shape[1]
    ideg = [int(d) for d in deg]
    is_valid = [di == d and di >= 0 for di, d in zip(ideg, deg)]
    if is_valid != [True] * n:
        raise ValueError("degrees must be non-negative integers")

    # per-coordinate Vandermonde matrices, combined as a tensor product
    vi = [poly.polyvander(locations[:, i], deg[i]) for i in range(n)]
    v = (vi[0][..., None, None, None, None]
         * vi[1][..., None, :, None, None, None]
         * vi[2][..., None, None, :, None, None]
         * vi[3][..., None, None, None, :, None]
         * vi[4][..., None, None, None, None, :])
    return v.reshape(v.shape[:-n] + (-1,))
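A quick shape check under hypothetical inputs: the flattened tensor-product Vandermonde has prod(deg[i] + 1) columns, one per basis term.

import numpy as np

locations = np.random.rand(7, 5)       # 7 sample points in 5 dimensions
deg = [1, 2, 1, 1, 2]
V = newtVander5d(locations, deg)
assert V.shape == (7, np.prod([d + 1 for d in deg]))   # (7, 72)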
Example #10
def genericNewtVander(locations, deg):
    # stolen straight from legvander3d and modified
    n = locations.shape[1]
    ideg = [int(d) for d in deg]
    is_valid = [di == d and di >= 0 for di, d in zip(ideg, deg)]
    if is_valid != [True] * n:
        raise ValueError("degrees must be non-negative integers")

    vi = [poly.polyvander(locations[:, i], deg[i]) for i in range(n)]
    indexingTuples=[]
    raise NotImplementedError
    # v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
    return v.reshape(v.shape[:-n] + (-1,))
Example #11
def error_vandermonde(x, residuals=None, rank=None, *args, **kwargs):
    '''
    Generate (1) error estimates for the parameters determined by
    coef, [residuals, rank, singular_values, rcond] = numpy.polynomial.polynomial.polyfit()
    and (2) the associated covariance matrix.

    The Vandermonde matrix is generated by vand = polyvander(x, rank).
    The covariance matrix cov is obtained from the Vandermonde matrix V
    in two numerical steps:
        1) compute inv(np.dot(V.T, V))
        2) multiply it by residuals / (number of data points - number of coefficients)
    The parameter errors are then: error_parameters = np.sqrt(np.diag(cov))

    Script written by Timothée Chauviré (https://github.com/TChauvire/EPR_ESR_Suite/), 09/09/2020

    Parameters
    ----------
    x : abscissa values the polynomial was fitted on
    residuals : float
        First value in the second output of polyfit. The default is None.
    rank : int
        Polynomial order used to build the Vandermonde matrix; necessary
        for multidimensional arrays. The default is None.

    Raises
    ------
    ValueError
        If rank is not smaller than the number of data points: "the number
        of data points must exceed order to scale the covariance matrix".

    Returns
    -------
    error_parameters : error estimates on the parameters, ordered from low
        to high polynomial order. For example, for a linear model
        f(x) = a0 + a1*x:
        error_parameters[0] = constant parameter a0,
        error_parameters[1] = slope a1.

    cov : covariance matrix (rank by rank) estimated for the least-squares
        polynomial fit.
    '''
    if len(x) <= rank:
        raise ValueError("the number of data points must exceed order "
                         "to scale the covariance matrix")
    else:
        v = polyvander(x, rank)  # generate the vandermonde matrix
        cov = residuals / (len(x) - rank) * np.linalg.inv(np.dot(v.T, v))
        error_parameters = np.sqrt(np.diag(cov))

    return error_parameters, cov
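A usage sketch with polyfit's full output (synthetic data; assumes numpy as np and a module-level from numpy.polynomial.polynomial import polyvander for the function body):

import numpy as np
from numpy.polynomial import polynomial as P

rng = np.random.default_rng(2)
x = np.linspace(0, 10, 50)
y = 2.0 + 0.5 * x + rng.normal(0, 0.1, x.size)

coef, [residuals, rank, sv, rcond] = P.polyfit(x, y, 1, full=True)
err, cov = error_vandermonde(x, residuals=residuals[0], rank=1)
# err[0]: uncertainty on the intercept, err[1]: on the slope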
Example #12
File: math.py Project: bedlamzd/vkr
def mls2d(x, y, p=0, support_radius=1, deg: int = 1):
    """
    двумерный moving least squares

    :param x: данные по x
    :param y: данные по y соответствующие x
    :param p: опорная точка
    :param support_radius: радиус влияния
    :param deg: степень полинома
    :return: коэффициенты полинома
    """
    B = polyvander(x, deg)
    # normalized distance to the support point
    rj = np.abs(x - p) / support_radius
    # smooth weight that vanishes outside the support radius
    W = np.diag(np.where(rj <= 1, 1 - 6 * rj ** 2 + 8 * rj ** 3 - 3 * rj ** 4, 0))
    # weighted normal equations
    c = inv(B.T @ W @ B) @ B.T @ W @ y
    return c
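A hypothetical usage sketch (assumes numpy as np plus the polyvander and inv imports the function relies on); evaluating the returned coefficients at the support point gives the local MLS estimate:

import numpy as np
from numpy.polynomial import polynomial as P

x = np.linspace(-1, 1, 101)
y = np.sin(np.pi * x)

c = mls2d(x, y, p=0.25, support_radius=0.4, deg=2)
estimate = P.polyval(0.25, c)          # close to sin(pi * 0.25)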
Example #13
	def V(self, Y):
		""" Build a generalized multivariate Vandermonde matrix for this basis

		"""
		assert Y.shape[1] == self.n
		V_coordinate = [polyvander(Y[:,k], self.p) for k in range(self.n)]
		V = np.zeros((Y.shape[0], self.N))

		for j, terms in enumerate(self.basis_terms):
			for alpha, coeff in terms:
				# compute the product of the per-coordinate
				# Vandermonde columns for this monomial term
				V_col = np.ones(V.shape[0])
				for k in range(0, self.n):
					V_col *= V_coordinate[k][:,alpha[k]]
				V[:,j] += coeff * V_col
		return V
Example #14
	def DV(self, Y):
		""" Build a generalized multivariate Vandermonde matrix for this basis

		"""
		M = Y.shape[0]
		assert Y.shape[1] == self.n
		V_coordinate = [polyvander(Y[:,k], self.p) for k in range(self.n)]
		DV = np.zeros((M, self.N, self.n))

		for k in range(self.n):
			for j, terms in enumerate(self.basis_terms_der[k]):
				for alpha, coeff in terms:
					# compute the product of the per-coordinate
					# Vandermonde columns for this monomial term
					V_col = np.ones(M)
					for i in range(self.n):
						V_col *= V_coordinate[i][:,alpha[i]]
					DV[:,j,k] += coeff * V_col
		return DV
Example #15
def fit_polynomial_bayesian(x, y, degree,
                            sig2=None, sig2_0=3.0,
                            use_pinv=False, use_lsmr=False,
                            padding=10, n=100, get_pdf=True,
                            X_unknown=None):
    X = pol.polyvander(x, degree)

    if sig2 is None:
        sig2 = np.var(y)

    Xt = X.T

    prec = (1/sig2) * (np.dot(Xt, X)) + (1/sig2_0) * np.identity(degree + 1)

    # different approaches to inverse calculation
    # TODO: @motjuste: which one is correct
    prec_inv = la.pinv(prec) if use_pinv else la.inv(prec)

    mu = (1/sig2) * np.dot(prec_inv, np.dot(Xt, y))

    # FIXME: @motjuste: choose the one righteous approach
    if use_lsmr:
        from scipy.sparse.linalg import lsmr

        # lsmr squares `damp` internally, so pass the square root to
        # match the ridge solution (X.T X + (sig2/sig2_0) I)^-1 X.T y
        coeff = lsmr(X, y, damp=np.sqrt(sig2/sig2_0))[0]
    else:
        if use_pinv:
            coeff = la.pinv(np.dot(Xt, X) + (sig2/sig2_0) * np.identity(
                    degree + 1))
        else:
            coeff = la.inv(np.dot(Xt, X) + (sig2/sig2_0) * np.identity(
                    degree + 1))

        coeff = np.dot(coeff, np.dot(Xt, y))

    ret_ = [coeff]

    # generate the line
    x = np.linspace(x.min()-padding, x.max()+padding, n)
    x_v = pol.polyvander(x, degree)

    # the mean of the posterior of y is the best prediction
    y_1 = np.dot(x_v, mu)

    # using np.dot() with coeff
    y_2 = np.dot(x_v, coeff)

    ret_.append(((x, y_1), (x, y_2)))

    if X_unknown is not None:
        xu_v = pol.polyvander(X_unknown, degree)

        # the mean of the posterior of y is the best prediction
        yu_1 = np.dot(xu_v, mu)

        # using np.dot() with coeff
        yu_2 = np.dot(xu_v, coeff)

        ret_.append(((X_unknown, yu_1), (X_unknown, yu_2)))

    if get_pdf:
        # http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.multivariate_normal.html
        data_min = np.array([x.min(), y.min()])
        data_max = np.array([x.max(), y.max()])
        xy_steps = (data_max - data_min) / (n + 2 * padding)
        xx, yy = np.mgrid[
            data_min[0] - padding: data_max[0] + padding: xy_steps[0],
            data_min[1] - padding: data_max[1] + padding: xy_steps[1]]

        x = xx[:, 0]
        y = yy[0, :]
        pdf = []

        for i, x_ in enumerate(x):
            x_v = pol.polyvander(x_, degree).T

            mean = np.dot(mu.T, x_v)
            var = sig2 + np.dot(x_v.T, np.dot(prec_inv, x_v))
            pdf_ = norm.pdf(y, mean, np.sqrt(var)).T  # norm.pdf expects the standard deviation
            pdf.append(pdf_)

        pdf = np.array(pdf)[:, :, 0]

        # use `plt.contourf(x, y, pdf)`
        ret_.append((xx, yy, pdf))

    return ret_
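The TODO/FIXME above have an analytic answer: the posterior mean mu and the ridge solution coeff are the same vector, since (1/sig2) * ((1/sig2) X.T X + (1/sig2_0) I)^-1 X.T y = (X.T X + (sig2/sig2_0) I)^-1 X.T y. A quick numerical check of that identity:

import numpy as np

rng = np.random.default_rng(3)
X = rng.normal(size=(20, 3))
y = rng.normal(size=20)
sig2, sig2_0 = 0.5, 3.0

prec = X.T @ X / sig2 + np.identity(3) / sig2_0
mu = np.linalg.solve(prec, X.T @ y) / sig2
coeff = np.linalg.solve(X.T @ X + (sig2 / sig2_0) * np.identity(3), X.T @ y)
assert np.allclose(mu, coeff)          # the two branches agree analytically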
Example #16
File: __init__.py Project: Bankq/CS6998
def polyvander(x, deg):
    from numpy.polynomial.polynomial import polyvander
    return polyvander(x, deg)
Example #17
n = 10
x = np.linspace(xmin, xmax, 100)

# method 1:
# regression using polyfit
c = poly.polyfit(hgt, wgt, n)
y = poly.polyval(x, c)

axs1.set_title("ployfit")
axs1.plot(hgt, wgt, 'ko', x, y, 'r-')
axs1.set_xlim(xmin, xmax)
axs1.set_ylim(ymin, ymax)

# method 2:
# regression using the Vandermonde matrix and pinv
X = poly.polyvander(hgt, n)
c = np.dot(la.pinv(X), wgt)
y = np.dot(poly.polyvander(x, n), c)

axs2.set_title("Vandermonde and pinv")
axs2.plot(hgt, wgt, 'ko', x, y, 'r-')
axs2.set_xlim(xmin, xmax)
axs2.set_ylim(ymin, ymax)

# method 3:
# regression using the Vandermonde matrix and lstsq

X = poly.polyvander(hgt, n)
c = la.lstsq(X, wgt)[0]
y = np.dot(poly.polyvander(x, n), c)
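The snippet assumes height/weight data (hgt, wgt), axis limits, and two matplotlib axes defined elsewhere; a hypothetical minimal setup so the three methods can be run:

import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
from numpy.polynomial import polynomial as poly

rng = np.random.default_rng(4)
hgt = rng.uniform(150, 200, 50)                 # hypothetical heights (cm)
wgt = 0.9 * hgt - 90 + rng.normal(0, 5, 50)     # hypothetical weights (kg)
xmin, xmax = hgt.min() - 5, hgt.max() + 5
ymin, ymax = wgt.min() - 5, wgt.max() + 5
fig, (axs1, axs2) = plt.subplots(1, 2)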
Example #18


# Ideas To Do or test: 
#     r,p = scipy.stats.pearsonr(x, y)  # and other optional statistical test
#     reference https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html#scipy.stats.pearsonr
#     https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
    
#     cov = np.cov(x,y)
    
# Code from scipy.optimize.curve_fit: (https://github.com/scipy/scipy/blob/v1.5.1/scipy/optimize/minpack.py#L532-L834)
# Do Moore-Penrose inverse discarding zero singular values.
#         _, s, VT = svd(res.jac, full_matrices=False)
#         threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
#         s = s[s > threshold]
#         VT = VT[:s.size]
#         pcov = np.dot(VT.T / s**2, VT)

# Check lmfit examples : 
#    https://github.com/lmfit/lmfit-py/blob/master/examples/example_fit_with_derivfunc.py

#     bayesian analysis in python
def trsf(x):
    return x / 100.


n = 10
x = np.linspace(xmin, xmax, 100)

# method 1:
# regression using polyfit
c = poly.polyfit(hgt, wgt, n)
y = poly.polyval(x, c)
plot_data_and_fit(hgt, wgt, x, y)

# method 2:
# regression using the Vandermonde matrix and pinv
X = poly.polyvander(hgt, n)
c = np.dot(la.pinv(X), wgt)
y = np.dot(poly.polyvander(x, n), c)
plot_data_and_fit(hgt, wgt, x, y)

# method 3:
# regression using the Vandermonde matrix and lstsq
X = poly.polyvander(hgt, n)
c = la.lstsq(X, wgt)[0]
y = np.dot(poly.polyvander(x, n), c)
plot_data_and_fit(hgt, wgt, x, y)

# method 4:
# regression on transformed data using the Vandermonde
# matrix and either pinv or lstsq
X = poly.polyvander(trsf(hgt), n)
Example #20
File: __init__.py Project: 1950/sawbuck
def polyvander(x, deg):
    from numpy.polynomial.polynomial import polyvander
    return polyvander(x, deg)
def ofit(x, y, deg, rcond=None, full=False, w=None, cov=False): # numpy/polynomial.py

    order = int(deg) + 1
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0

    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    # set rcond
    if rcond is None:
        rcond = len(x)*np.finfo(x.dtype).eps

    # set up least squares equation for powers of x
    lhs = poly.polyvander(x, deg)  # deg+1 = order columns; polyvander takes the degree
    rhs = y

    # apply weighting
    if w is not None:
        w = NX.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        lhs *= w[:, NX.newaxis]
        if rhs.ndim == 2:
            rhs *= w[:, NX.newaxis]
        else:
            rhs *= w

    # scale lhs to improve condition number and solve
    scale = NX.sqrt((lhs*lhs).sum(axis=0))
    lhs /= scale
    c, resids, rank, s = la.lstsq(lhs, rhs, rcond)
    c = (c.T/scale).T  # broadcast scale coefficients

    # warn on rank reduction, which indicates an ill conditioned matrix
    # (mirrors numpy's polyfit behavior)
    if rank != order and not full:
        import warnings
        warnings.warn("the fit may be poorly conditioned", stacklevel=2)

    if full:
        return c, resids, rank, s, rcond
    elif cov:
        Vbase = la.inv(NX.dot(lhs.T, lhs))
        Vbase /= NX.outer(scale, scale)
        if cov == "unscaled":
            fac = 1
        else:
            if len(x) <= order:
                raise ValueError("the number of data points must exceed order "
                                 "to scale the covariance matrix")
            # note, this used to be: fac = resids / (len(x) - order - 2.0)
            # it was decided that the "- 2" (originally justified by "Bayesian
            # uncertainty analysis") is not what the user expects
            # (see gh-11196 and gh-11197)
            fac = resids / (len(x) - order)
        if y.ndim == 1:
            return c, Vbase * fac
        else:
            return c, Vbase[:,:, NX.newaxis] * fac
    else:
        return c
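A hypothetical usage sketch for ofit (assumes the aliases the body uses: numpy as np and NX, numpy.polynomial.polynomial as poly, numpy.linalg as la):

import numpy as np

rng = np.random.default_rng(5)
x = np.linspace(0, 1, 30)
y = 3.0 - 2.0 * x + rng.normal(0, 0.05, x.size)

c, cov = ofit(x, y, 1, cov=True)
err = np.sqrt(np.diag(cov))            # 1-sigma uncertainties on c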
Example #22
def savgol_filter_werror(y, window_length, degree, error=None, cov=None,
        deriv=None):
    ynew = y * 0.0

    # the window must have odd length so it is centred on a sample
    if window_length % 2 == 0:
        raise ValueError("window_length must be odd")

    # Take care that the window does not spill out of our array
    margin = int(window_length/2)
    xarr = np.arange(-margin, margin+1)

    if cov is not None:
        vander = polyvander(xarr, deg=degree)
        vanderT = np.transpose(vander)
    else:
        weight = 1./error


    for i in range(margin, y.size-margin):
        if cov is None:
            z = solve_polyfit(xarr,
                              y[i-margin:i+margin+1],
                              degree,
                              weight[i-margin:i+margin+1],
                              deriv=deriv)
        else:
            z = solve_leastsq(y[i-margin:i+margin+1],
                              cov[i-margin:i+margin+1,i-margin:i+margin+1],
                              vander,
                              vanderT,
                              deriv=deriv)
        ynew[i] = P.polyval(0.0, z)


    # Now fit the left boundary by fitting the first window_length points
    # with a polynomial of the given degree
    if cov is None:
        z = solve_polyfit(xarr,
                          y[:window_length],
                          degree,
                          weight[:window_length],
                          deriv=deriv)
    else:
        z = solve_leastsq(y[:window_length],
                          cov[:window_length, :window_length],
                          vander,
                          vanderT,
                          deriv=deriv)
    for i in range(margin):
        ynew[i] = P.polyval(xarr[i], z)

    # Now fit the right boundary by fitting the last window_length points
    # with a polynomial of the given degree
    if cov is None:
        z = solve_polyfit(xarr,
                          y[-window_length:],
                          degree,
                          weight[-window_length:],
                          deriv=deriv)
    else:
        z = solve_leastsq(y[-window_length:],
                          cov[-window_length:, -window_length:],
                          vander,
                          vanderT,
                          deriv=deriv)

    for i in range(margin):
        ynew[y.size-margin+i] = P.polyval(xarr[i+margin+1], z)

    return ynew
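solve_polyfit and solve_leastsq are not shown above. A plausible reconstruction of the covariance branch, assuming it does generalized least squares on the window (a hypothetical sketch, not the author's code):

import numpy as np
from numpy.polynomial import polynomial as P

def solve_leastsq(y, cov, vander, vanderT, deriv=None):
    # hypothetical: GLS estimate c = (V^T C^-1 V)^-1 V^T C^-1 y
    cinv = np.linalg.inv(cov)
    c = np.linalg.solve(vanderT @ cinv @ vander, vanderT @ cinv @ y)
    if deriv:
        c = P.polyder(c, deriv)        # coefficients of the deriv-th derivative
    return c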
Example #23
x = np.linspace(xmin, xmax, 10000)

# ---------
# method 1:
# regression using polyfit

c = poly.polyfit(hgt, wgt, n)
y = poly.polyval(x, c)
plot_data_and_fit(hgt, wgt, x, y)
print("polyfit     error = ", get_error(hgt, wgt, x, y))

# ---------
# method 2:
# regression using the Vandermonde matrix and pinv

X = poly.polyvander(hgt, n)
c = np.dot(la.pinv(X), wgt)
# print("vander condition: ",la.cond(X))
# print("pinv   condition: ",la.cond(la.pinv(X)))
y = np.dot(poly.polyvander(x, n), c)
plot_data_and_fit(hgt, wgt, x, y)
print("vander+pinv error = ", get_error(hgt, wgt, x, y), la.cond(X))

# ---------
# method 3:
# regression using the Vandermonde matrix and lstsq

X = poly.polyvander(hgt, n)
c = la.lstsq(X, wgt)[0]
y = np.dot(poly.polyvander(x, n), c)
plot_data_and_fit(hgt, wgt, x, y)