Example #1
 def test_dmd_complex128(self):
     m, n = 9, 7
     a = np.array(np.fliplr(np.vander(np.random.rand(m)+1, n)) + 1j*np.fliplr(np.vander(np.random.rand(m), n)), 
                  np.complex128, order='F')
     a_gpu = gpuarray.to_gpu(a)
     f_gpu, b_gpu, v_gpu, omega = linalg.dmd(a_gpu, modes='standard', return_amplitudes=True, return_vandermonde=True)
     assert np.allclose(a[:,:(n-1)], np.dot(f_gpu.get(), np.dot(np.diag(b_gpu.get()), v_gpu.get())), atol=atol_float64)
Example #2
    def _regressor(self, x):
        """Form normalised regressor / design matrix from set of input vectors.

        Parameters
        ----------
        x : array of float, shape (2, N)
            Input to function as a 2-D numpy array

        Returns
        -------
        X : array of float, shape (P, N)
            Regressor / design matrix to be used in least-squares fit

        Notes
        -----
        This normalises the 2-D input vectors by centering and scaling them.
        It then forms a regressor matrix with a column per input vector. Each
        column is given by the outer product of the monomials of the first
        dimension with the monomials of the second dimension of the input vector,
        in decreasing polynomial order. For example, if *degrees* is (1, 2) and
        the normalised elements of each input vector in *x* are *x_0* and *x_1*,
        respectively, the column takes the form::

            outer([x_0, 1], [x_1 ^ 2, x_1, 1])
            = [x_0 * x_1 ^ 2, x_0 * x_1, x_0 * 1, 1 * x_1 ^ 2, 1 * x_1, 1 * 1]
            = [x_0 * x_1 ^ 2, x_0 * x_1, x_0, x_1 ^ 2, x_1, 1]

        This is closely related to the Vandermonde matrix of *x*.

        """
        x_norm = (x - self._mean[:, np.newaxis]) / self._scale[:, np.newaxis]
        v1 = np.vander(x_norm[0], self.degrees[0] + 1)
        v2 = np.vander(x_norm[1], self.degrees[1] + 1).T
        return np.vstack([v1[:, n][np.newaxis, :] * v2 for n in range(v1.shape[1])])
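A minimal standalone sketch of the outer-product construction described in the docstring above, using hypothetical values for *degrees* and the (already normalised) inputs:

import numpy as np

degrees = (1, 2)                             # hypothetical polynomial degrees
x_norm = np.array([[0.5, 1.0, -0.5],         # stand-in for the normalised (2, N) input
                   [2.0, 0.0, 1.0]])
v1 = np.vander(x_norm[0], degrees[0] + 1)    # per-sample monomials [x_0, 1]
v2 = np.vander(x_norm[1], degrees[1] + 1).T  # rows [x_1^2, x_1, 1]
X = np.vstack([v1[:, n][np.newaxis, :] * v2 for n in range(v1.shape[1])])
print(X.shape)  # (6, 3): one outer-product column per input vector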
Example #3
 def test_rdmd_complex64(self):
     m, n = 9, 7
     a = np.array(np.fliplr(np.vander(np.random.rand(m)+1, n)) + 1j*np.fliplr(np.vander(np.random.rand(m)+1, n)), 
                  np.complex64, order='F')
     a_gpu = gpuarray.to_gpu(a)
     f_gpu, b_gpu, v_gpu = rlinalg.rdmd(a_gpu, k=(n-1), p=0, q=1, modes='standard')
     assert np.allclose(a[:,:(n-1)], np.dot(f_gpu.get(), np.dot(np.diag(b_gpu.get()), v_gpu.get())), atol=atol_float32)
Example #4
def plot_results(x, y, yerr, samples, truth=True, color="r", data_fig=None,
                 show=True):
    if data_fig is None:
        # Plot the data.
        data_fig = plot_data(x, y, yerr, truth=truth)
        data_fig, data_ax = _get_fig_ax(data_fig)
    else:
        data_ax = data_fig.gca()

    # Generate the constraints in data space.
    x0 = np.linspace(-5, 5, 500)
    samples = np.atleast_1d(samples)
    if len(samples.shape) == 2:
        lines = np.dot(np.vander(x0, 2), samples[:, :2].T)
        q = np.percentile(lines, [16, 84, 2.5, 97.5], axis=1)
        data_ax.fill_between(x0, q[2], q[3], color=color, alpha=0.1)
        data_ax.fill_between(x0, q[0], q[1], color=color, alpha=0.3)
    else:
        data_ax.plot(x0, np.dot(np.vander(x0, 2), samples[:2]), color=color)

    if show:
        # Plot the triangle plot.
        true = load_data("line_true_params.txt")
        true[2:] = np.log(true[2:])
        triangle_fig = triangle.corner(samples, bins=24,
                                       labels=["m", "b", "ln(alpha)",
                                               "ln(ell)"],
                                       truths=true)
    else:
        triangle_fig = None

    _format_axes(data_ax)
    return data_fig, triangle_fig
Example #5
def polyfit2dPure(x, y, z, order=2, w=None):
    '''
    References:
    http://pingswept.org/2009/06/15/least-squares-fit-of-a-surface-to-a-3d-cloud-of-points-in-python-(with-ridiculous-application)/
    '''
#    x = np.asarray(x) + 0.0
#    y = np.asarray(y) + 0.0
#    z = np.asarray(z) + 0.0

    deg = order + 1
    Gx = np.vander(x, deg)
    Gy = np.vander(y, deg)
    G = np.hstack((Gx, Gy))

    del x, y, Gx, Gy
    
    # Apply weighting
    if w is not None:
#        w = np.asarray(w) + 0.0
        G *= w[:, np.newaxis]
        z *= w
        
    del w

    m, _, _, _ = np.linalg.lstsq(G, z)
    return m
Example #6
def noisy_quad_fit(order, Lambda, n_train=20, n_test=81):
    """
      Creates n_train training data points with noise, fits to poly of order,
      then tests on n_test points (noise free).  Uses offset quadratic
    """
    low_x = -2;
    high_x = 2;
    plt.close('all');
    
    train_x = np.linspace(low_x, high_x, n_train);
    X = np.vander(train_x, N = order+1);
    y = (1+train_x**2) + 0.6*(np.random.rand(n_train) - 0.5);
    #y = (np.sin(3*train_x) - (train_x * np.cos(2*train_x))) + 0.6*(np.random.rand(n_train) - 0.5);
    #y = (np.sin(3*train_x) - (train_x * np.cos(2*train_x)));
    #y = (1+train_x**2);
    theta = regress_theta(X,y,Lambda);
    predict_y = np.dot(X,theta);
    print('Training Error =', np.max(np.abs(y - predict_y)));
    #trainingerror = np.max(np.abs(y - predict_y));
    
    test_x = np.linspace(low_x, high_x, n_test);
    Xt = np.vander(test_x, N = order+1);
    yt = 1+test_x**2;
    #yt = np.sin(3*test_x) - (test_x * np.cos(2*test_x));
    predict_yt = np.dot(Xt,theta);
    print('Testing Error =', np.max(np.abs(yt - predict_yt)));
    #testingerror = np.max(np.abs(yt - predict_yt));
    
    plt.plot(train_x, y, 'ro');
    plt.plot(train_x, predict_y, 'rx');
    plt.plot(test_x, predict_yt, 'bx');
    plt.show();
Example #7
def test_score_mean_mt_mse():
    V3_true = np.vander(np.arange(3))
    V3_pred = np.vander(np.arange(1, 4))
    assert abs(test.score_mean_mt_mse(V3_true, V3_pred) - 4.22222) < 1e-5

    V3_true_ma = np.ma.MaskedArray(V3_true)
    V3_true_ma.mask = np.zeros((3, 3))
    V3_true_ma.mask[2, :] = 1
    assert test.score_mean_mt_mse(V3_true_ma, V3_pred) == 2
Example #8
def remez(func, interval, degree, error=None, maxiter=30, float_type=numpy.float128):
    """
        The Remez algorithm is an iterative algorithm for finding the optimal polynomial for a given function on a
    closed interval.
        Chebyshev showed that such a polynomial 'exists' and is 'unique', and meets the following:
            - If R(x) is a polynomial of degree N, then there are N+2 unknowns:
                the N+1 coefficients of the polynomial, and the maximal value of the error function.
            - The error function has N+1 roots, and N+2 extrema (minima and maxima).
            - The extrema alternate in sign, and all have the same magnitude.
        The key to finding this polynomial is locating the points within the closed interval that meet all
        three of these properties.
    If we know the locations of the extrema of the error function, then we can write N+2 simultaneous equations:
        R(x_i) + (-1)^i * E = f(x_i)
    where E is the maximal error term, and x_i are the abscissa values of the N+2 extrema of the error function.
    It is then trivial to solve the simultaneous equations to obtain the polynomial coefficients and the error term.
    Unfortunately we don't know where the extrema of the error function are located!

    The Remez method is used to locate (hopefully converging in a timely manner) such locations.

    1) Start with a 'good' estimate, using the Chebyshev roots as the points in question.
        note: these are only applicable on the interval [-1, 1], hence the Chebyshev roots need to be linearly
        mapped to the given interval [a, b].
    2) Use polynomial interpolation or any other method to locate the initial set of coefficients ...
    3) Locate all local extrema; there should be N+2 such locations, see: get_extrema
    4) Create a new solution (coefficients + error_term) using the extrema. If the error_term doesn't change
        by a certain amount, quit, since progress can no longer be made;
        otherwise use the new extrema as the locations and repeat steps 3, 4 ...
    """
    f = func if type(func) is numpy.ufunc else numpy.vectorize(func)  # vectorized non-numpy functions ...
    # numpy.pi is a float64 value, this should give us a bit more accuracy ...
    one, two, four, five, sixteen = map(float_type, (1, 2, 4, 5, 16))
    pi = sixteen * numpy.arctan(one / five) - four * numpy.arctan(one / float_type(239))
    chebyshev_nodes = numpy.cos(  # locate all needed chebyshev nodes ...
        (((two * degree + one - two * numpy.arange(0, degree + 1, dtype=float_type)) * pi)/(two * degree + two))
    )
    # linearly map chebyshev nodes from (-1, 1) to the giving interval, scale + offset ...
    x = (numpy.diff(interval) / two) * chebyshev_nodes + numpy.sum(interval) / two
    fx = f(x)
    coefficients = solve(numpy.vander(x), fx)  # solve the system ...
    # relative error function .. bind the current coefficients to it ...
    rel_error_func = lambda v, coefficients=coefficients, f=f: (numpy.polyval(coefficients, v) - f(v))/f(v)
    alternating_sign = alternating_signs((degree + 2,))
    delta_error_term, error_term = 10, 1000
    x = remez_get_extremas(rel_error_func, interval, roots=x)  # get extrema from the Chebyshev roots for the solution
    error = numpy.finfo(x.dtype).eps if error is None else error  # set the error to the floats machine epsilon ...
    while abs(delta_error_term) > error and maxiter:  # continue while making progress
        x = remez_get_extremas(
            lambda v, coefficients=coefficients, f=f: rel_error_func(v, coefficients, f), interval, x, accuracy=error
        )
        fx = f(x)
        new_solution = solve(  # solve the system of N + 2 equations to get a new solution and error term
            numpy.append(numpy.vander(x, degree + 1), (alternating_sign * numpy.abs(fx)).reshape(-1, 1), axis=1), fx
        )  # I think f(xi)*-1**i has to be added as the last term (E) in order for errorfunc to equioscillate at extrema
        delta_error_term = new_solution[-1] - error_term
        coefficients, error_term = new_solution[:-1], new_solution[-1]
        maxiter -= 1
    return coefficients
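A hedged sketch of the core linear-algebra step the docstring describes, in a simplified absolute-error form: solving R(x_i) + (-1)^i * E = f(x_i) for the polynomial coefficients and the error term, with numpy.exp as a stand-in function and equispaced points standing in for the true extrema:

import numpy as np

f = np.exp
degree = 3
x = np.linspace(-1, 1, degree + 2)          # hypothetical extrema locations
signs = (-1.0) ** np.arange(degree + 2)     # the alternating-sign column
system = np.append(np.vander(x, degree + 1), signs.reshape(-1, 1), axis=1)
solution = np.linalg.solve(system, f(x))
coefficients, error_term = solution[:-1], solution[-1]
print(np.polyval(coefficients, x) - f(x))   # alternates in sign, magnitude |error_term|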
Example #9
def polyfit(x, y, deg, rcond=None, full=False):
    """%s

    Notes
    -----
        Any masked values in x are propagated to y, and vice-versa.
    """
    order = int(deg) + 1
    x = asarray(x)
    mx = getmask(x)
    y = asarray(y)
    if y.ndim == 1:
        m = mask_or(mx, getmask(y))
    elif y.ndim == 2:
        y = mask_rows(y)
        my = getmask(y)
        if my is not nomask:
            m = mask_or(mx, my[:,0])
        else:
            m = mx
    else:
        raise TypeError("Expected a 1D or 2D array for y!")
    if m is not nomask:
        x[m] = y[m] = masked
    # Set rcond
    if rcond is None :
        if x.dtype in (np.single, np.csingle):
            rcond = len(x)*_single_eps
        else :
            rcond = len(x)*_double_eps
    # Scale x to improve condition number
    scale = abs(x).max()
    if scale != 0 :
        x = x / scale
    # solve least squares equation for powers of x
    v = vander(x, order)
    c, resids, rank, s = _lstsq(v, y.filled(0), rcond)
    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        warnings.warn("Polyfit may be poorly conditioned", np.RankWarning)
    # scale returned coefficients
    if scale != 0 :
        if c.ndim == 1 :
            c /= np.vander([scale], order)[0]
        else :
            c /= np.vander([scale], order).T
    if full :
        return c, resids, rank, s, rcond
    else :
        return c
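The rescaling step at the end of this polyfit can be checked in isolation: fitting against x/scale and dividing the coefficients by the corresponding powers of scale recovers coefficients in the original units. A small sketch with made-up data:

import numpy as np

x = np.linspace(0.0, 1000.0, 50)
y = 3.0 * x**2 - 2.0 * x + 1.0
order = 3
scale = abs(x).max()
c = np.linalg.lstsq(np.vander(x / scale, order), y, rcond=None)[0]
c /= np.vander([scale], order)[0]  # divide by [scale**2, scale, 1]
print(np.round(c, 6))              # ~[3., -2., 1.]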
Example #10
 def test_basic(self):
     c = np.array([0, 1, -2, 3])
     v = vander(c)
     powers = np.array([[ 0,  0, 0,  0, 1],
                        [ 1,  1, 1,  1, 1],
                        [16, -8, 4, -2, 1],
                        [81, 27, 9,  3, 1]])
     # Check default value of N:
     yield (assert_array_equal, v, powers[:, 1:])
     # Check a range of N values, including 0 and 5 (greater than default)
     m = powers.shape[1]
     for n in range(6):
         v = vander(c, N=n)
         yield (assert_array_equal, v, powers[:, m-n:m])
Example #11
def test_graph_laplacian():
    for mat in (np.arange(10) * np.arange(10)[:, np.newaxis],
                np.ones((7, 7)),
                np.eye(19),
                np.vander(np.arange(4)) + np.vander(np.arange(4)).T,):
        sp_mat = sparse.csr_matrix(mat)
        for normed in (True, False):
            laplacian = graph_laplacian(mat, normed=normed)
            n_nodes = mat.shape[0]
            if not normed:
                np.testing.assert_array_almost_equal(laplacian.sum(axis=0),
                                                     np.zeros(n_nodes))
            np.testing.assert_array_almost_equal(laplacian.T, laplacian)
            np.testing.assert_array_almost_equal(
                laplacian, graph_laplacian(sp_mat, normed=normed).toarray())
Example #12
def polynomial_expansion_Vandermonde(a, degree=1):
    """
    Performs polynomial expansion of given :math:`a` array using *Vandermonde*
    method.

    Parameters
    ----------
    a : array_like
        :math:`a` array to expand.
    degree : int, optional
        Expanded polynomial degree.

    Returns
    -------
    ndarray
        Expanded :math:`a` array.

    References
    ----------
    :cite:`Wikipedia2003e`

    Examples
    --------
    >>> RGB = np.array([0.17224810, 0.09170660, 0.06416938])
    >>> polynomial_expansion_Vandermonde(RGB)  # doctest: +ELLIPSIS
    array([ 0.1722481 ,  0.0917066 ,  0.06416938,  1.        ])
    """

    a = as_float_array(a)

    a_e = np.transpose(np.vander(np.ravel(a), degree + 1))
    a_e = np.hstack(a_e.reshape(a_e.shape[0], -1, 3))

    return np.squeeze(a_e[:, 0:a_e.shape[-1] - a.shape[-1] + 1])
Example #13
def polynomial_fit():
    """Load the data from housing.npy. Use least squares to calculate
    the polynomials of degree 3, 6, 9, and 12 that best fit the data.

    Plot the original data points and each least squares polynomial together
    in individual subplots.
    """
    # Load the data and define a more refined domain for plotting.
    year, index = np.load("housing.npy").T
    domain = np.linspace(year.min(), year.max(), 200)

    for i,n in enumerate([3, 6, 9, 12]):
        # Use least squares to compute the coefficients of the polynomial.
        coeffs = la.lstsq(np.vander(year, n+1), index)[0]
        # coeffs = np.polyfit(x, y, deg=n)

        # Plot the polynomial and the data points in an individual subplot.
        plt.subplot(2,2,i+1)
        plt.plot(year, index, 'k*')
        plt.plot(domain, np.polyval(coeffs, domain), 'b-', lw=2)
        plt.title(r"$n = {}$".format(n))
        # plt.axis([x.min(),x.max(),y.min(),y.max()])

    plt.suptitle("Solution to Problem 3")
    plt.show()
Example #14
def get_Ab(n,m):
	N = n + 1 				# N = n + 1
	X = np.linspace(-5,5,m)
	A = np.vander(X,N) # a highly recommended trick

	F = lambda x: 1 / (1 + x**2)
	return A, F(X)
Example #15
def reset_ramsey(res, degree=5):
    '''Ramsey's RESET specification test for linear models

    This is a general specification test, for additional non-linear effects
    in a model.


    Notes
    -----
    The test fits an auxiliary OLS regression where the design matrix, exog,
    is augmented by powers 2 to degree of the fitted values. Then it performs
    an F-test whether these additional terms are significant.

    If the p-value of the f-test is below a threshold, e.g. 0.1, then this
    indicates that there might be additional non-linear effects in the model
    and that the linear model is mis-specified.


    References
    ----------
    http://en.wikipedia.org/wiki/Ramsey_RESET_test

    '''
    order = degree + 1
    k_vars = res.model.exog.shape[1]
    #vander without constant and x:
    y_fitted_vander = np.vander(res.fittedvalues, order)[:, :-2] #drop constant
    exog = np.column_stack((res.model.exog, y_fitted_vander))
    res_aux = OLS(res.model.endog, exog).fit()
    #r_matrix = np.eye(degree, exog.shape[1], k_vars)
    r_matrix = np.eye(degree-1, exog.shape[1], k_vars)
    #df1 = degree - 1
    #df2 = exog.shape[0] - degree - res.df_model  (without constant)
    return res_aux.f_test(r_matrix) #, r_matrix, res_aux
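The `[:, :-2]` slice above keeps only powers 2 through degree of the fitted values, as the Notes say. A quick illustration with hypothetical fitted values:

import numpy as np

fitted = np.array([1.0, 2.0, 3.0])  # hypothetical fitted values
degree = 3
powers = np.vander(fitted, degree + 1)[:, :-2]
print(powers)  # columns: fitted**3, fitted**2 (constant and linear dropped)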
Example #16
def linest(*args, **kwargs):

    Y = args[0]
    X = args[1]
    
    if len(args) == 3:
        const = args[2]
        if isinstance(const,str):
            const = (const.lower() == "true")
    else:
        const = True
        
    degree = kwargs.get('degree',1)
    
    # build the vandermonde matrix
    A = np.vander(X, degree+1)
    
    if not const:
        # force the intercept to zero
        A[:,-1] = np.zeros(len(X))
    
    # perform the fit
    (coefs, residuals, rank, sing_vals) = np.linalg.lstsq(A, Y)
        
    return coefs
Example #17
def fit_continuum(x, y, ivars, order=6, nsigma=[0.3,3.0], maxniter=50):
    """Fit the continuum using sigma clipping

    Args:
        x: The wavelengths
        y: The log-fluxes
        ivars: The inverse variances of the fluxes; pixels with ivars == 0 are masked
        order: The polynomial order to use
        nsigma: The sigma clipping threshold: tuple (low, high)
        maxniter: The maximum number of iterations to do

    Returns:
        The value of the continuum at the wavelengths in x

    """
    A = np.vander(x - np.nanmean(x), order+1)
    m = np.ones(len(x), dtype=bool)
    for i in range(maxniter):
        m[ivars == 0] = 0  # mask out the bad pixels
        w = np.linalg.solve(np.dot(A[m].T, A[m]), np.dot(A[m].T, y[m]))
        mu = np.dot(A, w)
        resid = y - mu
        sigma = np.sqrt(np.nanmedian(resid**2))
        #m_new = np.abs(resid) < nsigma*sigma
        m_new = (resid > -nsigma[0]*sigma) & (resid < nsigma[1]*sigma)
        if m.sum() == m_new.sum():
            m = m_new
            break
        m = m_new
    return mu
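A hedged usage sketch for the fit_continuum above, on a synthetic log-flux spectrum: a smooth continuum plus narrow absorption lines, which the asymmetric thresholds in nsigma are meant to clip away:

import numpy as np

x = np.linspace(4000.0, 5000.0, 500)
continuum = 1e-4 * (x - 4500.0) + 0.02 * np.sin((x - 4000.0) / 300.0)
y = continuum.copy()
y[::50] -= 0.5                       # deep absorption features
ivars = np.ones_like(x)              # all pixels nominally good
mu = fit_continuum(x, y, ivars, order=3)
print(np.abs(mu - continuum).max())  # small once the lines are clipped out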
Example #18
    def __call__(self, xnew):
        saveshape = np.shape(xnew)
        xnew = np.ravel(xnew)
        res = np.empty_like(xnew)
        mask = (self.a <= xnew) & (xnew <= self.b)
        res[~mask] = self.fill
        xx = xnew.compress(mask)
        indxs = np.searchsorted(self.breaks[:-1], xx) - 1
        indxs = indxs.clip(0, len(self.breaks))
        pp = self.coeffs
        dx = xx - self.breaks.take(indxs)
        if True:
            v = pp[0, indxs]
            for i in range(1, self.order):
                v = dx * v + pp[i, indxs]
            values = v
        else:
            V = np.vander(dx, N=self.order)
            # values = np.diag(dot(V,pp[:,indxs]))
            dot = np.dot
            values = np.array([dot(V[k, :], pp[:, indxs[k]])
                              for k in range(len(xx))])

        res[mask] = values
        res.shape = saveshape
        return res
Example #19
def lomb_scargle_estimator(x, y, yerr=None,
                           min_period=None, max_period=None,
                           filter_period=None,
                           max_peaks=2,
                           **kwargs):
    """Estimate period of a time series using the periodogram

    Args:
        x (ndarray[N]): The times of the observations
        y (ndarray[N]): The observations at times ``x``
        yerr (Optional[ndarray[N]]): The uncertainties on ``y``
        min_period (Optional[float]): The minimum period to consider
        max_period (Optional[float]): The maximum period to consider
        filter_period (Optional[float]): If given, use a high-pass filter to
            down-weight periods longer than this
        max_peaks (Optional[int]): The maximum number of peaks to return
            (default: 2)

    Returns:
        A dictionary with the computed ``periodogram`` and the parameters for
        up to ``max_peaks`` peaks in the periodogram.

    """
    if min_period is not None:
        kwargs["maximum_frequency"] = 1.0 / min_period
    if max_period is not None:
        kwargs["minimum_frequency"] = 1.0 / max_period

    # Estimate the power spectrum
    model = LombScargle(x, y, yerr)
    freq, power = model.autopower(method="fast", normalization="psd", **kwargs)
    power /= len(x)
    power_est = np.array(power)

    # Filter long periods
    if filter_period is not None:
        freq0 = 1.0 / filter_period
        filt = 1.0 / np.sqrt(1 + (freq0 / freq) ** (2*3))
        power *= filt

    # Find and fit peaks
    peak_inds = (power[1:-1] > power[:-2]) & (power[1:-1] > power[2:])
    peak_inds = np.arange(1, len(power)-1)[peak_inds]
    peak_inds = peak_inds[np.argsort(power[peak_inds])][::-1]
    peaks = []
    for i in peak_inds[:max_peaks]:
        A = np.vander(freq[i-1:i+2], 3)
        w = np.linalg.solve(A, np.log(power[i-1:i+2]))
        sigma2 = -0.5 / w[0]
        freq0 = w[1] * sigma2
        peaks.append(dict(
            log_power=w[2] + 0.5*freq0**2 / sigma2,
            period=1.0 / freq0,
            period_uncert=np.sqrt(sigma2 / freq0**4),
        ))

    return dict(
        periodogram=(freq, power_est),
        peaks=peaks,
    )
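The peak refinement above fits a parabola in log power through the three points around each peak via a 3x3 Vandermonde solve. A standalone check with a symmetric toy peak:

import numpy as np

freq = np.array([0.9, 1.0, 1.1])
log_power = np.array([-0.5, 0.0, -0.5])  # peak exactly at freq = 1.0
w = np.linalg.solve(np.vander(freq, 3), log_power)
sigma2 = -0.5 / w[0]
freq0 = w[1] * sigma2                    # vertex of the parabola, -w[1]/(2*w[0])
print(freq0, 1.0 / freq0)                # refined frequency 1.0 and its period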
Example #20
    def test_dtypes(self):
        c = array([11, -12, 13], dtype=np.int8)
        v = vander(c)
        expected = np.array([[121,  11, 1],
                             [144, -12, 1],
                             [169,  13, 1]])
        yield (assert_array_equal, v, expected)

        c = array([1.0+1j, 1.0-1j])
        v = vander(c, N=3)
        expected = np.array([[ 2j, 1+1j, 1],
                             [-2j, 1-1j, 1]])
        # The data is floating point, but the values are small integers,
        # so assert_array_equal *should* be safe here (rather than, say,
        # assert_array_almost_equal).
        yield (assert_array_equal, v, expected)
Example #21
 def setUp(self):
     # Quadratic function centred at p
     func = lambda p, x: ((x - p) ** 2).sum()
     self.vFunc = vectorize_fit_func(func)
     self.true_params = np.array([1, -4])
     self.init_params = np.array([0, 0])
     self.x = 4.0 * np.random.randn(2, 20)
     self.y = self.vFunc(self.true_params, self.x)
     # 2-D log Gaussian function
     def lngauss_diagcov(p, x):
         xminmu = x - p[:2, np.newaxis]
         return p[4] - 0.5 * np.dot(p[2:4], xminmu * xminmu)
     self.func2 = lngauss_diagcov
     self.true_params2 = np.array([3, -2, 10, 10, 4])
     self.init_params2 = np.array([0, 0, 1, 1, 0])
     self.x2 = np.random.randn(2, 80)
     self.y2 = lngauss_diagcov(self.true_params2, self.x2)
     # Linear function
     self.func3 = lambda p, x: np.dot(p, x)
     self.jac3 = lambda p, x: x
     self.true_params3 = np.array([-0.1, 0.2, -0.3, 0.0, 0.5])
     self.init_params3 = np.zeros(5)
     self.enabled_params_int = [0, 1, 2, 4]
     self.enabled_params_bool = [True, True, True, False, True]
     t = np.arange(0, 10., 10. / 100)
     self.x3 = np.vander(t, 5).T
     self.y3 = self.func3(self.true_params3, self.x3)
Example #22
def test_vand():
    """Simple test of vand."""
    n = 10
    a = np.arange(1, n)
    b = rogues.vand(a)
    c = np.vander(a)
    npt.assert_equal(b, c.T[::-1, :])
Example #23
def linest(*args, **kwargs): # Excel reference: https://support.office.com/en-us/article/LINEST-function-84d7d0d9-6e50-4101-977a-fa7abf772b6d

    Y = args[0].values()
    X = args[1].values()
    
    if len(args) == 3:
        const = args[2]
        if isinstance(const,str):
            const = (const.lower() == "true")
    else:
        const = True
        
    degree = kwargs.get('degree',1)
    
    # build the vandermonde matrix
    A = np.vander(X, degree+1)
    
    if not const:
        # force the intercept to zero
        A[:,-1] = np.zeros(len(X))
    
    # perform the fit
    (coefs, residuals, rank, sing_vals) = np.linalg.lstsq(A, Y)
        
    return coefs
Example #24
def polybkdfit(q,sq,porder):
    '''
    Module to fit a polynomial background to s(q).
    
    Inputs: array q, array s(q), and desired order of the background polynomial to be fit.
    
    Returns: polynomial coefficients in array p.
    '''
    qscale=q[-1]
    qsc = q / qscale
    Mv0 = np.vander(qsc,porder+1)
    Mv1 = Mv0[:,:-1]
    yfq = q * (sq - 1.0)
    p,resids,rank,s=lstsq(Mv1,yfq)
    p /= np.vander([qscale],porder+1)[0,:-1]
    return p
Example #25
def lsa(n):
   
   points=[]
   yvals=[]
   xvals=[]
   start = time.time()
   for i in np.arange(-5.,5.,(10./(n))):
      points.append([i,1./(1.+i**2.)])
      yvals.append(1./(1.+i**2.))
      xvals.append(i)
   points.append([5.,1./(1.+5.**2.)])
   yvals.append(1./(1.+5.**2.))
   xvals.append(5.)
   yvals = np.array(yvals)
   xvals = np.array(xvals)
   A = np.vander(xvals, n)
   # find the x that minimizes the norm of Ax-y
   (coeffs, residuals, rank, sing_vals) = np.linalg.lstsq(A, yvals)
   # create a polynomial using coefficients
   f = np.poly1d(coeffs)
   end = time.time()
   print "The least square approximation is: " + str(f)
   print "It took  " + str(end-start) + " seconds to calculate"
   ss_err=(residuals**2).sum()
   ss_tot=((yvals-yvals.mean())**2).sum()
   rsquared=1-(ss_err/ss_tot)
   print("The rsquared value is: " + str(rsquared))
   return f
Example #26
  def __init__(s):
    '''Initialize the beat detector. Screwing with the config is up to you.'''

    # The number of running samples with which to compare the new sample
    s.n = 256


    # Parameters for the incoming data:

    # Audio sample rate:
    s.sample_freq = 44100
    
    # The number of samples considered at a time in the audio input:
    s.window_width = 1024

    # Just precompute this since we reference it a bunch:
    s.window_halfwidth = s.window_width//2

    # Precalculate a window function for the fft:
    s.window = hamming_window(s.window_width)

    # Width of a window of data, in seconds:
    s.window_dt = 1.0*s.window_width / s.sample_freq

    s.spectrum = Signal.from_time_domain(n=s.n, dt=s.window_dt)
    s.cepstrum = Signal.from_time_domain(n=s.n*8, dt=s.window_dt)

    s.beat_spectrum_window = flattop_window(s.spectrum.n)

    # A reference to the function with which we compute the spectrum of each segment
    # of data:
    s.spectrum_function = summed_spec

    # There's really no sense storing a full similarity matrix. Instead, let's just
    # accumulate data into a running beat spectrum equivalent to diagonal sums of the
    # similarity matrix. Of course since we're not storing the whole thing, and since
    # it's a realtime algorithm, we'll have to do a decaying sort of sum.
    s.diagonal_sums = np.zeros(s.n)

    s.circular_buffer = np.zeros((s.n,s.window_width//2+1))
    s.circular_buffer_position = 0

    # Characteristic decay time of the beat spectrum:
    s.halflife = 0.5

    # The window shifts by half of the window width due to the half-window overlap,
    # so the time constant accounts for this:
    s.decay_factor = np.exp( -np.log(2.0) * ( s.window_dt * 0.5 ) / s.halflife )

    s.l_halfprev = None
    s.r_halfprev = None

    # Precalculate the matrices necessary for a linear fit:
    s.A = np.vander(np.arange(s.n),3)
    s.At = np.linalg.pinv(s.A)

    # Time-average the cepstrum to suppress fluctuations
    s.time_averaged_cepstrum = Signal.from_time_domain(n=s.cepstrum.n, dt=s.window_dt)
    s.time_averaged_cepstrum.yf
Example #27
def test_softmask_int():
    X = 2 * np.ones((3,3), dtype=np.int32)
    X_ref = np.vander(np.arange(3))

    M1 = librosa.util.softmask(X, X_ref, power=1)
    M2 = librosa.util.softmask(X_ref, X, power=1)

    assert np.allclose(M1 + M2, 1)
Example #28
    def __init__(self, x, y, fitdegree=1):
        order = fitdegree + 1
        self.X = np.asarray(x)
        self.Y = np.asarray(y)
        self._ols = sm.OLS(self.Y, np.vander(self.X, order))


        self.results = self._ols.fit()
Example #29
 def setUp(self):
     self.params = np.array([0.1, -0.2, 0.0, 0.5, 0.5])
     self.N = 1000
     self.x = np.random.randn(len(self.params), self.N)
     self.y = np.dot(self.params, self.x)
     t = np.arange(0., 10., 10. / self.N)
     self.poly_x = np.vander(t, 5).T
     self.poly_y = np.dot(self.params, self.poly_x)
Example #30
def pseudocal(prdict,rsHH):
    '''
    The long-awaited and poorly understood pseudo-calibration.
    Takes the values of rsHH and aligns them according to the
    comparison with the statistics derived from prdict.
    '''
    # CHANNELS 212
    degree = 2
    # X axis
    xi = prdict['stat01']
    A = np.vander(xi,degree)
    # Y axes
    y = prdict['stat02']
    (coeffs2, residuals2, rank2, sing_vals2) = np.linalg.lstsq(A, y)
    #a,b,c = coeffs2[0],coeffs2[1],coeffs2[2]
    print(coeffs2)
    f2 = np.poly1d(coeffs2)
    rsHH['adcval'][:,1] = (rsHH['adcval'][:,1]-coeffs2[1])/coeffs2[0]
    #integrato12.adcval[:,1] = np.sqrt((integrato12.adcval[:,1]-c)/a + b**2/(4*a))-b/(2*a)
    y = prdict['stat03']
    (coeffs3, residuals3, rank3, sing_vals3) = np.linalg.lstsq(A, y)
    #a,b,c = coeffs3[0],coeffs3[1],coeffs3[2]
    print(coeffs3)
    f3 = np.poly1d(coeffs3)
    rsHH['adcval'][:,2] = (rsHH['adcval'][:,2]-coeffs3[1])/coeffs3[0]
    #integrato12.adcval[:,2] = np.sqrt((integrato12.adcval[:,2]-c)/a + b**2/(4*a))-b/(2*a)
    y = prdict['stat04']
    (coeffs4, residuals4, rank4, sing_vals4) = np.linalg.lstsq(A, y)
    #a,b,c = coeffs4[0],coeffs4[1],coeffs4[2]
    print(coeffs4)
    f4 = np.poly1d(coeffs4)
    rsHH['adcval'][:,3] = (rsHH['adcval'][:,3]-coeffs4[1])/coeffs4[0]
    #integrato12.adcval[:,3] = np.sqrt((integrato12.adcval[:,3]-c)/a + b**2/(4*a))-b/(2*a 
    # CHANNELS 405
    degree = 2
    # X axis
    xi = prdict['stat06']
    A = np.vander(xi,degree)
    # Y axes
    y = prdict['stat05']
    (coeffs5, residuals5, rank5, sing_vals5) = np.linalg.lstsq(A, y)
    print(coeffs5)
    f5 = np.poly1d(coeffs5)
    rsHH['adcval'][:,4] = (rsHH['adcval'][:,4]-coeffs5[1])/coeffs5[0]
    return rsHH
Example #31
def detrend(x, order=1, axis=0):
    """
    Detrend an array with a trend of given order along axis 0 or 1

    Parameters
    ----------
    x : array_like, 1d or 2d
        data, if 2d, then each row or column is independently detrended with the
        same trendorder, but independent trend estimates
    order : int
        specifies the polynomial order of the trend, zero is constant, one is
        linear trend, two is quadratic trend
    axis : int
        axis can be either 0, observations by rows,
        or 1, observations by columns

    Returns
    -------
    detrended data series : ndarray
        The detrended series is the residual of the linear regression of the
        data on the trend of given order.
    """
    order = int_like(order, 'order')
    axis = int_like(axis, 'axis')

    if x.ndim == 2 and int(axis) == 1:
        x = x.T
    elif x.ndim > 2:
        raise NotImplementedError(
            'x.ndim > 2 is not implemented until it is needed')

    nobs = x.shape[0]
    if order == 0:
        # Special case demean
        resid = x - x.mean(axis=0)
    else:
        trends = np.vander(np.arange(float(nobs)), N=order + 1)
        beta = np.linalg.pinv(trends).dot(x)
        resid = x - np.dot(trends, beta)

    if x.ndim == 2 and int(axis) == 1:
        resid = resid.T

    return resid
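A quick check of the regression step above: regressing on the Vandermonde trend basis and keeping the residual removes a pure linear trend to floating-point precision.

import numpy as np

nobs = 100
x = 0.5 * np.arange(nobs) + 3.0                   # pure linear trend
trends = np.vander(np.arange(float(nobs)), N=2)   # columns [t, 1]
beta = np.linalg.pinv(trends).dot(x)
resid = x - np.dot(trends, beta)
print(np.abs(resid).max())                        # ~0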
Example #32
def _test_solve(alpha_real,
                beta_real,
                alpha_complex_real,
                alpha_complex_imag,
                beta_complex_real,
                beta_complex_imag,
                seed=42,
                with_general=False):
    solver = celerite.CholeskySolver()
    np.random.seed(seed)
    t = np.sort(np.random.rand(500))
    diag = np.random.uniform(0.1, 0.5, len(t))
    b = np.random.randn(len(t))

    with pytest.raises(RuntimeError):
        solver.log_determinant()
    with pytest.raises(RuntimeError):
        solver.dot_solve(b)

    if with_general:
        U = np.vander(t - np.mean(t), 4).T
        V = U * np.random.rand(4)[:, None]
        A = np.sum(U * V, axis=0) + 1e-8
    else:
        A = np.empty(0)
        U = np.empty((0, 0))
        V = np.empty((0, 0))

    solver.compute(0.0, alpha_real, beta_real, alpha_complex_real,
                   alpha_complex_imag, beta_complex_real, beta_complex_imag, A,
                   U, V, t, diag)
    K = get_kernel_value(alpha_real, beta_real, alpha_complex_real,
                         alpha_complex_imag, beta_complex_real,
                         beta_complex_imag, t[:, None] - t[None, :])
    K[np.diag_indices_from(K)] += diag

    if len(A):
        K[np.diag_indices_from(K)] += A
        K += np.tril(np.dot(U.T, V), -1) + np.triu(np.dot(V.T, U), 1)

    assert np.allclose(solver.solve(b).T, np.linalg.solve(K, b))

    b = np.random.randn(len(t), 5)
    assert np.allclose(solver.solve(b), np.linalg.solve(K, b))
Example #33
    def plot(self, xlab='$x$', ylab='$y$'):
        # plot the data points
        plt.errorbar(self.xi, self.yi, yerr=self.dyi, fmt='.k')

        # do some shimmying to get quantile bounds
        xa = np.linspace(self.xi.min(), self.xi.max(), 100)
        A = np.vander(xa, 2)
        # generate all possible lines
        lines = np.dot(
            np.hstack((np.zeros_like(self.trace[:]), self.trace[:])), A.T)
        quantiles = np.percentile(lines, [16, 84], axis=0)
        plt.fill_between(xa,
                         quantiles[0],
                         quantiles[1],
                         color="#8d44ad",
                         alpha=0.5)

        plt.xlabel(xlab)
        plt.ylabel(ylab)
Example #34
    def calculate_weights(self, nsing):

        assert self.gamma is not None
        assert self.gamma.size > 0

        # Build Vandermonde matrix from c-eigenroots.
        self.vand = np.vander(self.gamma, N=2 * self.N + 1).transpose()[::-1]

        # Normalize Vandermonde columns to improve conditioning of least squares by SVD.
        self.vand_norm = np.linalg.norm(self.vand, axis=0)
        self.vand /= self.vand_norm
        self.vand[np.where(abs(self.vand) < 1.e-14)] = 0

        # Change basis from complex exponentials to oscillating real exponentials.
        lamda_t = np.log(self.vand)
        omega_real_t = np.real(lamda_t)
        omega_imag_t = np.imag(lamda_t)
        self.vand = abs(self.vand)

        # Indices where weights correspond to damped cosines, damped sines, and monotonically damped functions.
        self.cos_inds = np.where(omega_imag_t[1, :] > 1.e-14)[0]
        self.sin_inds = np.where(omega_imag_t[1, :] < -1.e-14)[0]
        self.monotonic_inds = np.where(abs(omega_imag_t[1, :]) <= 1.e-14)[0]

        for i, wt in enumerate(omega_imag_t[1, :]):

            if wt > 1.e-14:
                self.vand[:, i] *= np.cos(omega_imag_t[:, i])
            elif wt < -1.e-14:
                self.vand[:, i] *= -np.sin(omega_imag_t[:, i])

        # Calculate Prony weights using least squares fit.
        lstsq_ret = scipy.linalg.lstsq(self.vand, self.h)
        self.weights = lstsq_ret[0]

        # Remove small weights.
        self.weights[np.where(abs(self.gamma) < 1.e-14)] = 0.

        # Sort weights.
        inds = np.argsort(abs(self.weights))

        # Set small weights to zero.
        self.weights[inds[:-nsing]] = 0
Example #35
 def reconstruct(self):
     """
     Reconstructs data with fitted `modes`, `eigvals` and `b` parameters.
     :return reconstructed snapshots
     """
     if not self.fitted:
         raise RuntimeError('Model is not fitted yet.')
     return self._DOT(
         self._DOT(
             self.modes,
             np.diag(self.eigvals)
         ),
         self._ELEMWISE_DOT(
             np.vander(self.eigvals,
                       N=self.time_series_size,
                       increasing=True),
             self.b.reshape(-1, 1)
         )
     )
Example #36
def _lambdas_ONeill2016(df, radii, params=None):
    """
    Implementation of the original algorithm. [#ref_1]_

    Parameters
    -----------
    df : :class:`pandas.DataFrame`
        Dataframe of REE data, with sample analyses organised by row.
    radii : :class:`list`, :class:`numpy.ndarray`
        Radii at which to evaluate the orthogonal polynomial.
    params : :class:`tuple`
        Tuple of constants for the orthogonal polynomial.

    Returns
    --------
    :class:`pandas.DataFrame`

    See Also
    ---------
    :func:`~pyrolite.util.lambdas.orthogonal_polynomial_constants`
    :func:`~pyrolite.geochem.transform.lambda_lnREE`

    References
    -----------
    .. [#ref_1] O’Neill HSC (2016) The Smoothness and Shapes of Chondrite-normalized
           Rare Earth Element Patterns in Basalts. J Petrology 57:1463–1508.
           doi: `10.1093/petrology/egw047 <https://dx.doi.org/10.1093/petrology/egw047>`__
    """
    assert params is not None
    degree = len(params)
    # initialise the dataframe
    lambdas = pd.DataFrame(
        index=df.index,
        columns=[chr(955) + str(d) for d in range(degree)],
        dtype="float32",
    )
    A = get_polynomial_matrix(radii, params=params)
    invA = np.linalg.inv(A)
    V = np.vander(radii, degree, increasing=True).T
    for row in range(df.index.size):
        Z = (df.iloc[row, :].fillna(0).values * V).sum(axis=-1)
        lambdas.iloc[row, :] = invA @ Z
    return lambdas
Example #37
 def _ortho_poly_fit(self, x, degree=1, dictionary={}):
     """Helper method to fit an orthogonal polynomial in the GLM.  This function
     should generally not be called by end-user and can be hidden."""
     n = degree + 1
     if dictionary != {}:
         x = x.map(dictionary)
     x = np.asarray(x).flatten()
     xbar = np.mean(x)
     x = x - xbar
     X = np.fliplr(np.vander(x, n))
     q, r = np.linalg.qr(X)
     z = np.diag(np.diag(r))
     raw = np.dot(q, z)
     norm2 = np.sum(raw ** 2, axis=0)
     alpha = (np.sum((raw ** 2) * np.reshape(x, (-1, 1)), axis=0) / norm2 + xbar)[
         :degree
     ]
     Z = raw / np.sqrt(norm2)
     return Z, norm2, alpha
Example #38
def get_rmat_vand(krs, grid_r):
    """
    Given uniform grid of n  ranges and column vector of m 
    wavenumbers krs, compute "rmat" --> rmat_{mn} = e^{jk_{m}r_{n}}
    Doesn't compute the range spreading and attenuation terms
    Use the fact that you can factor out the first column of rmat,
    and the remaining matrix is vandermonde.
    Python is quick with the vandermonde
    """
    #r_mat= np.outer(krs, r-r_corr[index])
    n = grid_r.size
    m = krs.size
    r0 = grid_r[0]
    dr = grid_r[1] - grid_r[0]
    offset = np.exp(complex(0,1)*krs.real*r0).reshape((m,1)).astype(np.complex128)
    base = np.exp(complex(0,1)*krs.real*dr).reshape(m).astype(np.complex128)
    exp_mat = np.vander(base, N=n, increasing=True)
    rmat = exp_mat * offset
    return rmat
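A check of the factorisation (using the get_rmat_vand defined above): exp(1j*k*(r0 + n*dr)) is the per-row offset exp(1j*k*r0) times an increasing Vandermonde matrix in the ratio exp(1j*k*dr).

import numpy as np

krs = np.array([0.3, 0.7, 1.1])
grid_r = 50.0 + 0.5 * np.arange(8)            # uniform range grid
direct = np.exp(1j * np.outer(krs, grid_r))   # brute-force outer product
rmat = get_rmat_vand(krs, grid_r)
print(np.allclose(direct, rmat))              # True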
Example #39
 def fit(self, train_X, train_Y):
     """ This method fits the model parameters, given the training inputs and outputs.
     parameters = (X^{T}X)^{-1}X^{T}Y
     Args:
     - train_X (ndarray (shape: (N, 1))): A column vector of N scalar training inputs.
     - train_Y (ndarray (shape: (N, 1))): A column vector of N scalar training outputs.
     """
     assert train_X.shape == train_Y.shape and train_X.shape == (
         train_X.shape[0], 1
     ), f"input and/or output has incorrect shape (train_X: {train_X.shape}, train_Y: {train_Y.shape})."
     assert train_X.shape[
         0] >= self.K, f"require more data points to fit a polynomial (train_X: {train_X.shape}, K: {self.K})."
     print("X is ")
     print(train_X.flatten())
     v = np.vander(train_X.flatten(), self.K + 1, increasing=True)
     p1 = np.linalg.inv(v.T @ v)
     p2 = v.T @ train_Y
     self.parameters = p1 @ p2
     assert self.parameters.shape == (self.K + 1, 1)
Example #40
def fit_continuum(x, y, ivars, order=6, nsigma=[0.3, 3.0], maxniter=50):
    """Fit the continuum using sigma clipping

    Args:
        x: The wavelengths
        y: The log-fluxes
        ivars: The inverse variances of the fluxes; pixels with ivars == 0 are masked
        order: The polynomial order to use
        nsigma: The sigma clipping threshold: tuple (low, high)
        maxniter: The maximum number of iterations to do

    Returns:
        The value of the continuum at the wavelengths in x

    """
    A = np.vander(x - np.nanmean(x), order + 1)
    m = np.ones(len(x), dtype=bool)
    for i in range(maxniter):
        m[ivars == 0] = 0  # mask out the bad pixels
        #w = np.linalg.solve(np.dot(A[m].T, A[m]), np.dot(A[m].T, y[m]))

        ###for diagnosis of numpy.linalg.LinAlgError: Singular matrix
        try:
            w = np.linalg.solve(np.dot(A[m].T, A[m]), np.dot(A[m].T, y[m]))

            mu = np.dot(A, w)
            resid = y - mu
            sigma = np.sqrt(np.nanmedian(resid**2))
            #m_new = np.abs(resid) < nsigma*sigma
            m_new = (resid > -nsigma[0] * sigma) & (resid < nsigma[1] * sigma)
            if m.sum() == m_new.sum():
                m = m_new
                break
            m = m_new
        except Exception:
            print("Unexpected error:", sys.exc_info()[0])
            print(A, y)
            print(A.shape, y.shape)
            print("using mask m:")
            print(A[m], y[m])
            print(A[m].shape, y[m].shape)
            raise Exception("Error during continuum normalization: ",
                            sys.exc_info()[0], "Dropping this order")
    return mu
Example #41
    def fit(self, x_data, y_data):
        """
        Fit the estimator to the 1D data.
        """
        # Create a Vandermonde matrix
        X_data = np.vander(x_data, N=self.degree + 1, increasing=True)

        # The left hand side and right hand side of the equation for ridge
        # regularized least squares. Computing X.T * X is the expensive part
        # Could have used np.linalg.solve
        lhs = np.dot(X_data.T, X_data) + np.eye(self.degree + 1) * self.alpha
        rhs = np.dot(X_data.T, y_data)

        # Solve the linear equation. np.linalg.solve is faster than
        # np.linalg.lstsq, but we assume that the system of equations has
        # a unique solution here. I.e. not under- or overdetermined.
        w = np.linalg.solve(lhs, rhs)
        self.w_ = w
        return self
Example #42
    def set_poly_model(self, scale=2, num_terms=4):
        """Set the polynomial model parameters. 
        
        The polynomial model is used to capture long term trends in the data
        believed to be signal and not background noise (such as supernova lightcurves). This method is essentially 
        calling the ``numpy.vander()`` method.

        Args:
            scale (Optional[float]): Scales the input vector to pass to ``numpy.vander``.
                The larger this value, the more flexibility the polynomial model will have for a given number of powers.
            num_terms (Optional[int]): Specify the number of "powers" to use in the polynomial model.
                As the first power is the intercept, the highest power is ``num_terms - 1``. 

        """
        self.scale = scale
        self.input_vector = scale * self.normalized_time
        self.num_terms = num_terms  # With intercept
        self.m = np.vander(self.input_vector, N=num_terms,
                           increasing=False)  # With intercept
Example #43
def toom_cook_mats_w_pts(r, n, pts):
    assert (len(pts) == r + n - 1)
    use_infty = np.infty in pts
    if (use_infty and pts[-1] != np.infty):
        raise Exception('np.infty must be the last node choice')

    V = np.vander(pts, increasing=True)
    if (use_infty):
        V[-1, :] = 0
        V[-1, -1] = 1

    A = V[:, :r].copy()
    B = V[:, :n].copy()
    if (use_infty):
        A[-1, -1] = 1
        B[-1, -1] = 1

    C = la.inv(V)
    return [A.T, B.T, C]
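A hedged check of the matrices returned by toom_cook_mats_w_pts above (assuming the module's la is numpy.linalg or scipy.linalg, and finite nodes only): evaluate both operand polynomials at the nodes, multiply pointwise, and interpolate the product back to coefficients.

import numpy as np

r, n = 3, 3
pts = [-2, -1, 0, 1, 2]             # r + n - 1 = 5 evaluation nodes
At, Bt, C = toom_cook_mats_w_pts(r, n, pts)
f = np.array([1.0, 2.0, 3.0])       # 1 + 2x + 3x^2, increasing coefficient order
g = np.array([4.0, 5.0, 6.0])       # 4 + 5x + 6x^2
product = C @ ((At.T @ f) * (Bt.T @ g))
print(np.round(product, 6))         # [4, 13, 28, 27, 18] = coefficients of f*g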
Example #44
def fit_gravity(data, deg=2, **kwargs):
    """Polynomial fit of the gravity values.

    """

    # endog
    endog = np.asarray(data.g)

    # design_matrix
    exog = np.vander(data.level.values, N=deg + 1, increasing=True)[:, 1:]

    # rename unknowns
    poly_cnames = [x for x in ascii_lowercase[:deg]]
    exog = pd.DataFrame(exog, columns=poly_cnames)

    # fit
    results = WLS(endog, exog, **kwargs).fit()

    return results
Example #45
    def testPatchify(self):
        from lazyflow.operators.opDetectMissingData import _patchify as patchify

        X = np.vander(np.arange(2, 5))
        ''' results in 
        X = array([[ 4,  2,  1],
                   [ 9,  3,  1],
                   [16,  4,  1]])
        '''
        (patches, slices) = patchify(X, 1, 1)

        expected = [np.array([[4,2],[9,3]]), \
                    np.array([[4,2,1],[9,3,1]]), \
                    np.array([[2,1],[3,1]]), \
                    np.array([[4,2],[9,3],[16,4]]), \
                    np.array([[4,2,1],[9,3,1],[16,4,1]]), \
                    np.array([[2,1],[3,1],[4,1]]), \
                    np.array([[9,3],[16,4]]), \
                    np.array([[9,3,1],[16,4,1]]), \
                    np.array([[3,1],[4,1]])]

        expSlices = [(slice(0,1),slice(0,1)), \
                  (slice(0,1), slice(1,2)), \
                  (slice(0,1), slice(2,3)), \
                  (slice(1,2), slice(0,1)), \
                  (slice(1,2), slice(1,2)), \
                  (slice(1,2), slice(2,3)), \
                  (slice(2,3), slice(0,1)), \
                  (slice(2,3), slice(1,2)), \
                  (slice(2,3), slice(2,3))]

        for ep, s in zip(expected, expSlices):
            #check if patch is in the result
            has = False
            for i, p in enumerate(patches):
                if np.all(p == ep):
                    has = True
                    # check if slice is ok
                    self.assertEqual(s, slices[i])

            assert has, "Missing patch {}".format(ep)
            pass
Example #46
    def __init__(self, t, A, order=3, maxn=32, poly_order=3, max_decay=10):
        self.order = order
        self.maxn = min(A.shape[1], maxn)

        # First order
        A = np.array(A[:, np.argsort(np.median(A, axis=0))[::-1]])
        A -= np.mean(A, axis=0)[None, :]

        # Higher order blocks
        blocks = [A]
        for order in range(2, self.order + 1):
            A2 = np.product(list(
                combinations_with_replacement(A[:, :maxn].T, order)),
                            axis=1).T
            U, S, V = np.linalg.svd(A2 - np.mean(A2, axis=0),
                                    full_matrices=True)
            block = U[:, :maxn] - np.mean(U[:, :maxn], axis=0)[None, :]
            blocks.append(block)

        # Polynomial block
        tt = 2 * (t - t.min()) / (t.max() - t.min()) - 1
        blocks.append(np.vander(tt, poly_order + 1))

        # Initial decay block
        dt = t - t.min()
        decay = np.exp(-dt[:, None] /
                       np.arange(1, max_decay + 1, 1.0)[None, :])
        blocks.append(decay)

        # Combine the blocks.
        # block_inds tracks the beginning and end of each block in columns of A
        self.A = np.concatenate(blocks, axis=1)
        self.block_sizes = np.array([block.shape[1] for block in blocks])
        block_inds = np.append(0, np.cumsum(self.block_sizes))
        self.block_inds = list(zip(block_inds[:-1], block_inds[1:]))
        self.nblocks = len(self.block_sizes)

        params = dict(
            ("log_lambda_{0}".format(i), 0.0) for i in range(self.nblocks))
        self.parameter_names = tuple(sorted(params.keys()))
        params["bounds"] = [(-5.0, 5.0) for i in range(self.nblocks)]
        super(PLDModel, self).__init__(**params)
Example #47
    def __init__(self, L, order):
        assert L.ref_el == FIAT.ufc_simplex(1)

        for ell in L.dual.nodes:
            assert isinstance(ell, FIAT.functional.PointEvaluation)

        c = numpy.asarray([list(ell.pt_dict.keys())[0][0]
                           for ell in L.dual.nodes])

        num_stages = len(c)

        Q = FIAT.make_quadrature(L.ref_el, 2*num_stages)
        qpts = Q.get_points()
        qwts = Q.get_weights()

        Lvals = L.tabulate(0, qpts)[0, ]

        # integrates them all!
        b = Lvals @ qwts

        # now for A, which means we have to adjust the interval
        A = numpy.zeros((num_stages, num_stages))
        for i in range(num_stages):
            qpts_i = qpts * c[i]
            qwts_i = qwts * c[i]
            Lvals_i = L.tabulate(0, qpts_i)[0, ]
            A[i, :] = Lvals_i @ qwts_i

        Aexplicit = numpy.zeros((num_stages, num_stages))
        for i in range(num_stages):
            qpts_i = 1 + qpts * c[i]
            qwts_i = qwts * c[i]
            Lvals_i = L.tabulate(0, qpts_i)[0, ]
            Aexplicit[i, :] = Lvals_i @ qwts_i

        self.Aexplicit = Aexplicit

        V = vander(c, increasing=True)
        rhs = numpy.array([1.0/(s+1) for s in range(num_stages-1)] + [0])
        btilde = numpy.linalg.solve(V.T, rhs)

        super(CollocationButcherTableau, self).__init__(A, b, btilde, c, order)
Example #48
def handle_S_m_degeneracy(rhoStart, which_index=1):
    """
    here, which_index is such that:
    which_index = 0 returns NESS corresponding to M = 2
    which_index = 1 returns NESS corresponding to M = 0
    which_index = 2 returns NESS corresponding to M = -2
    which_index = 3 returns NESS corresponding to M = 4
    which_index = 4 returns NESS corresponding to M = -4

    """
    rhoTemp = rhoStart
    for i in range(num_qubits):
        for j in range(num_qubits):
            if i == j:
                continue
            else:
                rhoTempTemp = rhoTemp - S_m @ rhoStart @ S_m.conj().T
                factor = 1 - np.exp(1j * (S_m_eigvals[i] - S_m_eigvals[j]))
                rhoTemp = rhoTemp - (1 / factor) * rhoTempTemp

    rhoPhys = rhoTemp
    vanderMat = np.vander(S_m_eigvals, increasing=True).T
    vanderMatInv = scp.linalg.inv(vanderMat)

    matrices = []
    for i in range(len(S_m_eigvals)):
        factor = np.linalg.matrix_power(S_m, i)
        matrices.append(factor @ rhoPhys)
    matrices = np.array(matrices)

    index = which_index
    vanderMatVec = vanderMatInv[index]
    last = vanderMatVec[-1] * matrices[-1]
    for j in range(len(matrices) - 1):
        last += vanderMatVec[j] * matrices[j]

    last = last / np.trace(last)

    last_dot = pp.evaluate_rho_dot(last, hamiltonian, gammas, L_terms)
    print(np.max(np.max(last_dot)))

    return last
Example #49
def reset_ramsey(res, degree=5):
    """Ramsey's RESET specification test for linear models

    This is a general specification test, for additional non-linear effects
    in a model.

    Parameters
    ----------
    degree : int
        Maximum power to include in the RESET test.  Powers 0 and 1 are
        excluded, so that degree tests powers 2, ..., degree of the fitted
        values.

    Notes
    -----
    The test fits an auxiliary OLS regression where the design matrix, exog,
    is augmented by powers 2 to degree of the fitted values. Then it performs
    an F-test whether these additional terms are significant.

    If the p-value of the f-test is below a threshold, e.g. 0.1, then this
    indicates that there might be additional non-linear effects in the model
    and that the linear model is mis-specified.

    References
    ----------
    https://en.wikipedia.org/wiki/Ramsey_RESET_test
    """
    order = degree + 1
    k_vars = res.model.exog.shape[1]
    # vander without constant and x, and drop constant
    norm_values = np.asarray(res.fittedvalues)
    norm_values = norm_values / np.sqrt((norm_values**2).mean())
    y_fitted_vander = np.vander(norm_values, order)[:, :-2]
    exog = np.column_stack((res.model.exog, y_fitted_vander))
    exog /= np.sqrt((exog**2).mean(0))
    endog = res.model.endog / (res.model.endog**2).mean()
    res_aux = OLS(endog, exog).fit()
    # r_matrix = np.eye(degree, exog.shape[1], k_vars)
    r_matrix = np.eye(degree - 1, exog.shape[1], k_vars)
    # df1 = degree - 1
    # df2 = exog.shape[0] - degree - res.df_model  (without constant)
    return res_aux.f_test(r_matrix)  # , r_matrix, res_aux
Example #50
    def fit_with_l2_regularization(self, train_X, train_Y, l2_coef):
        """ This method fits the model parameters with L2 regularization, given the training inputs and outputs.

        parameters = (X^{T}X + lambda*I)^{-1}X^{T}Y

        Args:
        - train_X (ndarray (shape: (N, 1))): A column vector of N scalar training inputs.
        - train_Y (ndarray (shape: (N, 1))): A column vector of N scalar training outputs.
        - l2_coef (float): The lambda term that decides how much regularization we want.
        """
        assert train_X.shape == train_Y.shape and train_X.shape == (
            train_X.shape[0], 1
        ), f"input and/or output has incorrect shape (train_X: {train_X.shape}, train_Y: {train_Y.shape})."

        v = np.vander(train_X.flatten(), self.K + 1, increasing=True)
        p1 = np.linalg.inv(v.T @ v + np.dot(l2_coef, np.identity(self.K + 1)))
        p2 = v.T @ train_Y
        self.parameters = p1 @ p2

        assert self.parameters.shape == (self.K + 1, 1)
Example #51
def albrecht_6():
    # The values are solutions of
    # 11025*x^3 - 19020*x^2 + 9370*x - 1212 = 0
    sigma2 = roots([11025, -19020, 9370, -1212])
    A = numpy.vander(sigma2, increasing=True).T
    b = numpy.array(
        [frac(1432433, 18849024),
         frac(1075, 31104),
         frac(521, 25920)])
    B = linear_solve(A, b)

    B0 = frac(2615, 43632)
    C = frac(16807, 933120)

    d = {
        "zero2": [[B0]],
        "d10.0": [B, sqrt(sigma2)],
        "d10.1": [[C], [sqrt(frac(6, 7))]],
    }
    return S2Scheme("Albrecht 6", d, 13, _source)
Example #52
    def __init__(self, num_stages):
        assert num_stages > 1
        # mooch the b and c from IIIA
        IIIA = LobattoIIIA(num_stages)
        b = IIIA.b
        c = IIIA.c

        A = numpy.zeros((num_stages, num_stages))
        for i in range(num_stages):
            A[i, 0] = b[0]
        for j in range(num_stages):
            A[-1, j] = b[j]

        mat = numpy.vander(c[1:], increasing=True).T
        for i in range(num_stages-1):
            rhs = numpy.array([(c[i]**(k+1))/(k+1) - b[0] * c[0]**k
                               for k in range(num_stages-1)])
            A[i, 1:] = numpy.linalg.solve(mat, rhs)

        super(LobattoIIIC, self).__init__(A, b, None, c, 2 * num_stages - 2)
Example #53
def c_matrix_vander(n):
    """
    C_n matrix used to generate the non-Clifford gate sets, generated as a
    Vandermonde matrix.
    for 4*n
    C_n := [[exp(+i*2pi/n), 0],
            [0, exp(-i*2pi/n)]]
    Args:
        n (int): A number

    Returns:
        ndarray: A block matrix of C_n matrices.
    """

    exp = np.exp(1j*np.pi/n)
    exp_n = np.array([[exp, 0], [0, exp.conj()]], dtype=complex)
    c_matrix = np.vander(exp_n.ravel(), n,
                         increasing=True)[:, 1:].swapaxes(0, 1).reshape(n-1,
                                                                        2, 2)
    return np.concatenate(([II], c_matrix))
Example #54
    def fit(self, X, y=None):
        degree = self.degree
        assert len(X.shape) == 1, X.shape
        assert degree < len(
            np.unique(X)), "'degree' must be less than number of unique points"
        assert degree > 1, degree
        xbar = np.mean(X)
        X = X - xbar
        X = np.fliplr(np.vander(X, degree + 1))
        q, r = np.linalg.qr(X)

        z = np.diag(np.diag(r))
        raw = np.dot(q, z)

        norm2 = np.sum(raw**2, axis=0)
        print(X.shape)
        print(raw.shape)
        alpha = (np.sum((raw**2) * X, axis=0) / norm2 + xbar)[:degree]
        self.norm2_, self.alpha_ = norm2, alpha
        return self
Example #55
    def _gram_matrix(self):
        integral_coefs = np.polyint(np.ones(2 * self.n_basis - 1))

        # We obtain the powers of both extremes in the domain range
        power_domain_limits = np.vander(self.domain_range[0], 2 * self.n_basis)

        # Subtract the powers (Barrow's rule)
        power_domain_limits_diff = (power_domain_limits[1] -
                                    power_domain_limits[0])

        # Multiply the constants that appear in the integration
        evaluated_points = integral_coefs * power_domain_limits_diff

        # Order the powers, lower to higher, discarding the constant
        # (it does not appear in the integral)
        ordered_evaluated_points = evaluated_points[-2::-1]

        # Build the matrix
        return scipy.linalg.hankel(ordered_evaluated_points[:self.n_basis],
                                   ordered_evaluated_points[self.n_basis - 1:])
Example #56
def iter_polynom(y, deg=3, max_it=100, tol=1e-3):
    '''Computes the baseline of a given data.
    Iteratively performs a polynomial fitting in the data to detect its
    baseline. At every iteration, the fitting weights on the regions with
    peaks is reduced to identify the baseline only.
    Parameters
    ----------
    y : ndarray
        Data to detect the baseline.
    deg : int
        Degree of the polynomial that will estimate the data baseline. A low
        degree may fail to detect all the baseline present, while a high
        degree may make the data too oscillatory, especially at the edges.
    max_it : int
        Maximum number of iterations to perform.
    tol : float
        Tolerance to use when comparing the difference between the current
        fit coefficient and the ones from the last iteration. The iteration
        procedure will stop when the difference between them is lower than
        *tol*.
    Returns
    -------
    ndarray
        Array with the baseline amplitude for every original point in *y*
    '''
    order = deg+1
    coeffs = np.ones(order)
    # try to avoid numerical issues
    cond = pow(y.max(), 1./order)
    x = np.linspace(0., cond, y.size)
    base = y.copy()
    vander = np.vander(x, order)
    vander_pinv = pinv2(vander)
    for _ in range(max_it):
        coeffs_new = np.dot(vander_pinv, y)
        if norm(coeffs_new-coeffs) / norm(coeffs) < tol:
            break
        coeffs = coeffs_new
        base = np.dot(vander, coeffs)
        y = np.minimum(y, base)
    return base
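A hedged usage sketch for the iter_polynom above (assuming its pinv2 and norm come from scipy.linalg and numpy.linalg in the original module): a quadratic baseline with two superimposed peaks.

import numpy as np

t = np.linspace(0.0, 10.0, 400)
baseline = 0.1 * t**2 + 2.0
peaks = 5.0 * np.exp(-(t - 3.0)**2 / 0.05) + 3.0 * np.exp(-(t - 7.0)**2 / 0.1)
est = iter_polynom(baseline + peaks, deg=2)
print(np.abs(est - baseline).mean())  # small: the peaks are clipped away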
Example #57
def test_grad_log_likelihood(kernel, with_general, seed=42, eps=1.34e-7):
    np.random.seed(seed)
    x = np.sort(np.random.rand(100))
    yerr = np.random.uniform(0.1, 0.5, len(x))
    y = np.sin(x)

    if with_general:
        U = np.vander(x - np.mean(x), 4).T
        V = U * np.random.rand(4)[:, None]
        A = np.sum(U * V, axis=0) + 1e-8
    else:
        A = np.empty(0)
        U = np.empty((0, 0))
        V = np.empty((0, 0))

    if not terms.HAS_AUTOGRAD:
        gp = GP(kernel)
        gp.compute(x, yerr, A=A, U=U, V=V)
        with pytest.raises(ImportError):
            _, grad = gp.grad_log_likelihood(y)
        return

    for fit_mean in [True, False]:
        gp = GP(kernel, fit_mean=fit_mean)
        gp.compute(x, yerr, A=A, U=U, V=V)
        _, grad = gp.grad_log_likelihood(y)
        grad0 = np.empty_like(grad)

        v = gp.get_parameter_vector()
        for i, pval in enumerate(v):
            v[i] = pval + eps
            gp.set_parameter_vector(v)
            ll = gp.log_likelihood(y)

            v[i] = pval - eps
            gp.set_parameter_vector(v)
            ll -= gp.log_likelihood(y)

            grad0[i] = 0.5 * ll / eps
            v[i] = pval
        assert np.allclose(grad, grad0)
Example #58
def lsa():  #Define least square approximation
    n = int(input(
        'What order least square approximation polynomial do you want?: '))
    points = []
    yvals = []
    xvals = []
    start = time.time()
    for i in np.arange(-5., 5., (10. / (n))):  #make points
        points.append([i, 1. / (1. + i**2.)])
        yvals.append(1. / (1. + i**2.))
        xvals.append(i)
    points.append([5., 1. / (1. + 5.**2.)])
    yvals.append(1. / (1. + 5.**2.))
    xvals.append(5.)
    yvals = np.array(yvals)  #Make arrays
    xvals = np.array(xvals)  #Make arrays
    A = np.vander(xvals, n)  #Make matrix
    # find the x that minimizes the norm of Ax-y
    (coeffs, residuals, rank, sing_vals) = np.linalg.lstsq(A, yvals)
    # create a polynomial using coefficients
    f = np.poly1d(coeffs)  #Make polynomial
    end = time.time()
    print "The least square approximation is: " + str(f)
    print "It took  " + str(end - start) + " seconds to calculate"
    ss_err = (residuals**2).sum()  #Find R^2
    ss_tot = ((yvals - yvals.mean())**2).sum()
    rsquared = 1 - (ss_err / ss_tot)
    print("The rsquared value is: " + str(rsquared))
    x = np.arange(-5., 5.,
                  (10. / (n)))  #Plot function along with original function
    xlot = np.linspace(-5, 5, 1000)
    y = f(xlot)
    z = 1 / (1 + xlot**2)
    zpoint = 1 / (1 + x**2)
    pyplot.plot(xlot, y, label="Least Square Polynomial")
    pyplot.plot(x, zpoint, 'co')
    pyplot.plot(xlot, z, label="1/(1+x^2)")
    pyplot.legend()
    pyplot.ylim(0, 2)
    pyplot.title(str(n) + "th Degree Least Squares Polynomial")
    pyplot.show()
Example #59
    def predict(self, x):
        """ Return the mass of a heavy-light meson in MeV.

        Note that the effective formula are originally organized as functions
        of :math:`z = m_l/ (0.4 m_s)`, but the interface of function uses
        :math:`x = m_l/m_s` as input.

        Parameters:
            x : array-like or scalar
                Ratio of the light quark mass to the strange quark mass,
                :math:`x = m_l/m_s`.

            par : an instance of class `par`
                The class of coefficients of polynomial expansion in
                powers of (0.4 times) :math:`x`.

        Returns:
            mass : gvar
                The mass(es) of the meson at given `x` as a gvar object.
                To get the mean value of the meson mass(es) use `gv.gvar(mass)`
                and to get its one-sigma uncertainty use `gv.sdev(mass)`.
        """

        if np.ndim(x) == 1:
            x_ = x
        elif np.ndim(x) == 0:
            x_ = [x]
        else:
            raise Exception("x is expected to be a scalar or 1-dim array like")

        z = 2.5 * np.array(x_)  # converting from `s` units to `p4s` units

        par = self.param
        Z = np.vander(z, par.degree+1, increasing=True)
        mean = np.sum(Z * par.mean, axis=1)
        cov = Z @ par.cov @ Z.transpose()

        if np.ndim(x) == 0:
            return gv.gvar(mean[0], cov[0, 0]**0.5)  # gv.gvar(mean, sigma)
        else:
            return gv.gvar(mean, cov)  # gv.gvar(mean, covariance_matrix)
Example #60
def _line_flatten_tilt(img, mask, degree):
    """
    Estimate tilt using the line flatten method.

    Parameters
    ----------
    img : ndarray
        The image from which the tilt is estimated.
    mask : ndarray, or None
        If not None, a bool ndarray of the same shape as `img` indicating which
        pixels should be used in estimate of tilt.
    degree : int
        The degree of the polynomial in the estimated line tilt.

    Returns
    -------
    tilt : ndarray
        The estimated tilt.

    """

    m, n = img.shape
    x = np.arange(n)

    # Shapes of matrices used in the detilting:
    # vander.shape=(n, degree+1), coef.shape=(degree+1, len(m_masked))
    vander = np.fliplr(np.vander(x, degree + 1))  # [1, x, x**2, ...]

    if mask is not None:
        tilt = np.zeros_like(img)
        for l in range(m):
            if mask[l, :].sum() >= degree + 1:  # Skip if underdetermined
                coef, res, rank, s = np.linalg.lstsq(vander[mask[l, :]],
                                                     img[l, mask[l, :]])
                tilt[l, :] = vander.dot(coef).T

    else:
        coef, res, rank, s = np.linalg.lstsq(vander, img.T)
        tilt = vander.dot(coef).T

    return tilt
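A hedged usage sketch for the _line_flatten_tilt above: each row of a synthetic image gets its own linear tilt, which the row-wise least-squares fit recovers exactly.

import numpy as np

m, n = 16, 64
x = np.arange(n)
slopes = np.linspace(-0.5, 0.5, m)
img = slopes[:, None] * x[None, :] + 2.0     # pure per-row linear tilt
tilt = _line_flatten_tilt(img, mask=None, degree=1)
print(np.abs(img - tilt).max())              # ~0: the image is all tilt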