Example #1
def _plot_error(neval, err_dic, plot_error):
    if plot_error:
        plt.figure(0)
        for name in err_dic:
            plt.loglog(neval, err_dic[name], label=name)

        plt.xlabel('number of function evaluations')
        plt.ylabel('error')
        plt.legend()
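
A minimal sketch of calling this helper with made-up data (the evaluation counts and error curves below are fabricated for illustration):

import numpy as np
import matplotlib.pyplot as plt

neval = np.array([3, 5, 9, 17, 33])            # hypothetical evaluation counts
err_dic = {'Trapz': 2.0 ** -np.arange(1, 6),   # fabricated error curves
           'Simps': 4.0 ** -np.arange(1, 6)}
_plot_error(neval, err_dic, plot_error=True)
plt.show()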
Example #2
    def plotecdf(self, symb1="r-", symb2="b."):
        """  Plot Empirical and fitted Cumulative Distribution Function

        The purpose of the plot is to graphically assess whether
        the data could come from the fitted distribution.
        If so the empirical CDF should resemble the model CDF.
        Other distribution types will introduce deviations in the plot.
        """
        n = len(self.data)
        F = (arange(1, n + 1)) / n
        plotbackend.plot(self.data, F, symb2, self.data, self.cdf(self.data), symb1)
        plotbackend.xlabel("x")
        plotbackend.ylabel("F(x) (%s)" % self.dist.name)
        plotbackend.title("Empirical CDF plot")
Example #3
    def plotesf(self, symb1="r-", symb2="b."):
        """  Plot Empirical and fitted Survival Function

        The purpose of the plot is to graphically assess whether
        the data could come from the fitted distribution.
        If so the empirical SF should resemble the model SF.
        Other distribution types will introduce deviations in the plot.
        """
        n = len(self.data)
        SF = (arange(n, 0, -1)) / n
        plotbackend.semilogy(self.data, SF, symb2, self.data, self.sf(self.data), symb1)
        # plotbackend.plot(self.data,SF,'b.',self.data,self.sf(self.data),'r-')
        plotbackend.xlabel("x")
        plotbackend.ylabel("F(x) (%s)" % self.dist.name)
        plotbackend.title("Empirical SF plot")
Example #4
    def plotecdf(self, symb1='r-', symb2='b.'):
        '''  Plot Empirical and fitted Cumulative Distribution Function

        The purpose of the plot is to graphically assess whether
        the data could come from the fitted distribution.
        If so the empirical CDF should resemble the model CDF.
        Other distribution types will introduce deviations in the plot.
        '''
        n = len(self.data)
        F = (arange(1, n + 1)) / n
        plotbackend.plot(self.data, F, symb2,
                         self.data, self.cdf(self.data), symb1)
        plotbackend.xlabel('x')
        plotbackend.ylabel('F(x) (%s)' % self.dist.name)
        plotbackend.title('Empirical CDF plot')
Example #5
    def plotesf(self, symb1='r-', symb2='b.'):
        '''  Plot Empirical and fitted Survival Function

        The purpose of the plot is to graphically assess whether
        the data could come from the fitted distribution.
        If so the empirical SF should resemble the model SF.
        Other distribution types will introduce deviations in the plot.
        '''
        n = len(self.data)
        SF = (arange(n, 0, -1)) / n
        plotbackend.semilogy(
            self.data, SF, symb2, self.data, self.sf(self.data), symb1)
        # plotbackend.plot(self.data,SF,'b.',self.data,self.sf(self.data),'r-')
        plotbackend.xlabel('x')
        plotbackend.ylabel('F(x) (%s)' % self.dist.name)
        plotbackend.title('Empirical SF plot')
Example #6
    def plotresq(self, symb1='r-', symb2='b.'):
        '''PLOTRESQ displays a residual quantile plot.

        The purpose of the plot is to graphically assess whether
        the data could come from the fitted distribution. If so the
        plot will be linear. Other distribution types will introduce
        curvature in the plot.
        '''
        n = len(self.data)
        eprob = (arange(1, n + 1) - 0.5) / n
        y = self.ppf(eprob)
        y1 = self.data[[0, -1]]
        plotbackend.plot(self.data, y, symb2, y1, y1, symb1)
        plotbackend.xlabel('Empirical')
        plotbackend.ylabel('Model (%s)' % self.dist.name)
        plotbackend.title('Residual Quantile Plot')
        plotbackend.axis('tight')
        plotbackend.axis('equal')
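
The same residual quantile (Q-Q) idea in self-contained form, here against a normal model with scipy.stats; this is an illustrative sketch, not the wafo implementation:

import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt

data = np.sort(ss.norm.rvs(loc=5, scale=2, size=200))  # synthetic, sorted sample
n = len(data)
eprob = (np.arange(1, n + 1) - 0.5) / n                # plotting positions
model_q = ss.norm.ppf(eprob, loc=5, scale=2)           # model quantiles (true parameters assumed known)
plt.plot(data, model_q, 'b.')                          # should hug the 45-degree line
ref = data[[0, -1]]
plt.plot(ref, ref, 'r-')
plt.xlabel('Empirical')
plt.ylabel('Model')
plt.show()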
Example #7
    def plotepdf(self, symb1="r-", symb2="b-"):
        """Plot Empirical and fitted Probability Density Function

        The purpose of the plot is to graphically assess whether
        the data could come from the fitted distribution.
        If so the histogram should resemble the model density.
        Other distribution types will introduce deviations in the plot.
        """
        x, pdf = self._get_empirical_pdf()
        ymax = pdf.max()
        # plotbackend.hist(self.data,normed=True,fill=False)
        plotbackend.plot(self.data, self.pdf(self.data), symb1, x, pdf, symb2)
        ax = list(plotbackend.axis())
        ax[3] = min(ymax * 1.3, ax[3])
        plotbackend.axis(ax)
        plotbackend.xlabel("x")
        plotbackend.ylabel("f(x) (%s)" % self.dist.name)
        plotbackend.title("Density plot")
Example #8
    def plotresq(self, symb1="r-", symb2="b."):
        """PLOTRESQ displays a residual quantile plot.

        The purpose of the plot is to graphically assess whether
        the data could come from the fitted distribution. If so the
        plot will be linear. Other distribution types will introduce
        curvature in the plot.
        """
        n = len(self.data)
        eprob = (arange(1, n + 1) - 0.5) / n
        y = self.ppf(eprob)
        y1 = self.data[[0, -1]]
        plotbackend.plot(self.data, y, symb2, y1, y1, symb1)
        plotbackend.xlabel("Empirical")
        plotbackend.ylabel("Model (%s)" % self.dist.name)
        plotbackend.title("Residual Quantile Plot")
        plotbackend.axis("tight")
        plotbackend.axis("equal")
Example #9
    def plotepdf(self, symb1='r-', symb2='b-'):
        '''Plot Empirical and fitted Probability Density Function

        The purpose of the plot is to graphically assess whether
        the data could come from the fitted distribution.
        If so the histogram should resemble the model density.
        Other distribution types will introduce deviations in the plot.
        '''
        x, pdf = self._get_empirical_pdf()
        ymax = pdf.max()
        # plotbackend.hist(self.data,normed=True,fill=False)
        plotbackend.plot(self.data, self.pdf(self.data), symb1,
                         x, pdf, symb2)
        ax = list(plotbackend.axis())
        ax[3] = min(ymax * 1.3, ax[3])
        plotbackend.axis(ax)
        plotbackend.xlabel('x')
        plotbackend.ylabel('f(x) (%s)' % self.dist.name)
        plotbackend.title('Density plot')
Example #10
    def plotresprb(self, symb1="r-", symb2="b."):
        """ PLOTRESPRB displays a residual probability plot.

        The purpose of the plot is to graphically assess whether
        the data could come from the fitted distribution. If so the
        plot will be linear. Other distribution types will introduce curvature
        in the plot.
        """
        n = len(self.data)
        # ecdf = (0.5:n-0.5)/n;
        ecdf = arange(1, n + 1) / (n + 1)
        mcdf = self.cdf(self.data)
        p1 = [0, 1]
        plotbackend.plot(ecdf, mcdf, symb2, p1, p1, symb1)
        plotbackend.xlabel("Empirical")
        plotbackend.ylabel("Model (%s)" % self.dist.name)
        plotbackend.title("Residual Probability Plot")
        plotbackend.axis("equal")
        plotbackend.axis([0, 1, 0, 1])
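
The corresponding residual probability (P-P) plot in self-contained form (an illustrative sketch with the true parameters assumed known):

import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt

data = np.sort(ss.expon.rvs(scale=3.0, size=200))  # synthetic, sorted sample
n = len(data)
ecdf = np.arange(1, n + 1) / (n + 1)               # empirical probabilities
mcdf = ss.expon.cdf(data, scale=3.0)               # model probabilities
plt.plot(ecdf, mcdf, 'b.')
plt.plot([0, 1], [0, 1], 'r-')                     # reference line
plt.xlabel('Empirical')
plt.ylabel('Model')
plt.axis([0, 1, 0, 1])
plt.show()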
Example #11
    def plotresprb(self, symb1='r-', symb2='b.'):
        ''' PLOTRESPRB displays a residual probability plot.

        The purpose of the plot is to graphically assess whether
        the data could come from the fitted distribution. If so the
        plot will be linear. Other distribution types will introduce curvature
        in the plot.
        '''
        n = len(self.data)
        # ecdf = (0.5:n-0.5)/n;
        ecdf = arange(1, n + 1) / (n + 1)
        mcdf = self.cdf(self.data)
        p1 = [0, 1]
        plotbackend.plot(ecdf, mcdf, symb2,
                         p1, p1, symb1)
        plotbackend.xlabel('Empirical')
        plotbackend.ylabel('Model (%s)' % self.dist.name)
        plotbackend.title('Residual Probability Plot')
        plotbackend.axis('equal')
        plotbackend.axis([0, 1, 0, 1])
Example #12
def demo_tide_filter():
    """
    Examples
    --------
    >>> demo_tide_filter()

    >>> plt.close()
    """
    # import statsmodels.api as sa
    import wafo.spectrum.models as sm
    sd = 10
    Sj = sm.Jonswap(Hm0=4. * sd)
    S = Sj.tospecdata()

    q = (0.1 * sd)**2  # variance of the process noise
    r = (100 * sd)**2  # variance of measurement error
    b = 0  # no control input matrix
    u = 0  # no control input

    from scipy.signal import butter, filtfilt, lfilter_zi  # lfilter,
    freq_tide = 1. / (12 * 60 * 60)
    freq_wave = 1. / 10
    freq_filt = freq_wave / 10
    dt = 1.
    freq = 1. / dt
    fn = (freq / 2)

    P = 10 * np.diag([1, 0.01])
    R = r
    H = np.atleast_2d([1, 0])

    F = np.atleast_2d([[0, 1], [0, 0]])
    A, Q = lti_disc(F, L=None, Q=np.diag([0, q]), dt=dt)

    t = np.arange(0, 60 * 12, 1. / freq)
    w = 2 * np.pi * freq  # 1 Hz
    tide = 100 * np.sin(freq_tide * w * t + 2 * np.pi / 4) + 100
    y = tide + S.sim(len(t), dt=1. / freq)[:, 1].ravel()
    #     lowess = sa.nonparametric.lowess
    #     y2 = lowess(y, t, frac=0.5)[:,1]

    filt = Kalman(R=R, x=np.array([[tide[0]], [0]]), P=P, A=A, Q=Q, H=H, B=b)
    filt2 = Kalman(R=R, x=np.array([[tide[0]], [0]]), P=P, A=A, Q=Q, H=H, B=b)
    # y = tide + 0.5 * np.sin(freq_wave * w * t)
    # Butterworth filter
    b, a = butter(9, (freq_filt / fn), btype='low')
    # y2 = [lowess(y[max(i-60,0):i + 1], t[max(i-60,0):i + 1], frac=.3)[-1,1]
    #    for i in range(len(y))]
    # y2 = [lfilter(b, a, y[:i + 1])[i] for i in range(len(y))]
    # y3 = filtfilt(b, a, y[:16]).tolist() + [filtfilt(b, a, y[:i + 1])[i]
    #    for i in range(16, len(y))]
    # y0 = medfilt(y, 41)
    _zi = lfilter_zi(b, a)
    # y2 = lfilter(b, a, y)#, zi=y[0]*zi)  # standard filter
    y3 = filtfilt(b, a, y)  # filter with phase shift correction
    y4 = []
    y5 = []
    for _i, j in enumerate(y):
        tmp = np.ravel(filt(j, u=u))
        tmp = np.ravel(filt2(tmp[0], u=u))
        #         if i==0:
        #             print(filt.x)
        #             print(filt2.x)
        y4.append(tmp[0])
        y5.append(tmp[1])
    _y0 = medfilt(y4, 41)
    # print(filt.P)
    # plot

    plt.plot(t, y, 'r.-', linewidth=2, label='raw data')
    # plt.plot(t, y2, 'b.-', linewidth=2, label='lowess @ %g Hz' % freq_filt)
    # plt.plot(t, y2, 'b.-', linewidth=2, label='filter @ %g Hz' % freq_filt)
    plt.plot(t, y3, 'g.-', linewidth=2, label='filtfilt @ %g Hz' % freq_filt)
    plt.plot(t, y4, 'k.-', linewidth=2, label='kalman')
    # plt.plot(t, y5, 'k.', linewidth=2, label='kalman2')
    plt.plot(t, tide, 'y-', linewidth=2, label='True tide')
    plt.legend(frameon=False, fontsize=14)
    plt.xlabel("Time [s]")
    plt.ylabel("Amplitude")
Example #13
def qdemo(f, a, b, kmax=9, plot_error=False):
    '''
    Compares different quadrature rules.

    Parameters
    ----------
    f : callable
        function
    a,b : scalars
        lower and upper integration limits

    Details
    -------
    qdemo(f,a,b) computes and compares various approximations to
    the integral of f from a to b.  Three approximations are used,
    the composite trapezoid, Simpson's, and Boole's rules, all with
    equal length subintervals.
    In a case like qdemo(exp,0,3) one can see the expected
    convergence rates for each of the three methods.
    In a case like qdemo(sqrt,0,3), the convergence rate is limited
    not by the method, but by the singularity of the integrand.

    Example
    -------
    >>> import numpy as np
    >>> qdemo(np.exp,0,3)
    true value =  19.08553692
     ftn,                Boole,            Chebychev
    evals       approx        error       approx        error
       3, 19.4008539142, 0.3153169910, 19.5061466023, 0.4206096791
       5, 19.0910191534, 0.0054822302, 19.0910191534, 0.0054822302
       9, 19.0856414320, 0.0001045088, 19.0855374134, 0.0000004902
      17, 19.0855386464, 0.0000017232, 19.0855369232, 0.0000000000
      33, 19.0855369505, 0.0000000273, 19.0855369232, 0.0000000000
      65, 19.0855369236, 0.0000000004, 19.0855369232, 0.0000000000
     129, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
     257, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
     513, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
     ftn,      Clenshaw-Curtis,       Gauss-Legendre
    evals       approx        error       approx        error
       3, 19.5061466023, 0.4206096791, 19.0803304585, 0.0052064647
       5, 19.0834145766, 0.0021223465, 19.0855365951, 0.0000003281
       9, 19.0855369150, 0.0000000082, 19.0855369232, 0.0000000000
      17, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
      33, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
      65, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
     129, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
     257, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
     513, 19.0855369232, 0.0000000000, 19.0855369232, 0.0000000000
     ftn,                Simps,                Trapz
    evals       approx        error       approx        error
       3, 19.5061466023, 0.4206096791, 22.5366862979, 3.4511493747
       5, 19.1169646189, 0.0314276957, 19.9718950387, 0.8863581155
       9, 19.0875991312, 0.0020622080, 19.3086731081, 0.2231361849
      17, 19.0856674267, 0.0001305035, 19.1414188470, 0.0558819239
      33, 19.0855451052, 0.0000081821, 19.0995135407, 0.0139766175
      65, 19.0855374350, 0.0000005118, 19.0890314614, 0.0034945382
     129, 19.0855369552, 0.0000000320, 19.0864105817, 0.0008736585
     257, 19.0855369252, 0.0000000020, 19.0857553393, 0.0002184161
     513, 19.0855369233, 0.0000000001, 19.0855915273, 0.0000546041
    '''
    true_val, _tol = intg.quad(f, a, b)
    print('true value = %12.8f' % (true_val,))
    neval = zeros(kmax, dtype=int)
    vals_dic = {}
    err_dic = {}

    # try various approximations
    methods = [trapz, simps, boole, ]

    for k in xrange(kmax):
        n = 2 ** (k + 1) + 1
        neval[k] = n
        x = np.linspace(a, b, n)
        y = f(x)
        for method in methods:
            name = method.__name__.title()
            q = method(y, x)
            vals_dic.setdefault(name, []).append(q)
            err_dic.setdefault(name, []).append(abs(q - true_val))

        name = 'Clenshaw-Curtis'
        q, _ec3 = clencurt(f, a, b, (n - 1) / 2)
        vals_dic.setdefault(name, []).append(q[0])
        err_dic.setdefault(name, []).append(abs(q[0] - true_val))

        name = 'Chebychev'
        ck = np.polynomial.chebyshev.chebfit(x, y, deg=min(n-1, 36))
        cki = np.polynomial.chebyshev.chebint(ck)
        q = np.polynomial.chebyshev.chebval(x[-1], cki)
        vals_dic.setdefault(name, []).append(q)
        err_dic.setdefault(name, []).append(abs(q - true_val))
        # ck = chebfit(f,n,a,b)
        # q  = chebval(b,chebint(ck,a,b),a,b)
        # qc2[k] = q; ec2[k] = abs(q - true)

        name = 'Gauss-Legendre'  # quadrature
        q = intg.fixed_quad(f, a, b, n=n)[0]
        # [x, w]=qrule(n,1)
        # x = (b-a)/2*x + (a+b)/2     % Transform base points X.
        # w = (b-a)/2*w               % Adjust weigths.
        # q = sum(feval(f,x)*w)
        vals_dic.setdefault(name, []).append(q)
        err_dic.setdefault(name, []).append(abs(q - true_val))

    # display results
    names = sorted(vals_dic.keys())
    num_cols = 2
    formats = ['%4.0f, ', ] + ['%10.10f, ', ] * num_cols * 2
    formats[-1] = formats[-1].split(',')[0]
    formats_h = ['%4s, ', ] + ['%20s, ', ] * num_cols
    formats_h[-1] = formats_h[-1].split(',')[0]
    headers = ['evals'] + ['%12s %12s' % ('approx', 'error')] * num_cols
    while len(names) > 0:
        print(''.join(fi % t for fi, t in zip(formats_h,
                                              ['ftn'] + names[:num_cols])))
        print(' '.join(headers))

        data = [neval]
        for name in names[:num_cols]:
            data.append(vals_dic[name])
            data.append(err_dic[name])
        data = np.vstack(tuple(data)).T
        for k in xrange(kmax):
            tmp = data[k].tolist()
            print(''.join(fi % t for fi, t in zip(formats, tmp)))
        if plot_error:
            plt.figure(0)
            for name in names[:num_cols]:
                plt.loglog(neval, err_dic[name], label=name)

        names = names[num_cols:]
    if plot_error:
        plt.xlabel('number of function evaluations')
        plt.ylabel('error')
        plt.legend()
        plt.show('hold')
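
Typical invocations, mirroring the docstring example (the error plot is optional):

import numpy as np
qdemo(np.exp, 0, 3, kmax=9, plot_error=True)  # expected convergence rates on a smooth integrand
qdemo(np.sqrt, 0, 3)                          # convergence limited by the sqrt singularity at 0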
Example #14
##
# Return values in the Gumbel distribution
plt.clf()
T = np.r_[1:100000]
sT = gum[0] - gum[1] * np.log(-np.log1p(-1./T))
plt.semilogx(T, sT)
# plt.hold(True)  # not needed: recent matplotlib releases always hold
# ws.edf(Hs).plot()
Nmax = len(Hs)
N = np.r_[1:Nmax+1]

plt.plot(Nmax/N, sorted(Hs, reverse=True), '.')
plt.title('Return values in the Gumbel model')
plt.xlabel('Return period')
plt.ylabel('Return value')
#wafostamp([],'(ER)')
plt.show()
#disp('Block = 4'),pause(pstate)

## Section 5.2 Generalized Pareto and Extreme Value distributions
## Section 5.2.1 Generalized Extreme Value distribution

# Empirical distribution of significant wave-height with estimated
# Generalized Extreme Value distribution,
gev = ws.genextreme.fit2(Hs)
gev.plotfitsummary()
# wafostamp([],'(ER)')
# disp('Block = 5a'),pause(pstate)

plt.clf()
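
The return-value curve above is the Gumbel percent-point function evaluated at the non-exceedance probability 1 - 1/T, i.e. sT = loc - scale*log(-log(1 - 1/T)). A cross-check sketch with scipy.stats, using hypothetical parameters in place of the fitted `gum`:

import numpy as np
import scipy.stats as ss

loc, scale = 3.0, 0.5                                        # stand-ins for gum[0], gum[1]
T = np.r_[2:100000]                                          # return periods
sT = loc - scale * np.log(-np.log1p(-1. / T))                # formula used above
sT_ppf = ss.gumbel_r.ppf(1. - 1. / T, loc=loc, scale=scale)  # same thing via the ppf
print(np.allclose(sT, sT_ppf))                               # True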
Example #15
#plot(rfc(:, 2), rfc(:, 1), '.')
#wafostamp('', '(ER)')
#hold off
#disp('Block = 9'), pause(pstate)

#! Section 1.4.5 Extreme value statistics
#!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Plot of yura87 data
plt.clf()
import wafo.data as wd
xn = wd.yura87()
#xn = load('yura87.dat');
plt.subplot(211)
plt.plot(xn[::30, 0] / 3600, xn[::30, 1], '.')
plt.title('Water level')
plt.ylabel('(m)')

#! Formation of 5 min maxima
yura = xn[:85500, 1]
yura = np.reshape(yura, (285, 300)).T
maxyura = yura.max(axis=0)
plt.subplot(212)
plt.plot(xn[299:85500:300, 0] / 3600, maxyura, '.')
plt.xlabel('Time (h)')
plt.ylabel('(m)')
plt.title('Maximum 5 min water level')
plt.show()

#! Estimation of GEV for yuramax
plt.clf()
import wafo.stats as ws
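
The 5-minute maxima above come from cutting the 1 Hz record into 300-sample blocks and taking one maximum per block; the reshape trick in isolation, with synthetic data:

import numpy as np

x = np.random.randn(85500)                       # synthetic 1 Hz record (285 * 300 samples)
block = 300                                      # 5 minutes at 1 Hz
blocks = x[:len(x) // block * block].reshape(-1, block)
block_max = blocks.max(axis=1)                   # one maximum per 5-minute block
print(block_max.shape)                           # (285,)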
Example #16
#plot(rfc(:, 2), rfc(:, 1), '.')
#wafostamp('', '(ER)')
#hold off
#disp('Block = 9'), pause(pstate)

#! Section 1.4.5 Extreme value statistics
#!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Plot of yura87 data
plt.clf()
import wafo.data as wd
xn = wd.yura87()
#xn = load('yura87.dat');
plt.subplot(211)
plt.plot(xn[::30, 0] / 3600, xn[::30, 1], '.')
plt.title('Water level')
plt.ylabel('(m)')

#! Formation of 5 min maxima
yura = xn[:85500, 1]
yura = np.reshape(yura, (285, 300)).T
maxyura = yura.max(axis=0)
plt.subplot(212)
plt.plot(xn[299:85500:300, 0] / 3600, maxyura, '.')
plt.xlabel('Time (h)')
plt.ylabel('(m)')
plt.title('Maximum 5 min water level')
plt.show()

#! Estimation of GEV for yuramax
plt.clf()
import wafo.stats as ws
Example #17
def qdemo(f, a, b):
    '''
    Compares different quadrature rules.

    Parameters
    ----------
    f : callable
        function
    a,b : scalars
        lower and upper integration limits

    Details
    -------
    qdemo(f,a,b) computes and compares various approximations to
    the integral of f from a to b.  Three approximations are used,
    the composite trapezoid, Simpson's, and Boole's rules, all with
    equal length subintervals.
    In a case like qdemo(exp,0,3) one can see the expected
    convergence rates for each of the three methods.
    In a case like qdemo(sqrt,0,3), the convergence rate is limited
    not by the method, but by the singularity of the integrand.

    Example
    -------
    >>> import numpy as np
    >>> qdemo(np.exp,0,3)
    true value =  19.08553692
     ftn         Trapezoid                  Simpsons                   Booles
    evals    approx       error          approx       error           approx        error
       3, 22.5366862979, 3.4511493747, 19.5061466023, 0.4206096791, 19.4008539142, 0.3153169910
       5, 19.9718950387, 0.8863581155, 19.1169646189, 0.0314276957, 19.0910191534, 0.0054822302
       9, 19.3086731081, 0.2231361849, 19.0875991312, 0.0020622080, 19.0856414320, 0.0001045088
      17, 19.1414188470, 0.0558819239, 19.0856674267, 0.0001305035, 19.0855386464, 0.0000017232
      33, 19.0995135407, 0.0139766175, 19.0855451052, 0.0000081821, 19.0855369505, 0.0000000273
      65, 19.0890314614, 0.0034945382, 19.0855374350, 0.0000005118, 19.0855369236, 0.0000000004
     129, 19.0864105817, 0.0008736585, 19.0855369552, 0.0000000320, 19.0855369232, 0.0000000000
     257, 19.0857553393, 0.0002184161, 19.0855369252, 0.0000000020, 19.0855369232, 0.0000000000
     513, 19.0855915273, 0.0000546041, 19.0855369233, 0.0000000001, 19.0855369232, 0.0000000000
     ftn         Clenshaw                    Chebychev                    Gauss-L
    evals    approx       error          approx       error           approx        error
       3, 19.5061466023, 0.4206096791, 0.0000000000, 1.0000000000, 19.0803304585, 0.0052064647
       5, 19.0834145766, 0.0021223465, 0.0000000000, 1.0000000000, 19.0855365951, 0.0000003281
       9, 19.0855369150, 0.0000000082, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
      17, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
      33, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
      65, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
     129, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
     257, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
     513, 19.0855369232, 0.0000000000, 0.0000000000, 1.0000000000, 19.0855369232, 0.0000000000
    
    '''
    # use quad8 with small tolerance to get "true" value
    #true1 = quad8(f,a,b,1e-10)
    #[true tol]= gaussq(f,a,b,1e-12)
    #[true tol] = agakron(f,a,b,1e-13)
    true_val, _tol = intg.quad(f, a, b)
    print('true value = %12.8f' % (true_val,))
    kmax = 9
    neval = zeros(kmax, dtype=int)
    qt = zeros(kmax)
    qs = zeros(kmax)
    qb = zeros(kmax)
    qc = zeros(kmax)
    qc2 = zeros(kmax)
    qg = zeros(kmax)

    et = ones(kmax)
    es = ones(kmax)
    eb = ones(kmax)
    ec = ones(kmax)
    ec2 = ones(kmax)
    ec3 = ones(kmax)
    eg = ones(kmax)
    # try various approximations

    for k in xrange(kmax):
        n = 2 ** (k + 1) + 1
        neval[k] = n
        h = (b - a) / (n - 1)
        x = np.linspace(a, b, n)
        y = f(x)

        # trapezoid approximation
        q = np.trapz(y, x)
        #h*( (y(1)+y(n))/2 + sum(y(2:n-1)) )
        qt[k] = q
        et[k] = abs(q - true_val)
        # Simpson approximation
        q = intg.simps(y, x)
        #(h/3)*( y(1)+y(n) + 4*sum(y(2:2:n-1)) + 2*sum(y(3:2:n-2)) )
        qs[k] = q
        es[k] = abs(q - true_val)
        # Boole's rule
        #q = boole(x,y)
        q = (2 * h / 45) * (7 * (y[0] + y[-1]) + 12 * np.sum(y[2:n - 1:4])
           + 32 * np.sum(y[1:n - 1:2]) + 14 * np.sum(y[4:n - 3:4]))
        qb[k] = q
        eb[k] = abs(q - true_val)

        # Clenshaw-Curtis
        [q, ec3[k]] = clencurt(f, a, b, (n - 1) / 2)
        qc[k] = q
        ec[k] = abs(q - true_val)

        # Chebychev
        #ck = chebfit(f,n,a,b)
        #q  = chebval(b,chebint(ck,a,b),a,b)
        #qc2[k] = q; ec2[k] = abs(q - true)

        # Gauss-Legendre quadrature
        q = intg.fixed_quad(f, a, b, n=n)[0]
        #[x, w]=qrule(n,1)
        #x = (b-a)/2*x + (a+b)/2     % Transform base points X.
        #w = (b-a)/2*w               % Adjust weigths.
        #q = sum(feval(f,x)*w)
        qg[k] = q
        eg[k] = abs(q - true_val)


    # display results
    formats = ['%4.0f, ', ] + ['%10.10f, ', ]*6
    formats[-1] = formats[-1].split(',')[0]
    data = np.vstack((neval, qt, et, qs, es, qb, eb)).T
    print(' ftn         Trapezoid                  Simpson''s                   Boole''s')
    print('evals    approx       error          approx       error           approx        error')
   
    for k in xrange(kmax):
        tmp = data[k].tolist()
        print(''.join(fi % t for fi, t in zip(formats, tmp)))
      
    # display results
    data = np.vstack((neval, qc, ec, qc2, ec2, qg, eg)).T
    print(' ftn         Clenshaw                    Chebychev                    Gauss-L')
    print('evals    approx       error          approx       error           approx        error')
    for k in xrange(kmax):
        tmp = data[k].tolist()
        print(''.join(fi % t for fi, t in zip(formats, tmp)))
      

    plt.loglog(neval, np.vstack((et, es, eb, ec, ec2, eg)).T)
    plt.xlabel('number of function evaluations')
    plt.ylabel('error')
    plt.legend(('Trapezoid', 'Simpsons', 'Booles', 'Clenshaw', 'Chebychev', 'Gauss-L'))
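
The convergence rates the docstring refers to can be checked directly: halving h should cut the trapezoid error by roughly 4 (an O(h^2) rule). A standalone sketch for the exp integrand on [0, 3]:

import numpy as np

true_val = np.exp(3) - 1                   # integral of exp over [0, 3]
prev = None
for k in range(1, 8):
    n = 2 ** (k + 1) + 1
    x = np.linspace(0, 3, n)
    err = abs(np.trapz(np.exp(x), x) - true_val)
    ratio = prev / err if prev else float('nan')
    print('%4d  %.3e  ratio %.2f' % (n, err, ratio))  # ratio approaches 4
    prev = err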
Example #18
##
# Return values in the Gumbel distribution
plt.clf()
T = np.r_[1:100000]
sT = gum[0] - gum[1] * np.log(-np.log1p(-1. / T))
plt.semilogx(T, sT)
# plt.hold(True)  # not needed: recent matplotlib releases always hold
# ws.edf(Hs).plot()
Nmax = len(Hs)
N = np.r_[1:Nmax + 1]

plt.plot(Nmax / N, sorted(Hs, reverse=True), '.')
plt.title('Return values in the Gumbel model')
plt.xlabel('Return period')
plt.ylabel('Return value')
#wafostamp([],'(ER)')
plt.show()
#disp('Block = 4'),pause(pstate)

## Section 5.2 Generalized Pareto and Extreme Value distributions
## Section 5.2.1 Generalized Extreme Value distribution

# Empirical distribution of significant wave-height with estimated
# Generalized Extreme Value distribution,
gev = ws.genextreme.fit2(Hs)
gev.plotfitsummary()
# wafostamp([],'(ER)')
# disp('Block = 5a'),pause(pstate)

plt.clf()
Example #19
def test_tide_filter():
    # import statsmodels.api as sa
    import wafo.spectrum.models as sm
    sd = 10
    Sj = sm.Jonswap(Hm0=4.*sd)
    S = Sj.tospecdata()

    q = (0.1 * sd) ** 2   # variance of the process noise
    r = (100 * sd) ** 2  # variance of measurement error
    b = 0  # no control input matrix
    u = 0  # no control input

    from scipy.signal import butter, filtfilt, lfilter_zi  # lfilter,
    freq_tide = 1. / (12 * 60 * 60)
    freq_wave = 1. / 10
    freq_filt = freq_wave / 10
    dt = 1.
    freq = 1. / dt
    fn = (freq / 2)

    P = 10 * np.diag([1, 0.01])
    R = r
    H = np.atleast_2d([1, 0])

    F = np.atleast_2d([[0, 1],
                       [0, 0]])
    A, Q = lti_disc(F, L=None, Q=np.diag([0, q]), dt=dt)

    t = np.arange(0, 60 * 12, 1. / freq)
    w = 2 * np.pi * freq  # 1 Hz
    tide = 100 * np.sin(freq_tide * w * t + 2 * np.pi / 4) + 100
    y = tide + S.sim(len(t), dt=1. / freq)[:, 1].ravel()
#     lowess = sa.nonparametric.lowess
#     y2 = lowess(y, t, frac=0.5)[:,1]

    filt = Kalman(R=R, x=np.array([[tide[0]], [0]]), P=P, A=A, Q=Q, H=H, B=b)
    filt2 = Kalman(R=R, x=np.array([[tide[0]], [0]]), P=P, A=A, Q=Q, H=H, B=b)
    # y = tide + 0.5 * np.sin(freq_wave * w * t)
    # Butterworth filter
    b, a = butter(9, (freq_filt / fn), btype='low')
    # y2 = [lowess(y[max(i-60,0):i + 1], t[max(i-60,0):i + 1], frac=.3)[-1,1]
    #    for i in range(len(y))]
    # y2 = [lfilter(b, a, y[:i + 1])[i] for i in range(len(y))]
    # y3 = filtfilt(b, a, y[:16]).tolist() + [filtfilt(b, a, y[:i + 1])[i]
    #    for i in range(16, len(y))]
    # y0 = medfilt(y, 41)
    _zi = lfilter_zi(b, a)
    # y2 = lfilter(b, a, y)#, zi=y[0]*zi)  # standard filter
    y3 = filtfilt(b, a, y)  # filter with phase shift correction
    y4 = []
    y5 = []
    for _i, j in enumerate(y):
        tmp = filt(j, u=u).ravel()
        tmp = filt2(tmp[0], u=u).ravel()
#         if i==0:
#             print(filt.x)
#             print(filt2.x)
        y4.append(tmp[0])
        y5.append(tmp[1])
    _y0 = medfilt(y4, 41)
    print(filt.P)
    # plot

    plt.plot(t, y, 'r.-', linewidth=2, label='raw data')
    # plt.plot(t, y2, 'b.-', linewidth=2, label='lowess @ %g Hz' % freq_filt)
    # plt.plot(t, y2, 'b.-', linewidth=2, label='filter @ %g Hz' % freq_filt)
    plt.plot(t, y3, 'g.-', linewidth=2, label='filtfilt @ %g Hz' % freq_filt)
    plt.plot(t, y4, 'k.-', linewidth=2, label='kalman')
    # plt.plot(t, y5, 'k.', linewidth=2, label='kalman2')
    plt.plot(t, tide, 'y-', linewidth=2, label='True tide')
    plt.legend(frameon=False, fontsize=14)
    plt.xlabel("Time [s]")
    plt.ylabel("Amplitude")
    plt.show('hold')