Example #1
    def set_constants(self):
        if self.xcutoff is None:
            trunc_error = 0.1 * self.abseps
            self.nc1c2 = max(1, self.nc1c2)
            xcut = abs(invnorm(trunc_error / (self.nc1c2 * 2)))
            self.xcutoff = max(min(xcut, 8.5), 1.2)
            # self.abseps  = max(self.abseps- truncError,0);
            # self.releps  = max(self.releps- truncError,0);

        if self.method > 0:
            names = [
                "method",
                "xcscale",
                "abseps",
                "releps",
                "coveps",
                "maxpts",
                "minpts",
                "nit",
                "xcutoff",
                "nc1c2",
                "quadno",
                "xsplit",
            ]

            constants = [getattr(self, name) for name in names]
            constants[0] = mod(constants[0], 10)
            rindmod.set_constants(*constants)  # @UndefinedVariable
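
Note: the snippet above derives the normal-CDF truncation limit xcutoff from
the absolute tolerance before forwarding all options to the compiled rindmod
backend. Below is a minimal, standalone sketch of that heuristic; it assumes
invnorm is the standard normal quantile function (here scipy.special.ndtri),
and the plain variables simply mirror the attributes used in the snippet.

# Sketch (not part of the library): the xcutoff heuristic on its own.
from scipy.special import ndtri as invnorm

abseps = 0.01                        # absolute error tolerance
nc1c2 = 2                            # option mirrored from the snippet above
trunc_error = 0.1 * abseps           # reserve 10% of abseps for truncation
xcut = abs(invnorm(trunc_error / (nc1c2 * 2)))
xcutoff = max(min(xcut, 8.5), 1.2)   # clamp to the interval [1.2, 8.5]
print(xcutoff)                       # about 3.48 for these inputs
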
Example #2
    def set_constants(self):
        if self.xcutoff is None:
            trunc_error = 0.1 * self.abseps
            self.nc1c2 = max(1, self.nc1c2)
            xcut = abs(invnorm(trunc_error / (self.nc1c2 * 2)))
            self.xcutoff = max(min(xcut, 8.5), 1.2)
            # self.abseps  = max(self.abseps- truncError,0);
            # self.releps  = max(self.releps- truncError,0);

        if self.method > 0:
            names = ['method', 'xcscale', 'abseps', 'releps', 'coveps',
                     'maxpts', 'minpts', 'nit', 'xcutoff', 'nc1c2', 'quadno',
                     'xsplit']

            constants = [getattr(self, name) for name in names]
            constants[0] = mod(constants[0], 10)
            rindmod.set_constants(*constants)  # @UndefinedVariable
Example #3
    def initialize(self, speed=None):
        """
        Initializes member variables according to speed.

        Parameters
        ----------
        speed : scalar integer
            defining the accuracy of the calculations.
            Valid numbers: 1, 2, ..., 13
            (1 = slowest and most accurate, 13 = fastest but least accurate).


        Member variables initialized according to speed:
        -----------------------------------------------
        speed : Integer defining accuracy of calculations.
        abseps : Absolute error tolerance.
        releps : Relative error tolerance.
        coveps : Error tolerance in the Cholesky factorization.
        xcutoff : Truncation limit of the normal CDF.
        maxpts : Maximum number of function values allowed.
        quadno : Quadrature formula used in the integration of Xd(i),
                implicitly determining the number of nodes.
        """
        if speed is None:
            return
        self.speed = min(max(speed, 1), 13)

        self.maxpts = 10000
        self.quadno = r_[1:4] + (10 - min(speed, 9)) + (speed == 1)
        if speed in (11, 12, 13):
            self.abseps = 1e-1
        elif speed == 10:
            self.abseps = 1e-2
        elif speed in (7, 8, 9):
            self.abseps = 1e-2
        elif speed in (4, 5, 6):
            self.maxpts = 20000
            self.abseps = 1e-3
        elif speed in (1, 2, 3):
            self.maxpts = 30000
            self.abseps = 1e-4

        if speed < 12:
            tmp = max(abs(11 - abs(speed)), 1)
            expon = mod(tmp + 1, 3) + 1
            self.coveps = self.abseps * ((1.0e-1) ** expon)
        elif speed < 13:
            self.coveps = 0.1
        else:
            self.coveps = 0.5

        self.releps = min(self.abseps, 1.0e-2)

        if self.method == 0:
            # This gives approximately the same accuracy as when using
            # RINDDND and RINDNIT
            #    xCutOff= MIN(MAX(xCutOff+0.5d0,4.d0),5.d0)
            self.abseps = self.abseps * 1.0e-1
        trunc_error = 0.05 * max(0, self.abseps)
        self.xcutoff = max(min(abs(invnorm(trunc_error)), 7), 1.2)
        self.abseps = max(self.abseps - trunc_error, 0)
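
Note: the speed-to-tolerance mapping in initialize() above can be read as a
pure function of speed. The sketch below restates it outside the class; the
helper name and the method argument are hypothetical, introduced only for
illustration, and invnorm is assumed to be scipy.special.ndtri. The quadno
setting is left out.

# Sketch (hypothetical helper): tolerances chosen by initialize() for a
# given speed, assuming invnorm = scipy.special.ndtri.
from numpy import mod
from scipy.special import ndtri as invnorm


def tolerances_for_speed(speed, method=3):
    speed = min(max(speed, 1), 13)
    maxpts = 10000
    if speed >= 11:
        abseps = 1e-1
    elif speed >= 7:                   # speeds 7, 8, 9 and 10
        abseps = 1e-2
    elif speed >= 4:
        maxpts, abseps = 20000, 1e-3
    else:                              # speeds 1, 2, 3
        maxpts, abseps = 30000, 1e-4

    if speed < 12:
        expon = mod(max(abs(11 - abs(speed)), 1) + 1, 3) + 1
        coveps = abseps * 0.1 ** expon
    else:
        coveps = 0.1 if speed < 13 else 0.5

    releps = min(abseps, 1e-2)
    if method == 0:                    # rough parity with RINDDND/RINDNIT
        abseps = abseps * 0.1
    trunc_error = 0.05 * max(0, abseps)
    xcutoff = max(min(abs(invnorm(trunc_error)), 7), 1.2)
    abseps = max(abseps - trunc_error, 0)
    return dict(abseps=abseps, releps=releps, coveps=coveps,
                xcutoff=xcutoff, maxpts=maxpts)


print(tolerances_for_speed(5))   # e.g. abseps 9.5e-4, xcutoff about 3.9
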
Example #4
    def _trdata_cdf(self, data):
        '''
        Estimate transformation, g, from observed marginal CDF.
        Assumption: a Gaussian process, Y, is related to the
                            non-Gaussian process, X, by Y = g(X).
        Parameters
        ----------
        data : array_like
            observed data from the non-Gaussian process X. How the smoothing
            is done is controlled by the options set on this object
            (see troptset for default values).

        Returns
        -------
        tr, tr_emp : TrData
            smoothed and empirical estimates of the transformation g.

        The empirical CDF is usually very irregular. More than one local
        maximum of the empirical CDF may cause a poor fit of the
        transformation. In such cases one should use a smaller value of GSM
        or set a larger variance for GVAR. If X(t) is likely to cross levels
        higher than 5 standard deviations, then the vector param has to be
        modified. For example, if X(t) is unlikely to cross a level of 7
        standard deviations one can use param = [-7, 7, 513].
        '''
        mean = data.mean()
        sigma = data.std()
        cdf = edf(data.ravel())
        Ne = self.ne
        nd = len(cdf.data)
        if nd > self.ntr and self.ntr > 0:
            x0 = np.linspace(cdf.args[Ne], cdf.args[nd - 1 - Ne], self.ntr)
            cdf.data = np.interp(x0, cdf.args, cdf.data)
            cdf.args = x0
            Ne = 0
        uu = np.linspace(*self.param)

        ncr = len(cdf.data)
        ng = len(np.atleast_1d(self.gvar))
        if ng == 1:
            gvar = self.gvar * np.ones(ncr)
        else:
            self.gvar = np.atleast_1d(self.gvar)
            gvar = np.interp(np.linspace(0, 1, ncr), np.linspace(0, 1, ng),
                             self.gvar.ravel())

        ind = np.flatnonzero(np.diff(cdf.args) > 0)  # remove equal points
        nd = len(ind)
        ind1 = ind[Ne:nd - Ne]
        tmp = invnorm(cdf.data[ind])

        x = sigma * uu + mean
        pp_tr = SmoothSpline(cdf.args[ind1],
                             tmp[Ne:nd - Ne],
                             p=self.gsm,
                             lin_extrap=self.linextrap,
                             var=gvar[ind1])
        tr = TrData(pp_tr(x), x, mean=mean, sigma=sigma)
        tr_emp = TrData(tmp, cdf.args[ind], mean=mean, sigma=sigma)
        tr_emp.setplotter('step')

        if self.chkder:
            tr_raw = TrData(tmp[Ne:nd - Ne],
                            cdf.args[ind1],
                            mean=mean,
                            sigma=sigma)
            tr = self._check_tr(tr, tr_raw)

        if self.plotflag > 0:
            tr.plot()
            tr_emp.plot()
        return tr, tr_emp
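
Note: the essential step in _trdata_cdf above is mapping the empirical CDF of
the data through the normal quantile function; the smoothing spline is then
fitted to that raw curve. The sketch below shows only the raw, empirical part
with plain NumPy/SciPy. The helper name and the (i - 0.5)/n plotting
positions for the empirical CDF are assumptions (the library's edf() may use
a different convention), and the SmoothSpline/TrData wrapping is omitted.

# Sketch (hypothetical helper): raw empirical estimate of g, i.e.
# g_emp(x) = invnorm(F_hat(x)) at the sorted data points.
import numpy as np
from scipy.special import ndtri as invnorm


def empirical_transform(data):
    x = np.sort(np.ravel(data))
    n = x.size
    cdf = (np.arange(1, n + 1) - 0.5) / n      # stays strictly inside (0, 1)
    keep = np.hstack((True, np.diff(x) > 0))   # drop repeated abscissas
    return x[keep], invnorm(cdf[keep])


data = np.random.default_rng(0).normal(size=500)
u, g = empirical_transform(data)
# For standard normal data the raw g is close to the identity: g(u) ~ u.
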
Example #5
    def _trdata_lc(self, level_crossings, mean=None, sigma=None):
        '''
        Estimate transformation, g, from observed crossing intensity.

        Assumption: a Gaussian process, Y, is related to the
                    non-Gaussian process, X, by Y = g(X).

        Parameters
        ----------
        mean, sigma : real scalars
            mean and standard deviation of the process
        **options :
        csm, gsm : real scalars
            define the smoothing of the crossing intensity and of the
            transformation g, respectively.
            Valid values: 0 <= csm, gsm <= 1 (default csm=0.9, gsm=0.05).
            Smaller values give smoother functions.
        param : vector
            defines the region of variation of the data X
            (default [-5, 5, 513]).
        monitor : bool
            if true monitor development of estimation
        linextrap : bool
            if true use a smoothing spline with a constraint on the ends to
            ensure linear extrapolation outside the range of data. (default)
            otherwise use a regular smoothing spline
        cvar, gvar : real scalars
            Variances for the crossing intensity and the empirical
            transformation, g. (default  1)
        ne : scalar integer
            Number of extremes (maxima & minima) to remove from the estimation
            of the transformation. This makes the estimation more robust
            against outliers. (default 7)
        ntr : scalar integer
            Maximum length of the empirical crossing intensity. The empirical
            crossing intensity is interpolated linearly before smoothing if
            its length exceeds ntr. A reasonable ntr (e.g. 1000) will
            significantly speed up the estimation for long time series
            without losing any accuracy. ntr should be chosen greater than
            the number of points given by param. (default inf)

        Returns
        -------
        gs, ge : TrData objects
            smoothed and empirical estimates of the transformation g.

        Notes
        -----
        The empirical crossing intensity is usually very irregular.
        More than one local maximum of the empirical crossing intensity
        may cause a poor fit of the transformation. In such cases one
        should use a smaller value of GSM or set a larger variance for GVAR.
        If X(t) is likely to cross levels higher than 5 standard deviations,
        then the vector param has to be modified. For example, if X(t) is
        unlikely to cross a level of 7 standard deviations one can use
        param = [-7, 7, 513].

        Example
        -------
        >>> import wafo.spectrum.models as sm
        >>> import wafo.transform.models as tm
        >>> from wafo.objects import mat2timeseries
        >>> Hs = 7.0
        >>> Sj = sm.Jonswap(Hm0=Hs)
        >>> S = Sj.tospecdata()   #Make spectrum object from numerical values
        >>> S.tr = tm.TrOchi(mean=0, skew=0.16, kurt=0,
        ...        sigma=Hs/4, ysigma=Hs/4)
        >>> xs = S.sim(ns=2**16, iseed=10)
        >>> ts = mat2timeseries(xs)
        >>> tp = ts.turning_points()
        >>> mm = tp.cycle_pairs()
        >>> lc = mm.level_crossings()
        >>> g0, g0emp = lc.trdata(monitor=True) # Monitor the development
        >>> g1, g1emp = lc.trdata(gvar=0.5 ) # Equal weight on all points
        >>> g2, g2emp = lc.trdata(gvar=[3.5, 0.5, 3.5])  # Less weight on ends
        >>> int(S.tr.dist2gauss()*100)
        141
        >>> int(g0emp.dist2gauss()*100)
        380995
        >>> int(g0.dist2gauss()*100)
        143
        >>> int(g1.dist2gauss()*100)
        162
        >>> int(g2.dist2gauss()*100)
        120

        g0.plot() # Check the fit.

        See also
          troptset, dat2tr, trplot, findcross, smooth

        NB! The transformed data will be N(0,1).

        Reference
        ---------
        Rychlik, I., Johannesson, P., and Leadbetter, M.R. (1997)
        "Modelling and statistical analysis of ocean wave data
        using a transformed Gaussian process",
        Marine Structures, Design, Construction and Safety,
        Vol. 10, pp. 13--47.
        '''
        if mean is None:
            mean = level_crossings.mean
        if sigma is None:
            sigma = level_crossings.sigma
        lc1, lc2 = level_crossings.args, level_crossings.data
        intensity = level_crossings.intensity

        Ne = self.ne
        ncr = len(lc2)
        if ncr > self.ntr and self.ntr > 0:
            x0 = np.linspace(lc1[Ne], lc1[-1 - Ne], self.ntr)
            lc1, lc2 = x0, np.interp(x0, lc1, lc2)
            Ne = 0
            Ner = self.ne
            ncr = self.ntr
        else:
            Ner = 0

        ng = len(np.atleast_1d(self.gvar))
        if ng == 1:
            gvar = self.gvar * np.ones(ncr)
        else:
            gvar = np.interp(np.linspace(0, 1, ncr), np.linspace(0, 1, ng),
                             self.gvar)

        uu = np.linspace(*self.param)
        g1 = sigma * uu + mean

        if Ner > 0:  # Compute correction factors
            cor1 = np.trapz(lc2[0:Ner + 1], lc1[0:Ner + 1])
            cor2 = np.trapz(lc2[-Ner - 1::], lc1[-Ner - 1::])
        else:
            cor1 = 0
            cor2 = 0

        lc22 = np.hstack((0, cumtrapz(lc2, lc1) + cor1))

        if intensity:
            lc22 = (lc22 + 0.5 / ncr) / (lc22[-1] + cor2 + 1. / ncr)
        else:
            lc22 = (lc22 + 0.5) / (lc22[-1] + cor2 + 1)

        lc11 = (lc1 - mean) / sigma

        lc22 = invnorm(lc22)  # - ymean

        g2 = TrData(lc22.copy(), lc1.copy(), mean=mean, sigma=sigma)
        g2.setplotter('step')
        # NB! the smooth function does not always extrapolate well outside the
        # edges causing poor estimate of g
        # We may alleviate this problem by: forcing the extrapolation
        # to be linear outside the edges or choosing a lower value for csm2.

        inds = slice(Ne, ncr - Ne)  # indices to points we are smoothing over
        slc22 = SmoothSpline(lc11[inds], lc22[inds], self.gsm, self.linextrap,
                             gvar[inds])(uu)

        g = TrData(slc22.copy(), g1.copy(), mean=mean, sigma=sigma)

        if self.chkder:
            tr_raw = TrData(lc22[inds], lc11[inds], mean=mean, sigma=sigma)
            g = self._check_tr(g, tr_raw)

        if self.plotflag > 0:
            g.plot()
            g2.plot()

        return g, g2
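
Note: the core of _trdata_lc above is to integrate the observed crossing
intensity over the levels, normalise the result into (0, 1), and map it
through the normal quantile; against the standardised levels this gives the
raw estimate of g that is then smoothed. A stripped-down sketch of that
calculation follows, without the end corrections, SmoothSpline or TrData;
the helper name is hypothetical and invnorm is assumed to be
scipy.special.ndtri.

# Sketch (hypothetical helper): raw g estimate from a level-crossing curve,
# mirroring the central lines of _trdata_lc without end corrections.
import numpy as np
from scipy.integrate import cumulative_trapezoid as cumtrapz
from scipy.special import ndtri as invnorm


def raw_transform_from_lc(levels, crossings, mean=0.0, sigma=1.0):
    lc22 = np.hstack((0.0, cumtrapz(crossings, levels)))  # integrated counts
    lc22 = (lc22 + 0.5) / (lc22[-1] + 1.0)                # normalise to (0, 1)
    u = (levels - mean) / sigma                           # standardised levels
    return u, invnorm(lc22)                               # raw g


# Toy usage: a Gaussian (Rice-shaped) crossing count gives a raw g close to
# the identity over the central levels.
levels = np.linspace(-4, 4, 201)
crossings = 1000.0 * np.exp(-0.5 * levels ** 2)
u, g = raw_transform_from_lc(levels, crossings)
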
Example #6
    def _trdata_cdf(self, data):
        '''
        Estimate transformation, g, from observed marginal CDF.
        Assumption: a Gaussian process, Y, is related to the
                            non-Gaussian process, X, by Y = g(X).
        Parameters
        ----------
        data : array_like
            observed data from the non-Gaussian process X. How the smoothing
            is done is controlled by the options set on this object
            (see troptset for default values).

        Returns
        -------
        tr, tr_emp : TrData
            smoothed and empirical estimates of the transformation g.

        The empirical CDF is usually very irregular. More than one local
        maximum of the empirical CDF may cause a poor fit of the
        transformation. In such cases one should use a smaller value of GSM
        or set a larger variance for GVAR. If X(t) is likely to cross levels
        higher than 5 standard deviations, then the vector param has to be
        modified. For example, if X(t) is unlikely to cross a level of 7
        standard deviations one can use param = [-7, 7, 513].
        '''
        mean = data.mean()
        sigma = data.std()
        cdf = edf(data.ravel())
        Ne = self.ne
        nd = len(cdf.data)
        if nd > self.ntr and self.ntr > 0:
            x0 = np.linspace(cdf.args[Ne], cdf.args[nd - 1 - Ne], self.ntr)
            cdf.data = np.interp(x0, cdf.args, cdf.data)
            cdf.args = x0
            Ne = 0
        uu = np.linspace(*self.param)

        ncr = len(cdf.data)
        ng = len(np.atleast_1d(self.gvar))
        if ng == 1:
            gvar = self.gvar * np.ones(ncr)
        else:
            self.gvar = np.atleast_1d(self.gvar)
            gvar = np.interp(np.linspace(0, 1, ncr),
                             np.linspace(0, 1, ng), self.gvar.ravel())

        ind = np.flatnonzero(np.diff(cdf.args) > 0)  # remove equal points
        nd = len(ind)
        ind1 = ind[Ne:nd - Ne]
        tmp = invnorm(cdf.data[ind])

        x = sigma * uu + mean
        pp_tr = SmoothSpline(cdf.args[ind1], tmp[Ne:nd - Ne], p=self.gsm,
                             lin_extrap=self.linextrap, var=gvar[ind1])
        tr = TrData(pp_tr(x), x, mean=mean, sigma=sigma)
        tr_emp = TrData(tmp, cdf.args[ind], mean=mean, sigma=sigma)
        tr_emp.setplotter('step')

        if self.chkder:
            tr_raw = TrData(tmp[Ne:nd - Ne], cdf.args[ind1], mean=mean,
                            sigma=sigma)
            tr = self._check_tr(tr, tr_raw)

        if self.plotflag > 0:
            tr.plot()
            tr_emp.plot()
        return tr, tr_emp
Example #7
    def _trdata_lc(self, level_crossings, mean=None, sigma=None):
        '''
        Estimate transformation, g, from observed crossing intensity.

        Assumption: a Gaussian process, Y, is related to the
                    non-Gaussian process, X, by Y = g(X).

        Parameters
        ----------
        mean, sigma : real scalars
            mean and standard deviation of the process
        **options :
        csm, gsm : real scalars
            define the smoothing of the crossing intensity and of the
            transformation g, respectively.
            Valid values: 0 <= csm, gsm <= 1 (default csm=0.9, gsm=0.05).
            Smaller values give smoother functions.
        param : vector
            defines the region of variation of the data X
            (default [-5, 5, 513]).
        monitor : bool
            if true monitor development of estimation
        linextrap : bool
            if true use a smoothing spline with a constraint on the ends to
            ensure linear extrapolation outside the range of data. (default)
            otherwise use a regular smoothing spline
        cvar, gvar : real scalars
            Variances for the crossing intensity and the empirical
            transformation, g. (default  1)
        ne : scalar integer
            Number of extremes (maxima & minima) to remove from the estimation
            of the transformation. This makes the estimation more robust
            against outliers. (default 7)
        ntr : scalar integer
            Maximum length of the empirical crossing intensity. The empirical
            crossing intensity is interpolated linearly before smoothing if
            its length exceeds ntr. A reasonable ntr (e.g. 1000) will
            significantly speed up the estimation for long time series
            without losing any accuracy. ntr should be chosen greater than
            the number of points given by param. (default inf)

        Returns
        -------
        gs, ge : TrData objects
            smoothed and empirical estimates of the transformation g.

        Notes
        -----
        The empirical crossing intensity is usually very irregular.
        More than one local maximum of the empirical crossing intensity
        may cause a poor fit of the transformation. In such cases one
        should use a smaller value of GSM or set a larger variance for GVAR.
        If X(t) is likely to cross levels higher than 5 standard deviations,
        then the vector param has to be modified. For example, if X(t) is
        unlikely to cross a level of 7 standard deviations one can use
        param = [-7, 7, 513].

        Example
        -------
        >>> import wafo.spectrum.models as sm
        >>> import wafo.transform.models as tm
        >>> from wafo.objects import mat2timeseries
        >>> Hs = 7.0
        >>> Sj = sm.Jonswap(Hm0=Hs)
        >>> S = Sj.tospecdata()   #Make spectrum object from numerical values
        >>> S.tr = tm.TrOchi(mean=0, skew=0.16, kurt=0,
        ...        sigma=Hs/4, ysigma=Hs/4)
        >>> xs = S.sim(ns=2**16, iseed=10)
        >>> ts = mat2timeseries(xs)
        >>> tp = ts.turning_points()
        >>> mm = tp.cycle_pairs()
        >>> lc = mm.level_crossings()
        >>> g0, g0emp = lc.trdata(monitor=True) # Monitor the development
        >>> g1, g1emp = lc.trdata(gvar=0.5 ) # Equal weight on all points
        >>> g2, g2emp = lc.trdata(gvar=[3.5, 0.5, 3.5])  # Less weight on ends
        >>> int(S.tr.dist2gauss()*100)
        141
        >>> int(g0emp.dist2gauss()*100)
        380995
        >>> int(g0.dist2gauss()*100)
        143
        >>> int(g1.dist2gauss()*100)
        162
        >>> int(g2.dist2gauss()*100)
        120

        g0.plot() # Check the fit.

        See also
          troptset, dat2tr, trplot, findcross, smooth

        NB! The transformed data will be N(0,1).

        Reference
        ---------
        Rychlik, I., Johannesson, P., and Leadbetter, M.R. (1997)
        "Modelling and statistical analysis of ocean wave data
        using a transformed Gaussian process",
        Marine Structures, Design, Construction and Safety,
        Vol. 10, pp. 13--47.
        '''
        if mean is None:
            mean = level_crossings.mean
        if sigma is None:
            sigma = level_crossings.sigma
        lc1, lc2 = level_crossings.args, level_crossings.data
        intensity = level_crossings.intensity

        Ne = self.ne
        ncr = len(lc2)
        if ncr > self.ntr and self.ntr > 0:
            x0 = np.linspace(lc1[Ne], lc1[-1 - Ne], self.ntr)
            lc1, lc2 = x0, np.interp(x0, lc1, lc2)
            Ne = 0
            Ner = self.ne
            ncr = self.ntr
        else:
            Ner = 0

        ng = len(np.atleast_1d(self.gvar))
        if ng == 1:
            gvar = self.gvar * np.ones(ncr)
        else:
            gvar = np.interp(np.linspace(0, 1, ncr),
                             np.linspace(0, 1, ng), self.gvar)

        uu = np.linspace(*self.param)
        g1 = sigma * uu + mean

        if Ner > 0:  # Compute correction factors
            cor1 = np.trapz(lc2[0:Ner + 1], lc1[0:Ner + 1])
            cor2 = np.trapz(lc2[-Ner - 1::], lc1[-Ner - 1::])
        else:
            cor1 = 0
            cor2 = 0

        lc22 = np.hstack((0, cumtrapz(lc2, lc1) + cor1))

        if intensity:
            lc22 = (lc22 + 0.5 / ncr) / (lc22[-1] + cor2 + 1. / ncr)
        else:
            lc22 = (lc22 + 0.5) / (lc22[-1] + cor2 + 1)

        lc11 = (lc1 - mean) / sigma

        lc22 = invnorm(lc22)  # - ymean

        g2 = TrData(lc22.copy(), lc1.copy(), mean=mean, sigma=sigma)
        g2.setplotter('step')
        # NB! the smooth function does not always extrapolate well outside the
        # edges causing poor estimate of g
        # We may alleviate this problem by: forcing the extrapolation
        # to be linear outside the edges or choosing a lower value for csm2.

        inds = slice(Ne, ncr - Ne)  # indices to points we are smoothing over
        slc22 = SmoothSpline(lc11[inds], lc22[inds], self.gsm, self.linextrap,
                             gvar[inds])(uu)

        g = TrData(slc22.copy(), g1.copy(), mean=mean, sigma=sigma)

        if self.chkder:
            tr_raw = TrData(lc22[inds], lc11[inds], mean=mean, sigma=sigma)
            g = self._check_tr(g, tr_raw)

        if self.plotflag > 0:
            g.plot()
            g2.plot()

        return g, g2
Example #8
    def initialize(self, speed=None):
        '''
        Initializes member variables according to speed.

        Parameters
        ----------
        speed : scalar integer
            defining the accuracy of the calculations.
            Valid numbers: 1, 2, ..., 13
            (1 = slowest and most accurate, 13 = fastest but least accurate).


        Member variables initialized according to speed:
        -----------------------------------------------
        speed : Integer defining accuracy of calculations.
        abseps : Absolute error tolerance.
        releps : Relative error tolerance.
        coveps : Error tolerance in the Cholesky factorization.
        xcutoff : Truncation limit of the normal CDF.
        maxpts : Maximum number of function values allowed.
        quadno : Quadrature formula used in the integration of Xd(i),
                implicitly determining the number of nodes.
        '''
        if speed is None:
            return
        self.speed = min(max(speed, 1), 13)

        self.maxpts = 10000
        self.quadno = r_[1:4] + (10 - min(speed, 9)) + (speed == 1)
        if speed in (11, 12, 13):
            self.abseps = 1e-1
        elif speed == 10:
            self.abseps = 1e-2
        elif speed in (7, 8, 9):
            self.abseps = 1e-2
        elif speed in (4, 5, 6):
            self.maxpts = 20000
            self.abseps = 1e-3
        elif speed in (1, 2, 3):
            self.maxpts = 30000
            self.abseps = 1e-4

        if speed < 12:
            tmp = max(abs(11 - abs(speed)), 1)
            expon = mod(tmp + 1, 3) + 1
            self.coveps = self.abseps * ((1.0e-1) ** expon)
        elif speed < 13:
            self.coveps = 0.1
        else:
            self.coveps = 0.5

        self.releps = min(self.abseps, 1.0e-2)

        if self.method == 0:
            # This gives approximately the same accuracy as when using
            # RINDDND and RINDNIT
            #    xCutOff= MIN(MAX(xCutOff+0.5d0,4.d0),5.d0)
            self.abseps = self.abseps * 1.0e-1
        trunc_error = 0.05 * max(0, self.abseps)
        self.xcutoff = max(min(abs(invnorm(trunc_error)), 7), 1.2)
        self.abseps = max(self.abseps - trunc_error, 0)