Code example #1
def single_prewhitening(times,
                        signal,
                        freq,
                        optimize=0,
                        model='sine',
                        full_output=False,
                        correlation_correction=True):
    """
    Fit a function to a timeseries via a single iteration of prewhitening.
    Use this function in combination with C{find_frequency} to do step-by-step
    prewhitening.

    This function fits a model with the previously found frequencies C{freq} to
    the original signal and optimizes the resulting parameters if needed.

    @return: parameters(, model)
    @rtype: rec array(, ndarray)
    """
    # do the fit including all frequencies
    all_params = getattr(fit, model)(times, signal, freq)

    # if there's a need to optimize, optimize the last n parameters
    if optimize > 0:
        # start from a copy of the signal; the part of the model that is kept
        # fixed is subtracted below, so only the last n parameter sets are
        # (re)fit to what remains
        residuals_for_optimization = signal.copy()
        if optimize <= len(freq):
            model_fixed_params = getattr(evaluate,
                                         model)(times, all_params[:-optimize])
            residuals_for_optimization -= model_fixed_params
        uparams, e_uparams, gain = fit.optimize(times,
                                                residuals_for_optimization,
                                                all_params[-optimize:], model)
        # only accept the optimization if we gained prediction power
        if gain > 0:
            all_params[-optimize:] = uparams
            logger.info('Accepted optimization (gained %g%%)' % gain)

    # compute the model and the errors
    model_func = getattr(evaluate, model)(times, all_params)
    e_allparams = getattr(fit, 'e_' + model)(
        times,
        signal,
        all_params,
        correlation_correction=correlation_correction)

    all_params = numpy_ext.recarr_join(all_params, e_allparams)

    if full_output:
        return all_params, model_func
    else:
        return all_params
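
For orientation, here is a minimal, self-contained sketch of what a single prewhitening step amounts to for the 'sine' model: fit a sinusoid at a known frequency by linear least squares and subtract it from the data. The fit/evaluate helper modules used above are not shown on this page, so the sketch only illustrates the idea in plain numpy; the helper name prewhiten_once is invented for the example.

import numpy as np

def prewhiten_once(times, signal, freq):
    """Fit c0 + a*sin(2*pi*f*t) + b*cos(2*pi*f*t) and return (coeffs, residuals)."""
    phase = 2 * np.pi * freq * times
    design = np.column_stack([np.ones_like(times), np.sin(phase), np.cos(phase)])
    coeffs, _, _, _ = np.linalg.lstsq(design, signal, rcond=None)
    residuals = signal - design @ coeffs
    return coeffs, residuals

# example: one prewhitening step on a noisy 2.5 c/d sine
times = np.linspace(0, 100, 1000)
signal = 0.8 * np.sin(2 * np.pi * 2.5 * times) + np.random.normal(0, 0.1, times.size)
coeffs, residuals = prewhiten_once(times, signal, 2.5)
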
Code example #2
def iterative_prewhitening(times,
                           signal,
                           maxiter=1000,
                           optimize=0,
                           method='scargle',
                           model='sine',
                           full_output=False,
                           stopcrit=None,
                           correlation_correction=True,
                           prewhiteningorder_snr=False,
                           prewhiteningorder_snr_window=1.,
                           **kwargs):
    """
    Fit one or more functions to a timeseries via iterative prewhitening.

    This function will use C{find_frequency} to fit the function parameters to
    the original signal (including any previously found parameters),
    optimize the parameters if needed, and remove it from the data to search
    for a new frequency in the residuals.

    It is always the original signal that is used to fit all parameters again;
    B{only the (optimized) frequency is remembered from step to step} (Vanicek's
    method).

    It is best to set C{maxiter} to some sensible value, to hard-limit the number of
    frequencies that will be searched for. You can additionally use a C{stopcrit}
    and stop looking for frequencies once it is reached. C{stopcrit} should be
    a tuple; the first argument is the function to call, the other arguments
    are passed to the function, after the mandatory arguments C{times,signal,
    modelfunc,allparams,pergram}. See L{stopcrit_scargle_snr} for an example of
    such a function.

    By default, the function looks for the highest (or deepest in the case of the pdm
    method) peak, but instead it is possible to go for the peak having the highest
    SNR before prewhitening by setting C{prewhiteningorder_snr} to True. In this case,
    the noise spectrum is calculated using a convolution with a
    C{prewhiteningorder_snr_window} wide box. Usage of this is strongly encouraged,
    especially combined with L{stopcrit_scargle_snr} as C{stopcrit}.

    @return: parameters(, model function)
    @rtype: rec array(, ndarray)
    """
    residuals = signal.copy()
    frequencies = []
    stop_criteria = []
    while maxiter:
        #-- compute the next frequency from the residuals
        params, pergram, this_fit = find_frequency(
            times,
            residuals,
            method=method,
            full_output=True,
            correlation_correction=correlation_correction,
            prewhiteningorder_snr=prewhiteningorder_snr,
            prewhiteningorder_snr_window=prewhiteningorder_snr_window,
            **kwargs)

        #-- do the fit including all frequencies
        frequencies.append(params['freq'][-1])
        allparams = getattr(fit, model)(times, signal, frequencies)

        #-- if there's a need to optimize, optimize the last n parameters
        if optimize > 0:
            residuals_for_optimization = residuals
            if optimize <= len(params):
                model_fixed_params = getattr(evaluate,
                                             model)(times,
                                                    allparams[:-optimize])
                residuals_for_optimization -= model_fixed_params
            uparams, e_uparams, gain = fit.optimize(
                times, residuals_for_optimization, allparams[-optimize:],
                model)
            #-- only accept the optimization if we gained prediction power
            if gain > 0:
                allparams[-optimize:] = uparams
                logger.info('Accepted optimization (gained %g%%)' % gain)

        #-- compute the residuals to use in the next prewhitening step
        modelfunc = getattr(evaluate, model)(times, allparams)
        residuals = signal - modelfunc

        #-- exhaust the counter
        maxiter -= 1

        #-- check stop criterion
        if stopcrit is not None:
            func = stopcrit[0]
            args = stopcrit[1:]
            condition, value = func(times, signal, modelfunc, allparams,
                                    pergram, *args)
            logger.info('Stop criterion (%s): %.3g' % (func.__name__, value))
            stop_criteria.append(value)
            if condition:
                logger.info('Stop criterion reached')
                break

    #-- calculate the errors
    e_allparams = getattr(fit, 'e_' + model)(
        times,
        signal,
        allparams,
        correlation_correction=correlation_correction)

    allparams = numpy_ext.recarr_join(allparams, e_allparams)
    if stopcrit is not None:
        allparams = numpy_ext.recarr_join(
            allparams, np.rec.fromarrays([stop_criteria], names=['stopcrit']))

    if full_output:
        return allparams, modelfunc
    else:
        return allparams
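
The loop above can be summarised with a compact, self-contained numpy sketch of the same Vanicek-style scheme: only the list of accepted frequencies is carried from step to step, and all amplitudes and phases are refit against the original signal on every iteration. The brute-force periodogram and the helper fit_sines below are stand-ins for the library's find_frequency/fit/evaluate machinery, not its actual implementation.

import numpy as np

def fit_sines(times, signal, freqs):
    """Linear least-squares fit of a constant plus one sin/cos pair per frequency."""
    cols = [np.ones_like(times)]
    for f in freqs:
        cols += [np.sin(2 * np.pi * f * times), np.cos(2 * np.pi * f * times)]
    design = np.column_stack(cols)
    coeffs, _, _, _ = np.linalg.lstsq(design, signal, rcond=None)
    return coeffs, design @ coeffs

times = np.linspace(0, 100, 1000)
signal = (0.8 * np.sin(2 * np.pi * 2.5 * times)
          + 0.3 * np.sin(2 * np.pi * 5.1 * times)
          + np.random.normal(0, 0.1, times.size))

residuals, freqs = signal.copy(), []
for _ in range(2):                                   # maxiter = 2 for the example
    # crude periodogram of the current residuals; take its strongest peak
    grid = np.arange(0.1, 10, 0.002)
    power = np.abs([np.sum(residuals * np.exp(-2j * np.pi * f * times)) for f in grid])
    freqs.append(grid[int(np.argmax(power))])
    # refit *all* frequencies against the original signal (Vanicek's method)
    coeffs, model = fit_sines(times, signal, freqs)
    residuals = signal - model
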
Code example #3
def iterative_prewhitening(times,signal,maxiter=1000,optimize=0,method='scargle',
    model='sine',full_output=False,stopcrit=None,correlation_correction=True,
    prewhiteningorder_snr=False,prewhiteningorder_snr_window=1.,**kwargs):
    """
    Fit one or more functions to a timeseries via iterative prewhitening.
    
    This function will use C{find_frequency} to fit the function parameters to
    the original signal (including any previously found parameters),
    optimize the parameters if needed, and remove it from the data to search
    for a new frequency in the residuals.
    
    It is always the original signal that is used to fit all parameters again;
    B{only the (optimized) frequency is remembered from step to step} (Vanicek's
    method).
    
    It is best to set C{maxiter} to some sensible value, to hard-limit the number of
    frequencies that will be searched for. You can additionally use a C{stopcrit}
    and stop looking for frequencies once it is reached. C{stopcrit} should be
    a tuple; the first argument is the function to call, the other arguments
    are passed to the function, after the mandatory arguments C{times,signal,
    modelfunc,allparams,pergram}. See L{stopcrit_scargle_snr} for an example of
    such a function.
    
    By default, the function looks for the highest (or deepest in the case of the pdm 
    method) peak, but instead it is possible to go for the peak having the highest 
    SNR before prewhitening by setting C{prewhiteningorder_snr} to True. In this case, 
    the noise spectrum is calculated using a convolution with a 
    C{prewhiteningorder_snr_window} wide box. Usage of this is strongly encouraged, 
    especially combined with L{stopcrit_scargle_snr} as C{stopcrit}.
    
    @return: parameters(, model function)
    @rtype: rec array(, ndarray)
    """
    residuals = signal.copy()
    frequencies = []
    stop_criteria = []
    while maxiter:
        #-- compute the next frequency from the residuals
        params,pergram,this_fit = find_frequency(times,residuals,method=method,
                full_output=True,correlation_correction=correlation_correction,
                prewhiteningorder_snr=prewhiteningorder_snr,
                prewhiteningorder_snr_window=prewhiteningorder_snr_window,**kwargs)
        
        #-- do the fit including all frequencies
        frequencies.append(params['freq'][-1])
        allparams = getattr(fit,model)(times,signal,frequencies)
        
        #-- if there's a need to optimize, optimize the last n parameters
        if optimize>0:
            residuals_for_optimization = residuals
            if optimize<=len(params):
                model_fixed_params = getattr(evaluate,model)(times,allparams[:-optimize])
                residuals_for_optimization -= model_fixed_params
            uparams,e_uparams, gain = fit.optimize(times,residuals_for_optimization,allparams[-optimize:],model)
            #-- only accept the optimization if we gained prediction power
            if gain>0:
                allparams[-optimize:] = uparams
                logger.info('Accepted optimization (gained %g%%)'%gain)
        
        #-- compute the residuals to use in the next prewhitening step
        modelfunc = getattr(evaluate,model)(times,allparams)
        residuals = signal - modelfunc
        
        #-- exhaust the counter
        maxiter -= 1
        
        #-- check stop criterion
        if stopcrit is not None:
            func = stopcrit[0]
            args = stopcrit[1:]
            condition,value = func(times,signal,modelfunc,allparams,pergram,*args) 
            logger.info('Stop criterion (%s): %.3g'%(func.__name__,value))
            stop_criteria.append(value)
            if condition:
                logger.info('Stop criterion reached')
                break
        
    #-- calculate the errors
    e_allparams = getattr(fit,'e_'+model)(times,signal,allparams,correlation_correction=correlation_correction)
    
    allparams = numpy_ext.recarr_join(allparams,e_allparams)
    if stopcrit is not None:
        allparams = numpy_ext.recarr_join(allparams,np.rec.fromarrays([stop_criteria],names=['stopcrit']))
    
    if full_output:
        return allparams,modelfunc
    else:
        return allparams
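
As the docstring describes, stopcrit is a tuple whose first element is a callable invoked as func(times, signal, modelfunc, allparams, pergram, *args) and expected to return a (condition, value) pair. The sketch below shows a hypothetical SNR-style criterion written against that interface; it is only an illustration, not the library's own stopcrit_scargle_snr.

import numpy as np

def stopcrit_snr_sketch(times, signal, modelfunc, allparams, pergram, snr_limit=4.0):
    """Stop once the highest remaining peak drops below snr_limit times the median level."""
    freqs, ampls = pergram
    value = ampls.max() / np.median(ampls)   # crude signal-to-noise estimate
    return value < snr_limit, value

# it would then be passed to the routine as, e.g.:
# allparams = iterative_prewhitening(times, signal, maxiter=50,
#                                    stopcrit=(stopcrit_snr_sketch, 4.0))
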
Code example #4
def find_frequency(times,
                   signal,
                   method='scargle',
                   model='sine',
                   full_output=False,
                   optimize=0,
                   max_loops=20,
                   scale_region=0.1,
                   scale_df=0.20,
                   model_kwargs=None,
                   correlation_correction=True,
                   prewhiteningorder_snr=False,
                   prewhiteningorder_snr_window=1.,
                   **kwargs):
    """
    Find one frequency, automatically going to maximum precision and return
    parameters & error estimates.

    This routine makes the frequency grid finer until it is finer than the
    estimated error on the frequency. After that, it will compute (harmonic)
    parameters and estimate errors.

    There is a possibility to escape this optimization by setting C{scale_df=0} or
    C{scale_region=0}.

    You can include a nonlinear least-squares update of the parameters by
    setting the keyword C{optimize=1} (optimization outside loop) or
    C{optimize=2} (optimization after each iteration).

    The method with which to find the frequency can be set with the keyword
    C{method}, the model used to fit and optimize should be set with C{model}.
    Extra keywords for the model functions should go in C{model_kwargs}. If
    C{method} is a tuple, the first method will be used for the first frequency
    search only. This could be useful to take advantage of such methods as
    fasper which do not allow for iterative zoom-ins. By default, the function looks
    for the highest (or deepest in the case of the pdm method) peak, but instead it is
    possible to go for the peak having the highest SNR before prewhitening by setting
    C{prewhiteningorder_snr} to True. In this case, the noise spectrum is calculated
    using a convolution with a C{prewhiteningorder_snr_window} wide box.

    Possible extra keywords: see definition of the used periodogram function.

    B{Warning}: the timeseries must be B{sorted in time} and B{cannot contain
    the same timepoint twice}. Otherwise, a 'ValueError, concatenation problem'
    can occur.

    Example keywords:
        - 'correlation_correction', default=True
        - 'scale_region', default=0.1: factor for zooming in on the frequency region
        - 'scale_df', default=0.20: factor for refining the frequency resolution

    Example usage: We generate a sine signal

    >>> times = np.linspace(0,100,1000)
    >>> signal = np.sin(2*np.pi*2.5*times) + np.random.normal(size=len(times))

    Compute the frequency

    >>> parameters, pgram, model = find_frequency(times,signal,full_output=True)

    Make a plot:

    >>> p = pl.figure()
    >>> p = pl.axes([0.1,0.3,0.85,0.65])
    >>> p = pl.plot(pgram[0],pgram[1],'k-')
    >>> p = pl.xlim(2.2,2.8)
    >>> p = pl.ylabel('Amplitude')
    >>> p = pl.axes([0.1,0.1,0.85,0.2])
    >>> p = pl.plot(pgram[0][:-1],np.diff(pgram[0]),'k-')
    >>> p = pl.xlim(2.2,2.8)
    >>> p,q = pl.xlabel('Frequency (c/d)'),pl.ylabel('Frequency resolution $\Delta F$')

    ]]include figure]]timeseries_freqanalyse_06.png]

    @rtype: record array(, 2x1Darray, 1Darray)
    @return: parameters and errors(, periodogram, model function)
    """
    if model_kwargs is None:
        model_kwargs = dict()
    #-- initial values
    e_f = 0
    freq_diff = np.inf
    prev_freq = -np.inf
    counter = 0

    f_max = np.inf
    f_min = 0.  #-np.inf

    #-- calculate periodogram until frequency precision is
    #   under 1/10th of correlation corrected version of frequency error
    method_kwargs = kwargs.copy()  # don't modify the dictionary the user gave

    while freq_diff > e_f / 10.:
        #-- possibly, we might want to use different periodograms for the first
        #   calculation than for the zoom in, since some periodograms are faster
        #   than others but do not have the ability to 'zoom in' (e.g. the FFT)
        method_ = None
        if freq_diff == np.inf and not isinstance(method, str):
            method_ = method[1]
            method = method[0]  # use the first method for the initial search only
        #-- calculate periodogram
        freqs, ampls = getattr(pergrams, method)(times, signal,
                                                 **method_kwargs)
        f0, fn, df = freqs[0], freqs[-1], freqs[1] - freqs[0]
        #-- from now on, use the second method for the zoom-ins
        if method_ is not None:
            method = method_
        #-- extract the frequency: this part should be generalized, but for now,
        #   it will do:
        if method in ['pdm']:
            frequency = freqs[np.argmin(ampls)]
        #-- instead of going for the highest peak, we can pick the most
        #   significant one (highest SNR before prewhitening)
        elif prewhiteningorder_snr:
            if counter == 0:
                #-- estimate a noise spectrum by convolving the amplitude
                #   spectrum with a prewhiteningorder_snr_window wide box
                windowlength = float(prewhiteningorder_snr_window) / (
                    freqs[1] - freqs[0])
                window = np.ones(int(windowlength)) / float(windowlength)
                #-- mirror the amplitude spectrum at both ends so the
                #   convolution behaves well near the edges
                ampls_ = np.concatenate((ampls[::-1], ampls, ampls[::-1]))
                noises_ = np.convolve(ampls_, window, 'same')
                #-- cut the smoothed spectrum back to the original frequency range
                noises = np.split(noises_, 3)[1]
                freqs_old = np.copy(freqs)
                noises_old = np.copy(noises)
            else:
                #-- reuse the original noise spectrum in the narrower zoom-in
                #   windows; this saves time and avoids a noise window wider
                #   than the zoom-in window itself
                noises = np.interp(freqs, freqs_old, noises_old)
            frequency = freqs[np.argmax(ampls / noises)]
        else:
            frequency = freqs[np.argmax(ampls)]
        if full_output and counter == 0:
            freqs_, ampls_ = freqs, ampls
        #-- estimate parameters and calculate a fit, errors and residuals
        params = getattr(fit, model)(times, signal, frequency, **model_kwargs)
        if hasattr(fit, 'e_' + model):
            errors = getattr(fit, 'e_' + model)(
                times,
                signal,
                params,
                correlation_correction=correlation_correction)
            e_f = errors['e_freq'][-1]
        #-- possibly there are no errors defined for this fitting function
        else:
            errors = None
        #-- optimize inside loop if necessary and if we gained prediction
        #   value:
        if optimize == 2:
            params_, errors_, gain = fit.optimize(times, signal, params, model)
            if gain > 0:
                params = params_
                logger.info('Accepted optimization (gained %g%%)' % gain)

        #-- improve precision
        freq_diff = abs(frequency - prev_freq)
        prev_freq = frequency
        freq_region = fn - f0
        f0 = max(f_min, frequency - freq_region * scale_region / 2.)
        fn = min(f_max, frequency + freq_region * scale_region / 2.)
        df *= scale_df
        method_kwargs['f0'] = f0
        method_kwargs['fn'] = fn
        method_kwargs['df'] = df
        #-- possibilities to escape iterative zoom in
        #print '---> {counter}/{max_loops}: freq={frequency} ({f0}-->{fn}/{df}), e_f={e_f}, freq_diff={freq_diff}'.format(**locals()),max(ampls)
        if scale_region == 0 or scale_df == 0:
            break
        if counter >= max_loops:
            logger.error(
                "Frequency precision not reached in %d steps, breaking loop" %
                (max_loops))
            break
        if (fn - f0) / df < 5:
            logger.error(
                "Frequency precision not reached with stepsize %e, breaking loop"
                % (df / scale_df))
            break
        counter += 1
    #-- optimize parameters outside of loop if necessary:
    if optimize == 1:
        params_, errors_, gain = fit.optimize(times, signal, params, model)
        if gain > 0:
            params = params_
            logger.info('Accepted optimization (gained %g%%)' % gain)
    #-- add the errors to the parameter array if possible
    if errors is not None:
        params = numpy_ext.recarr_join(params, errors)
    # logger.info("%s model parameters via %s periodogram:\n"%(model,method)+pl.mlab.rec2txt(params,precision=8))
    # params.tofile('log_params', sep=' ', format='%s')
    # logger.info("%s model parameters via %s periodogram:\n"%(model, method) + np.fromfile('log_params'))
    logger.info("%s model parameters via %s periodogram:\n" % (model, method) +
                str(params))
    #-- when full output is required, return parameters, periodogram and fitting
    #   function
    if full_output:
        mymodel = getattr(evaluate, model)(times, params)
        return params, (freqs_, ampls_), mymodel
    else:
        return params
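
The prewhiteningorder_snr branch above can be read in isolation: estimate a noise spectrum by smoothing the edge-mirrored amplitude spectrum with a box of width prewhiteningorder_snr_window, then pick the peak with the highest amplitude-to-noise ratio rather than the highest amplitude. A self-contained sketch of that selection, using plain numpy instead of the pergrams module:

import numpy as np

def snr_peak(freqs, ampls, snr_window=1.0):
    """Return the frequency whose amplitude is highest relative to the local noise level."""
    df = freqs[1] - freqs[0]
    windowlength = max(int(snr_window / df), 1)
    window = np.ones(windowlength) / float(windowlength)
    # mirror the spectrum at both ends so the convolution behaves near the edges
    padded = np.concatenate((ampls[::-1], ampls, ampls[::-1]))
    noises = np.convolve(padded, window, 'same')[len(ampls):2 * len(ampls)]
    return freqs[np.argmax(ampls / noises)]

# example: a red-noise-like spectrum with one injected peak near 6 c/d
freqs = np.linspace(0.01, 10.0, 2000)
ampls = 1.0 / (1.0 + freqs) + 0.05 * np.random.rand(freqs.size)
ampls[1200] += 0.2
print(snr_peak(freqs, ampls))   # ~6, even though the raw maximum sits at low frequency
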
Code example #5
def find_frequency(times,signal,method='scargle',model='sine',full_output=False,
            optimize=0,max_loops=20, scale_region=0.1, scale_df=0.20, model_kwargs=None,
            correlation_correction=True,prewhiteningorder_snr=False,
            prewhiteningorder_snr_window=1.,**kwargs):
    """
    Find one frequency, automatically going to maximum precision and return
    parameters & error estimates.
    
    This routine makes the frequency grid finer until it is finer than the
    estimated error on the frequency. After that, it will compute (harmonic)
    parameters and estimate errors.
    
    There is a possibility to escape this optimization by setting C{scale_df=0} or
    C{scale_region=0}.
    
    You can include a nonlinear least-squares update of the parameters by
    setting the keyword C{optimize=1} (optimization outside loop) or
    C{optimize=2} (optimization after each iteration).
    
    The method with which to find the frequency can be set with the keyword
    C{method}, the model used to fit and optimize should be set with C{model}.
    Extra keywords for the model functions should go in C{model_kwargs}. If
    C{method} is a tuple, the first method will be used for the first frequency
    search only. This could be useful to take advantage of such methods as
    fasper which do not allow for iterative zoom-ins. By default, the function looks 
    for the highest (or deepest in the case of the pdm method) peak, but instead it is 
    possible to go for the peak having the highest SNR before prewhitening by setting 
    C{prewhiteningorder_snr} to True. In this case, the noise spectrum is calculated 
    using a convolution with a C{prewhiteningorder_snr_window} wide box.
    
    Possible extra keywords: see definition of the used periodogram function.
    
    B{Warning}: the timeseries must be B{sorted in time} and B{cannot contain
    the same timepoint twice}. Otherwise, a 'ValueError, concatenation problem'
    can occur.
    
    Example keywords:
        - 'correlation_correction', default=True
        - 'scale_region', default=0.1: factor for zooming in on the frequency region
        - 'scale_df', default=0.20: factor for refining the frequency resolution
        
    Example usage: We generate a sine signal
    
    >>> times = np.linspace(0,100,1000)
    >>> signal = np.sin(2*np.pi*2.5*times) + np.random.normal(size=len(times))
    
    Compute the frequency
    
    >>> parameters, pgram, model = find_frequency(times,signal,full_output=True)
    
    Make a plot:
    
    >>> p = pl.figure()
    >>> p = pl.axes([0.1,0.3,0.85,0.65])
    >>> p = pl.plot(pgram[0],pgram[1],'k-')
    >>> p = pl.xlim(2.2,2.8)
    >>> p = pl.ylabel('Amplitude')
    >>> p = pl.axes([0.1,0.1,0.85,0.2])
    >>> p = pl.plot(pgram[0][:-1],np.diff(pgram[0]),'k-')
    >>> p = pl.xlim(2.2,2.8)
    >>> p,q = pl.xlabel('Frequency (c/d)'),pl.ylabel('Frequency resolution $\Delta F$')
    
    ]]include figure]]timeseries_freqanalyse_06.png]
    
    @rtype: record array(, 2x1Darray, 1Darray)
    @return: parameters and errors(, periodogram, model function)
    """
    if model_kwargs is None:
        model_kwargs = dict()
    #-- initial values
    e_f = 0
    freq_diff = np.inf
    prev_freq = -np.inf
    counter = 0
    
    f_max = np.inf
    f_min = 0.#-np.inf
    
    #-- calculate periodogram until frequency precision is
    #   under 1/10th of correlation corrected version of frequency error
    method_kwargs = kwargs.copy() # don't modify the dictionary the user gave

    while freq_diff>e_f/10.:
        #-- possibly, we might want to use different periodograms for the first
        #   calculation than for the zoom in, since some periodograms are faster
        #   than others but do not have the ability to 'zoom in' (e.g. the FFT)
        method_ = None
        if freq_diff==np.inf and not isinstance(method,str):
            method_ = method[1]
            method = method[0]  # use the first method for the initial search only
        #-- calculate periodogram
        freqs,ampls = getattr(pergrams,method)(times,signal,**method_kwargs)
        f0,fn,df = freqs[0],freqs[-1],freqs[1]-freqs[0]
        #-- from now on, use the second method for the zoom-ins
        if method_ is not None:
            method = method_
        #-- extract the frequency: this part should be generalized, but for now,
        #   it will do:
        if method in ['pdm']:
            frequency = freqs[np.argmin(ampls)]
        #-- instead of going for the highest peak, let's get the most significant one
        elif prewhiteningorder_snr:
            if counter == 0: #we calculate a noise spectrum with a convolution in a 1 d-1 window
                windowlength = float(prewhiteningorder_snr_window)/(freqs[1]-freqs[0])
                window = np.ones(int(windowlength))/float(windowlength)
                ampls_ = np.concatenate((ampls[::-1],ampls,ampls[::-1])) #we mirror the amplitude spectrum on both ends so the convolution will be better near the edges
                noises_ = np.convolve(ampls_, window, 'same')
                noises = np.split(noises_,3)[1] #and we recut the resulted convolution to match the original frequency range
                freqs_old = np.copy(freqs)
                noises_old = np.copy(noises)
            else:
                noises = np.interp(freqs,freqs_old,noises_old) #we use the original noise spectrum in this narrower windows too, which should save some time, and avoid the problem of having a wider window for the SNR calculation than the width of the zoom-in window
            frequency = freqs[np.argmax(ampls/noises)]
        else:
            frequency = freqs[np.argmax(ampls)]
        if full_output and counter==0:
            freqs_,ampls_ = freqs,ampls
        #-- estimate parameters and calculate a fit, errors and residuals
        params = getattr(fit,model)(times,signal,frequency,**model_kwargs)
        if hasattr(fit,'e_'+model):
            errors = getattr(fit,'e_'+model)(times,signal,params,correlation_correction=correlation_correction)
            e_f = errors['e_freq'][-1]
        #-- possibly there are no errors defined for this fitting function
        else:
            errors = None
        #-- optimize inside loop if necessary and if we gained prediction
        #   value:
        if optimize==2:
            params_,errors_,gain = fit.optimize(times,signal,params,model)
            if gain>0:
                params = params_
                logger.info('Accepted optimization (gained %g%%)'%gain)
        
        #-- improve precision
        freq_diff = abs(frequency-prev_freq)
        prev_freq = frequency
        freq_region = fn-f0
        f0 = max(f_min,frequency-freq_region*scale_region/2.)
        fn = min(f_max,frequency+freq_region*scale_region/2.)
        df *= scale_df
        method_kwargs['f0'] = f0
        method_kwargs['fn'] = fn
        method_kwargs['df'] = df
        #-- possibilities to escape iterative zoom in
        #print '---> {counter}/{max_loops}: freq={frequency} ({f0}-->{fn}/{df}), e_f={e_f}, freq_diff={freq_diff}'.format(**locals()),max(ampls)
        if scale_region==0 or scale_df==0:
            break
        if counter >= max_loops:
            logger.error("Frequency precision not reached in %d steps, breaking loop"%(max_loops))
            break
        if (fn-f0)/df<5:
            logger.error("Frequency precision not reached with stepsize %e, breaking loop"%(df/scale_df))
            break
        counter += 1
    #-- optimize parameters outside of loop if necessary:
    if optimize==1:
        params_,errors_,gain = fit.optimize(times,signal,params,model)
        if gain>0:
            params = params_
            logger.info('Accepted optimization (gained %g%%)'%gain)
    #-- add the errors to the parameter array if possible
    if errors is not None:
        params = numpy_ext.recarr_join(params,errors)
    logger.info("%s model parameters via %s periodogram:\n"%(model,method)+pl.mlab.rec2txt(params,precision=8))
    #-- when full output is required, return parameters, periodogram and fitting
    #   function
    if full_output:
        mymodel = getattr(evaluate,model)(times,params)
        return params,(freqs_,ampls_),mymodel
    else:
        return params
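
Finally, the iterative zoom-in strategy itself is easy to mimic: after each periodogram the search window is narrowed around the current best frequency by scale_region and the frequency step is multiplied by scale_df, until successive estimates agree. The sketch below uses a brute-force periodogram and a fixed tolerance in place of the correlation-corrected frequency error that find_frequency uses, so it only illustrates the refinement loop.

import numpy as np

def refine_frequency(times, signal, f0=0.1, fn=10.0, df=1e-3,
                     scale_region=0.1, scale_df=0.2, tol=1e-6, max_loops=20):
    """Zoom in on the strongest periodogram peak until the estimate stabilises."""
    prev_freq = -np.inf
    frequency = None
    for _ in range(max_loops):
        freqs = np.arange(f0, fn, df)
        power = np.abs([np.sum(signal * np.exp(-2j * np.pi * f * times)) for f in freqs])
        frequency = freqs[int(np.argmax(power))]
        if abs(frequency - prev_freq) < tol:
            break
        prev_freq = frequency
        # shrink the window around the peak and refine the step size
        region = (fn - f0) * scale_region / 2.0
        f0, fn, df = frequency - region, frequency + region, df * scale_df
    return frequency

times = np.linspace(0, 100, 1000)
signal = np.sin(2 * np.pi * 2.5 * times) + np.random.normal(size=times.size)
print(refine_frequency(times, signal))   # close to 2.5
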