Example #1
import numpy as _np
from scipy.special import gamma as _gamma  # assumed imports for this snippet


def wind_EWTSII_Davenport(vave, k, T=50, n=23037):
    """
    Algorithm from the European Wind Turbine Standards II (EWTS II),
    Davenport variation.
    
    It uses 10-minute mean wind speeds to obtain the extreme wind speed
    for the defined return period ``T``.
    
    **Parameters**
    
    vave : float or int
        Long-term mean wind speed.
    k : float or int
        Weibull k parameter as defined in the wind industry. To obtain the k
        parameter using scipy `have a look here 
        <http://stackoverflow.com/questions/17481672/fitting-a-weibull-distribution-using-scipy/17498673#17498673>`_.
        In scipy, the `c` parameter is the equivalent of the wind industry's k.
    T : float or int
        Return period in years. Default value is 50 (years).
    n : float or int
        The number of independent events per year. Default value is 23037 for
        10-min time steps and 1-yr extrema.
    
    **Returns**
    
    vref : float
        Expected extreme wind speed at the return period defined.
    
    **References**
    
        Dekker JWM, Pierik JTG (1998): 'European Wind Turbine Standards II', 
        ECN-C-99-073, ECN Solar & Wind Energy, Netherlands.
    """
    c1 = 1 - (k - 1) / (k * _np.log(n))
    c2 = 1 + (_np.log(k * _gamma(1 + 1 / k) *
                      ((_np.log(n))**(1 - 1 / k)))) / (k * _np.log(n) - k + 1)
    a = ((_np.log(n))**(1 / k - 1)) / (c1 * k * _gamma(1 + 1 / k))
    b = c1 * c2 * k * _np.log(n) - _np.log(-_np.log(1 - 1 / T))
    res = vave * a * b
    return res
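
A minimal usage sketch; the input values below are illustrative assumptions, not from the source:

# hypothetical inputs: 7 m/s long-term mean wind speed, Weibull k = 2.0
vref = wind_EWTSII_Davenport(vave=7.0, k=2.0)  # defaults: T=50 yr, n=23037
print(vref)  # expected extreme 10-min wind speed, same units as vave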
Example #2
import numpy as _np
import scipy.stats as _st
import scipy.optimize as _op
from scipy.special import gamma as _gamma  # assumed imports for this snippet


def gev_momfit(data):
    """
    Estimate parameters of the Generalised Extreme Value distribution using
    the method of moments. The methodology follows appendix A.4 of EVA
    (see references below).
    
    **Parameters**
    
    data : array_like
        Sample extreme data
    
    **Returns**
    
    tuple
        Tuple with the shape, location and scale parameters estimated
        from the sample moments.
    
    **References**
    
        DHI (2003): '`EVA (Extreme Value Analysis - Reference Manual) 
        <http://www.tnmckc.org/upload/document/wup/1/1.3/Manuals/MIKE%2011/eva/EVA_RefManual.pdf>`_', 
        DHI.
    """
            
    g = lambda n, x: _gamma(1 + n * x)  # gamma function evaluated at (1 + n*x)
    
    mean = _np.mean(data)
    std = _np.std(data)
    skew = _st.skew(data)
    
    def minimize_skew(x):
        # distance between the GEV skewness implied by shape x and the sample skew
        a = -g(3, x) + 3 * g(1, x) * g(2, x) - 2 * g(1, x)**3
        b = (g(2, x) - (g(1, x))**2)**1.5
        c = abs(a / b - skew)
        return c
        
    c = _op.fmin(minimize_skew, 0)[0] # first guess is set to 0
    scale = std * abs(c) / _np.sqrt((g(2, c) - g(1, c)**2))
    loc = mean - scale * (1 - g(1, c)) / c
    
    return c, loc, scale
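
A quick sanity check on synthetic data (hypothetical values; note that sign conventions for the GEV shape parameter vary between references and scipy):

import numpy as np
from scipy import stats

# synthetic annual maxima drawn from a GEV distribution
data = stats.genextreme.rvs(0.1, loc=30.0, scale=5.0, size=1000, random_state=0)
shape, loc, scale = gev_momfit(data)
print(shape, loc, scale)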
Example #3
import numpy as np
from scipy.special import gamma as _gamma  # assumed imports for this snippet


def gamma(x, alpha, beta):
    '''Gamma probability density function (checked for consistency).
    alpha is the shape parameter,
    beta is the rate parameter.'''
    return ((beta**alpha) / _gamma(alpha)) * (x**(alpha - 1)) * np.exp(
        -beta * x)
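
A hypothetical cross-check against scipy.stats.gamma, whose `scale` is the reciprocal of the rate `beta`:

from scipy import stats

x, alpha, beta = 2.0, 3.0, 1.5
print(gamma(x, alpha, beta))                      # density from the function above
print(stats.gamma.pdf(x, a=alpha, scale=1/beta))  # should match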
Example #4
    def compute_mean_residence_time(self,
                                    nb_blocks=1,
                                    filter_artifacts=False,
                                    per_residue=False,
                                    return_average_time=False,
                                    use_filtered=True):
        # _np, _hf, _curve_fit and _gamma are assumed module-level imports
        # (numpy, a helper module, scipy.optimize.curve_fit, scipy.special.gamma)
        if use_filtered:
            results = self.filtered_results
        else:
            results = self.initial_results

        def func(x, tau, lamda):
            # stretched-exponential survival function fitted to the residence-time curve
            return _np.exp(-(x / tau)**lamda)

        all_residence_time = []
        all_average_time = []
        block_length = int(self.nb_frames / nb_blocks)
        xdata = _np.arange(1, block_length + 1)
        for run in range(nb_blocks):
            s = slice(run * block_length, (run + 1) * block_length, 1)
            intervals = {
                key: _hf.intervals_binary(results[key][s])
                for key in results
            }
            residence_time = _np.zeros(block_length)
            average_time = _np.array([0., 0.])

            if per_residue:
                residence_time = {}
                del_res = []
                average_time = {}
                for pair in results:
                    segn, resn, resi, _, _, _ = _hf.deconst_key(pair, True)
                    residue_key = segn + '-' + resn + '-' + str(resi)
                    work_intervals = intervals[pair]
                    interval_lengths = _np.diff(work_intervals,
                                                1).astype(int).flatten()
                    if filter_artifacts:
                        interval_lengths = interval_lengths[(interval_lengths <
                                                             block_length)]
                    try:
                        average_time[residue_key] += _np.array(
                            [interval_lengths.sum(), interval_lengths.size])
                    except KeyError:
                        average_time[residue_key] = _np.array(
                            [interval_lengths.sum(), interval_lengths.size])
                        residence_time[residue_key] = _np.zeros(block_length)
                    for l in interval_lengths:
                        residence_time[residue_key][:l] += _np.arange(
                            1, l + 1)[::-1]

                for residue_key in residence_time:
                    residence_time[residue_key] /= _np.arange(
                        1, block_length + 1)[::-1]
                    residence_time[residue_key] /= residence_time[residue_key][
                        0]
                    if _np.isnan(residence_time[residue_key]).any():
                        del_res.append(residue_key)
                    average_time[residue_key] = average_time[residue_key][
                        0] / average_time[residue_key][1]
                    try:
                        (tau,
                         lamda), pcov = _curve_fit(func,
                                                   xdata,
                                                   residence_time[residue_key],
                                                   p0=(10.0, 0.5))
                        residence_time[residue_key] = tau / lamda * _gamma(
                            1 / lamda)  # mean of the fitted stretched exponential
                    except Exception:
                        if average_time[residue_key] <= 2.0:
                            residence_time[residue_key] = 1.0
                        else:
                            try:
                                (tau, lamda), pcov = _curve_fit(
                                    func,
                                    xdata,
                                    residence_time[residue_key],
                                    p0=(block_length, 0.5))
                                residence_time[
                                    residue_key] = tau / lamda * _gamma(
                                        1 / lamda)
                            except Exception:
                                residence_time[residue_key] = block_length
                for key in del_res:
                    del residence_time[key]

            else:
                for water in results:
                    work_intervals = intervals[water]
                    interval_lengths = _np.diff(work_intervals,
                                                1).astype(int).flatten()
                    if filter_artifacts:
                        interval_lengths = interval_lengths[(interval_lengths <
                                                             block_length)]
                    average_time += _np.array(
                        [interval_lengths.sum(), interval_lengths.size])
                    for l in interval_lengths:
                        residence_time[:l] += _np.arange(1, l + 1)[::-1]
                residence_time /= _np.arange(1, block_length + 1)[::-1]
                residence_time /= residence_time[0]
                average_time = average_time[0] / average_time[1]
                try:
                    (tau, lamda), pcov = _curve_fit(func,
                                                    xdata,
                                                    residence_time,
                                                    p0=(10.0, 0.5))
                    residence_time = tau / lamda * _gamma(1 / lamda)  # mean of the fit
                except Exception:
                    if average_time <= 2.0:
                        residence_time = 1.0
                    else:
                        residence_time = block_length
            all_residence_time.append(residence_time)
            all_average_time.append(average_time)
        if per_residue:
            for key in residence_time:
                temp_residence_time = []
                temp_average_time = []
                for residence_dict in all_residence_time:
                    try:
                        temp_residence_time.append(residence_dict[key])
                    except KeyError:
                        pass
                for average_dict in all_average_time:
                    try:
                        temp_average_time.append(average_dict[key])
                    except KeyError:
                        pass
                residence_time[key] = (_np.mean(temp_residence_time),
                                       _np.std(temp_residence_time) /
                                       _np.sqrt(nb_blocks))
                average_time[key] = (_np.mean(temp_average_time),
                                     _np.std(temp_average_time) /
                                     _np.sqrt(nb_blocks))
        else:
            residence_time = (_np.mean(all_residence_time),
                              _np.std(all_residence_time) /
                              _np.sqrt(nb_blocks))
            average_time = (_np.mean(all_average_time),
                            _np.std(all_average_time) / _np.sqrt(nb_blocks))

        if return_average_time:
            return residence_time, average_time
        return residence_time
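
The numerical core of this method is the stretched-exponential fit of the survival curve and the closed-form mean it implies. Below is a standalone sketch of just that step, under assumed imports and synthetic data:

import numpy as np
from scipy.optimize import curve_fit
from scipy.special import gamma

def func(x, tau, lamda):
    return np.exp(-(x / tau)**lamda)

xdata = np.arange(1, 101)
ydata = func(xdata, 20.0, 0.8)                   # synthetic survival curve
(tau, lamda), _ = curve_fit(func, xdata, ydata, p0=(10.0, 0.5))
mean_residence = tau / lamda * gamma(1 / lamda)  # integral of exp(-(t/tau)**lamda)
print(mean_residence)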