def weibullRelease(self, period):
        """
        Returns the rate from a user-shaped weibull distribution function 
        at a specific period.  
        """
        par = self.parameters
        if len(par) == 2:  # loc = 0
            if period == 0:
                rate = 0
            else:
                c = par[0]
                scale = par[1]
                frozenWeib = st.exponweib(1, c, 0, scale)
                rate = 0.5 * (frozenWeib.pdf(period - 0.5) -
                              frozenWeib.pdf(period)) + frozenWeib.pdf(period)
            return rate

        elif len(par) == 3:  # loc defined by user
            if period == 0:
                rate = 0
            else:
                c = par[0]
                scale = par[1]
                loc = par[2]
                frozenWeib = st.exponweib(1, c, loc, scale)
                rate = 0.5 * (frozenWeib.pdf(period - 0.5) -
                              frozenWeib.pdf(period)) + frozenWeib.pdf(period)
            return rate
        else:
            print('ERROR:')
            print(
                'Too few or too many arguments for weibull release function.')
            print('Enter two or three arguments for weibull release function.')
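A minimal standalone sketch of the rate computation above, assuming scipy.stats is imported as st and using illustrative parameters. Note that 0.5*(pdf(p-0.5) - pdf(p)) + pdf(p) is simply the average of the pdf at p-0.5 and p, and that exponweib(1, c, loc, scale) is an ordinary two-parameter Weibull:

import scipy.stats as st

# Illustrative shape and scale; exponweib with a=1 is a plain Weibull.
c, scale = 1.5, 10.0
frozenWeib = st.exponweib(1, c, 0, scale)
for period in range(1, 6):
    # Average of the pdf at period-0.5 and period, as in the method above.
    rate = 0.5 * (frozenWeib.pdf(period - 0.5) + frozenWeib.pdf(period))
    print(period, round(rate, 4))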
def weibullRelease(self, period):
        """
        Returns the rate from a user-shaped Weibull distribution function
        at a specific period.
        """
        par = self.parameters
        sum_rate = 0
        if len(par) == 2:  # loc = 0
            c = par[0]
            scale = par[1]
            frozenWeib = st.exponweib(1, c, 0, scale)
            # Sum over the Weibull function except for service lifetime = 1
            for j in range(2, 30):
                weib_rate = 0.5 * (frozenWeib.pdf(j - 0.5) -
                                   frozenWeib.pdf(j)) + frozenWeib.pdf(j)
                sum_rate += weib_rate

            if period == 0:  # Weibull function should not become infinite if c < 1
                rate = 0
            elif period == 1:
                # Sum over the Weibull function should equal 1:
                # all material gets released in the end
                rate = 1 - sum_rate
            else:
                rate = 0.5 * (frozenWeib.pdf(period - 0.5) -
                              frozenWeib.pdf(period)) + frozenWeib.pdf(period)
            return rate

        elif len(par) == 3:  # loc defined by user
            c = par[0]
            scale = par[1]
            loc = par[2]
            ceil = math.ceil(loc)
            frozenWeib = st.exponweib(1, c, loc, scale)
            # Sum over the Weibull function except for service lifetime <= loc
            for j in range(ceil + 1, 30):
                weib_rate = 0.5 * (frozenWeib.pdf(j - 0.5) -
                                   frozenWeib.pdf(j)) + frozenWeib.pdf(j)
                sum_rate += weib_rate

            if period < ceil:  # Weibull function should not become infinite if c < 1
                rate = 0
            elif period == ceil:
                # Sum over the Weibull function should equal 1:
                # all material gets released in the end
                rate = 1 - sum_rate
            else:
                rate = 0.5 * (frozenWeib.pdf(period - 0.5) -
                              frozenWeib.pdf(period)) + frozenWeib.pdf(period)
            return rate
        else:
            print('ERROR:')
            print('Too few or too many arguments for Weibull release function.')
            print('Enter two or three arguments for Weibull release function.')
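Because the period-1 rate is defined as one minus the summed tail, the discretized rates form a distribution that sums to one; a quick standalone check of that invariant, again with illustrative parameters:

import scipy.stats as st

c, scale = 1.5, 10.0
frozenWeib = st.exponweib(1, c, 0, scale)
weib = lambda j: 0.5 * (frozenWeib.pdf(j - 0.5) + frozenWeib.pdf(j))
tail = sum(weib(j) for j in range(2, 30))
rates = [0.0, 1.0 - tail] + [weib(j) for j in range(2, 30)]
print(round(sum(rates), 10))  # 1.0 by construction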
    def reduce(self, parsers):

        # Accumulate grouped data over parsers
        for k, p in parsers.items():

            my_items = [(key, val) for (key, val) in p.emit_data.items()
                        if key[0] == id(self)]

            for (key, val) in my_items:
                if key not in self.data:
                    self.data[key] = np.array(val)
                else:
                    self.data[key] = np.hstack((self.data[key], np.array(val)))

        # Now do the stat analysis
        for (key, prognosis) in self.data.items():
            (abi, ) = key[1:2]
            age_range = [BIN_WIDTH * abi, BIN_WIDTH * (abi + 1)]

            # Note: the dummy default parameters in the lambdas below bind the
            # necessary variables (lam, kap) in scope at definition time.
            # Using bin center
            mean_age = np.mean(age_range)

            if mean_age < FERRAND_CHILD_AGE_YEARS:
                self.fun[abi] =  lambda x, a=CHILD_ALPHA, b=CHILD_BETA, m=CHILD_MU, s=CHILD_S:    \
                            a     * sps.expon.cdf([xx*b/DAYS_PER_YEAR for xx in x]) +             \
                            (1-a) * sps.exponweib(1,s).cdf([ np.log(2.0)**(1/s) * xx/m/DAYS_PER_YEAR for xx in x])

            else:
                mean_age = min(mean_age, FERRAND_MAX_AGE_YEARS)
                lam = ADULT_LAMBDA_SLOPE * mean_age + ADULT_LAMBDA_INTERCEPT
                kap = ADULT_KAPPA_SLOPE * mean_age + ADULT_KAPPA_INTERCEPT

                lam = DAYS_PER_YEAR * lam

                self.fun[abi] = lambda x, lam=lam, kap=kap: sps.exponweib(
                    1, kap).cdf([xx / lam for xx in x])

            self.results[abi] = self.kstest(prognosis, self.fun[abi],
                                            self.alpha)

            if self.verbose:
                if self.results[abi]['Valid']:
                    print("Sub-test for age range " + str(age_range) + " passed.")
                else:
                    print("Sub-test for age range " + str(age_range) + " failed.")
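A hedged sketch of the kind of comparison reduce() delegates to self.kstest: a one-sample KS test of prognosis samples against a Weibull CDF built with the same lam/kap convention as the lambdas above (the values here are illustrative, not the model's constants).

import numpy as np
import scipy.stats as sps

lam, kap = 12.0 * 365, 2.0  # illustrative scale (days) and shape
fun = lambda x: np.asarray(sps.exponweib(1, kap).cdf(np.atleast_1d(x) / lam))
samples = sps.exponweib(1, kap, scale=lam).rvs(size=500, random_state=0)
stat, p = sps.kstest(samples, fun)
print(stat, p)  # a large p-value means the samples match the reference CDF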
Example #4
 def distModelIndexChanged_hndlr(self):
     '''
     Handler for changing the selected item in the combobox under the
     probability plot
     :return:
     '''
     index = self.distModelBox.currentIndex()
     if index == 0:
         self.probModel = stats.norm
         self.ddofEdit.setDisabled(True)
     elif index == 1:
         self.probModel = stats.expon
         self.ddofEdit.setDisabled(True)
     elif index == 2:
         self.probModel = stats.laplace
         self.ddofEdit.setDisabled(True)
     elif index == 3:
         try:
             self.df = np.float64(self.ddofEdit.text())
         except ValueError:  # keep the previous value on invalid input
             self.ddofEdit.setText(str(self.df))
         self.probModel = stats.chi2(self.df)
         self.ddoflabel.setText(_('ProboPlot', 'Number of degrees of freedom'))
         self.ddofEdit.setEnabled(True)
     elif index == 4:
         try:
             self.df = np.float64(self.ddofEdit.text())
         except ValueError:  # keep the previous value on invalid input
             self.ddofEdit.setText(str(self.df))
         self.probModel = stats.exponweib(a=1, c=self.df)
         self.ddoflabel.setText(_('ProboPlot', 'Shape of distribution'))
         self.ddofEdit.setEnabled(True)
     try:
         self.drawProbPlot(self.currDist)
     except Exception:  # the plot may not be drawable yet
         return
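For context, a frozen model like self.probModel can be handed straight to scipy.stats.probplot, which is presumably what drawProbPlot does internally; a minimal sketch with synthetic data:

import matplotlib.pyplot as plt
from scipy import stats

model = stats.exponweib(a=1, c=2.0)         # same form as the index == 4 branch
data = model.rvs(size=200, random_state=0)  # synthetic sample
stats.probplot(data, dist=model, plot=plt)  # quantile-quantile plot
plt.show()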
Example #5
    def _get_weibull_dist(self, qty, mean=None, std=None, scale=1.0, shape=5.0):

        x_line = np.arange(mean - std * 4.0, mean + std * 5.0, 1 * std)

        if self.weibull_dist_method == 'double':
            _data = dweibull(shape, loc=mean, scale=std)
        elif self.weibull_dist_method == 'inverted':
            _data = invweibull(shape, loc=mean, scale=std)
        elif self.weibull_dist_method == 'exponential':
            # First positional argument is exponweib's 'a' shape parameter
            _data = exponweib(scale, shape, loc=mean, scale=std)
        elif self.weibull_dist_method == 'min':
            _data = weibull_min(shape, loc=mean, scale=std)
        elif self.weibull_dist_method == 'max':
            _data = weibull_max(shape, loc=mean, scale=std)
        else:
            raise ValueError(
                'Unknown weibull_dist_method: {0}'.format(self.weibull_dist_method))
        y_line = _data.pdf(x_line) * qty

        line = Line(width=1280, height=600)
        line.add(u'{0}'.format(self.spc_target), x_line, y_line,
                 xaxis_name=u'{0}'.format(self.spc_target),
                 yaxis_name=u'数量(Quantity)',
                 line_color='rgba(0 ,255 ,127,0.5)', legend_pos='center',
                 is_smooth=True, line_width=2,
                 tooltip_trigger='axis', is_fill=True, area_color='#20B2AA',
                 area_opacity=0.4)
        pyecharts.configure(force_js_embed=True)
        return line.render_embed()
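The five branches above differ only in which SciPy Weibull variant they freeze; a side-by-side evaluation (with illustrative mean/std and the same shape) shows the differences without the charting layer:

import numpy as np
from scipy.stats import dweibull, invweibull, exponweib, weibull_min, weibull_max

mean, std, shape = 50.0, 5.0, 5.0
x = np.arange(mean - std * 4.0, mean + std * 5.0, std)
variants = {
    'double': dweibull(shape, loc=mean, scale=std),
    'inverted': invweibull(shape, loc=mean, scale=std),
    'exponential': exponweib(1.0, shape, loc=mean, scale=std),
    'min': weibull_min(shape, loc=mean, scale=std),
    'max': weibull_max(shape, loc=mean, scale=std),
}
for name, dist in variants.items():
    print(name, np.round(dist.pdf(x), 4))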
def main():
    step = 4

    interarrivals = exponweib.rvs(1, 1.5, scale=1000, size=10000)  # a=1; shape and scale here are illustrative

    print(calculate_parameters(interarrivals))
    hours = []
    hour = []
    params = []
    time = 0
    last_time = 0
    for arrival in interarrivals:

        if time + arrival > last_time + 1000 * 60 * 60 * step:
            params.append(calculate_parameters(hour))
            hours.append(hour)
            hour = []
            last_time = time = last_time + 1000 * 60 * 60 * step

        time = time + arrival
        hour.append(arrival)

    fig, ax1 = plt.subplots()

    ax2 = plt.twinx()

    ax1.plot([p[0] for p in params])
    ax2.plot([p[1] for p in params], color='orange')
    plt.show()
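calculate_parameters() is not shown in this snippet; a hypothetical stand-in, assuming it fits a two-parameter Weibull to the interarrivals with exponweib.fit (a pinned to 1 and loc to 0, as in the fitting examples further down this page) and returns the (shape, scale) pair plotted above:

from scipy.stats import exponweib

def calculate_parameters(samples):
    # Hypothetical helper: pin a=1 and loc=0, return (shape, scale).
    a, c, loc, scale = exponweib.fit(samples, f0=1, floc=0)
    return c, scale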
Example #7
File: route.py Project: pewen/newen
def analisis():

    if request.method == 'POST':
        # Get the FileStorage instance from the request
        file = request.files['file']
        # Statistical analysis
        new_path = file_path(file, path)
        df_anio, df_meses, histo_breaks, histo_counts = r_analisis(new_path)

        shape = df_anio['shape_year'][1]
        scale = df_anio['scale_year'][1]
        distribution = stats.exponweib(1, shape, loc=0, scale=scale)

        y = distribution.pdf(histo_counts['mids'])

        data = []
        for i in range(8):
            data.append(df_meses[[i]].values.tolist())
        data.append(histo_breaks[[0]].values.tolist())
        for i in range(3):
            data.append(histo_counts[[i]].values.tolist())
        data.append(y.tolist())

        # Render template with file info
        return render_template('analisis.html', data=data)

    return render_template('upload.html')
Example #8
 def weibull(self, weibull, subplot, step):
     shape = weibull[0]
     scale = weibull[1]
     stop = weibull[2]
     rv = exponweib(1, shape, scale=scale)  # a=1: ordinary two-parameter Weibull
     x = arange(start=0, stop=stop, step=step/2)
     subplot.plot(x, rv.pdf(x)*100)
Example #9
 def setup(self):
     bins = np.arange(0, 41, 1)
     Vmean = 9
     A = 9
     k = 2
     AVmean = Vmean/(gamma(1+1/k))
     step_size = bins[1]-bins[0]
      self.rv_A = spystats.exponweib(1, k, scale=A, loc=0)
      self.rv_Vmean = spystats.exponweib(1, k, scale=AVmean, loc=0)
     hourly_A = self.rv_A.pdf(bins)*8760*step_size
     normed_A = hourly_A/hourly_A.sum()
     hourly_Vmean = self.rv_Vmean.pdf(bins)*8760*step_size
     normed_Vmean = hourly_Vmean/hourly_Vmean.sum()
     self.hourlyA = pd.DataFrame({'Annual Hours': hourly_A,
                                  'Normalized': normed_A},
                                 index=bins)
     self.hourlyVmean = pd.DataFrame({'Annual Hours': hourly_Vmean,
                                      'Normalized': normed_Vmean},
                                     index=bins)
Example #11
    def weibull(self,
                column=None,
                ws_intervals=1,
                method='EuroAtlas',
                plot='matplotlib'):
        '''Calculate distribution and Weibull parameters from data

        Parameters:
        ----------
        column: tuple, default None
            Column to perform weibull analysis on
        ws_intervals: float, default=1
            Wind speed intervals on which to bin
        method: string, default 'EuroAtlas'
            Weibull calculation method.
        plot: string, default 'matplotlib'
            Choose whether or not to plot your data, and what method.
            Currently only supporting matplotlib, but hoping to add
            Bokeh as that library evolves.

        Returns:
        ________
        DataFrame with hourly data distributions
        '''

        ws_data = self.data[column]
        ws_range = np.arange(0, ws_data.max() + ws_intervals, ws_intervals)
        binned = pd.cut(ws_data, ws_range)
        dist_10min = pd.value_counts(binned).reindex(binned.levels)
        dist = pd.DataFrame({'Binned: 10Min': dist_10min})
        dist['Binned: Hourly'] = dist['Binned: 10Min'] / 6
        dist = dist.fillna(0)
        normed = dist['Binned: 10Min'] / dist['Binned: 10Min'].sum()
        ws_normed = normed.values
        x = np.arange(0, len(ws_normed), ws_intervals)

        if method == 'EuroAtlas':
            A, k = west.euro_atlas(ws_data)
        elif method == 'LeastSq':
            A, k = west.least_sq(ws_normed, x)

        A = round(A, 3)
        k = round(k, 3)
        rv = spystats.exponweib(1, k, scale=A, loc=0)

        if plot == 'matplotlib':
            smooth = np.arange(0, 100, 0.1)
            plottools.weibull(smooth,
                              rv.pdf(smooth),
                              binned=True,
                              binned_x=x,
                              binned_data=dist['Binned: Hourly'],
                              align='edge')

        return {'Weibull A': A, 'Weibull k': k, 'Dist': dist}
Example #12
def Weibull(lamda, k, tag=None):
    """
    A Weibull random variate
    
    Parameters
    ----------
    lamda : scalar
        The scale parameter
    k : scalar
        The shape parameter
    """
    assert lamda > 0 and k > 0, 'Weibull "lamda" and "k" parameters must be greater than zero'
    return uv(ss.exponweib(1, k, scale=lamda), tag=tag)  # a=1, shape k, scale lamda
Example #13
def Weib(lamda, k, tag=None):
    """
    A Weibull random variate
    
    Parameters
    ----------
    lamda : scalar
        The scale parameter
    k : scalar
        The shape parameter
    """
    assert lamda>0 and k>0, 'Weibull scale and shape parameters must be greater than zero'
    return uv(rv=ss.exponweib(1, k, scale=lamda), tag=tag)  # a=1, shape k, scale lamda
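With the first shape pinned to 1, exponweib collapses to the ordinary two-parameter Weibull, so the frozen variate really does have scale lamda and shape k; a quick equivalence check against weibull_min:

import numpy as np
import scipy.stats as ss

lamda, k = 2.0, 1.5
x = np.linspace(0.1, 5.0, 20)
# exponweib with a=1 has the same pdf as weibull_min(k, scale=lamda)
assert np.allclose(ss.exponweib(1, k, scale=lamda).pdf(x),
                   ss.weibull_min(k, scale=lamda).pdf(x))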
def weibull_hourly(k=None,
                   A=None,
                   Vmean=None,
                   bins=np.arange(0, 41, 1),
                   plot='matplotlib'):
    '''Calculate weibull distribution and annual hours from weibull k and A or
    Vmean parameters. This distribution is based on multiplying the
    PDF by the annual hours for each wind speed bin. Defaults to Vmean for
    calculation of A if both Vmean and A are provided.

    Parameters:
    ----------
    k: float, int
        Weibull k parameter
    A: float, int
        Weibull A parameter
    Vmean: float, int
        Mean wind speed, for calculating weibull with Vmean and k only
    bins: array, default np.arange(0, 41, 1)
        Wind speed bins for estimating and plotting weibull
    plot: string, default 'matplotlib'
        Choose whether or not to plot your data, and what method.
        Currently only supporting matplotlib, but hoping to add
        Bokeh as that library evolves.

    Returns:
    ________
    Dataframe of wind-speed binned annual hours and normed values
    '''

    if Vmean:
        A = Vmean / (gamma(1 + 1 / k))

    step_size = bins[1] - bins[0]
    rv = spystats.exponweib(1, k, scale=A, loc=0)
    hourly = rv.pdf(bins) * 8760 * step_size
    df_hourly = pd.DataFrame(
        {
            'Annual Hours': hourly,
            'Normalized': hourly / hourly.sum()
        },
        index=bins)
    cont_bins = np.arange(0, 100, 0.1)
    if plot == 'matplotlib':
        plottools.weibull(cont_bins,
                          rv.pdf(cont_bins),
                          binned=True,
                          binned_x=bins,
                          binned_data=hourly,
                          align='center')
    return df_hourly
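The Vmean-to-A conversion above rests on the fact that a Weibull with scale A and shape k has mean A * gamma(1 + 1/k); a one-line numerical confirmation:

import numpy as np
from scipy.special import gamma
from scipy import stats as spystats

Vmean, k = 9.0, 2.0
A = Vmean / gamma(1 + 1 / k)
# The frozen Weibull's mean recovers Vmean exactly
assert np.isclose(spystats.exponweib(1, k, scale=A).mean(), Vmean)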
Example #16
def extremeDistribution_Weibull(x, x_e, t_x, t_st, locFlag=0):
    '''Approximates the short-term extreme distribution using the all peaks
    Weibull method.

    Parameters
    ----------
        x : np.array
            Independent random variable (global peaks)
        x_e : np.array
            Array of x values at which to evaluate the short-term extreme CDF
        t_x : float
            Time length of the x array
        t_st : float
            Short-term period
        locFlag : boolean
            locFlag = 0: Location parameter of Weibull distribution is forced to zero
            locFlag = 1: Location parameter of Weibull distribution is calculated in fit


    Returns
    -------
        stextreme_dist: ecmDist object
            Probability distribution of the short-term extreme.
        peaks_dist: scipy.stats rv_frozen
            Probability distribution of the peaks.
        peaks_params: np.array length 4
            Parameters of the peaks' distribution (Weibull)
            [shape_a, shape_c, loc, scale].
    '''
    # peaks distribution
    if locFlag == 0:
        peaks_params = stats.exponweib.fit(x, f0=1, floc=0)
    elif locFlag == 1:
        peaks_params = stats.exponweib.fit(x, f0=1)
    peaks_dist = stats.exponweib(a=peaks_params[0],
                                 c=peaks_params[1],
                                 loc=peaks_params[2],
                                 scale=peaks_params[3])
    # short-term extreme distribution
    ratio = t_st / t_x
    N = len(x)
    N_st = N * ratio
    weib_cdf = peaks_dist.cdf(x_e)
    ste_cdf = weib_cdf ** N_st
    stextreme_dist = ecmDist(x_e, cdf=ste_cdf)
    # return
    return stextreme_dist, peaks_dist, peaks_params
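Raising the peaks CDF to the power N_st is the iid-maximum identity P(max of n peaks <= x) = F(x)**n; a small empirical confirmation with an illustrative frozen Weibull:

import numpy as np
from scipy import stats

n, trials = 50, 20000
peaks = stats.exponweib(a=1, c=1.8, loc=0, scale=2.0)
# Empirical CDF of the maximum of n iid peaks vs. the F**n identity
maxima = peaks.rvs(size=(trials, n), random_state=0).max(axis=1)
x = 6.0
print((maxima <= x).mean(), peaks.cdf(x) ** n)  # the two should be close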
Example #19
 def __init__(self, app):
     self.app = app
     self._min = [0, 0, -1, -9, -9, -99, -99, -99, 1000, 0, 0]
     self._max = [0, 0, 1, 9, 9, 99, 99, 99, 9999, 5000, 360]
     self.scales = [1, 1, 360, 1, 1, 1, 20, 10, 1, 1, 1]
     self.rates = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.1]
     self.g = spstats.exponweib(1, 2)
     self.a = 'abcdefghijklmnopqrstuvwxyz'
     self.i = 0
     self._g = {
         'l': self.linear,
         's': self.synthesis,
         'd': self.sample,
         'r': self._random,
         'q': self.quadratic,
     }
Example #20
def plot_freq_dist(params,
                   data=None,
                   sensor=None,
                   title='Wind speed frequency distribution'):
    '''
    Plots a wind speed frequency distribution

    Parameters:
    ----------
    params: list of float [A, k]
        Array of Weibull A and k parameters
        Can be calculated using mast.return_weibull_params()
    data: pandas Series, default None
        Measured wind speed data from a sensor
        Can be called using mast.return_primary_ano_data()
    title: string
        Plot title, default 'Wind speed frequency distribution'

    Returns:
    ________
    Frequency distribution plot
    '''
    A, k = params
    ws_bin_edges = np.arange(0.0, 31.0)
    x_smooth = np.linspace(0.0, 31.0, 100)
    weibull_dist = stats.exponweib(1, k, scale=A)

    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111)
    if data is not None:
        data.dropna().plot(kind='hist',
                           bins=ws_bin_edges,
                           ax=ax,
                           color='gray',
                           density=True,  # 'normed' was removed in matplotlib 3
                           alpha=0.6,
                           label='Measured')
    ax.plot(x_smooth,
            weibull_dist.pdf(x_smooth),
            color='darkblue',
            label='Weibull fit')
    ax.legend(loc='best')
    ax.set_xlim([0, 30])
    ax.set_title(title + ' (A: %.2f; k: %.2f)' % (A, k))
    ax.set_ylabel('Frequency')
    ax.set_xlabel('Wind speed [m/s]')
    return fig
Example #21
    def map(self, output_data):
        emit_data = {}

        hiv_infections = output_data[ self.filenames[1] ]    # CSV, has 'data' and 'colMap'
        rows = hiv_infections['data']
        colMap = hiv_infections['colMap']

        # Extract parameters from config
        config_json = output_data[ self.filenames[0] ]
        cp = config_json['parameters']

        lamv = {}
        kapv = {}

        tmp = float(cp['Sexual_Debut_Age_Male_Weibull_Heterogeneity'])
        assert( tmp > 0 )

        tmp = float(cp['Sexual_Debut_Age_Female_Weibull_Heterogeneity'])
        assert( tmp > 0 )

        lamv[MALE]      = float(cp['Sexual_Debut_Age_Male_Weibull_Scale'])          # Lamba = scale
        kapv[MALE]      = 1.0 / float(cp['Sexual_Debut_Age_Male_Weibull_Heterogeneity'])   # Kappa = shape = 1/heterogeneity
        lamv[FEMALE]    = float(cp['Sexual_Debut_Age_Female_Weibull_Scale'])
        kapv[FEMALE]    = 1.0 / float(cp['Sexual_Debut_Age_Female_Weibull_Heterogeneity'])

        for gender in [MALE, FEMALE]:
            gendername = gendernames[gender]

            # Choose rows corresponding to this relationship type
            gender_rows = [r for r in rows if r[colMap['Gender']] == str(gender)]

            # Get debut_age for each gender_row
            debut_age = [ float(r[colMap['DebutAge']])/DAYS_PER_YEAR for r in gender_rows]

            key = ( id(self), gender, lamv[gender], kapv[gender] )
            emit_data[key] = {'DebutAge': debut_age}

            # Note: the dummy default parameters in the lambda below bind the
            # necessary variables (lam, kap) in scope at definition time.
            if key not in self.fun:
                self.fun[key] = lambda x, lam=lamv[gender], kap=kapv[gender]: \
                    sps.exponweib(1,kap).cdf([xx/lam for xx in x])

        return emit_data
Example #22
    def map(self, output_data):
        emit_data = {}

        hiv_infections = output_data[ self.filenames[1] ]    # CSV, has 'data' and 'colMap'
        rows = hiv_infections['data']
        colMap = hiv_infections['colMap']

        # Extract parameters from config
        config_json = output_data[ self.filenames[0] ]
        cp = config_json['parameters']
        muv = {}
        kapv = {}
        muv[MALE]  = float(cp['Sexual_Debut_Age_Male_Mean'])
        kapv[MALE] = float(cp['Sexual_Debut_Age_Male_Shape'])
        muv[FEMALE]   = float(cp['Sexual_Debut_Age_Female_Mean'])
        kapv[FEMALE]     = float(cp['Sexual_Debut_Age_Female_Shape'])

        for gender in [MALE, FEMALE]:
            gendername = gendernames[gender]

            # Choose rows corresponding to this relationship type
            gender_rows = [r for r in rows if r[colMap['Gender']] == str(gender)]

            # Get debut_age for each gender_row
            debut_age = [ float(r[colMap['DebutAge']])/DAYS_PER_YEAR for r in gender_rows]

            mu = float(muv[gender])
            kap = float(kapv[gender])
            lam = mu / math.gamma(1+1/kap)
            #z = 1.0+1.0/float(kap)
            #gamma_approx_sterling = math.sqrt(2*math.pi/z) * (1/math.e*(z + 1/(12*z - 1/(10*z))))**z
            #gamma_approx_winschitl = (2.5066282746310002 * math.sqrt(1.0/z) * ((z/math.e) * math.sqrt(z*math.sinh(1/z) + 1/(810*z**6)))**z)
            #lam = mu / gamma_approx_winschitl

            key = ( id(self), gender, lam, kap )
            emit_data[key] = {'DebutAge': debut_age}

            # Note: the dummy default parameters in the lambda below bind the
            # necessary variables (lam, kap) in scope at definition time.
            if key not in self.fun:
                self.fun[key] = lambda x, lam=lam, kap=kap: \
                    sps.exponweib(1,kap).cdf([xx/lam for xx in x])

        return emit_data
Example #23
def weibull_parameters_y_original(w,
                                  z,
                                  weibull_delay_months,
                                  __cache=[None, None]):
    """
    Calculate y, derived from the Weibull distribution

    This is slow - so we remember the result for next time, as this
    tends to get called lots of times with the same values for w and z
    """
    if __cache[0] == (w, z, weibull_delay_months):
        return __cache[1]
    else:
        x = np.linspace(0, 2, weibull_delay_months)
        v_1 = stats.exponweib(w, z)
        y1 = v_1.cdf(x)
        y = np.append([0], y1[1:] - y1[0:weibull_delay_months - 1])
        __cache[0] = (w, z, weibull_delay_months)
        __cache[1] = y
    return y
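y holds the step-by-step increments of the Weibull CDF on [0, 2], so its entries are non-negative and sum to the CDF at the last grid point; a quick check with illustrative w and z:

import numpy as np
from scipy import stats

w, z, months = 2.0, 1.5, 12
x = np.linspace(0, 2, months)
y1 = stats.exponweib(w, z).cdf(x)
y = np.append([0], y1[1:] - y1[:-1])
# Increments of a CDF starting at cdf(0)=0 telescope to the final value
assert (y >= 0).all() and np.isclose(y.sum(), y1[-1])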
Example #25
File: route.py Project: pewen/newen
def ejemplo():
    df_anio = read_csv(path + 'data/df_anio')
    df_meses = read_csv(path + 'data/df_meses')
    histo_breaks = read_csv(path + 'data/histo_breaks')
    histo_counts = read_csv(path + 'data/histo_counts')

    shape = df_anio['shape_year'][1]
    scale = df_anio['scale_year'][1]
    distribution = stats.exponweib(1, shape, loc=0, scale=scale)

    y = distribution.pdf(histo_counts['mids'])

    data = []
    for i in range(8):
        data.append(df_meses[[i]].values.tolist())
    data.append(histo_breaks[[0]].values.tolist())
    for i in range(3):
        data.append(histo_counts[[i]].values.tolist())
    data.append(y.tolist())

    return render_template('analisis.html', data=data)
Example #26
    print("Best fitting distribution: "+str(best_dist))
    print("Best p value: "+ str(best_p))
    print("Parameters for the best fit: "+ str(params[best_dist]))

    return best_dist, best_p, params[best_dist]

for i in conditional_volatilities_stocks_frame.columns:
    print(f"Distribution for {i}")
    get_best_distribution(conditional_volatilities_stocks_frame[i])

get_best_distribution(conditional_volatilities_stocks_frame["AAPL"])

params = (-0.29121,1.60149, 0.37599)
x = np.arange(conditional_volatilities_stocks_frame["AAPL"].min(),conditional_volatilities_stocks_frame["AAPL"].max(),0.001)
rv = stats.exponweib(4.241635662242491, 0.8410328808185763, 1.0254363366567723, 0.42062685618103823)
plt.hist(conditional_volatilities_stocks_frame["AAPL"],bins=100)
plt.plot(x,rv.pdf(x))
plt.axvline(rv.ppf(0.95), color = "r")
plt.show()
Example #27
def extremeDistribution_WeibullTailFit(x, x_e, t_x, t_st, avg=0, p0=None):
    '''Approximates the short-term extreme distribution using the Weibull tail
    fit method.

    Parameters
    ----------
        x : np.array
            Independent random variable (global peaks)
        x_e : np.array
            Array of x values at which to evaluate the short-term extreme CDF
        t_x : float
            Time length of the x array
        t_st : float
            Short-term period
        avg : float
            The average of the time response, if this was subtracted before
            identifying global peaks. Else it is assumed that the average is
            zero.
        p0 : list length 2: [float, float]
            Initial guess for the Weibull parameters [shape, scale]

    Returns
    -------
        stextreme_dist : ecmDist object
            Probability distribution of the short-term extreme.
        peaks_dist : scipy.stats rv_frozen
            Probability distribution of the peaks.
        subset_shape_params : np.array length 7
            Shape parameter for each of the seven Weibull fits for the
            subsets of data corresponding to F>[0.60,0.65,...,0.90].
        subset_scale_params : np.array length 7
            Scale parameter for each of the seven Weibull fits for the
            subsets of data corresponding to F>[0.60,0.65,...,0.90].
        peaks_params: np.array length 4
            Parameters of the peaks' distribution (Weibull)
            [shape_a, shape_c, loc, scale].
    '''

    # Two-parameter weibull distribution def
    def weibCDF(yy, shape, scale):
        loc = 0
        return 1. - np.exp(-1. * ((yy - loc) / scale)**shape)

    # Initial guess for Weibull parameters
    if p0 is None:
        p0_tmp = stats.exponweib.fit(x, f0=1, floc=0)
        p0 = np.zeros(2)
        p0[0] = p0_tmp[1]
        p0[1] = p0_tmp[3]
    # Approximate CDF
    x = np.sort(x)
    N = len(x)
    F = np.zeros(N)
    for i in range(N):
        F[i] = i / (N + 1.0)
    # Divide into seven sets
    subset_shape_params = np.zeros(7)
    subset_scale_params = np.zeros(7)
    setLim = np.arange(0.60, 0.91, 0.05)
    for iset in range(7):  # avoid shadowing the builtin 'set'
        xset = x[(F > setLim[iset])]
        Fset = F[(F > setLim[iset])]
        popt, _ = optim.curve_fit(weibCDF, xset, Fset, p0=p0)
        subset_shape_params[iset] = popt[0]
        subset_scale_params[iset] = popt[1]
    # peaks distribution
    peaks_params = [
        1, np.mean(subset_shape_params), avg,
        np.mean(subset_scale_params)
    ]
    peaks_dist = stats.exponweib(a=peaks_params[0],
                                 c=peaks_params[1],
                                 loc=peaks_params[2],
                                 scale=peaks_params[3])
    # short-term extreme
    ratio = t_st / t_x
    N_st = N * ratio
    weib_cdf = weibCDF(x_e, peaks_params[1], peaks_params[3])
    ste_cdf = weib_cdf**N_st
    stextreme_dist = ecmDist(x_e, cdf=ste_cdf)
    return stextreme_dist, peaks_dist, subset_shape_params, \
               subset_scale_params, peaks_params
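The local weibCDF is the closed-form two-parameter Weibull CDF, so the (shape, scale) pairs that curve_fit produces follow the same convention as the frozen exponweib(1, shape, loc=0, scale) returned as peaks_dist; a check of that equivalence:

import numpy as np
from scipy import stats

def weibCDF(yy, shape, scale):
    # Closed-form two-parameter Weibull CDF with loc = 0
    return 1. - np.exp(-1. * ((yy - 0) / scale)**shape)

yy = np.linspace(0.01, 5.0, 50)
assert np.allclose(weibCDF(yy, 1.7, 2.3),
                   stats.exponweib(1, 1.7, loc=0, scale=2.3).cdf(yy))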
Example #28
 def __init__(self, a, c, loc=0.0, scale=1.0):
     super().__init__(stats.exponweib(a, c, loc=loc, scale=scale))
     self.a = a
     self.c = c
     self.loc = loc
     self.scale = scale
Example #29
# It is suggested to always use FULL paths, to avoid relative path issues
faults = [
    'faultlib/leak {0}', 'faultlib/leak {0} l', 'faultlib/memeater {0}',
    'faultlib/memeater {0} l', 'faultlib/dial {0}', 'faultlib/dial {0} l'
]

# The list of benchmarks to be used
benchmarks = ['faultlib/linpack', 'faultlib/stream', 'faultlib/generic']

# Output path of the generated workload
out = 'workloads/gen_workload.csv'
# Maximum time span (in seconds) of the workload
span = 3600 * 48

if __name__ == '__main__':
    generator = WorkloadGenerator(path=out)
    # We set the fault generator so that a Normal distribution is used for the durations, and a Weibull distribution is
    # used for the inter-fault times
    generator.faultDurGenerator.set_distribution(norm(loc=60, scale=6))
    generator.faultTimeGenerator.set_distribution(
        exponweib(a=10, c=1, loc=300, scale=15))
    # We let the workload generator set the benchmark generator automatically, by imposing that roughly 80% of the workload
    # time must be spent in "busy" operation
    generator.autoset_bench_generators(busy_time=0.8,
                                       num_tasks=20,
                                       span_limit=span)
    # We start the workload generation process
    generator.generate(faults, benchmarks, span_limit=span)

    exit()
Example #30
def karst_process(tt, mm, evpt, prp, prpxp, tempp, d18o, d18oxp, dpdf, epdf,
                  soilstorxp, soil18oxp, epxstorxp, epx18oxp, kststor1xp,
                  kststor118oxp, kststor2xp, kststor218oxp, data_rest,
                  calculate_drip, cave_temp):
    #following parameters are mainly unpacked from 'data_rest' which is from the config file
    #store size parameters
    soilsize = data_rest[5][0]
    episize = data_rest[5][3]
    ks1size = data_rest[5][4]
    ks2size = data_rest[5][5]

    #making sure the init sizes don't exceed store capacity
    if soilstorxp > soilsize:
        soilstorxp = soilsize - 1
    if epxstorxp > episize:
        epxstorxp = episize - 1
    if kststor1xp > ks1size:
        kststor1xp = ks1size - 1
    if kststor2xp > ks2size:
        kststor2xp = ks2size - 1

    #overflow parameters
    epicap = data_rest[5][1]
    ovcap = data_rest[5][2]
    #ensuring the overflow parameters are less than the store
    if epicap >= episize:
        epicap = episize - 1
    if ovcap >= ks2size:
        ovcap = ks2size - 1

    #average cave parameters for various months
    drip_interval = data_rest[4][mm - 1]
    drip_pco2 = data_rest[7][mm - 1] / 1000000.0
    cave_pco2 = data_rest[8][mm - 1] / 1000000.0
    h = data_rest[9][mm - 1]
    v = data_rest[10][mm - 1]
    phi = data_rest[11][0]

    #making sure cave values don't become negative
    if v < 0:
        v = 0
    if drip_interval < 0:
        drip_interval = 0
    if drip_pco2 < 0:
        drip_pco2 = 0.0000000000000001
    if cave_pco2 < 0:
        cave_pco2 = 0.0000000000000001
    if h < 0:
        h = 0
    if phi < 0:
        phi = 0

    #making sure some cave values don't exceed one
    if h >= 1:
        h = 0.99
    if phi > 1:
        phi = 1

    #weibull parameters
    w = data_rest[6][0]
    z = data_rest[6][1]
    x = np.linspace(0, 2, 12)
    v_1 = s.exponweib(w, z)
    y1 = v_1.cdf(x)
    y = np.append([0], y1[1:] - y1[0:11])

    #parameterisable coefficients
    k_f1 = data_rest[0][0]  #f1 from soilstore to epikarst
    k_f3 = data_rest[0][1]  #f3 from epikarst to KS1
    k_f8 = data_rest[0][6]
    k_f5 = data_rest[0][2]  #f5 from KS1 to stal5
    k_f6 = data_rest[0][3]  #f6 from KS2 to stal1
    k_f7 = data_rest[0][4]  #f7 overflow from KS2 to KS1
    k_diffuse = data_rest[0][5]  #diffuse flow from Epikarst to KS1
    k_e_evap = data_rest[2][
        0]  #epikarst evap (funct of ET for timestep) Used for both sources???
    k_evapf = data_rest[2][1]  #soil evap d18o fractionation from somepaper????
    k_e_evapf = data_rest[2][
        2]  #epikarst evap d18o fractionation ??? can use same value?
    i = data_rest[1][
        0]  #epikarst in bypass flow mixture to stal1, (<1 & i+j+k=1)
    j = data_rest[1][1]  #rain in bypass flow mixture to stal1, (<1 & i+j+k=1)
    k = data_rest[1][
        2]  #rain from last step in bypass flow mixture to stal1, (<1 & i+j+k=1)
    m = data_rest[1][
        3]  #epikarst in bypass flow mixture to stal2, (<1 & m+n=1)
    n = data_rest[1][4]  #rain in bypass flow mixture to stal2, (<1 & m+n=1)

    #********************************************************************************************
    #starting going through the karst processes in a procedural manner (up-down)
    #making sure the soilstore does not become negative, whilst adding prp and removing evpt
    if soilstorxp + prp - evpt < 0:
        #evpt=0
        #^^^partially agreed can remove the above
        soilstor = 0
    # if prp>=7:
    # soilstor=soilstorxp+prp-evpt
    else:
        soilstor = soilstorxp + prp - evpt

    #ensuring the soilstor does not exceed user-defined capacity
    if soilstor > soilsize:
        soilstor = soilsize

    #prevents any flux when surface is near-frozen. in this case, 0.0 degree c
    if tempp[0] > 0.0:
        f1 = soilstor * k_f1
    else:
        f1 = 0
    #updating the final soil store level (removing the F1 value)
    soilstor = soilstor - f1

    #increases epikarst store volume
    epxstor = epxstorxp + f1
    #draining from bottom first as gravity fed
    f3 = epxstor * k_f3
    #diffuse flow leaving epikarst and going to KS1
    #assuming diffuse flow follows a Weibull distribution
    dpdf[0] = (epxstor - f3) * k_diffuse
    if epxstor - f3 - dpdf[0] > epicap:
        f4 = (epxstor - f3 - dpdf[0] - epicap)
    else:
        f4 = 0

    #epikarst evaporation starts when soilstore is 10%
    #and increases with decreasing soil store
    #added the (1-4*...) term, can change the '4'
    if prp == 0:
        e_evpt = k_e_evap * evpt
    elif soilstor <= 0.1 * soilsize:
        e_evpt = k_e_evap * evpt * (1 - 4 * soilstor / soilsize)
    #elif condition is a second route for epikarst evaporation through
    #bypass flow which is the same route as used for stal 2 & 3
    else:
        e_evpt = 0

    #calculating final epikarst value
    if epxstor - f3 - f4 - dpdf[0] - e_evpt < 0:
        epxstor = 0
    else:
        epxstor = epxstor - f3 - f4 - dpdf[0] - e_evpt

    #ensuring the epxstor does not exceed user-defined capacity
    if epxstor > episize:
        epxstor = episize

    #fluxes into and out of KS2
    kststor2 = kststor2xp + f4
    if kststor2 > ovcap:
        f7 = (kststor2 - ovcap) * k_f7
    else:
        f7 = 0
    f6 = (kststor2 - f7) * k_f6
    kststor2 = kststor2 - f6 - f7

    #ensuring the kststor2 does not exceed user-defined capacity
    if kststor2 > ks2size:
        kststor2 = ks2size

    #f8 bypass flow from surface rain to KS1
    if prp > 7:
        f8 = prp * k_f8
    else:
        f8 = 0

    #fluxes into and out of KS1
    kststor1 = kststor1xp + f3 + sum(y * dpdf) + f7 + f8
    f5 = kststor1 * k_f5
    kststor1 = kststor1 - f5

    #ensuring the kststor1 does not exceed user-defined capacity
    if kststor1 > ks1size:
        kststor1 = ks1size

    #mixing and fractionation of soil store d18o
    e = prp + soilstorxp
    if e < 0.01:
        e = 0.001
    f = soilstorxp / e
    g = prp / e
    # 0.03 term can be changed to enable evaporative fractionation in soil store
    h_1 = d18o + (evpt * k_evapf)
    #mixing of soil d18o with prp and ???
    soil18o = (f * soil18oxp) + (g * h_1)

    #so if the soil value becomes positive it is reverted to original soild18o. Justified??
    if soil18o > 0.0001:
        soil18o = soil18oxp

    #mixing and fractionation of epikarst store d18o
    a = f1
    b = a + epxstorxp
    #quick fix for divide-by-zero error when b is too small
    if b <= 0.001:
        b = 0.001
    c = (epxstorxp / b) * (epx18oxp + e_evpt * k_e_evapf)
    d = (a / b) * soil18o
    epx18o = c + d
    epdf[0] = epx18o

    #mixing of kststor2 d18o
    if f4 < 0.01:
        kststor218o = kststor218oxp
    else:
        b2 = f4 + kststor2xp
        c2 = (kststor2xp / b2) * kststor218oxp
        d2 = (f4 / b2) * epx18o
        kststor218o = c2 + d2

    #mixing of KS1 d18o
    b1 = f3 + kststor1xp + sum(y * dpdf) + f7 + f8
    c1 = (kststor1xp / b1) * kststor118oxp
    d1 = (f3 / b1) * epx18o
    e1 = (sum(y * dpdf * epdf) / b1)
    g1 = (f7 / b1) * kststor218o
    h1 = f8 / b1 * d18o
    kststor118o = c1 + d1 + e1 + g1 + h1

    #bypass flow (from epikarst and direct from rain)
    p = d18o
    r = d18oxp
    drip118o = (kststor118o * i) + (p * j) + (r * k)
    drip218o = (kststor118o * m) + (p * n)

    #if kststor2 is too low, a clearly outlying value is assigned for the stal
    if kststor2 < 0.01:
        stal1d18o = -99.9
        drip_interval_ks2 = 9001
    else:
        if calculate_drip == True:
            #drip-interval: user inputted max drip-interval proportioned by store capacity
            drip_interval_ks2 = int(drip_interval * ks2size / kststor2)
        else:
            drip_interval_ks2 = int(drip_interval)
        #running the ISOLUTION part of the model
        stal1d18o = isotope_calcite(drip_interval_ks2, cave_temp, drip_pco2,
                                    cave_pco2, h, v, phi, kststor218o, tt)

    #same drip interal calculation for epikarst store (stalagmite 4)
    if epxstor < 0.01:
        stal4d18o = -99.9
        drip_interval_epi = 9001
    else:
        if calculate_drip == True:
            drip_interval_epi = int(drip_interval * episize / epxstor)
        else:
            drip_interval_epi = int(drip_interval)
        stal4d18o = isotope_calcite(drip_interval_epi, cave_temp, drip_pco2,
                                    cave_pco2, h, v, phi, epx18o, tt)

    #drip interval calculations for Karst Store 1, which includes the bypass
    #stalagmites 2 and 3
    if kststor1 < 0.01:
        stal2d18o = -99.9
        stal3d18o = -99.9
        stal5d18o = -99.99
        drip_interval_ks1 = 9001
        drip_interval_stal3 = 9001
        drip_interval_stal2 = 9001
    else:
        if calculate_drip == True:
            drip_interval_ks1 = int(drip_interval * ks1size / kststor1)
            ks1_temp3 = kststor1 + prp
            drip_interval_stal3 = int(drip_interval * ks1size / ks1_temp3)
            ks1_temp2 = ks1_temp3 + prpxp
            drip_interval_stal2 = int(drip_interval * ks1size / ks1_temp2)
        else:
            drip_interval_ks1 = int(drip_interval)
            drip_interval_stal3 = int(drip_interval)
            drip_interval_stal2 = int(drip_interval)
        stal5d18o = isotope_calcite(drip_interval_ks1, cave_temp, drip_pco2,
                                    cave_pco2, h, v, phi, kststor118o, tt)
        stal3d18o = isotope_calcite(drip_interval_stal3, cave_temp, drip_pco2,
                                    cave_pco2, h, v, phi, drip218o, tt)
        stal2d18o = isotope_calcite(drip_interval_stal2, cave_temp, drip_pco2,
                                    cave_pco2, h, v, phi, drip118o, tt)

    #returning the values to the karstolution1.1 module to be written to output
    return [
        tt, mm, f1, f3, f4, f5, f6, f7, soilstor, epxstor, kststor1, kststor2,
        soil18o, epx18o, kststor118o, kststor218o, dpdf[0], stal1d18o,
        stal2d18o, stal3d18o, stal4d18o, stal5d18o, drip_interval_ks2,
        drip_interval_epi, drip_interval_stal3, drip_interval_stal2,
        drip_interval_ks1, cave_temp
    ]
Example #31
def all_dists():
    # dist params were taken from the scipy.stats official
    # documentation examples
    # Total - 89
    return {
        "alpha":
        stats.alpha(a=3.57, loc=0.0, scale=1.0),
        "anglit":
        stats.anglit(loc=0.0, scale=1.0),
        "arcsine":
        stats.arcsine(loc=0.0, scale=1.0),
        "beta":
        stats.beta(a=2.31, b=0.627, loc=0.0, scale=1.0),
        "betaprime":
        stats.betaprime(a=5, b=6, loc=0.0, scale=1.0),
        "bradford":
        stats.bradford(c=0.299, loc=0.0, scale=1.0),
        "burr":
        stats.burr(c=10.5, d=4.3, loc=0.0, scale=1.0),
        "cauchy":
        stats.cauchy(loc=0.0, scale=1.0),
        "chi":
        stats.chi(df=78, loc=0.0, scale=1.0),
        "chi2":
        stats.chi2(df=55, loc=0.0, scale=1.0),
        "cosine":
        stats.cosine(loc=0.0, scale=1.0),
        "dgamma":
        stats.dgamma(a=1.1, loc=0.0, scale=1.0),
        "dweibull":
        stats.dweibull(c=2.07, loc=0.0, scale=1.0),
        "erlang":
        stats.erlang(a=2, loc=0.0, scale=1.0),
        "expon":
        stats.expon(loc=0.0, scale=1.0),
        "exponnorm":
        stats.exponnorm(K=1.5, loc=0.0, scale=1.0),
        "exponweib":
        stats.exponweib(a=2.89, c=1.95, loc=0.0, scale=1.0),
        "exponpow":
        stats.exponpow(b=2.7, loc=0.0, scale=1.0),
        "f":
        stats.f(dfn=29, dfd=18, loc=0.0, scale=1.0),
        "fatiguelife":
        stats.fatiguelife(c=29, loc=0.0, scale=1.0),
        "fisk":
        stats.fisk(c=3.09, loc=0.0, scale=1.0),
        "foldcauchy":
        stats.foldcauchy(c=4.72, loc=0.0, scale=1.0),
        "foldnorm":
        stats.foldnorm(c=1.95, loc=0.0, scale=1.0),
        # "frechet_r": stats.frechet_r(c=1.89, loc=0.0, scale=1.0),
        # "frechet_l": stats.frechet_l(c=3.63, loc=0.0, scale=1.0),
        "genlogistic":
        stats.genlogistic(c=0.412, loc=0.0, scale=1.0),
        "genpareto":
        stats.genpareto(c=0.1, loc=0.0, scale=1.0),
        "gennorm":
        stats.gennorm(beta=1.3, loc=0.0, scale=1.0),
        "genexpon":
        stats.genexpon(a=9.13, b=16.2, c=3.28, loc=0.0, scale=1.0),
        "genextreme":
        stats.genextreme(c=-0.1, loc=0.0, scale=1.0),
        "gausshyper":
        stats.gausshyper(a=13.8, b=3.12, c=2.51, z=5.18, loc=0.0, scale=1.0),
        "gamma":
        stats.gamma(a=1.99, loc=0.0, scale=1.0),
        "gengamma":
        stats.gengamma(a=4.42, c=-3.12, loc=0.0, scale=1.0),
        "genhalflogistic":
        stats.genhalflogistic(c=0.773, loc=0.0, scale=1.0),
        "gilbrat":
        stats.gilbrat(loc=0.0, scale=1.0),
        "gompertz":
        stats.gompertz(c=0.947, loc=0.0, scale=1.0),
        "gumbel_r":
        stats.gumbel_r(loc=0.0, scale=1.0),
        "gumbel_l":
        stats.gumbel_l(loc=0.0, scale=1.0),
        "halfcauchy":
        stats.halfcauchy(loc=0.0, scale=1.0),
        "halflogistic":
        stats.halflogistic(loc=0.0, scale=1.0),
        "halfnorm":
        stats.halfnorm(loc=0.0, scale=1.0),
        "halfgennorm":
        stats.halfgennorm(beta=0.675, loc=0.0, scale=1.0),
        "hypsecant":
        stats.hypsecant(loc=0.0, scale=1.0),
        "invgamma":
        stats.invgamma(a=4.07, loc=0.0, scale=1.0),
        "invgauss":
        stats.invgauss(mu=0.145, loc=0.0, scale=1.0),
        "invweibull":
        stats.invweibull(c=10.6, loc=0.0, scale=1.0),
        "johnsonsb":
        stats.johnsonsb(a=4.32, b=3.18, loc=0.0, scale=1.0),
        "johnsonsu":
        stats.johnsonsu(a=2.55, b=2.25, loc=0.0, scale=1.0),
        "ksone":
        stats.ksone(n=1e03, loc=0.0, scale=1.0),
        "kstwobign":
        stats.kstwobign(loc=0.0, scale=1.0),
        "laplace":
        stats.laplace(loc=0.0, scale=1.0),
        "levy":
        stats.levy(loc=0.0, scale=1.0),
        "levy_l":
        stats.levy_l(loc=0.0, scale=1.0),
        "levy_stable":
        stats.levy_stable(alpha=0.357, beta=-0.675, loc=0.0, scale=1.0),
        "logistic":
        stats.logistic(loc=0.0, scale=1.0),
        "loggamma":
        stats.loggamma(c=0.414, loc=0.0, scale=1.0),
        "loglaplace":
        stats.loglaplace(c=3.25, loc=0.0, scale=1.0),
        "lognorm":
        stats.lognorm(s=0.954, loc=0.0, scale=1.0),
        "lomax":
        stats.lomax(c=1.88, loc=0.0, scale=1.0),
        "maxwell":
        stats.maxwell(loc=0.0, scale=1.0),
        "mielke":
        stats.mielke(k=10.4, s=3.6, loc=0.0, scale=1.0),
        "nakagami":
        stats.nakagami(nu=4.97, loc=0.0, scale=1.0),
        "ncx2":
        stats.ncx2(df=21, nc=1.06, loc=0.0, scale=1.0),
        "ncf":
        stats.ncf(dfn=27, dfd=27, nc=0.416, loc=0.0, scale=1.0),
        "nct":
        stats.nct(df=14, nc=0.24, loc=0.0, scale=1.0),
        "norm":
        stats.norm(loc=0.0, scale=1.0),
        "pareto":
        stats.pareto(b=2.62, loc=0.0, scale=1.0),
        "pearson3":
        stats.pearson3(skew=0.1, loc=0.0, scale=1.0),
        "powerlaw":
        stats.powerlaw(a=1.66, loc=0.0, scale=1.0),
        "powerlognorm":
        stats.powerlognorm(c=2.14, s=0.446, loc=0.0, scale=1.0),
        "powernorm":
        stats.powernorm(c=4.45, loc=0.0, scale=1.0),
        "rdist":
        stats.rdist(c=0.9, loc=0.0, scale=1.0),
        "reciprocal":
        stats.reciprocal(a=0.00623, b=1.01, loc=0.0, scale=1.0),
        "rayleigh":
        stats.rayleigh(loc=0.0, scale=1.0),
        "rice":
        stats.rice(b=0.775, loc=0.0, scale=1.0),
        "recipinvgauss":
        stats.recipinvgauss(mu=0.63, loc=0.0, scale=1.0),
        "semicircular":
        stats.semicircular(loc=0.0, scale=1.0),
        "t":
        stats.t(df=2.74, loc=0.0, scale=1.0),
        "triang":
        stats.triang(c=0.158, loc=0.0, scale=1.0),
        "truncexpon":
        stats.truncexpon(b=4.69, loc=0.0, scale=1.0),
        "truncnorm":
        stats.truncnorm(a=0.1, b=2, loc=0.0, scale=1.0),
        "tukeylambda":
        stats.tukeylambda(lam=3.13, loc=0.0, scale=1.0),
        "uniform":
        stats.uniform(loc=0.0, scale=1.0),
        "vonmises":
        stats.vonmises(kappa=3.99, loc=0.0, scale=1.0),
        "vonmises_line":
        stats.vonmises_line(kappa=3.99, loc=0.0, scale=1.0),
        "wald":
        stats.wald(loc=0.0, scale=1.0),
        "weibull_min":
        stats.weibull_min(c=1.79, loc=0.0, scale=1.0),
        "weibull_max":
        stats.weibull_max(c=2.87, loc=0.0, scale=1.0),
        "wrapcauchy":
        stats.wrapcauchy(c=0.0311, loc=0.0, scale=1.0),
    }
Example #32
 def weibull(self, event):
     x = np.linspace(0, 2.5, 25)
     v = s.exponweib(self.data1[6][0], self.data1[6][1])
     q = v.pdf(x)
     plt.plot(x, q)
     plt.show()
Example #33
    def map(self, output_data):
        emit_data = {}

        hiv_infections = output_data[ self.filenames[0] ]    # CSV, has 'data' and 'colMap'
        rows = hiv_infections['data']
        colMap = hiv_infections['colMap']

        iid = [ int(float(r[colMap['Id']])) for r in rows]
        uids = set(iid)

        for stage in range(1,5):
            emit_data[(id(self),stage)]=[]

        start_year = float(rows[0][colMap['Year']])
        end_year = float(rows[-1][colMap['Year']])

        for uid in uids:
            myrows = [r for r in rows if float(r[colMap['Id']]) == uid]

            prognosis = float(myrows[0][colMap['Prognosis']]) / DAYS_PER_YEAR

            max_pf = Stage_Max_Frac_Prog
            if start_year + prognosis > end_year:
                # Sim will terminate before reaching prognosis fraction of 1
                # Need to compute new maximum observable prognosis fraction
                slope = ( float(myrows[-1][colMap['PrognosisCompletedFraction']]) - float(myrows[0][colMap['PrognosisCompletedFraction']]) ) / \
                        ( float(myrows[-1][colMap['Year']]) - float(myrows[0][colMap['Year']]) )
                max_pf = min(Stage_Max_Frac_Prog, slope * (end_year - start_year))

            if self.verbose:
                print(start_year, end_year, prognosis, max_pf)

            for stage in range(1,5):
                key = (id(self),stage)
                prev_stage_key = (id(self),stage-1)

                pf = next(
                    (float(r[colMap['PrognosisCompletedFraction']])
                     for r in myrows if float(r[colMap['WHOStage']]) >= stage),
                    None)

                if pf is None:
                    continue

                if self.verbose:
                    print "id: %d, stage: %d, pf: %f" % (uid, stage, pf)
                    print 'Found stage '+str(stage)+' for uid='+str(uid)+' start at prog frac of ' + str(pf)

                if stage == 1:
                    initial_stage = next((float(r[colMap['WHOStage']]) for r in rows if float(r[colMap['Id']]) == uid), None)
                    if initial_stage >= 2:
                        print "WARNING: Initial WHO stage for id=%d is %f" % (uid, initial_stage)
                    if key not in self.results:
                        self.results[key] = {'Valid': initial_stage < 2} 
                    else:
                        self.results[key] = {'Valid': self.results[key]['Valid'] and initial_stage < 2}

                    emit_data[key].append(pf)    # Stage 1 entry prognosis fraction
                else:
                    pf_prev = emit_data[prev_stage_key][-1]
                    stage_duration = pf - pf_prev
                    emit_data[key].append(stage_duration)   # Stage duration (actually of previous stage!)

                    k = 'Stage_' + str(stage-1)
                    self.fun[k].append(
                        lambda prog_frac,
                            lam=Stage_Duration_Param[k]['Lambda'],
                            kap=Stage_Duration_Param[k]['Kappa'],
                            max_delta_pf=max_pf - pf_prev:
                            [1 if fp > max_delta_pf
                             else sps.exponweib(1, kap).cdf(fp / lam) /
                                  sps.exponweib(1, kap).cdf(max_delta_pf / lam)
                             for fp in prog_frac])

        return emit_data
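The per-stage lambdas appended to self.fun above implement a right-truncated exponentiated-Weibull CDF: the CDF evaluated at fp/lam is renormalized by its value at the maximum observable prognosis fraction, and clamped to 1 beyond that point. A standalone sketch of the same normalization, with hypothetical lam, kap and max_delta_pf values:

import numpy as np
import scipy.stats as sps

lam, kap = 0.5, 1.8   # hypothetical Weibull scale and shape
max_delta_pf = 0.6    # hypothetical maximum observable prognosis fraction

def truncated_weibull_cdf(prog_frac):
    # Renormalize so the CDF reaches 1 at max_delta_pf, as in map() above.
    denom = sps.exponweib(1, kap).cdf(max_delta_pf / lam)
    return [1 if fp > max_delta_pf
            else sps.exponweib(1, kap).cdf(fp / lam) / denom
            for fp in prog_frac]

print(truncated_weibull_cdf(np.linspace(0.0, 1.0, 5)))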
Example no. 35
# Imports inferred from the aliases used in this snippet.
import collections as ct
import scipy.stats as st

income_model_dict = ct.OrderedDict()
income_model_dict['johnsonsu'] = st.johnsonsu(-5.3839367311065747,
                                              0.84376726932941271,
                                              -224.21280806585787,
                                              79.661998696081355)
income_model_dict['powerlaw'] = st.powerlaw(0.16342470577523971,
                                            -3.1423954341714262e-15,
                                            55664716.096562646)
income_model_dict['exponpow'] = st.exponpow(0.25441022752240294,
                                            -1.8475789041433829e-22,
                                            36120900.670255348)
income_model_dict['nakagami'] = st.nakagami(0.10038339454419823,
                                            -3.0390927147076284e-22,
                                            33062195.426077582)
income_model_dict['exponweib'] = st.exponweib(-3.5157658448986489,
                                              0.44492833350419714,
                                              -15427.454196748848,
                                              2440.0278856175246)

drivingdistance_model_dict = ct.OrderedDict()
drivingdistance_model_dict['nakagami'] = st.nakagami(0.11928581143831021,
                                                     14.999999999999996,
                                                     41.404620910360876)
drivingdistance_model_dict['ncx2'] = st.ncx2(0.30254190304723211,
                                             1.1286538320791935,
                                             14.999999999999998,
                                             8.7361471573932192)
drivingdistance_model_dict['chi'] = st.chi(0.47882729877571095,
                                           14.999999999999996,
                                           44.218301183844645)
drivingdistance_model_dict['recipinvgauss'] = st.recipinvgauss(
    2447246.0546641815, 14.999999999994969, 31.072009722580802)
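Each entry above is a frozen scipy.stats distribution, so the fitted candidates can be compared directly, for example by log-likelihood. A minimal sketch; the income sample below is synthetic, not the original data:

import numpy as np

income = np.random.lognormal(mean=10.0, sigma=1.0, size=1000)  # synthetic

for name, model in income_model_dict.items():
    # Total log-likelihood of the synthetic sample under each fitted model.
    # Note: the exponweib entry has a negative first shape parameter, which
    # scipy treats as invalid, so its logpdf evaluates to nan.
    print(name, np.sum(model.logpdf(income)))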
Example no. 36
# Imports inferred from the aliases used in this snippet; ecmDist is
# provided by the surrounding package.
import numpy as np
from scipy import optimize as optim
from scipy import stats


def extremeDistribution_WeibullTailFit(x, x_e, t_x, t_st, avg=0, p0=None):
    '''Approximates the short-term extreme distribution using the Weibull tail
    fit method.

    Parameters
    ----------
        x : np.array
            Independent random variable (global peaks)
        x_e : np.array
            Array of x values at which to evaluate the short-term extreme CDF
        t_x : float
            Time length of the x array
        t_st : float
            Short-term period
        avg : float
            The average of the time response, if it was subtracted before
            identifying global peaks. Otherwise the average is assumed to
            be zero.
        p0 : list length 2: [float, float]
            Initial guess for the Weibull parameters [shape, scale]

    Returns
    -------
        stextreme_dist : ecmDist object
            Probability distribution of the short-term extreme.
        peaks_dist : scipy.stats rv_frozen
            Probability distribution of the peaks.
        subset_shape_params : np.array length 7
            Shape parameter for each of the seven Weibull fits for the
            subsets of data corresponding to F > [0.60, 0.65, ..., 0.90].
        subset_scale_params : np.array length 7
            Scale parameter for each of the seven Weibull fits for the
            subsets of data corresponding to F > [0.60, 0.65, ..., 0.90].
        peaks_params : np.array length 4
            Parameters of the peaks' distribution (Weibull)
            [shape_a, shape_c, loc, scale].
    '''
    # Two-parameter weibull distribution def
    def weibCDF(yy, shape, scale):
        loc = 0
        return 1. - np.exp(-1. * ((yy - loc) / scale)**shape)
    # Initial guess for Weibull parameters
    if p0 is None:
        p0_tmp = stats.exponweib.fit(x, f0=1, floc=0)
        p0 = np.zeros(2)
        p0[0] = p0_tmp[1]
        p0[1] = p0_tmp[3]
    # Approximate CDF
    x = np.sort(x)
    N = len(x)
    F = np.zeros(N)
    for i in range(N):
        F[i] = i / (N + 1.0)
    # Divide into seven sets
    subset_shape_params = np.zeros(7)
    subset_scale_params = np.zeros(7)
    setLim = np.arange(0.60, 0.91, 0.05)
    for iset in range(7):  # renamed from 'set' to avoid shadowing the built-in
        xset = x[F > setLim[iset]]
        Fset = F[F > setLim[iset]]
        popt, _ = optim.curve_fit(weibCDF, xset, Fset, p0=p0)
        subset_shape_params[iset] = popt[0]
        subset_scale_params[iset] = popt[1]
    # peaks distribution
    peaks_params = [1, np.mean(subset_shape_params), avg,
                    np.mean(subset_scale_params)]
    peaks_dist = stats.exponweib(a=peaks_params[0],
                                 c=peaks_params[1],
                                 loc=peaks_params[2],
                                 scale=peaks_params[3])
    # short-term extreme
    ratio = t_st / t_x
    N_st = N * ratio
    weib_cdf = weibCDF(x_e, peaks_params[1], peaks_params[3])
    ste_cdf = weib_cdf ** N_st
    stextreme_dist = ecmDist(x_e, cdf=ste_cdf)
    return stextreme_dist, peaks_dist, subset_shape_params, \
               subset_scale_params, peaks_params
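A possible invocation with synthetic peaks (all numbers below are illustrative; ecmDist is assumed to come from the same package as the function):

import numpy as np
from scipy import stats

# Synthetic global peaks from a hypothetical 3-hour record.
peaks = stats.exponweib(a=1, c=1.8, loc=0, scale=0.5).rvs(size=400)
x_eval = np.linspace(0.0, 3.0, 200)

ste_dist, peaks_dist, shapes, scales, peaks_params = \
    extremeDistribution_WeibullTailFit(x=peaks, x_e=x_eval,
                                       t_x=3 * 3600.0, t_st=3600.0)
print(peaks_params)  # [shape_a, shape_c, loc, scale] of the peaks fit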
def ppf_tau(x):
    return exponweib(7.38493171518, 2.01255297859).ppf(x)
# Imports inferred from the names used in this snippet.
import numpy as np
from scipy.stats import exponweib


def sample_runnable_acet(period, amount=1, scalingFlag=False):
    """Create runnables according to the WATERS benchmark.
    scalingFlag: make WCET out of ACET with scaling
    """
    # Parameters from WATERS 'Real World Automotive Benchmarks For Free'
    if period == 1:
        # Pull scaling factor.
        scaling = np.random.uniform(1.3, 29.11, amount)  # between fmin fmax
        # Pull samples with weibull distribution.
        dist = exponweib(1, 1.044, loc=0, scale=1.0 / 0.214)
        samples = dist.rvs(size=amount)
        while True:
            outliers_detected = False
            for i in range(len(samples)):
                # Check if they are in the range.
                if samples[i] < 0.34 or samples[i] > 30.11:
                    outliers_detected = True
                    samples[i] = dist.rvs()  # scalar draw; avoids assigning a size-1 array
            # Case: Some samples had to be pulled again.
            if outliers_detected:
                continue
            # Case: All samples are in the range.
            if scalingFlag:  # scaling
                return list(0.001 * samples * scaling)
            else:
                return list(0.001 * samples)

    # In the following same structure but different values.

    if period == 2:
        scaling = np.random.uniform(1.54, 19.04, amount)
        dist = exponweib(1, 1.0607440083, loc=0, scale=1.0 / 0.2479463059)
        samples = dist.rvs(size=amount)
        while True:
            outliers_detected = False
            for i in range(len(samples)):
                if samples[i] < 0.32 or samples[i] > 40.69:
                    outliers_detected = True
                    samples[i] = dist.rvs()
            if outliers_detected:
                continue
            if scalingFlag:
                return list(0.001 * samples * scaling)
            else:
                return list(0.001 * samples)

    if period == 5:
        scaling = np.random.uniform(1.13, 18.44, amount)
        dist = exponweib(1, 1.00818633, loc=0, scale=1.0 / 0.09)
        samples = dist.rvs(size=amount)
        while True:
            outliers_detected = False
            for i in range(len(samples)):
                if samples[i] < 0.36 or samples[i] > 83.38:
                    outliers_detected = True
                    samples[i] = dist.rvs()
            if outliers_detected:
                continue
            if scalingFlag:
                return list(0.001 * samples * scaling)
            else:
                return list(0.001 * samples)

    if period == 10:
        scaling = np.random.uniform(1.06, 30.03, amount)
        dist = exponweib(1, 1.0098, loc=0, scale=1.0 / 0.0985)
        samples = dist.rvs(size=amount)
        while True:
            outliers_detected = False
            for i in range(len(samples)):
                if samples[i] < 0.21 or samples[i] > 309.87:
                    outliers_detected = True
                    samples[i] = dist.rvs()
            if outliers_detected:
                continue
            if scalingFlag:
                return list(0.001 * samples * scaling)
            else:
                return list(0.001 * samples)

    if period == 20:
        scaling = np.random.uniform(1.06, 15.61, amount)
        dist = exponweib(1,
                         1.01309699673984310,
                         loc=0,
                         scale=1.0 / 0.1138186679)
        samples = dist.rvs(size=amount)
        while True:
            outliers_detected = False
            for i in range(len(samples)):
                if samples[i] < 0.25 or samples[i] > 291.42:
                    outliers_detected = True
                    samples[i] = dist.rvs()
            if outliers_detected:
                continue
            if scalingFlag:
                return list(0.001 * samples * scaling)
            else:
                return list(0.001 * samples)

    if period == 50:
        scaling = np.random.uniform(1.13, 7.76, amount)
        dist = exponweib(1,
                         1.00324219159296302,
                         loc=0,
                         scale=1.0 / 0.05685450460)
        samples = dist.rvs(size=amount)
        while True:
            outliers_detected = False
            for i in range(len(samples)):
                if samples[i] < 0.29 or samples[i] > 92.98:
                    outliers_detected = True
                    samples[i] = dist.rvs()
            if outliers_detected:
                continue
            if scalingFlag:
                return list(0.001 * samples * scaling)
            else:
                return list(0.001 * samples)

    if period == 100:
        scaling = np.random.uniform(1.02, 8.88, amount)
        dist = exponweib(1,
                         1.00900736028318527,
                         loc=0,
                         scale=1.0 / 0.09448019812)
        samples = dist.rvs(size=amount)
        while True:
            outliers_detected = False
            for i in range(len(samples)):
                if samples[i] < 0.21 or samples[i] > 420.43:
                    outliers_detected = True
                    samples[i] = dist.rvs()
            if outliers_detected:
                continue
            if scalingFlag:
                return list(0.001 * samples * scaling)
            else:
                return list(0.001 * samples)

    if period == 200:
        scaling = np.random.uniform(1.03, 4.9, amount)
        dist = exponweib(1,
                         1.15710612360723798,
                         loc=0,
                         scale=1.0 / 0.3706045664)
        samples = dist.rvs(size=amount)
        while True:
            outliers_detected = False
            for i in range(len(samples)):
                if samples[i] < 0.22 or samples[i] > 21.95:
                    outliers_detected = True
                    samples[i] = dist.rvs()
            if outliers_detected:
                continue
            if scalingFlag:
                return list(0.001 * samples * scaling)
            else:
                return list(0.001 * samples)

    if period == 1000:
        # No weibull since the range from 0.37 to 0.46 is too short to be
        # modeled by weibull properly.
        scaling = np.random.uniform(1.84, 4.75, amount)
        if scalingFlag:
            return list(0.001 * np.random.uniform(0.37, 0.46, amount) *
                        scaling)
        else:
            return list(0.001 * np.random.uniform(0.37, 0.46, amount))
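A short usage sketch (illustrative): period selects one of the WATERS period classes handled above, and the 0.001 factor appears to convert the sampled values from milliseconds to seconds.

# Draw five execution-time samples for the 10 ms period class,
# with ACET-to-WCET scaling applied.
wcets = sample_runnable_acet(period=10, amount=5, scalingFlag=True)
print(wcets)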
    "KS Test Exponencial: ",
    stats.kstest(datos,
                 cdf='expon',
                 args=(parametros_exponencial[0], parametros_exponencial[1])))
print(
    "KS Test Normal: ",
    stats.kstest(datos,
                 cdf='norm',
                 args=(parametros_normal[0], parametros_normal[1])))
print(
    "KS Test Weibull: ",
    stats.kstest(datos,
                 cdf='exponweib',
                 args=(parametros_weibull[0], parametros_weibull[1],
                       parametros_weibull[2], parametros_weibull[3])))
print("=======================================")

## Plot the histogram of the landing data against the fitted distributions.
rv_exponential = expon(parametros_exponencial[0], parametros_exponencial[1])
rv_weibull = exponweib(parametros_weibull[0], parametros_weibull[1],
                       parametros_weibull[2], parametros_weibull[3])
rv_normal = norm(parametros_normal[0], parametros_normal[1])
plt.figure()
_, x, _ = plt.hist(datos, density=True)  # 'normed' was removed in Matplotlib 3
plt.plot(x, rv_exponential.pdf(np.array(x)), label='exponencial')
plt.plot(x, rv_weibull.pdf(np.array(x)), label='weibull')
plt.plot(x, rv_normal.pdf(np.array(x)), label='normal')
plt.legend(loc='upper right')
plt.title("DESEMBARQUES")
plt.show()
Example no. 41
def karst_process(tt, mm, evpt, prp, prpxp, tempp, d18o, d18oxp, dpdf, epdf,
                  soilstorxp, soil18oxp, epxstorxp, epx18oxp, kststor1xp,
                  kststor118oxp, kststor2xp, kststor218oxp, data_rest,
                  calculate_drip, cave_temp):
    #the following parameters are mainly unpacked from 'data_rest', which comes from the config file
    #store size parameters
    soilsize=data_rest[5][0]
    episize=data_rest[5][3]
    ks1size=data_rest[5][4]  
    ks2size=data_rest[5][5]
    
    #making sure the init sizes don't exceed store capacity
    if soilstorxp>soilsize:
        soilstorxp=soilsize-1
    if epxstorxp>episize:
        epxstorxp=episize-1
    if kststor1xp>ks1size:
        kststor1xp=ks1size-1
    if kststor2xp>ks2size:
        kststor2xp=ks2size-1
    
    #overflow parameters
    epicap=data_rest[5][1]
    ovcap=data_rest[5][2]
    #ensuring the overflow parameters are less than the store
    if epicap>=episize:
        epicap=episize-1
    if ovcap >= ks2size:
        ovcap=ks2size-1
        
    #average cave parameters for various months
    drip_interval = data_rest[4][mm-1]
    drip_pco2=data_rest[7][mm-1]/1000000.0
    cave_pco2 = data_rest[8][mm-1]/1000000.0
    h = data_rest[9][mm-1]
    v = data_rest[10][mm-1]
    phi = data_rest[11][0]
    
    #making sure cave values don't become negative
    if v<0:
        v=0
    if drip_interval<0:
        drip_interval=0
    if drip_pco2<0:
        drip_pco2=0.0000000000000001
    if cave_pco2<0:
        cave_pco2=0.0000000000000001
    if h<0:
        h=0
    if phi<0:
        phi=0
        
    #making sure some cave values don't exceed one
    if h>=1:
        h =0.99
    if phi>1:
        phi=1
    
    #weibull parameters
    w=data_rest[6][0] 
    z=data_rest[6][1]
    x=np.linspace(0,2,12)    
    v_1=s.exponweib(w,z)
    y1=v_1.cdf(x)
    y=np.append([0],y1[1:]-y1[0:11])
    
    #parameterisable coefficients
    k_f1=data_rest[0][0]        #f1 from soilstore to epikarst
    k_f3=data_rest[0][1]        #f3 from epikarst to KS1
    k_f8=data_rest[0][6]
    k_f5=data_rest[0][2]        #f5 from KS1 to stal5
    k_f6=data_rest[0][3]        #f6 from KS2 to stal1
    k_f7=data_rest[0][4]        #f7 overflow from KS2 to KS1
    k_diffuse=data_rest[0][5]   #diffuse flow from Epikarst to KS1
    k_e_evap=data_rest[2][0]    #epikarst evap (funct of ET for timestep) Used for both sources???
    k_evapf=data_rest[2][1]     #soil evap d18o fractionation from somepaper????
    k_e_evapf=data_rest[2][2]   #epikarst evap d18o fractionation ??? can use same value?  
    i=data_rest[1][0]           #epikarst in bypass flow mixture to stal1, (<1 & i+j+k=1)
    j=data_rest[1][1]           #rain in bypass flow mixture to stal1, (<1 & i+j+k=1)
    k=data_rest[1][2]           #rain from last step in bypass flow mixture to stal1, (<1 & i+j+k=1)
    m=data_rest[1][3]           #epikarst in bypass flow mixture to stal2, (<1 & m+n=1)
    n=data_rest[1][4]           #rain in bypass flow mixture to stal2, (<1 & m+n=1)
    
    #********************************************************************************************
    #starting going through the karst processes in a procedural manner (up-down)
    #making sure the soilstore does not become negative, whilst adding prp and removing evpt
    if soilstorxp + prp - evpt < 0:
        soilstor = 0
    else:
        soilstor = soilstorxp + prp - evpt
    
    #ensuring the soilstor does not exceed user-defined capacity    
    if soilstor>soilsize:
        soilstor=soilsize
    
    #prevents any flux when the surface is near-frozen; in this case, 0.0 degrees C
    if tempp[0]>0.0:
        f1=soilstor*k_f1
    else:
        f1=0
    #updating the final soil store level (removing the F1 value)
    soilstor=soilstor-f1 
    
    #increases epikarst store volume
    epxstor=epxstorxp+f1
    #draining from bottom first as gravity fed
    f3 = epxstor*k_f3
    #diffuse flow leaving epikarst and going to KS1
    #assuming diffuse flow follows a weibull distrubtion
    dpdf[0]=(epxstor-f3)*k_diffuse
    if epxstor-f3-dpdf[0]> epicap:
        f4=(epxstor-f3-dpdf[0]-epicap)
    else:
        f4=0
    
    #epikarst evaporation starts when the soil store drops to 10% of capacity
    #and increases with decreasing soil store
    #added the (1-4*...) term; the '4' can be changed
    if prp==0:
        e_evpt=k_e_evap*evpt
    elif soilstor<=0.1*soilsize:
        e_evpt=k_e_evap*evpt*(1-4*soilstor/soilsize)
    #elif condition is a second route for epikarst evaporation through
    #bypass flow which is the same route as used for stal 2 & 3
    else:
        e_evpt=0
    
    #calculating final epikarst value
    if epxstor-f3-f4-dpdf[0]-e_evpt<0:
        epxstor=0
    else:    
        epxstor=epxstor-f3-f4-dpdf[0]-e_evpt
        
    #ensuring the epxstor does not exceed user-defined capacity 
    if epxstor>episize:
        epxstor=episize
    
    #fluxes into and out of KS2
    kststor2=kststor2xp+f4 
    if kststor2 > ovcap:
        f7=(kststor2-ovcap)*k_f7
    else: 
        f7=0 
    f6=(kststor2-f7)*k_f6
    kststor2=kststor2-f6-f7

    #ensuring the kststor2 does not exceed user-defined capacity 
    if kststor2>ks2size:
        kststor2=ks2size
    
    #f8 bypass flow from surface rain to KS1
    if prp>7:
        f8=prp*k_f8
    else:
        f8=0
    
    #fluxes into and out of KS1
    kststor1=kststor1xp+f3+sum(y*dpdf)+f7+f8
    f5=kststor1*k_f5
    kststor1=kststor1-f5
    
    #ensuring the kststor1 does not exceed user-defined capacity 
    if kststor1>ks1size:
        kststor1=ks1size
    
    #mixing and fractionation of soil store d18o
    e=prp+soilstorxp
    if e<0.01:
        e=0.001
    f=soilstorxp/e
    g=prp/e
    # 0.03 term can be changed to enable evaporative fractionation in soil store
    h_1=d18o+(evpt*k_evapf)
    #mixing of soil d18o with prp and ???
    soil18o=(f*soil18oxp)+(g*h_1) 
    
    #if the soil value becomes positive it is reverted to the original soil d18o. Justified??
    if soil18o>0.0001:
        soil18o=soil18oxp
        
    #mixing and fractionation of epikarst store d18o
    a=f1
    b=a+epxstorxp
    #quick fix for divide-by-zero error when b is too small
    if b<=0.001:
        b=0.001
    c=(epxstorxp/b)*(epx18oxp+e_evpt*k_e_evapf)
    d=(a/b)*soil18o
    epx18o=c+d
    epdf[0]=epx18o
    
    #mixing of kststor2 d18o
    if f4<0.01:
        kststor218o=kststor218oxp 
    else:
        b2=f4+kststor2xp 
        c2=(kststor2xp/b2)*kststor218oxp 
        d2=(f4/b2)*epx18o 
        kststor218o=c2+d2 
    
    #mixing of KS1 d18o
    b1=f3+kststor1xp+sum(y*dpdf)+f7+f8
    c1=(kststor1xp/b1)*kststor118oxp 
    d1=(f3/b1)*epx18o 
    e1=(sum(y*dpdf*epdf)/b1)
    g1=(f7/b1)*kststor218o
    h1=f8/b1*d18o
    kststor118o=c1+d1+e1+g1+h1
    
    #bypass flow (from epikarst and direct from rain)
    p=d18o 
    r=d18oxp
    drip118o=(kststor118o*i)+(p*j)+(r*k)
    drip218o=(kststor118o*m)+(p*n)
    
    
    #if kststor2 is too low, a clearly outlying value is assigned for the stal
    if kststor2<0.01:
        stal1d18o=-99.9
        drip_interval_ks2=9001
    else:
        if calculate_drip:
            #drip-interval: user inputted max drip-interval proportioned by store capacity
            drip_interval_ks2=int(drip_interval*ks2size/kststor2)
        else:
            drip_interval_ks2=int(drip_interval)
        #running the ISOLUTION part of the model
        stal1d18o=isotope_calcite(drip_interval_ks2, cave_temp, drip_pco2, cave_pco2, h, v, phi, 
        kststor218o,tt)


    #same drip interval calculation for the epikarst store (stalagmite 4)
    if epxstor<0.01:
        stal4d18o=-99.9
        drip_interval_epi=9001
    else:
        if calculate_drip:
            drip_interval_epi=int(drip_interval*episize/epxstor)
        else:
            drip_interval_epi=int(drip_interval)
        stal4d18o=isotope_calcite(drip_interval_epi, cave_temp, drip_pco2, cave_pco2, h, v, phi,
        epx18o,tt)
    
    #drip interval calculations for Karst Store 1, which includes the bypass stalagmites 2 and 3.
    #Drip intervals for stalagmites 3 and 2 also fold in current (prp) and previous (prpxp) rainfall.
    if kststor1<0.01:
        stal2d18o=-99.9
        stal3d18o=-99.9
        stal5d18o=-99.9
        drip_interval_ks1=9001
        drip_interval_stal3=9001
        drip_interval_stal2=9001
    else:
        if calculate_drip:
            drip_interval_ks1=int(drip_interval*ks1size/kststor1)
            ks1_temp3=kststor1+prp
            drip_interval_stal3=int(drip_interval*ks1size/ks1_temp3)
            ks1_temp2=ks1_temp3+prpxp
            drip_interval_stal2=int(drip_interval*ks1size/ks1_temp2)
        else:
            drip_interval_ks1=int(drip_interval)
            drip_interval_stal3=int(drip_interval)
            drip_interval_stal2=int(drip_interval)
        stal5d18o=isotope_calcite(drip_interval_ks1, cave_temp, drip_pco2, cave_pco2, h, v, 
        phi,kststor118o,tt)
        stal3d18o=isotope_calcite(drip_interval_stal3, cave_temp, drip_pco2, cave_pco2, h, v, 
        phi,drip218o,tt)
        stal2d18o=isotope_calcite(drip_interval_stal2, cave_temp, drip_pco2, cave_pco2, h, v, 
        phi,drip118o,tt)
    
    #returning the values to the karstolution1.1 module to be written to output
    return [tt,mm,f1,f3,f4,f5,f6,f7,soilstor,epxstor,kststor1,kststor2,soil18o,epx18o,kststor118o,
    kststor218o,dpdf[0],stal1d18o,stal2d18o,stal3d18o,stal4d18o,stal5d18o,drip_interval_ks2,
    drip_interval_epi,drip_interval_stal3,drip_interval_stal2,drip_interval_ks1,cave_temp]
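The diffuse-flow routing above weights recent epikarst recharge (the dpdf array) by increments of an exponentiated-Weibull CDF over twelve steps. A standalone sketch of that weighting, with hypothetical shape parameters w and z:

import numpy as np
import scipy.stats as s

w, z = 1.0, 2.0          # hypothetical Weibull parameters (data_rest[6])
x = np.linspace(0, 2, 12)
y1 = s.exponweib(w, z).cdf(x)
# Per-step weights are successive CDF increments with a leading zero,
# exactly as computed inside karst_process().
y = np.append([0], y1[1:] - y1[0:11])
print(y, y.sum())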
Example no. 42
# Setup inferred from usage; a and c are the exponweib shape parameters and
# are defined earlier in the original example (their values are not shown here).
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import exponweib

fig, ax = plt.subplots(1, 1)

x = np.linspace(exponweib.ppf(0.01, a, c), exponweib.ppf(0.99, a, c), 100)
ax.plot(x,
        exponweib.pdf(x, a, c),
        'r-',
        lw=5,
        alpha=0.6,
        label='exponweib pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = exponweib(a, c)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = exponweib.ppf([0.001, 0.5, 0.999], a, c)
np.allclose([0.001, 0.5, 0.999], exponweib.cdf(vals, a, c))
# True

# Generate random numbers:

r = exponweib.rvs(a, c, size=1000)

# And compare the histogram:

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()