Code Example #1
File: pricing_test.py  Project: teopir/maxestimation
import numpy as np
from scipy.stats import gamma
# import matplotlib.pyplot as plt   # needed only if the plotting lines below are uncommented

minPrice = 0    # assumed value; minPrice is not defined in the original excerpt
maxPrice = 10

# gamma parameters
shape = 2
scale = 1.5

nsamples = 300

# np.random.seed(852952)
tau = np.random.gamma(shape, scale, nsamples)
actions = np.random.rand(nsamples) * (maxPrice - minPrice) + minPrice

nbins = 10
actionsBins = np.linspace(minPrice, maxPrice, nbins + 1)

print(gamma.mean(shape, scale=scale))
print(actionsBins)
discreteActions = np.digitize(actions, actionsBins)
print(actions[:10])
print(discreteActions[:10])

rewardsW = np.zeros(actions.size)
rewardsDiscrete = np.zeros(actions.size)
rewardsW[actions <= tau] = actions[actions <= tau]
rewardsDiscrete[discreteActions <= tau] = discreteActions[discreteActions <= tau]
print(rewardsW)
# plt.scatter(actions, rewardsW)
# plt.show()
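A minimal sanity check (not part of the original file): for these parameters, gamma.mean(shape, scale=scale) is the analytical mean shape * scale = 3.0, which the empirical mean of the sampled tau should approach as nsamples grows.

import numpy as np
from scipy.stats import gamma

shape, scale, nsamples = 2, 1.5, 300
tau = np.random.gamma(shape, scale, nsamples)
print(gamma.mean(shape, scale=scale))  # analytical mean: shape * scale = 3.0
print(tau.mean())                      # empirical mean, close to 3.0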


Code Example #2
 def mean(self, dist):
     return gamma.mean(*self._get_params(dist))
Code Example #3
File: Gamma_dist.py  Project: Gautam-v-ml/Math
 def mean(self, n, p):
     mu = gamma.mean(self, n, p)
     return mu
Code Example #4
    plt.hist(mean_val, density=True, alpha=0.5, label='hist mean n ' + str(n))

    # sigma of the approximating normal distribution for the sample mean
    sigma = np.sqrt(Dx / n)
    print('expectation=', Ex)
    print('sigma=', sigma)
    # define the normal distribution
    norm_rv = sts.norm(loc=Ex, scale=sigma)
    x = np.linspace(6, 14, 100)
    pdf = norm_rv.pdf(x)
    plt.plot(x, pdf, 'r-', lw=3, alpha=0.7, label='erlang pdf n ' + str(n))
    plt.ylabel('samples')
    plt.xlabel('$x$')
    plt.legend(loc='upper right')
    plt.show()


# Compute the theoretical EX, std, and DX of the distribution
EX = gamma.mean(k)
std = gamma.std(k)
DX = std**2
print('Ex=', EX, ' STD=', std, ' DX=', DX)

gamma_func(5, EX, DX)
gamma_func(10, EX, DX)
gamma_func(50, EX, DX)

# ## Conclusion:
# The distribution of sample means for the gamma distribution is well described by a normal distribution.
# The accuracy of the approximation improves as n grows.
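
The conclusion above can be reproduced with a short self-contained sketch (the shape k and the sample counts below are assumed, since the original k and gamma_func are only partially shown):

import numpy as np
from scipy.stats import gamma

k = 10           # assumed shape; the original value of k is not shown in the excerpt
n = 50           # samples per mean
n_means = 1000   # number of sample means

EX, DX = gamma.mean(k), gamma.std(k) ** 2               # theoretical mean and variance
means = gamma.rvs(k, size=(n_means, n)).mean(axis=1)    # distribution of sample means
print(means.mean(), EX)                # close to the theoretical expectation
print(means.std(), np.sqrt(DX / n))    # close to sigma = sqrt(DX / n), as the CLT predicts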
Code Example #5
 def mean(self) -> np.ndarray:
     return np.atleast_1d(
         gamma.mean(self._shape, scale=self._scale)
     )
Code Example #6
def analytical_MPVS(
        infection_ts: pd.DataFrame, 
        smoothing: Callable,
        alpha: float = 3.0,                # shape 
        beta:  float = 2.0,                # rate
        CI:    float = 0.95,               # confidence interval 
        infectious_period: int = 5*days,   # inf period = 1/gamma,
        variance_shift: float = 0.99,      # how much to scale variance parameters by when anomaly detected 
        totals: bool = True                # are these case totals or daily new cases?
    ):
    """Estimates Rt ~ Gamma(alpha, 1/beta), and implements an analytical expression for a mean-preserving variance increase whenever case counts fall outside the CI defined by a negative binomial distribution"""
    # infection_ts = infection_ts.copy(deep = True)
    dates = infection_ts.index
    if totals:
        # daily_cases = np.diff(infection_ts.clip(lower = 0)).clip(min = 0) # infection_ts clipped because COVID19India API does weird stuff
        daily_cases = infection_ts.clip(lower = 0).diff().clip(lower = 0).iloc[1:]
    else: 
        daily_cases = infection_ts 
    total_cases = np.cumsum(smoothing(np.squeeze(daily_cases)))

    v_alpha, v_beta = [], []

    RR_pred, RR_CI_upper, RR_CI_lower = [], [], []

    T_pred, T_CI_upper, T_CI_lower = [], [], []

    new_cases_ts = []

    anomalies     = []
    anomaly_dates = []

    for i in range(2, len(total_cases)):
        new_cases     = max(0, total_cases[i]   - total_cases[i-1])
        old_new_cases = max(0, total_cases[i-1] - total_cases[i-2])

        alpha += new_cases
        beta  += old_new_cases
        v_alpha.append(alpha)
        v_beta.append(beta)

        RR_est   = max(0, 1 + infectious_period*np.log(Gamma.mean(     a = alpha, scale = 1/beta)))
        RR_upper = max(0, 1 + infectious_period*np.log(Gamma.ppf(CI,   a = alpha, scale = 1/beta)))
        RR_lower = max(0, 1 + infectious_period*np.log(Gamma.ppf(1-CI, a = alpha, scale = 1/beta)))
        RR_pred.append(RR_est)
        RR_CI_upper.append(RR_upper)
        RR_CI_lower.append(RR_lower)

        if (new_cases == 0 or old_new_cases == 0):
            if new_cases == 0:
                logger.debug("new_cases at time %s: 0", i)
            if old_new_cases == 0:
                logger.debug("old_new_cases at time %s: 0", i)
            T_pred.append(0)
            T_CI_upper.append(10) # <- where does this come from?
            T_CI_lower.append(0)
            new_cases_ts.append(0)

        if (new_cases > 0 and old_new_cases > 0):
            new_cases_ts.append(new_cases)

            r, p = alpha, beta/(old_new_cases + beta)
            T_pred.append(nbinom.mean(r, p))
            T_upper = nbinom.ppf(CI,   r, p)
            T_lower = nbinom.ppf(1-CI, r, p)
            T_CI_upper.append(T_upper)
            T_CI_lower.append(T_lower)

            _np = p
            _nr = r 
            anomaly_noted = False
            counter = 0
            while not (T_lower < new_cases < T_upper):
                if not anomaly_noted:
                    anomalies.append(new_cases)
                    anomaly_dates.append(dates[i])
                
                # logger.debug("anomaly identified at time %s: %s < %s < %s, r: %s, p: %s, annealing iteration: %s", i, T_lower, new_cases, T_upper, _nr, _np, counter+1)
                # nnp = 0.95 *_np # <- where does this come from 
                _nr = variance_shift * _nr * ((1-_np)/(1-variance_shift*_np) )
                _np = variance_shift * _np 
                T_upper = nbinom.ppf(CI,   _nr, _np)
                T_lower = nbinom.ppf(1-CI, _nr, _np)
                T_lower, T_upper = sorted((T_lower, T_upper))
                if T_lower == T_upper == 0:
                    T_upper = 1
                    logger.debug("CI collapse, setting T_upper -> 1")
                anomaly_noted = True

                counter += 1
                if counter >= 10000:
                    raise ValueError("Number of iterations exceeded")
            else:
                if anomaly_noted:
                    alpha = _nr # update distribution on R with new parameters that enclose the anomaly 
                    beta = _np/(1-_np) * old_new_cases

                    T_pred[-1] = nbinom.mean(_nr, _np)
                    T_CI_lower[-1] = nbinom.ppf(CI,   _nr, _np)
                    T_CI_upper[-1] = nbinom.ppf(1-CI, _nr, _np)

                    # annealing leaves the RR mean unchanged, but we need to adjust its widened CI
                    RR_upper = max(0, 1 + infectious_period * np.log(Gamma.ppf(CI    , a = alpha, scale = 1/beta)))
                    RR_lower = max(0, 1 + infectious_period * np.log(Gamma.ppf(1 - CI, a = alpha, scale = 1/beta)))

                    # replace latest CI time series entries with adjusted CI 
                    RR_CI_upper[-1] = RR_upper
                    RR_CI_lower[-1] = RR_lower
    return (
        dates[2:], 
        RR_pred, RR_CI_upper, RR_CI_lower, 
        T_pred, T_CI_upper, T_CI_lower, 
        total_cases, new_cases_ts, 
        anomalies, anomaly_dates
    )
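
Since a Gamma distribution with shape alpha and scale 1/beta has mean alpha/beta, the RR_est line above reduces to 1 + infectious_period * log(alpha / beta). A minimal equivalence check (not part of the original module):

import numpy as np
from scipy.stats import gamma as Gamma

alpha, beta, infectious_period = 3.0, 2.0, 5
assert np.isclose(Gamma.mean(a=alpha, scale=1/beta), alpha / beta)
print(1 + infectious_period * np.log(alpha / beta))  # matches RR_est before the max(0, ...) clipping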
Code Example #7
 def update_lambda(self, data):
     self.occurrence_shape += sum(data)
     self.occurrence_scale += len(data) * self.interval
     self._gamma_map = self._gamma_mode(self.occurrence_shape, self.occurrence_scale)
     self._gamma_mean = gamma.mean(self.occurrence_shape, scale=1/float(self.occurrence_scale))
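
update_lambda above is the standard Gamma-Poisson conjugate update: the shape grows by the total event count and occurrence_scale (used as a rate, since it is passed as scale=1/occurrence_scale) grows by the observed exposure, so the posterior mean gamma.mean(shape, scale=1/rate) is simply shape/rate. A standalone sketch of the same update (variable names here are illustrative, not from the original class):

from scipy.stats import gamma

shape, rate, interval = 1.1, 1.0, 1.0   # prior values matching the reset() method shown later
data = [2, 0, 3, 1]                     # observed event counts per interval

shape += sum(data)                      # add observed events
rate += len(data) * interval            # add observed exposure time
print(gamma.mean(shape, scale=1/rate))  # posterior mean rate, equal to shape / rate
print(shape / rate)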
Code Example #8
File: testGamma.py  Project: pslota/Ice-modelling
    d0 = d0[0]/d0[0]

    return dm2, dm1, d0, d1, d2, d3

#dm2, dm1, d0, d1, d2, d3 = getProbabilities(5)


# Example from:
# http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.gamma.html
# http://docs.scipy.org/doc/scipy-0.14.0/reference/tutorial/integrate.html

# gamma function
a = 5
rv = gamma(a, loc=0., scale = 1.)       # scale = 1.0 / lambda.
median = gamma.median(a)
mean = gamma.mean(a)

x0 = scipy.optimize.fsolve(lambda x: rv.pdf(x), 0.1)       # 0.1 is the starting point

# find the top
increase = True
df0 = 0
delta = 0.01
x = 0.01

while increase == True:
    h = rv.pdf(x+delta) - rv.pdf(x)
    if h < 0:
        increase = False
    else:
        x = x + delta
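
The "find the top" loop steps along the pdf until it stops increasing, i.e. it numerically locates the mode. For shape a >= 1 the gamma mode is (a - 1) * scale, so with a = 5 and scale = 1 the loop should stop near x = 4; a quick check (not part of the original file):

from scipy.stats import gamma

a = 5
rv = gamma(a, loc=0.0, scale=1.0)
mode = (a - 1) * 1.0          # analytical mode of the gamma distribution for a >= 1
print(mode, rv.pdf(mode))     # the stepping loop above converges to roughly this x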
Code Example #9
 def reset(self):
     self.occurrence_scale = 1.0
     self.occurrence_shape = 1.1
     self._gamma_map = self._gamma_mode(self.occurrence_shape, self.occurrence_scale)
     self._gamma_mean = gamma.mean(self.occurrence_shape, scale=1/float(self.occurrence_scale))
Code Example #10
File: rate.py  Project: ferdianjovan/spectral_popp
 def update_rate(self, data):
     self.alpha += sum(data)
     self.beta += len(data) * self.interval
     self.mode = self._mode(self.alpha, self.beta)
     self.mean = gamma.mean(self.alpha, scale=1/float(self.beta))
Code Example #11
File: rate.py  Project: ferdianjovan/spectral_popp
 def reset(self):
     self.beta = 1.1
     self.alpha = 1.1
     self.mode = self._mode(self.alpha, self.beta)
     self.mean = gamma.mean(self.alpha, scale=1/float(self.beta))
Code Example #12
 def mean(self):
     return gamma.mean(self.shape, self.loc, self.scale)
Code Example #13
    return dm2, dm1, d0, d1, d2, d3


# dm2, dm1, d0, d1, d2, d3 = getProbabilities(5)


# Example from:
# http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.gamma.html
# http://docs.scipy.org/doc/scipy-0.14.0/reference/tutorial/integrate.html

# gamma function
a = 5
rv = gamma(a, loc=0.0, scale=1.0)  # scale = 1.0 / lambda.
median = gamma.median(a)
mean = gamma.mean(a)

x0 = scipy.optimize.fsolve(lambda x: rv.pdf(x), 0.1)  # 0.1 is the starting point

# find the top
increase = True
df0 = 0
delta = 0.01
x = 0.01

while increase == True:
    h = rv.pdf(x + delta) - rv.pdf(x)
    if h < 0:
        increase = False
    else:
        x = x + delta