Example No. 1
def calculate_posteriors(installs_A, installs_B, views_A, views_B):
    # Prior Parameters
    alpha = .5
    beta = 100

    # Update Equations
    alpha_post_A, beta_post_A = installs_A + alpha, beta + views_A - installs_A
    alpha_post_B, beta_post_B = installs_B + alpha, beta + views_B - installs_B

    # Frozen posterior Beta distributions
    posterior_A = beta_dist(a=alpha_post_A, b=beta_post_A)
    posterior_B = beta_dist(a=alpha_post_B, b=beta_post_B)

    return posterior_A, posterior_B
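A minimal usage sketch, assuming beta_dist is scipy.stats.beta and using hypothetical install/view counts:

from scipy.stats import beta as beta_dist

posterior_A, posterior_B = calculate_posteriors(
    installs_A=120, installs_B=145, views_A=10000, views_B=10000)

# Monte Carlo estimate of P(B beats A) from the two frozen posteriors
samples_A = posterior_A.rvs(size=100000, random_state=0)
samples_B = posterior_B.rvs(size=100000, random_state=1)
print("P(B > A) ~", (samples_B > samples_A).mean())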
Example No. 3
 def values(self):
     quantiles = []
     for i in range(self.k):
         a, b = self.compute_ab(i)
         quantiles.append(
             beta_dist(a, b).ppf(1 - 1 / (self.t * np.log(self.n)**self.c)))
     return quantiles
Example No. 4
    def __init__(self, k, alpha=1, beta=1, max_steps=None):
        self.k = k
        self.alpha = alpha
        self.beta = beta
        self.max_steps = max_steps

        self.dist = beta_dist(self.alpha, self.beta)
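The values() methods in Examples No. 3 and No. 19 call a compute_ab helper that is not shown here. A minimal sketch, assuming the object tracks per-arm counts (the successes and failures attribute names are hypothetical):

    def compute_ab(self, i):
        # Conjugate update of the Beta(alpha, beta) prior with the
        # hypothetical success/failure counts observed for arm i
        return self.alpha + self.successes[i], self.beta + self.failures[i]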
Example No. 5
def beta_pdf(alpha, beta):
    """ Produce curve of beta distribution.

    :param alpha: Alpha parameter of beta distribution
    :param beta: Beta parameter of beta distribution
    :return: curve of beta distribution
    """
    x = np.linspace(0, 1, 1000)
    return x, beta_dist(1 + alpha, 1 + beta).pdf(x)
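Note that beta_pdf adds 1 to each argument, i.e. alpha and beta are treated as success/failure counts updating a uniform Beta(1, 1) prior, the same convention as Example No. 13. A short usage sketch with hypothetical counts, assuming matplotlib is imported as plt:

x, y = beta_pdf(alpha=12, beta=8)  # 12 successes, 8 failures (hypothetical)
plt.plot(x, y)
plt.show()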
Example No. 6
    def _epsilon_dist(self):
        """
        """
        mu = self.param_dict['elliptical_shape_mu_1_' + self.gal_type]
        sigma = self.param_dict['elliptical_shape_sigma_1_' + self.gal_type]

        alpha, beta = _beta_params(mu, sigma**2)

        d = beta_dist(alpha, beta)
        return d
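Both this method and _gamma_prime_dist in Example No. 9 rely on a _beta_params helper that is not shown. A plausible sketch, assuming the standard moment matching of a Beta distribution to a given mean and variance:

def _beta_params(mu, var):
    # Solve for (alpha, beta) such that the Beta distribution has mean mu
    # and variance var; requires 0 < var < mu * (1 - mu)
    common = mu * (1 - mu) / var - 1
    return mu * common, (1 - mu) * common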
Example No. 7
 def plot(self, x, n):
     """ plots the prior pdf density over the data histogram"""
     # add various plotting args
     x_ax = np.linspace(0, 1, 1000)
     rv = beta_dist(self.alpha, self.beta)
     p = x / n
     plt.hist(p, density=True)
     plt.plot(x_ax, rv.pdf(x_ax))
     plt.title(f'Beta({self.alpha.round(2)},{self.beta.round(2)})')
     plt.show()
Example No. 8
 def median(self):
     """ Calculates the median
     
     Returns
     -------
     Array:
         Array of median values.
     """
     median = (beta_dist(self.alpha, self.beta).median() *
               self.range) + self.a
     return median
Example No. 9
    def _gamma_prime_dist(self):
        """
        gamma_prime = 1 - C/B
        """
        mu = self.param_dict['elliptical_shape_mu_2_' + self.gal_type]
        sigma = self.param_dict['elliptical_shape_sigma_2_' + self.gal_type]

        alpha, beta = _beta_params(mu, sigma**2)

        d = beta_dist(alpha, beta)
        return d
Example No. 10
def sample_init_Z_bernoulli(alpha, beta, n_samples=10):

    pbeta = beta_dist(a=alpha, b=beta)
    pi = pbeta.rvs(size=n_samples)

    elements = np.zeros(n_samples)
    for i, p in enumerate(pi):
        elements[i] = bernoulli.rvs(p)
    mean = elements.mean()
    # unbiased variance
    var = elements.var(ddof=1)
    return mean, var
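A quick usage sketch with hypothetical prior parameters, assuming beta_dist and bernoulli come from scipy.stats:

mean, var = sample_init_Z_bernoulli(alpha=2.0, beta=5.0, n_samples=1000)
print(mean, var)  # mean should land near E[p] = alpha / (alpha + beta) ≈ 0.29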
Example No. 11
    def rvs(self, size=1, random_state=None):
        """ Returns a randompy-sampled value from the PERT
        
        Parameters
        ----------
        size: int (default 1)
            Indicates how many random values should be returned
        random_state: int (default None)
            Seed value for random sample RNG.
            
        Returns
        -------
        Array:
            Randomly sampled values from the PERT distribution.
        """

        rvs_vals = (beta_dist(self.alpha, self.beta).rvs(
            size=size, random_state=random_state) * self.range) + self.a
        return rvs_vals
Example No. 12
def main():
    fig = plt.figure()
    x = np.linspace(0, 1, 100)
    sizes = [1, 2, 20, 50]
    fig_row, fig_col = 2, 4

    # Mean of i.i.d uniform
    for i, n in enumerate(sizes):
        ax = fig.add_subplot(fig_row, fig_col, i + 1)
        data, gaussian = central_limit(uniform_dist.rvs, n, 1000)
        ax.hist(data, bins=20, density=True, alpha=0.7)
        plt.plot(x, gaussian.pdf(x), 'r')
        plt.title('n={0}'.format(n))

    # Mean of i.i.d beta(1, 2)
    for i, n in enumerate(sizes):
        ax = fig.add_subplot(fig_row, fig_col, i + fig_col + 1)
        data, gaussian = central_limit(beta_dist(1, 2).rvs, n, 1000)
        ax.hist(data, bins=20, density=True, alpha=0.7)
        plt.plot(x, gaussian.pdf(x), 'r')
        plt.title('n={0}'.format(n))

    plt.show()
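Example No. 12 depends on a central_limit helper that is not shown. One plausible sketch, assuming it draws repeated sample means from the supplied sampler and returns them with a matching scipy.stats.norm approximation:

import numpy as np
from scipy.stats import norm

def central_limit(rvs, n, n_trials):
    # n_trials sample means, each averaging n i.i.d. draws from `rvs`
    means = rvs(size=(n_trials, n)).mean(axis=1)
    # Gaussian suggested by the CLT, fit to the empirical means
    gaussian = norm(loc=means.mean(), scale=means.std())
    return means, gaussian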
Example No. 13
beta_examples = [(0, 0), (2, 0), (2, 5), (18, 9), (32, 50), (130, 80)]

# colors for the plots
beta_colors = ['blue', 'orange', 'green', 'red', 'purple', 'brown']

# opening figure
fig, ax = plt.subplots(figsize=(14, 6), dpi=150, nrows=2, ncols=3)

# loop for each
for i, example in enumerate(beta_examples):

    # points to sample for drawing the curve
    X = np.linspace(0, 1, 1000)

    # generating the curve
    dist = beta_dist(1 + example[0], 1 + example[1])
    curve = dist.pdf(X)

    # plotting the filled pdf curve
    ax[int(i / 3)][(i % 3)].fill_between(X,
                                         0,
                                         curve,
                                         color=beta_colors[i],
                                         alpha=0.7)
    ax[int(i / 3)][(i % 3)].set_title('Successes: {} | Failures: {}'.format(
        example[0], example[1]),
                                      fontsize=14)

# some adjustments
ax[0][0].set(ylim=[0, 2])
plt.tight_layout()
Example No. 14
def alpha(m_0, mod_m):
    return ((1 - m_0) / (m_0 * mod_m ** 2) - 1) * m_0


def beta(m_0, mod_m):
    return ((1 - m_0) / (m_0 * mod_m ** 2) - 1) * (1 - m_0)


if __name__ == "__main__":

    # Number of measurements of FPOL
    n = 10
    # True mean intrinsic FPOL
    m_0_true = 0.2
    # True modulation index
    mod_m_true = 0.4
    print("True mean FPOL: ", m_0_true)
    print("True modulation index: ", mod_m_true)
    # True variable FPOL values
    ms_i_true = beta_dist(alpha(m_0_true, mod_m_true), beta(m_0_true, mod_m_true)).rvs(n)
    # Some errors
    sigmas = 0.03*np.ones(n)
    # Observed variable FPOL values
    ms_obs = rice_dist(ms_i_true / sigmas, scale=sigmas).rvs(n)

    print("Observed FPOL: ", ms_obs)
    print("Errors of FPOL: ", sigmas)

    res = fit(ms_obs, sigmas)
    print("Estimated mean FPOL: ", res[0])
    print("Estimated modulation index: ", res[1])
Example No. 15
 def _reset_distribution(self):
     self._distribution: rv_continuous = beta_dist(a=self.alpha,
                                                   b=self.beta,
                                                   loc=self._a,
                                                   scale=self._c - self._a)
Example No. 16
 def _reset_distribution(self):
     self._distribution: rv_continuous = beta_dist(self._alpha, self._beta)
Example No. 17
elif opts.log_eff_prior:
    # logarithmic uniform prior
    lower_bound, upper_bound = opts.log_eff_bounds.split(',')
    grb_efficiency_axis = linspace(float(lower_bound), float(upper_bound),
                                   1000)

    Norm = log(float(upper_bound) / float(lower_bound))
    grb_efficiency_prior = Norm / grb_efficiency_axis

    outputname = opts.outputname + "_logEffPrior-%s-%s" % (lower_bound,
                                                           upper_bound)

elif opts.berno_eff_prior:
    # bernoulli trial parameter
    prior_dist = beta_dist(0.5, 0.5)
    grb_efficiency_axis = linspace(0.01, 0.99, 1000)
    grb_efficiency_prior = prior_dist.pdf(grb_efficiency_axis)

    outputname = opts.outputname + "_bernoEffPrior"

elif opts.beta_eff_prior:
    # beta distribution prior
    beta_vals = opts.beta_vals.split(',')
    prior_dist = beta_dist(float(beta_vals[0]), float(beta_vals[1]))

    grb_efficiency_axis = linspace(0.01, 0.99, 1000)
    grb_efficiency_prior = prior_dist.pdf(grb_efficiency_axis)

    outputname = opts.outputname + "_betaEffPrior-%s-%s" % (beta_vals[0],
                                                            beta_vals[1])
Example No. 18
    outputname = opts.outputname + "_flatEffPrior-%s-%s" % (lower_bound, upper_bound)

elif opts.log_eff_prior:
    # logarithmic uniform prior
    lower_bound, upper_bound = opts.log_eff_bounds.split(',')
    grb_efficiency_axis = linspace(float(lower_bound), float(upper_bound), 1000)

    Norm = log(float(upper_bound) / float(lower_bound))
    grb_efficiency_prior = Norm / grb_efficiency_axis

    outputname = opts.outputname + "_logEffPrior-%s-%s" % (lower_bound, upper_bound)

elif opts.berno_eff_prior:
    # bernoulli trial parameter
    prior_dist = beta_dist(0.5, 0.5)
    grb_efficiency_axis = linspace(0.01, 0.99, 1000)
    grb_efficiency_prior = prior_dist.pdf(grb_efficiency_axis)

    outputname = opts.outputname + "_bernoEffPrior"

elif opts.beta_eff_prior:
    # beta distribution prior
    beta_vals = opts.beta_vals.split(',')
    prior_dist = beta_dist(float(beta_vals[0]), float(beta_vals[1]))

    grb_efficiency_axis = linspace(0.01, 0.99, 1000)
    grb_efficiency_prior = prior_dist.pdf(grb_efficiency_axis)

    outputname = opts.outputname + "_betaEffPrior-%s-%s" % (beta_vals[0], beta_vals[1])
Example No. 19
 def values(self):
     samples = []
     for i in range(self.k):
         a, b = self.compute_ab(i)
         samples.append(beta_dist(a, b).rvs())
     return samples
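This values() draws one posterior sample per arm, which is the core of Thompson sampling; a hypothetical selection step built on it could look like:

import numpy as np

def select_arm(bandit):
    # Choose the arm whose posterior sample is largest
    return int(np.argmax(bandit.values()))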
Example No. 20
def get_beta_pdf(alpha, beta):
    X = np.linspace(0, 1, 1000)
    return X, beta_dist(1 + alpha, 1 + beta).pdf(X)