Example #1
from scipy.stats import beta, gamma, invgamma, norm


def joint_prob(params):
    # Compute the joint prior density for the given parameter vector.
    # Unpack the parameters:
    betas = params[0]
    theta = params[1]
    delta = params[2]
    v = params[3]
    alpha = params[4]
    rho = params[5]
    mu_d = params[6]
    sigma_d = params[7]
    mu_f = params[8]
    sigma_f = params[9]

    p1 = beta.pdf(betas, 9.28, 0.386)
    p2 = gamma.pdf(theta, 2)
    p3 = beta.pdf(delta, 89.9, 809.1)
    p4 = gamma.pdf(v, 2)
    p5 = beta.pdf(alpha, 182, 369.5)
    p6 = beta.pdf(rho, 162.88, 6.78)
    p7 = norm.pdf(mu_d, 0, 0.04)
    p8 = invgamma.pdf(sigma_d, 2.0025, 0, 0.010025)
    p9 = norm.pdf(mu_f, 0, 0.04)
    p10 = invgamma.pdf(sigma_f, 2.0625, 0, 0.053125)

    P = p1 * p2 * p3 * p4 * p5 * p6 * p7 * p8 * p9 * p10
    return P
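A product of ten densities underflows easily; a log-space variant of the same priors (a sketch, using the imports above) is safer:

def log_joint_prob(params):
    # Sum of log prior densities; exponentiate only if the caller needs P itself.
    betas, theta, delta, v, alpha, rho, mu_d, sigma_d, mu_f, sigma_f = params
    logP = (beta.logpdf(betas, 9.28, 0.386)
            + gamma.logpdf(theta, 2)
            + beta.logpdf(delta, 89.9, 809.1)
            + gamma.logpdf(v, 2)
            + beta.logpdf(alpha, 182, 369.5)
            + beta.logpdf(rho, 162.88, 6.78)
            + norm.logpdf(mu_d, 0, 0.04)
            + invgamma.logpdf(sigma_d, 2.0025, 0, 0.010025)
            + norm.logpdf(mu_f, 0, 0.04)
            + invgamma.logpdf(sigma_f, 2.0625, 0, 0.053125))
    return logP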
Example #2
def PMH(y, x0, phi=1, N=100, iterations=100):
    # A = np.zeros(iterations)
    beta = np.zeros(iterations)
    sigma = np.zeros(iterations)
    z = np.zeros(iterations)
    step1 = 0.01

    # A[0] = .5
    beta[0] = .5
    sigma[0] = .1

    z[0] = log_BPF(phi, sqrt(beta[0]), sqrt(sigma[0]), y, N, sampling_method="multi")[2]

    for i in tqdm(range(1, iterations)):
        # Propose new parameters
        # A_proposed = A[i - 1] + step1 * np.random.normal(size=1)
        beta_proposed = beta[i - 1] + step1 * np.random.normal(size=1)
        sigma_proposed = sigma[i - 1] + step1 * np.random.normal(size=1)
        # while (abs(A_proposed) > 1):
        #     A_proposed = A[i - 1] + step1 * np.random.normal(size=1)
        while (beta_proposed <= 0):
            beta_proposed = beta[i - 1] + step1 * np.random.normal(size=1)  # inv_gamma(.01,.01,x0)
        while (sigma_proposed <= 0):
            sigma_proposed = sigma[i - 1] + step1 * np.random.normal(size=1)  # inv_gamma(.01,.01,x0)

        # Run a PF to evaluate the likelihood
        # z_hat = log_BPF(y=y, A=A_proposed, Q=Q, R=R, x0=x0, N=N)
        z_hat = log_BPF(phi, sqrt(beta_proposed), sqrt(sigma_proposed), y, N, sampling_method="multi")[2]
        # Sample from uniform
        u = stats.uniform.rvs()

        # Compute the acceptance probability
        # numerator = z_hat + stats.norm.logpdf(A_proposed)
        numerator = z_hat + log(invgamma.pdf(a=.01, scale=.01, x=beta_proposed)) + log(
            invgamma.pdf(a=.01, scale=.01, x=sigma_proposed))
        # denominator = z[i - 1] + stats.norm.logpdf(A[i - 1])
        denominator = z[i - 1] + log(invgamma.pdf(a=.01, scale=.01, x=beta[i - 1])) + log(
            invgamma.pdf(a=.01, scale=.01, x=sigma[i - 1]))

        # Acceptance probability
        alpha = np.exp(numerator - denominator)
        # alpha = min(1,alpha)
        # Set next state with acceptance probability
        if u <= alpha:
            z[i] = z_hat
            # A[i] = A_proposed
            beta[i] = beta_proposed
            sigma[i] = sigma_proposed

        else:
            z[i] = z[i - 1]
            # A[i] = A[i-1]
            beta[i] = beta[i - 1]
            sigma[i] = sigma[i - 1]

    # return A
    return beta, sigma, z
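Exponentiating numerator - denominator can overflow for large log-likelihood differences; the same accept/reject test can be done entirely on the log scale (a minimal sketch):

import numpy as np

def accept_log_scale(log_numerator, log_denominator, rng=np.random):
    # Metropolis-Hastings accept/reject computed in log space;
    # equivalent to u <= exp(log_numerator - log_denominator)
    return np.log(rng.uniform()) <= log_numerator - log_denominator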
Example #3
File: demo.py Project: ying531/MCMC-SymReg
def fStruc(node, n_feature, alpha1, alpha2, beta):
    loglike = 0
    loglike_para = 0

    # contribution of hyperparameter sigma_theta
    if node.depth == 0:  #root node
        loglike += np.log(invgamma.pdf(node.sigma_a, 1))
        loglike += np.log(invgamma.pdf(node.sigma_b, 1))

    # contribution of splitting the node or becoming terminal
    if node.type == 0:  #terminal node
        # contribution of choosing a terminal node (earlier form: * np.power(node.depth, beta))
        loglike += np.log(1 - (alpha1 + alpha2)) + beta * np.log(node.depth)
        loglike -= np.log(n_feature)  #contribution of feature selection
    elif node.type == 1:  #unitary operator
        # contribution of splitting
        if node.depth == 0:  #root node
            loglike += np.log(alpha1 / (alpha1 + alpha2))
        else:
            loglike += np.log(alpha1) + np.log(node.depth) * beta - np.log(3)
        # contribution of parameters of linear nodes
        if node.operator == 'ln':
            loglike_para -= np.power((node.a - 1), 2) / (2 * node.sigma_a)
            loglike_para -= np.power(node.b, 2) / (2 * node.sigma_b)
            loglike_para -= 0.5 * np.log(2 * np.pi * node.sigma_a)
            loglike_para -= 0.5 * np.log(2 * np.pi * node.sigma_b)
    else:  #binary operator
        # contribution of splitting
        if node.depth == 0:  #root node
            loglike += np.log(alpha2 / (alpha1 + alpha2))
        else:
            loglike += np.log(alpha2) + np.log(node.depth) * beta - np.log(2)

    # contribution of child nodes
    if node.left is None:  #no child nodes
        return [loglike, loglike_para]
    else:
        fleft = fStruc(node.left, n_feature, alpha1, alpha2, beta)
        loglike += fleft[0]
        loglike_para += fleft[1]
        if node.right is None:  #only one child
            return [loglike, loglike_para]
        else:
            fright = fStruc(node.right, n_feature, alpha1, alpha2, beta)
            loglike += fright[0]
            loglike_para += fright[1]

    return [loglike, loglike_para]
Example #4
def test1():
    np.random.seed(0)
    nside = 8
    ell = np.arange(3. * nside)
    Cltrue = np.zeros_like(ell)
    Cltrue[2:] = 1 / ell[2:]**2
    m = hp.synfast(Cltrue, nside)
    Clhat = hp.anafast(m)

    # sigma_l = (2 * ell + 1) * Clhat  # alternative scaling, superseded below
    sigma_l = Clhat

    Cl = np.linspace(5e-3, 5, 1000)

    sl = sigma_l[2]
    l = ell[2]
    y = np.exp(-(2 * l + 1) * sl / (2 * Cl)) / np.sqrt(Cl**(2 * l + 1))
    plt.semilogx(Cl, y / y.max())
    alpha = (2 * l - 1) / 2
    beta = (2 * l + 1) * sl / 2
    pdf = invgamma.pdf(Cl, alpha, scale=beta)
    plt.semilogx(Cl, pdf / pdf.max())
    pdf = invwishart.pdf(Cl, df=alpha * 2, scale=beta * 2)
    plt.semilogx(Cl, pdf / pdf.max())
    plt.axvline(Clhat[2])
    plt.axvline(Cltrue[2], color='k', linestyle='--')
    plt.savefig('test1.pdf')
    #plt.show()
    plt.close()

    return
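The hand-rolled likelihood above is, up to normalization, exactly the inverse-gamma density being overplotted; a quick standalone check (the values of l and sl here are made up):

import numpy as np
from scipy.stats import invgamma

l, sl = 2.0, 0.1  # hypothetical multipole and sigma_l
Cl = np.linspace(5e-3, 5, 1000)
y = np.exp(-(2 * l + 1) * sl / (2 * Cl)) / np.sqrt(Cl**(2 * l + 1))
pdf = invgamma.pdf(Cl, (2 * l - 1) / 2, scale=(2 * l + 1) * sl / 2)
assert np.allclose(y / y.max(), pdf / pdf.max())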
Example #5
 def first_img(self):
     X = np.linspace(self.mu - 3 * sqrt((self.beta / (self.alpha + 1)) / self.n_null),
                     self.mu + 3 * sqrt((self.beta / (self.alpha + 1)) / self.n_null), 500)
     Y = np.linspace(min(0, 0.5 * self.beta / (self.alpha + 1)), 2 * self.beta / (self.alpha + 1), 500)
     X, Y = np.meshgrid(X, Y)
     Z = norm.pdf(X, self.mu, np.sqrt(Y / self.n_null)) * invgamma.pdf(Y, self.alpha, scale=self.beta)
     fig = plt.figure()
     ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in Matplotlib 3.6
     ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.4, linewidth=0)
     cset = ax.contour(X, Y, Z, zdir='z', offset=0, cmap=cm.coolwarm, alpha=1)
     cset = ax.contour(X, Y, Z, zdir='x', offset=self.mu + 3 * sqrt((self.beta / (self.alpha + 1)) / self.n_null), cmap=cm.coolwarm,
                       alpha=1)
     cset = ax.contour(X, Y, Z, zdir='y', offset=2 * self.beta / (self.alpha + 1), cmap=cm.coolwarm, alpha=1)
     ax.set_xlabel('mu')
     ax.set_ylabel('Sigma^2')
     ax.set_zlabel('Density')
     ax.text2D(-0.11, 0.95, "Parameter Estimate at Peak (true value): \n mu= %.2f (%.2f) \n sigma**2= %.2f (%.2f)" % (self.mu, self.truemu, self.beta / (self.alpha + 1),(self.truesigma)**2),
               transform=ax.transAxes)
     ax.text2D(-0.11, 0.90, 'Number of total trials: %s' % (self.n_null-1), transform=ax.transAxes)
     # if not os.path.isdir('static/NIGdist'):
     #
     #     os.mkdir('static/NIGdist')
     # else:
     #     # Remove old plot files
     #     for filename in glob.glob(os.path.join('static/NIGdist', '*.png')):
     #         os.remove(filename)
     # Use time since Jan 1, 1970 in the filename in order to make
     # a unique filename that the browser has not cached
     plotfile = os.path.join('static', str(time.time()) + '.png')
     plt.savefig(plotfile)
     return plotfile
Example #6
def pull_and_update(b, n):
    outcome = b.pull(n)
    b.update(outcome)
    X = np.linspace(b.mu - 3 * sqrt((b.beta / (b.alpha - 1)) / b.n_null),
                    b.mu + 3 * sqrt((b.beta / (b.alpha - 1)) / b.n_null), 500)
    Y = np.linspace(min(0, 0.5 * b.beta / (b.alpha + 1)), 2 * b.beta / (b.alpha + 1), 500)
    X, Y = np.meshgrid(X, Y)
    Z = norm.pdf(X, b.mu, np.sqrt(Y / b.n_null)) * invgamma.pdf(Y, b.alpha, scale=b.beta)
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in Matplotlib 3.6
    ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.4, linewidth=0)
    cset = ax.contour(X, Y, Z, zdir='z', offset=0, cmap=cm.coolwarm, alpha=1)
    cset = ax.contour(X, Y, Z, zdir='x', offset=b.mu + 3 * sqrt((b.beta / (b.alpha - 1)) / b.n_null), cmap=cm.coolwarm,
                      alpha=1)
    cset = ax.contour(X, Y, Z, zdir='y', offset=2 * b.beta / (b.alpha + 1), cmap=cm.coolwarm, alpha=1)
    ax.set_xlabel('mu')
    ax.set_ylabel('Sigma^2')
    ax.set_zlabel('Density')
    ax.text2D(-0.11, 0.95, "Parameter Estimate at Peak (true value): \n mu= %.2f (%.2f) \n sigma**2= %.2f (%.2f)" % (b.mu, b.truemu, b.beta / (b.alpha + 1),(b.truesigma)**2),
              transform=ax.transAxes)
    ax.text2D(-0.11, 0.90, 'Number of total trials: %s' % (b.n_null-1), transform=ax.transAxes)
    #ax.set_title('Normal-inverse-gamma distribution of the Parameters mu and sigma^2')

    for filename in glob.glob(os.path.join('static', '*.png')):
        os.remove(filename)
    # Use time since Jan 1, 1970 in the filename in order to make
    # a unique filename that the browser has not cached
    plotfile = os.path.join('static', str(time.time()) + '.png')
    plt.savefig(plotfile)
    return plotfile, outcome
Example #7
File: Main.py Project: ms271/Pccf_old
    def test_inv_chi2_pdf(self):
        """
        Test to the Octave function.
        >> format long
        >> chi2pdf(1.5^-1, 4.0)
           ans =  0.119421885095632

        Inverse-chi-square distribution Calculator
        http://keisan.casio.com/exec/system/1304908316 gives
        gives (1.5, 4) -> 0.0530763933


        R
        > library(geoR)
        > dinvchisq(1.5, df=4)
        > ans=0.05307639

        """
        octave_chi2pdf = 0.119421885095632
        online_calculator = 0.0530763933
        r_value = 0.05307639
        v1 = 1.5
        v2 = 4.0
        my_func = inv_chi2(v1, v2)
        using_inv_gamma = invgamma.pdf(v1, v2 / 2.0, scale=0.5)  # scale, not loc; the third positional arg is loc
        print("... Value obtained using inv-gamma :", using_inv_gamma)
        scipy_chi2pdf = chi2.pdf(v1**-1, v2)
        print("... Value obtained using chi2 :", scipy_chi2pdf)
        self.assertAlmostEqual(octave_chi2pdf, scipy_chi2pdf, places=5)
        self.assertAlmostEqual(my_func, octave_chi2pdf, places=5)
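A standalone check of the Jacobian relationship noted in the docstring (a sketch; the result matches R's dinvchisq):

import numpy as np
from scipy.stats import chi2, invgamma

x, df = 1.5, 4.0
via_jacobian = chi2.pdf(1.0 / x, df) / x**2
via_invgamma = invgamma.pdf(x, df / 2.0, scale=0.5)
assert np.isclose(via_jacobian, via_invgamma)  # both ~0.05307639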
Example #8
def lnpriorgamma(x):
    # log of an inverse-gamma prior on the gamma element of x; -inf off-support
    gamma = x[ntemp + nvmr]
    invgprob = invgamma.pdf(gamma, 1, scale=5e-5)
    if invgprob > 0:
        return log(invgprob)
    else:
        return -np.inf
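scipy also exposes logpdf directly, which returns -inf outside the support, so the branch above can be folded into one line (a sketch; ntemp and nvmr are module globals from the original snippet):

from scipy.stats import invgamma

def lnpriorgamma_logpdf(x):
    # identical result: logpdf is -inf wherever pdf is zero
    return invgamma.logpdf(x[ntemp + nvmr], 1, scale=5e-5)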
Example #9
File: calcs.py Project: pkan0583/QFL
def plot_invgamma(a, b, x=None):
    a = float(a)
    b = float(b)
    mean = b / (a - 1)  # defined for a > 1
    stdev = (b**2 / ((a - 1)**2 * (a - 2)))**0.5  # defined for a > 2
    if x is None:  # 'x == None' breaks for array inputs; mean/stdev must exist either way
        points = 5
        x = np.array(range(-3 * points, 3 * points)) * stdev / points + mean
    y = invgamma.pdf(x, a, scale=b)
    plt.plot(x, y)  # density against x (the original plotted the axes swapped)
    return pd.DataFrame(data=y, index=x, columns=['density']), mean, stdev
Example #10
    def _plot_prior_posterior(self, chains, show_charts):
        n_bins = int(sqrt(chains.shape[0]))
        n_cols = int(self.n_param**0.5)
        n_rows = n_cols + 1 if self.n_param > n_cols**2 else n_cols
        subplot_shape = (n_rows, n_cols)

        plt.figure(figsize=(7 * 1.61, 7))

        for count, param in enumerate(list(self.params)):
            mu = self.prior_info.loc[str(param)]['mean']
            sigma = self.prior_info.loc[str(param)]['std']
            a = self.prior_info.loc[str(param)]['param a']
            b = self.prior_info.loc[str(param)]['param b']
            dist = self.prior_info.loc[str(param)]['distribution']

            ax = plt.subplot2grid(subplot_shape,
                                  (count // n_cols, count % n_cols))
            ax.hist(chains[str(param)],
                    bins=n_bins,
                    density=True,
                    color='royalblue',
                    edgecolor='black')
            ax.set_title(self.prior_dict[param]['label'])
            x_min, x_max = ax.get_xlim()
            x = linspace(x_min, x_max, n_bins)

            if dist == 'beta':
                y = beta.pdf(x, a, b)
                ax.plot(x, y, color='red')

            elif dist == 'gamma':
                y = gamma.pdf(x, a, scale=b)
                ax.plot(x, y, color='red')

            elif dist == 'invgamma':
                # algebraically identical to (b**a) * invgamma.pdf(x, a) * exp((1 - b) / x)
                y = invgamma.pdf(x, a, scale=b)
                ax.plot(x, y, color='red')

            elif dist == 'uniform':
                y = uniform.pdf(x, loc=a, scale=b - a)
                ax.plot(x, y, color='red')

            else:  # Normal
                y = norm.pdf(x, loc=mu, scale=sigma)
                ax.plot(x, y, color='red')

        plt.tight_layout()

        if show_charts:
            plt.show()
Example #11
def true_posterior(data, m_0, s_sq_0, v_0, k_0, n):
    m_data = np.mean(data)
    n_data = len(data)
    sum_sq_diff_data = np.sum([(x - m_data)**2 for x in data])

    k_n = k_0 + n_data
    v_n = v_0 + n_data
    m_n = k_0 * m_0 / k_n + n_data * m_data / k_n
    v_n_times_s_sq_n = v_0 * s_sq_0 + sum_sq_diff_data + k_0 * n_data / k_n * (
        m_data - m_0)**2

    alpha = v_n / 2
    beta = v_n_times_s_sq_n / 2

    x = np.linspace(invgamma.ppf(1 / n, alpha, scale=beta),
                    invgamma.ppf(1 - 1 / n, alpha, scale=beta), n * 10)
    y = invgamma.pdf(x, alpha, scale=beta)

    return alpha, beta, m_n, k_n, x, y
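A hypothetical call (all prior values here are invented) evaluating the posterior over the variance of synthetic data:

import numpy as np

np.random.seed(1)
data = np.random.normal(0.0, 2.0, size=50)
alpha, beta, m_n, k_n, x, y = true_posterior(data, m_0=0.0, s_sq_0=1.0,
                                             v_0=1.0, k_0=1.0, n=100)
print(alpha, beta)  # invgamma parameters of the posterior for sigma**2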
Example #12
def main():
    """
    Main CLI handler.
    """

    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument("--version",
                        action="version",
                        version="%(prog)s " + __version__)
    parser.add_argument("alpha",
                        type=float,
                        default=3.00,
                        help="Alpha parameter value [default=%(default)s].")
    parser.add_argument("beta",
                        type=float,
                        default=0.001,
                        help="Beta parameter value [default=%(default)s].")
    parser.add_argument("-f",
                        "--input-format",
                        type=str,
                        default="newick",
                        choices=["nexus", "newick"],
                        help="Input data format (default='%(default)s')")

    args = parser.parse_args()
    args.output_prefix = None
    args.show_plot_on_screen = True
    a = args.alpha
    b = args.beta
    rv = invgamma(a, loc=0, scale=b)
    print("Mean: {}".format(rv.mean()))
    print("Variance: {}".format(rv.var()))
    fig, ax = plt.subplots(1, 1)
    x = np.linspace(invgamma.ppf(0.01, a, scale=b),
                    invgamma.ppf(0.99, a, scale=b), 100)
    ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
    ax.plot(x,
            invgamma.pdf(x, a, scale=b),
            'r-',
            lw=5,
            alpha=0.6,
            label='invgamma pdf')
    spdw.render_output(args, "InverseGamma")
Example #13
def large_sites_get_fit(energy_arr, n, lam, alpha, beta_grating, L, beta_arr,
                        bayesian_array_path):
    '''
    Parameters
    ----------
    energy_arr: array of possible energy_values for an nxn ising model, as
    calculated by get_energy_range(n)
    n: size of the nxn Ising model in any one dimension
    lam: lambda value for beta distribution
    alpha: alpha value for beta distribution
    L: length of beta array
    beta_arr: beta array
    bayesian_array_path: /PATH/TO/BAYESIAN/ARRAYS

    Returns:
    ----------
    arr: Un-normalized energy distribution
    p_1: bayesian array transformed to energy pdf
    p_beta: beta distribution
    '''

    p_1 = load_bayesian(n, energy_arr, bayesian_array_path)
    (r, c) = np.shape(p_1)
    print(np.shape(p_1))

    for i in range(len(energy_arr)):  # was 'energy_range', presumably a module global; energy_arr matches the docstring
        filename = "bayesian_array_n=" + str(n).zfill(2) + "_final_1.txt"
        arr = iter_loadtxt(filename, 'single', i, 0)
        for j in range(L):
            p_1[j][i] = arr[j]
    for row in range(r):
        Z = 0
        for col in range(c):
            Z += p_1[row][col]
        for col in range(c):
            p_1[row][col] = p_1[row][col] / Z

    p_beta = invgamma.pdf(beta_arr, alpha, loc=0, scale=lam)
    p_1 = np.transpose(p_1)
    arr = np.matmul(p_1, p_beta)

    return arr, p_1, p_beta
Example #14
def small_sites_get_fit(beta_arr, p_1, lam, alpha):
    '''
    Parameters
    -----------
    beta_arr: array of beta values
    p_1: bayesian array transformed to energy pdf
    lam: lambda value for beta distribution
    alpha: alpha value for beta distribution

    Returns:
    ----------
    arr: Un-normalized energy distribution
    p_1: bayesian array transformed to energy pdf
    p_beta: beta distribution
    '''

    p_beta = invgamma.pdf(beta_arr, alpha, loc=0, scale=lam)
    arr = np.matmul(p_1, p_beta)

    return arr, p_1, p_beta
Example #15
    def test_pdf(self):
        shapes = np.arange(0.01, 20, 0.5)
        scales = np.arange(0.01, 10, 0.5)
        xs = np.arange(0.001, 10, 0.5)

        test_params = cartesian([xs, shapes, scales]).astype(np.float64)

        python_results = np.zeros(test_params.shape[0])
        for ind in range(test_params.shape[0]):
            x = test_params[ind, 0]
            shape = test_params[ind, 1]
            scale = test_params[ind, 2]

            python_results[ind] = invgamma.pdf(x, shape, scale=scale)

        opencl_results = invgamma_pdf().evaluate(
            {
                'x': test_params[:, 0],
                'shape': test_params[:, 1],
                'scale': test_params[:, 2]
            }, test_params.shape[0])

        assert_allclose(opencl_results, python_results, atol=1e-7, rtol=1e-7)
Example #16
def inverseGammaVisualize(alpha, beta):
    x = np.linspace(invgamma.ppf(0.01, alpha, scale=beta),
                    invgamma.ppf(0.99, alpha, scale=beta), 100)
    plt.plot(x, invgamma.pdf(x, alpha, scale=beta),
             'r-', lw=5, alpha=0.6,
             label='IG density for alpha={0}, beta={1}'.format(alpha, beta))
    plt.legend()
    return plt.gca()
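Hypothetical usage (parameter values made up):

import matplotlib.pyplot as plt

ax = inverseGammaVisualize(3.0, 2.0)
plt.show()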
Example #17
def newProp(Roots, count, sigma, y, indata, n_feature, Ops, Op_weights,
            Op_type, beta, sigma_a, sigma_b):
    # number of components
    K = len(Roots)
    # the root to edit
    Root = copy.deepcopy(Roots[count])
    [oldRoot, Root, lnPointers, change, Q, Qinv, last_a, last_b,
     cnode] = Prop(Root, n_feature, Ops, Op_weights, Op_type, beta, sigma_a,
                   sigma_b)
    # print("change:",change)
    # display(genList(Root))
    # allcal(Root,train_data)
    sig = 4
    new_sigma = invgamma.rvs(sig)

    # matrix of outputs
    new_outputs = np.zeros((len(y), K))
    old_outputs = np.zeros((len(y), K))

    # auxiliary propose (the shrinkage and expansion branches were identical)
    if change in ('shrinkage', 'expansion'):
        [hratio, detjacob, new_sa2,
         new_sb2] = auxProp(change, oldRoot, Root, lnPointers, sigma_a,
                            sigma_b, last_a, last_b, cnode)
    else:  # no dimension jump
        # the parameters are updated as well
        [new_sa2, new_sb2] = auxProp(change, oldRoot, Root, lnPointers,
                                     sigma_a, sigma_b, last_a, last_b, cnode)

    for i in np.arange(K):
        if i == count:
            temp = allcal(Root, indata)
            temp.shape = (temp.shape[0])
            new_outputs[:, i] = temp
            temp = allcal(oldRoot, indata)
            temp.shape = (temp.shape[0])
            old_outputs[:, i] = temp
        else:
            temp = allcal(Roots[i], indata)
            temp.shape = (temp.shape[0])
            new_outputs[:, i] = temp
            old_outputs[:, i] = temp

    if np.linalg.matrix_rank(new_outputs) < K:
        Root = oldRoot
        return [False, sigma, copy.deepcopy(oldRoot), sigma_a, sigma_b]

    if change in ('shrinkage', 'expansion'):
        # contribution of f(y|S,Theta,x)
        yllstar = ylogLike(y, new_outputs, new_sigma)
        yll = ylogLike(y, old_outputs, sigma)
        log_yratio = yllstar - yll

        # contribution of f(Theta,S)
        strucl = fStruc(Root, n_feature, Ops, Op_weights, Op_type, beta,
                        new_sa2, new_sb2)
        struclstar = fStruc(oldRoot, n_feature, Ops, Op_weights, Op_type,
                            beta, sigma_a, sigma_b)
        sl = strucl[0] + strucl[1]
        slstar = struclstar[0] + struclstar[1]
        log_strucratio = slstar - sl  # struclstar / strucl

        # contribution of proposal Q and Qinv
        log_qratio = np.log(max(1e-5, Qinv / Q))

        # R (dimension jumps include the h ratio and Jacobian determinant)
        logR = log_yratio + log_strucratio + log_qratio + np.log(
            max(1e-5, hratio)) + np.log(max(1e-5, detjacob))
        logR = logR + np.log(invgamma.pdf(new_sigma, sig)) - np.log(
            invgamma.pdf(sigma, sig))

    else:  # no dimension jump
        # contribution of f(y|S,Theta,x)
        yllstar = ylogLike(y, new_outputs, new_sigma)
        yll = ylogLike(y, old_outputs, sigma)

        log_yratio = yllstar - yll
        # yratio = np.exp(yllstar-yll)

        # contribution of f(Theta,S)
        strucl = fStruc(Root, n_feature, Ops, Op_weights, Op_type, beta,
                        new_sa2, new_sb2)[0]
        struclstar = fStruc(oldRoot, n_feature, Ops, Op_weights, Op_type, beta,
                            sigma_a, sigma_b)[0]
        log_strucratio = struclstar - strucl

        # contribution of proposal Q and Qinv
        log_qratio = np.log(max(1e-5, Qinv / Q))

        # R
        logR = log_yratio + log_strucratio + log_qratio
        logR = logR + np.log(invgamma.pdf(new_sigma, sig)) - np.log(
            invgamma.pdf(sigma, sig))

    alpha = min(logR, 0)
    test = np.random.uniform(low=0, high=1, size=1)[0]
    if np.log(test) >= alpha:  # no accept
        # print("no accept")
        Root = oldRoot
        return [False, sigma, copy.deepcopy(oldRoot), sigma_a, sigma_b]
    else:
        # print("||||||accepted||||||")
        return [True, new_sigma, copy.deepcopy(Root), new_sa2, new_sb2]
Example #18
    def _munp(self, n, *args):
        # bound super() call: self must not be passed again
        return super(igamma_gen, self)._munp(n, *args)

#TODO: is this robust for differential entropy in this case? closed form or
#shortcuts in special?
    def _entropy(self, *args):
        def integ(x):
            val = self._pdf(x, *args)
            return val*log(val)

        entr = -integrate.quad(integ, self.a, self.b)[0]
        if not np.isnan(entr):
            return entr
        else:
            raise ValueError("Problem with integration.  Returned nan.")

igamma = igamma_gen(a=0.0, name='invgamma', longname="An inverted gamma",
            shapes = 'a,b', extradoc="""

Inverted gamma distribution

invgamma.pdf(x,a,b) = b**a*x**(-a-1)/gamma(a) * exp(-b/x)
for x > 0, a > 0, b>0.
""")


#NOTE: the above is unnecessary.  B takes the same role as the scale parameter
# in inverted gamma

palpha = np.random.gamma(400.,.005, size=10000)
print("First moment: %s\nSecond moment: %s" % (palpha.mean(),palpha.std()))
palpha = palpha[0]
Example #19
def newProp(Root, sigma, y, indata, n_feature, alpha1, alpha2, beta, sigma_a,
            sigma_b):

    [oldRoot, Root, lnPointers, change, Q, Qinv, last_a, last_b,
     cnode] = Prop(Root, n_feature, alpha1, alpha2, beta, sigma_a, sigma_b)
    new_sigma = invgamma.rvs(2)

    if change in ('shrinkage', 'expansion'):
        # the parameters are updated as well
        # contribution of h and determinant of the Jacobian
        [hratio, detjacob, new_sa2,
         new_sb2] = auxProp(change, oldRoot, Root, lnPointers, sigma_a,
                            sigma_b, last_a, last_b, cnode)
        #print("hratio:",hratio)
        #print("detjacob:",detjacob)

        # contribution of f(y|S,Theta,x)
        yllstar = ylogLike(y, indata, Root, new_sigma)
        yll = ylogLike(y, indata, oldRoot, sigma)
        log_yratio = yllstar - yll
        #print("log yratio:",log_yratio)

        # contribution of f(Theta,S)
        strucl = fStruc(Root, n_feature, alpha1, alpha2, beta, new_sa2,
                        new_sb2)
        struclstar = fStruc(oldRoot, n_feature, alpha1, alpha2, beta, sigma_a,
                            sigma_b)
        sl = strucl[0] + strucl[1]
        slstar = struclstar[0] + struclstar[1]
        log_strucratio = slstar - sl  #struclstar / strucl
        #print("log strucratio:",log_strucratio)

        # contribution of proposal Q and Qinv
        log_qratio = np.log(Qinv / Q)
        #print("log qratio:",log_qratio)

        # R (dimension jumps include the h ratio and Jacobian determinant)
        logR = log_yratio + log_strucratio + log_qratio + np.log(
            hratio) + np.log(detjacob)
        logR = logR + np.log(invgamma.pdf(new_sigma, 2)) - np.log(
            invgamma.pdf(sigma, 2))
        #print("logR:",logR)

    else:  # no dimension jump
        # the parameters are updated as well
        # contribution of fratio
        [new_sa2, new_sb2] = auxProp(change, oldRoot, Root, lnPointers,
                                     sigma_a, sigma_b, last_a, last_b, cnode)

        # contribution of f(y|S,Theta,x)
        #print("new sigma:",round(new_sigma,3))
        yllstar = ylogLike(y, indata, Root, new_sigma)
        #print("sigma:",round(sigma,3))
        yll = ylogLike(y, indata, oldRoot, sigma)

        log_yratio = yllstar - yll
        #print("log yratio:",log_yratio)
        #yratio = np.exp(yllstar-yll)

        # contribution of f(Theta,S)
        strucl = fStruc(Root, n_feature, alpha1, alpha2, beta, new_sa2,
                        new_sb2)[0]
        struclstar = fStruc(oldRoot, n_feature, alpha1, alpha2, beta, sigma_a,
                            sigma_b)[0]
        log_strucratio = struclstar - strucl
        #print("log strucratio:",log_strucratio)

        # contribution of proposal Q and Qinv
        log_qratio = np.log(Qinv / Q)
        #print("log qratio:",log_qratio)

        # R
        logR = log_yratio + log_strucratio + log_qratio
        logR = logR + np.log(invgamma.pdf(new_sigma, 2)) - np.log(
            invgamma.pdf(sigma, 2))
        #print("logR:",logR)

    alpha = min(logR, 0)
    test = np.random.uniform(low=0, high=1, size=1)[0]
    if np.log(test) >= alpha:  #no accept
        #print("no accept")
        Root = oldRoot
        return [False, sigma, copy.deepcopy(oldRoot), sigma_a, sigma_b]
    else:
        #print("||||||accepted||||||")
        return [True, new_sigma, copy.deepcopy(Root), new_sa2, new_sb2]
Example #20
def scaled_inverse_chi_squared_pdf(x, dof, scale):
    # The scaled inverse Chi-squared distribution with the provided params
    # is equal to an inverse-gamma distribution with the following parameters:
    ig_shape = dof / 2
    ig_scale = dof * scale / 2
    return invgamma.pdf(x, ig_shape, loc=0, scale=ig_scale)
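A small self-check of the parameter mapping (a sketch): the same density follows from transforming a chi-squared variable, X = dof*scale/Y with Y ~ chi2(dof); the test values are arbitrary:

import numpy as np
from scipy.stats import chi2

x, dof, scale = 1.7, 5.0, 0.8
direct = scaled_inverse_chi_squared_pdf(x, dof, scale)
via_chi2 = chi2.pdf(dof * scale / x, dof) * dof * scale / x**2
assert np.isclose(direct, via_chi2)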
Example #21
def auxProp(change,
            oldRoot,
            Root,
            lnPointers,
            sigma_a,
            sigma_b,
            last_a,
            last_b,
            cnode=None):
    # record the information for linear nodes other than the shrunk or expanded one
    odList = []  #list of orders of linear nodes
    Tree = genList(Root)

    for i in np.arange(0, len(Tree)):
        if Tree[i].operator == 'ln':
            odList.append(i)

    # sample new sigma_a2 and sigma_b2
    new_sa2 = invgamma.rvs(1)
    new_sb2 = invgamma.rvs(1)
    old_sa2 = sigma_a
    old_sb2 = sigma_b

    if change == 'shrinkage':
        prsv_aList = []
        prsv_bList = []
        cut_aList = []
        cut_bList = []
        # find the preserved a's
        for i in np.arange(0, len(lnPointers)):
            if lnPointers[i].operator == 'ln':  #still linear
                prsv_aList.append(last_a[i])
                prsv_bList.append(last_b[i])
            else:  #no longer linear
                cut_aList.append(last_a[i])
                cut_bList.append(last_b[i])
        # substitute those cut with the newly added ones if we cut and add
        for i in np.arange(0, len(odList) - len(prsv_aList)):
            prsv_aList.append(cut_aList[i])
            prsv_bList.append(cut_bList[i])

        n0 = len(prsv_aList)

        # sample auxiliary U
        UaList = []
        UbList = []
        for i in np.arange(0, n0):
            UaList.append(norm.rvs(loc=0, scale=np.sqrt(new_sa2)))
            UbList.append(norm.rvs(loc=0, scale=np.sqrt(new_sb2)))

        # generate inverse auxiliary U*
        NaList = []  #Theta* with a
        NbList = []  #Theta* with b
        NUaList = []  #U* with a
        NUbList = []  #U* with b
        for i in np.arange(0, n0):
            NaList.append(prsv_aList[i] + UaList[i])
            NbList.append(prsv_bList[i] + UbList[i])
            NUaList.append(prsv_aList[i] - UaList[i])
            NUbList.append(prsv_bList[i] - UbList[i])
        NUaList = NUaList + last_a
        NUbList = NUbList + last_b

        # hstar is h(U*|Theta*,S*,S^t) (computed in log form), corresponding
        # to the shorter parameter vector; h is h(U|Theta,S^t,S*),
        # corresponding to the longer one
        logh = 0
        loghstar = 0

        # contribution of new_sa2 and new_sb2
        logh += np.log(invgamma.pdf(new_sa2, 1))
        logh += np.log(invgamma.pdf(new_sb2, 1))
        loghstar += np.log(invgamma.pdf(old_sa2, 1))
        loghstar += np.log(invgamma.pdf(old_sb2, 1))

        for i in np.arange(0, len(UaList)):
            # contribution of UaList and UbList
            logh += np.log(norm.pdf(UaList[i], loc=0, scale=np.sqrt(new_sa2)))
            logh += np.log(norm.pdf(UbList[i], loc=0, scale=np.sqrt(new_sb2)))

        for i in np.arange(0, len(NUaList)):
            # contribution of NUaList and NUbList
            loghstar += np.log(
                norm.pdf(NUaList[i], loc=1, scale=np.sqrt(old_sa2)))
            loghstar += np.log(
                norm.pdf(NUbList[i], loc=0, scale=np.sqrt(old_sb2)))

        hratio = np.exp(loghstar - logh)
        #print("hratio:",hratio)

        # determinant of jacobian
        detjacob = np.power(2, 2 * len(prsv_aList))
        #print("detjacob:",detjacob)

        #### assign Theta* to the variables
        # new values of Theta
        # new sigma_a, sigma_b are directly returned
        for i in np.arange(0, len(odList)):
            Tree[odList[i]].a = NaList[i]
            Tree[odList[i]].b = NbList[i]

        return [hratio, detjacob, new_sa2, new_sb2]

    elif change == 'expansion':
        # sample new sigma_a2 and sigma_b2
        new_sa2 = invgamma.rvs(1)
        new_sb2 = invgamma.rvs(1)
        old_sa2 = sigma_a
        old_sb2 = sigma_b

        # lists of theta_0 and expanded ones
        # last_a is the list of all original a's
        # last_b is the list of all original b's
        odList = []
        for i in np.arange(0, len(Tree)):
            if Tree[i].operator == 'ln':
                odList.append(i)

        # sample auxiliary U
        UaList = []
        UbList = []
        for i in np.arange(0, len(last_a)):
            UaList.append(norm.rvs(loc=1, scale=np.sqrt(new_sa2)))
            UbList.append(norm.rvs(loc=0, scale=np.sqrt(new_sb2)))

        # generate inverse auxiliary U* and new para Theta*
        NaList = []  #Theta*_a
        NbList = []  #Theta*_b
        NUaList = []  #U*_a
        NUbList = []  #U*_b
        for i in np.arange(0, len(last_a)):
            NaList.append((last_a[i] + UaList[i]) / 2)
            NbList.append((last_b[i] + UbList[i]) / 2)
            NUaList.append((last_a[i] - UaList[i]) / 2)
            NUbList.append((last_b[i] - UbList[i]) / 2)

        # append newly generated a and b into NaList and NbList
        nn = len(odList) - len(last_a)  # number of newly added ln
        for i in np.arange(0, nn):
            u_a = norm.rvs(loc=1, scale=np.sqrt(new_sa2))
            u_b = norm.rvs(loc=0, scale=np.sqrt(new_sb2))
            NaList.append(u_a)
            NbList.append(u_b)

        # calculate h ratio
        # logh is h(U|Theta,S,S*) correspond to jump from short to long (new)
        # loghstar is h(Ustar|Theta*,S*,S) correspond to jump from long to short
        logh = 0
        loghstar = 0

        # contribution of sigma_ab
        logh += np.log(invgamma.pdf(new_sa2, 1))
        logh += np.log(invgamma.pdf(new_sb2, 1))
        loghstar += np.log(invgamma.pdf(old_sa2, 1))
        loghstar += np.log(invgamma.pdf(old_sb2, 1))

        # contribution of u_a, u_b
        # range end was 'nn' (a count, not an end index); the b-term used loc=1
        # and both terms were missing np.log, unlike every other contribution
        for i in np.arange(len(last_a), len(NaList)):
            logh += np.log(norm.pdf(NaList[i], loc=1, scale=np.sqrt(new_sa2)))
            logh += np.log(norm.pdf(NbList[i], loc=0, scale=np.sqrt(new_sb2)))

        # contribution of U_theta
        for i in np.arange(0, len(UaList)):
            logh += np.log(norm.pdf(UaList[i], loc=1, scale=np.sqrt(new_sa2)))
            logh += np.log(norm.pdf(UbList[i], loc=0, scale=np.sqrt(new_sb2)))

        for i in np.arange(0, len(NUaList)):
            loghstar += np.log(
                norm.pdf(NUaList[i], loc=0, scale=np.sqrt(old_sa2)))
            loghstar += np.log(
                norm.pdf(NUbList[i], loc=0, scale=np.sqrt(old_sb2)))

        # compute h ratio
        hratio = np.exp(loghstar - logh)

        # determinant of jacobian
        detjacob = 1 / np.power(2, 2 * len(last_a))

        #### assign Theta* to the variables
        # new values of sigma_a sigma_b
        # new values of Theta
        for i in np.arange(0, len(odList)):
            Tree[odList[i]].a = NaList[i]
            Tree[odList[i]].b = NbList[i]

        return [hratio, detjacob, new_sa2, new_sb2]

    else:  # same set of parameters
        # record the information for linear nodes other than the shrunk or expanded one
        odList = []  #list of orders of linear nodes
        Tree = genList(Root)

        old_sa2 = sigma_a
        old_sb2 = sigma_b

        for i in np.arange(0, len(Tree)):
            if Tree[i].operator == 'ln':
                odList.append(i)

        NaList = []
        NbList = []
        new_sa2 = invgamma.rvs(1)
        new_sb2 = invgamma.rvs(1)
        for i in np.arange(0, len(odList)):
            NaList.append(norm.rvs(loc=1, scale=np.sqrt(new_sa2)))
            NbList.append(norm.rvs(loc=0, scale=np.sqrt(new_sb2)))
        '''
        # log likelihood of parameters (including sigma_ab)
        logT = np.log(invgamma.pdf(old_sa2,1)) + np.log(invgamma.pdf(old_sb2,1))
        for i in np.arange(0,len(aList)):
            logT += np.log(norm.pdf(aList[i],loc=1,scale=np.sqrt(old_sa2)))
            logT += np.log(norm.pdf(bList[i],loc=0,scale=np.sqrt(old_sb2)))
        # log likelihood of new parameters
        logTstar = np.log(invgamma.pdf(new_sa2,1)) + np.log(invgamma.pdf(new_sb2,1))
        for i in np.arange(0,len(NaList)):
            logTstar += np.log(norm.pdf(NaList[i],loc=1,scale=np.sqrt(new_sa2)))
            logTstar += np.log(norm.pdf(NbList[i],loc=0,scale=np.sqrt(new_sb2)))
        
        fratio = np.exp(logT-logTstar)
        return fratio
        '''

        # new values of Theta
        for i in np.arange(0, len(odList)):
            Tree[odList[i]].a = NaList[i]
            Tree[odList[i]].b = NbList[i]

        return [new_sa2, new_sb2]
Example #22
 def pdf(self, x: float) -> float:
     # pass beta as the scale parameter (resolves the original TODO,
     # assuming the class stores it as self.beta)
     return float(invgamma.pdf(x, self.alpha, scale=self.beta))
Example #23
File: downtime.py Project: ribeiroale/ares
def downtime_accepted_models(D=list(), alpha=.05):
    params = list()
    params.append(uniform.fit(D))
    params.append(expon.fit(D))
    params.append(rayleigh.fit(D))
    params.append(weibull_min.fit(D))
    params.append(gamma.fit(D))
    params.append(gengamma.fit(D))
    params.append(invgamma.fit(D))
    params.append(gompertz.fit(D))
    params.append(lognorm.fit(D))
    params.append(exponweib.fit(D))

    llf_value = list()
    llf_value.append(log(product(uniform.pdf(D, *params[0]))))
    llf_value.append(log(product(expon.pdf(D, *params[1]))))
    llf_value.append(log(product(rayleigh.pdf(D, *params[2]))))
    llf_value.append(log(product(weibull_min.pdf(D, *params[3]))))
    llf_value.append(log(product(gamma.pdf(D, *params[4]))))
    llf_value.append(log(product(gengamma.pdf(D, *params[5]))))
    llf_value.append(log(product(invgamma.pdf(D, *params[6]))))
    llf_value.append(log(product(gompertz.pdf(D, *params[7]))))
    llf_value.append(log(product(lognorm.pdf(D, *params[8]))))
    llf_value.append(log(product(exponweib.pdf(D, *params[9]))))

    AIC = list()
    AIC.append(2 * len(params[0]) - 2 * llf_value[0])
    AIC.append(2 * len(params[1]) - 2 * llf_value[1])
    AIC.append(2 * len(params[2]) - 2 * llf_value[2])
    AIC.append(2 * len(params[3]) - 2 * llf_value[3])
    AIC.append(2 * len(params[4]) - 2 * llf_value[4])
    AIC.append(2 * len(params[5]) - 2 * llf_value[5])
    AIC.append(2 * len(params[6]) - 2 * llf_value[6])
    AIC.append(2 * len(params[7]) - 2 * llf_value[7])
    AIC.append(2 * len(params[8]) - 2 * llf_value[8])
    AIC.append(2 * len(params[9]) - 2 * llf_value[9])

    model = list()
    model.append(
        ["uniform", params[0],
         kstest(D, "uniform", params[0])[1], AIC[0]])
    model.append(
        ["expon", params[1],
         kstest(D, "expon", params[1])[1], AIC[1]])
    model.append(
        ["rayleigh", params[2],
         kstest(D, "rayleigh", params[2])[1], AIC[2]])
    model.append([
        "weibull_min", params[3],
        kstest(D, "weibull_min", params[3])[1], AIC[3]
    ])
    model.append(
        ["gamma", params[4],
         kstest(D, "gamma", params[4])[1], AIC[4]])
    model.append(
        ["gengamma", params[5],
         kstest(D, "gengamma", params[5])[1], AIC[5]])
    model.append(
        ["invgamma", params[6],
         kstest(D, "invgamma", params[6])[1], AIC[6]])
    model.append(
        ["gompertz", params[7],
         kstest(D, "gompertz", params[7])[1], AIC[7]])
    model.append(
        ["lognorm", params[8],
         kstest(D, "lognorm", params[8])[1], AIC[8]])
    model.append(
        ["exponweib", params[9],
         kstest(D, "exponweib", params[9])[1], AIC[9]])

    accepted_models = [i for i in model if i[2] > alpha]

    if accepted_models:
        aic_values = [i[3] for i in accepted_models]
        final_model = min(range(len(aic_values)), key=aic_values.__getitem__)
        return accepted_models, accepted_models[final_model]
    else:
        aic_values = [i[3] for i in model]
        final_model = min(range(len(aic_values)), key=aic_values.__getitem__)
        return model, model[final_model]
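The ten repeated fit/log-likelihood/AIC/K-S blocks can be collapsed into a loop; a sketch (assumes numpy as np and the same scipy.stats imports; the log-likelihood is computed as a sum of log-densities, which avoids the underflow that log(product(...)) invites):

import numpy as np

def downtime_accepted_models_compact(D, alpha=.05):
    dists = [uniform, expon, rayleigh, weibull_min, gamma,
             gengamma, invgamma, gompertz, lognorm, exponweib]
    model = []
    for dist in dists:
        p = dist.fit(D)
        llf = np.sum(dist.logpdf(D, *p))  # sum of logs, not log of product
        aic = 2 * len(p) - 2 * llf
        model.append([dist.name, p, kstest(D, dist.name, p)[1], aic])
    accepted = [m for m in model if m[2] > alpha]
    pool = accepted if accepted else model
    return pool, min(pool, key=lambda m: m[3])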
Example #24
def fit_and_plot(args):
    """
    Read in the TGAS parallax error data (or an already calculated KDE of the
    parallax error distribution) and "fit" the KDE with an inverse-gamma
    distribution. Fit quality is simply judged by eye!

    Parameters
    ----------

    args - command line arguments.
    """

    offset, alpha, beta = args['distroParams']
    tgasPlxErrPdfFile = 'TGAS-parallax-errors-pdf.csv'

    if path.isfile(tgasPlxErrPdfFile):
        errpdf = np.genfromtxt(tgasPlxErrPdfFile,
                               comments='#',
                               skip_header=1,
                               delimiter=',',
                               names=['err', 'logdens'],
                               dtype=None)
        x = errpdf['err'][:, np.newaxis]
        logdens = errpdf['logdens']
    else:
        tgasData = fits.open('TGAS-allPlxErrorsVsGmag.fits')[1].data
        eplx = tgasData['parallax_error'][:, np.newaxis]
        kde = KernelDensity(kernel='epanechnikov', bandwidth=0.008).fit(eplx)
        x = np.linspace(0, 1, 1001)[:, np.newaxis]
        logdens = kde.score_samples(x)
        f = open('TGAS-errors-kde.csv', mode='w')
        f.write('#err,logdens\n')
        for xx, yy in zip(x[:, 0], logdens):
            f.write("{0},{1}\n".format(xx, yy))
        f.close()

    xx = np.linspace(0.004, 1, 1000)
    ig = invgamma.pdf(xx, alpha, loc=offset, scale=beta)
    norm_const = invgamma.cdf(1.0, alpha, loc=offset, scale=beta)  # avoid shadowing scipy.stats.norm
    ig = ig / norm_const

    useagab(usetex=False, fontfam='sans', sroncolours=False)

    fig = plt.figure(figsize=(12, 10))
    ax = fig.add_subplot(111)
    apply_tufte(ax)
    ax.plot(x[:, 0],
            np.exp(logdens),
            label='KDE of $\\sigma_\\varpi$ distribution',
            lw=3)
    ax.plot(
        xx,
        ig,
        label="Fit with InvGamma($x-{0:.3f}|\\alpha={1:.3f}$, $\\beta={2:.3f})$"
        .format(offset, alpha, beta))
    ax.set_xlabel('$\\varpi$ [mas]')
    ax.set_ylabel('pdf')
    ax.legend(loc='upper right', fontsize=12)
    ax.set_title('Parallax error distribution for TGAS', fontsize=14)

    basename = 'FitOfParallaxErrorDistribution'
    if args['pdfOutput']:
        plt.savefig(basename + '.pdf')
    elif args['pngOutput']:
        plt.savefig(basename + '.png')
    else:
        plt.show()
Example #25

import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import invwishart, invgamma
x = np.linspace(0.01, 1, 100)
iw = invwishart.pdf(x, df=6, scale=1)
iw[:3]
# array([  1.20546865e-15,   5.42497807e-06,   4.45813929e-03])
ig = invgamma.pdf(x, 6/2., scale=1./2)
ig[:3]
# array([  1.20546865e-15,   5.42497807e-06,   4.45813929e-03])
plt.plot(x, iw)

# The input quantiles can be any shape of array, as long as the last
# axis labels the components.
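The arrays match because, in one dimension, an inverse-Wishart with df = 2a and scale = 2b has exactly the invgamma(a, scale=b) density; here a = 6/2 = 3 and b = 1/2.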
Example #26
def plot_report(train_x,
                inliers,
                outliers,
                p,
                threshold,
                title=None,
                xscale="linear",
                xaxis="score"):

    X = (train_x, inliers, outliers)
    # np.vectorize(max) only worked for ragged inputs; take per-array extrema instead
    max_x = max(np.max(arr) for arr in X)
    min_x = min(np.min(arr) for arr in X)
    min_x = max(min_x, 1e-2)

    # Plot the training scores
    n_bins = 150
    if xscale == "log":
        bins = np.logspace(np.log10(min_x), np.log10(max_x), n_bins)
    else:
        bins = np.linspace(min_x, max_x, n_bins)

    fig = plt.figure(figsize=(15, 15))
    ax1 = fig.add_axes([0.0, 0.5, 0.8, 0.4])
    ax2 = fig.add_axes([0.0, 0.1, 0.8, 0.4])
    ax3 = ax2.twinx()
    ax4 = ax1.twinx()

    def histogram(ax, X, color, density, label):
        # a multi-line lambda bound to a name reads better as a def
        return ax.hist(x=X, bins=bins, alpha=0.35, color=color,
                       histtype="barstacked", density=density,
                       stacked=True, label=label)
    histogram(ax1, train_x, "blue", True, "training data")
    histogram(ax2, inliers, "yellow", True, "inliers")
    histogram(ax3, outliers, "black", False, "outliers")

    # density of the fitted inverse-gamma ('gamma' in the original shadowed scipy.stats.gamma)
    ig_curve = invgamma.pdf(bins, *p)

    # A lot of hacky stuff here

    # this curve is a bit bugged, adjusting the height fixes it
    ax4.set_ylim(0, max(ig_curve))
    ax4.plot(bins, ig_curve)

    [ax.set_xscale(xscale) for ax in (ax1, ax2, ax3, ax4)]

    # Ignore this, the library is being obstinate
    miny, maxy = plt.ylim()
    ax1.vlines(threshold, miny, maxy, color="black")
    ax2.vlines(threshold, miny, maxy, color="black")
    ax1.legend(("training data", "threshold"), loc="upper right")
    ax2.legend(("inliers", "threshold"), loc="upper right")
    ax4.legend(["Gamma curve of best fit"], loc="center right")
    ax3.legend(["outliers"], loc="center right")
    plt.xlabel(xaxis)
    plt.ylabel("Density")
    plt.title(title)
    plt.show()
Example #27
from scipy.stats import invgamma
import numpy as np
import numpy.random as rd
import matplotlib.pyplot as plt
from math import pi
from scipy.special import i0

# number of data points
NUM = 100
"""Inverse-gamma distribution"""

fig, ax = plt.subplots(1, 1)
a = 4.07
mean, var, skew, kurt = invgamma.stats(a, moments='mvsk')
x = np.linspace(invgamma.ppf(0.01, a), invgamma.ppf(0.99, a), 100)
ax.plot(x, invgamma.pdf(x, a), 'r-', lw=5, alpha=0.6, label='invgamma pdf')
plt.hist(invgamma.rvs(a, size=NUM),  # size must be a keyword; the second positional arg is loc
         histtype='step',
         bins=500,
         range=(0, 10),
         density=True)  # 'normed' was removed in Matplotlib 3.x
plt.title("Inv-Gamma dist : Inv-Gam(x|a,b)")
plt.legend(loc="upper right")
filename = "invGamma.png"
plt.savefig(filename)
plt.show()
Example #28
    def _entropy(self, *args):
        def integ(x):
            val = self._pdf(x, *args)
            return val * log(val)

        entr = -integrate.quad(integ, self.a, self.b)[0]
        if not np.isnan(entr):
            return entr
        else:
            raise ValueError("Problem with integration.  Returned nan.")

igamma = igamma_gen(a=0.0,
                    name='invgamma',
                    longname="An inverted gamma",
                    shapes='a,b',
                    extradoc="""

Inverted gamma distribution

invgamma.pdf(x,a,b) = b**a*x**(-a-1)/gamma(a) * exp(-b/x)
for x > 0, a > 0, b>0.
""")

#NOTE: the above is unnecessary.  B takes the same role as the scale parameter
# in inverted gamma

palpha = np.random.gamma(400., .005, size=10000)
print("First moment: %s\nSecond moment: %s" % (palpha.mean(), palpha.std()))
palpha = palpha[0]

prho = np.random.beta(49.5, 49.5, size=100000)  # size must be an integer
Example #29
def do_gibbs_uninformative_prior():
    μ = 5  ####### GIBBS: initialize
    σ = 6  ####### GIBBS: initialize

    μs = [μ]
    σs = [σ]

    ITER = 20000  #*100
    BURN = 100
    PLOT_AT = [200 // 2]
    PLOT_NICE_AT = [0, 1, 2, 3, 4]
    md('### Running the Gibbs sampler, showing the successive conditional probabilities')
    for i in range(ITER // 2):  ####### GIBBS: loop

        # scipy normal distribution uses the standard deviation
        μ = norm.rvs(np.mean(X), σ / N**0.5)  ####### GIBBS: one step
        μs.append(μ)
        σs.append(σ)

        σ = invgamma.rvs(N / 2 - 1, 0,
                         np.sum(
                             (X - μ)**2) / 2)**0.5  ####### GIBBS: another step
        μs.append(μ)
        σs.append(σ)

        if i in PLOT_NICE_AT:
            minx = min(base_mean - 0.3, np.min(μs))
            maxx = max(base_mean + 0.3, np.max(μs))
            miny = min(base_stdev / 1.3, np.min(σs))
            maxy = max(base_stdev * 1.3, np.max(σs))
            ax = plt.subplot(3, 3, (4, 8))
            plt.plot(μs, σs, alpha=0.6)
            plt.scatter(μs, σs, marker='x')

            plt.subplot(3, 3, (1, 2), sharex=ax)
            x = np.linspace(minx, maxx, 151)
            plt.plot(x, norm.pdf(x, np.mean(X), σs[-3] / N**0.5))

            plt.subplot(3, 3, (6, 9), sharey=ax)
            x = np.linspace(miny, maxy, 151)
            plt.plot(
                invgamma.pdf(x**2, N / 2 - 1, 0,
                             np.sum((X - μs[-2])**2) / 2), x)

            ax.scatter(μs[-3:], σs[-3:], c='r')
            plt.show()
        if i in PLOT_AT:
            md('### Plotting after ' + str(i) + ' samples')
            plt.plot(μs, σs, alpha=0.2)
            plt.scatter(μs, σs, marker='.')
            plt.title('All samples')
            plt.show()

            plt.scatter(μs[BURN:], σs[BURN:], marker='x')
            plt.xlim(np.min(μs), np.max(μs))
            plt.ylim(np.min(σs), np.max(σs))
            plt.title('Samples after burn-in period')
            plt.show()

            plt.scatter(μs[BURN:], σs[BURN:], marker='x')
            plt.title('Samples after burn-in period (zoomed)')
            plt.show()

    print(
        'Estimating p(θ|X) as a histogram from samples (true posterior shown with lines)'
    )
    show_heatmap(μs[BURN:], σs[BURN:], bins=25)
    plot_true_distribution()
    plt.show()
    return obj_dic(locals())
Example #30
def target(X, y1, y2, y3, mu1, sig1, alpha, beta, a, b):
    # unnormalized posterior: likelihood times normal, inverse-gamma, and beta priors
    tar = (gsn_likelihood(X, y1, y2, y3) * norm.pdf(y1, loc=mu1, scale=sig1)
           * invgamma.pdf(y2, alpha, scale=beta) * betadis.pdf(y3, a, b))
    # print("tar=", gsn_likelihood(X, y1, y2, y3), norm.pdf(y1, loc=mu1, scale=sig1),
    #       invgamma.pdf(y2, alpha, scale=beta), betadis.pdf(y3, a, b))
    return tar