Example #1
def em_gmm_vect(xs, pis, mus, tol=0.01, max_iter=100):

    n, p = xs.shape
    k = len(pis)

    ll_old = 0
    for i in range(max_iter):

        # E-step
        ws = np.zeros((k, n))
        for j in range(k):
            ws[j, :] = pis[j] * mvn(mus[j]).pdf(xs)  # identity covariance (no cov argument is passed)
        ws /= ws.sum(0)

        # M-step
        pis = ws.sum(axis=1)
        pis /= n

        mus = np.dot(ws, xs)
        mus /= ws.sum(1)[:, None]

        # update complete log likelihood
        ll_new = 0
        for pi, mu in zip(pis, mus):
            ll_new += pi * mvn(mu).pdf(xs)
        ll_new = np.log(ll_new).sum()

        if np.abs(ll_new - ll_old) < tol:
            break
        ll_old = ll_new

    return ll_new, pis, mus
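A minimal driver for em_gmm_vect; a sketch, with illustrative synthetic data and initial guesses. Note that this vectorized variant never fits covariances: mvn(mus[j]) with no cov argument falls back to the identity, so only the weights and means are updated.

import numpy as np
from scipy.stats import multivariate_normal as mvn

np.random.seed(0)
# two well-separated unit-covariance clusters in 2-D (illustrative data)
xs = np.vstack([mvn([0, 0]).rvs(100), mvn([4, 4]).rvs(100)])
pis = np.array([0.5, 0.5])          # initial mixing weights
mus = np.random.random((2, 2))      # initial means
ll, pis_hat, mus_hat = em_gmm_vect(xs, pis, mus)
print(ll, pis_hat, mus_hat, sep="\n")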
Example #2
def test_multivariate():
    from scipy.stats import multivariate_normal as mvn
    from numpy.random import rand

    mean = 3
    var = 1.5

    assert near_equal(mvn(mean,var).pdf(0.5),
                      multivariate_gaussian(0.5, mean, var))

    mean = np.array([2.,17.])
    var = np.array([[10., 1.2], [1.2, 4.]])

    x = np.array([1,16])
    assert near_equal(mvn(mean,var).pdf(x),
                      multivariate_gaussian(x, mean, var))

    for i in range(100):
        x = np.array([rand(), rand()])
        assert near_equal(mvn(mean,var).pdf(x),
                          multivariate_gaussian(x, mean, var))

        assert near_equal(mvn(mean,var).pdf(x),
                          norm_pdf_multivariate(x, mean, var))


    mean = np.array([1,2,3,4])
    var = np.eye(4)*rand()

    x = np.array([2,3,4,5])

    assert near_equal(mvn(mean,var).pdf(x),
                      norm_pdf_multivariate(x, mean, var))
Example #3
def EM(X, m, epsilon):
    n, p = X.shape
    # dimension - 2d
    l = 2

    # initialize - initial guesses for parameters
    # mu: m*l matrix (m gaussians and 2 dimensions)
    mu = np.random.random((m, l))
    # sig: diagonal covariance entries for each of the m components => m*l matrix
    sig = np.random.random((m, l))
    # prior, evenly divided for initial value
    prior = np.ones(m) / m

    old_LL = np.inf
    max_iter = 100

    for it in range(max_iter):
        # E-step - get the responsibility of each component for each point
        E = np.zeros((m, n))
        for j in range(m):
            for i in range(n):
                E[j, i] = prior[j] * mvn(mu[j], sig[j]).pdf(X[i])
        # expected
        E = E / np.sum(E, axis=0)
        E = E.T

        # M-step - Create new mu, sig, prior vectors
        mu = []
        sigma = []
        prior = []
        for j in range(m):
            # sum by column
            rk = np.sum(E[:, j], axis=0)

            #update mu
            mu_e = np.sum(X * E[:, j].reshape(len(X), 1), axis=0)
            mu_e = mu_e / rk
            mu.append(mu_e)

            # update sigma
            result = (np.array(E[:, j]).reshape(len(X), 1) * (X - mu_e)).T
            sig_e = np.dot(result, (X - mu_e)) / rk
            sigma.append(sig_e)

            # update prior
            prior_e = rk / np.sum(E)
            prior.append(prior_e)

        # plot learned parameters
        plt_2dGaussians(X, mu, sigma, m)

        # update complete log likelihood
        sig = sigma  # carry the updated covariances into the next E-step
        new_LL = 0
        for i in range(n):
            s = 0
            for j in range(m):
                s += prior[j] * mvn(mu[j], sigma[j]).pdf(X[i])
            new_LL += np.log(s)
        if np.abs(new_LL - old_LL) < epsilon: break
        old_LL = new_LL

    return mu, sigma, prior
Example #4
def plotsampledGaussians3d(n, c):
    dimension = 2
    N = n
    cov = 1 * np.eye(dimension)
    mean = np.array([0, 0])

    samples = drawSamples(N, mean, cov, c)[0]

    #Create grid and multivariate normal
    x = np.linspace(-10, 10, 500)
    y = np.linspace(-10, 10, 500)
    X, Y = np.meshgrid(x, y)
    pos = np.empty(X.shape + (2, ))
    pos[:, :, 0] = X
    pos[:, :, 1] = Y
    rv = mvn(mean, cov)
    overlap = lambda pos: c * rv.pdf(pos)  # scaled copy of the same density
    #Make a 3D plot
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    ax.plot_surface(X, Y, rv.pdf(pos), cmap='viridis', linewidth=0)
    ax.plot_wireframe(X, Y, overlap(pos), cmap='viridis', linewidth=0.5)
    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')
    ax.set_zlabel('Z axis')
    plt.show()
Example #5
def em_gmm(X, pis, mus, cov, max_iter=30):
    rows= X.shape[0]
    k = len(pis)
    loglike = []
    for i in range(max_iter):
        
        ws = np.zeros((rows, k))
        for j in range(k):
            ws[:, j] = mvn(mus[j], cov[j]).pdf(X) * pis[j]
        ws = np.apply_along_axis(lambda x:x/np.sum(x), 1, ws)

        n_sum = np.sum(ws, axis=0)
        pis = n_sum/rows
        mus = np.transpose(np.dot(np.transpose(X), ws) / n_sum)

        for j in range(k):
            phis = ws[:,j]
            nk = n_sum[j]
            sigma = np.dot(((X - mus[j]).T * phis),(X - mus[j]))/ nk
            cov[j] = sigma

        tmp_ll=  0
        for pi, mu, sigma in zip(pis, mus, cov):
            tmp_ll += pi*mvn(mu, sigma).pdf(X)
        loglike.append(np.log(tmp_ll).sum())
        
    return (pis, mus, cov, loglike)
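A usage sketch for this full-covariance variant; the data and starting values below are illustrative only.

import numpy as np
from scipy.stats import multivariate_normal as mvn

np.random.seed(1)
X = np.vstack([mvn([0, 0]).rvs(150), mvn([5, 5]).rvs(150)])
pis = np.array([0.5, 0.5])
mus = np.random.random((2, 2))
cov = np.array([np.eye(2)] * 2)     # one full covariance matrix per component
pis_hat, mus_hat, cov_hat, loglike = em_gmm(X, pis, mus, cov)
print(loglike[-1])                  # log likelihood after the final iteration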
Example #6
 def __init__(self,
              mu_pos,
              sig_pos,
              p_pos,
              mu_neg,
              sig_neg,
              p_neg,
              alpha,
              n_pos,
              n_ul,
              batch_size=1024):
     self.components_pos = [
         mvn(mean=mu, cov=sig) for (mu, sig) in zip(mu_pos, sig_pos)
     ]
     self.components_neg = [
         mvn(mean=mu, cov=sig) for (mu, sig) in zip(mu_neg, sig_neg)
     ]
     dist_pos = mixture(self.components_pos, p_pos)
     dist_neg = mixture(self.components_neg, p_neg)
     super(MVNormalMixDG, self).__init__(dist_p=dist_pos,
                                         dist_n=dist_neg,
                                         alpha=alpha,
                                         n_p=n_pos,
                                         n_u=n_ul,
                                         batch_size=batch_size)
Example #8
def test_pdf(model, x, y, cov_matrix_0, cov_matrix_1):
    if model == 0:
        rv = mvn([10, 2], cov_matrix_0)
        f = rv.pdf([x, y])
    if model == 1:
        rv = mvn([2, 10], cov_matrix_1)
        f = rv.pdf([x, y])
    return f
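A quick illustrative check; the covariance matrices here are assumptions, and test_pdf raises UnboundLocalError for any model other than 0 or 1.

import numpy as np
from scipy.stats import multivariate_normal as mvn

cov0, cov1 = np.eye(2), 2 * np.eye(2)    # illustrative covariances
print(test_pdf(0, 10, 2, cov0, cov1))    # density of N([10, 2], cov0) at its mean
print(test_pdf(1, 2, 10, cov0, cov1))    # density of N([2, 10], cov1) at its mean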
Example #9
 def sample(self, mu):
     if self.local:
         # if local random walk
         s = mvn(mu, self.Sigma, allow_singular=True).rvs()
     else:
         # if global random walk
         s = mvn(self.mu, self.Sigma, allow_singular=True).rvs()
     return s
Example #10
def em_algorithm(df2,k,mus,sigmas,pis,eps,maxitr):
    """ This is the implementation of EM algorithm. it consists
        of two steps - Expectation and Maximization. It repeats
        (ll_new-ll_old) becomes less than epsilon value or reaches
        the maximum number of iterations"""
    data = df2.to_numpy()     # convert to numpy array
    n, d = data.shape    # get row and attribute count
    ll_old = 0

    for iteration in range(maxitr):
        ll_new = 0

        # Expectation-step
        ws = np.zeros((k, n))
        for i in range(k):
            for j in range(n):
                # posterior probability
                ws[i, j] = pis[i] * mvn(mus[i], sigmas[i]).pdf(data[j])
        ws /= ws.sum(0)

        # Maximization-step

        # re-estimate mus
        mus = np.zeros((k, d))
        for i in range(k):
            for j in range(n):
                mus[i] += ws[i, j] * data[j]
            mus[i] /= ws[i, :].sum()

        #   re-estimate sigmas
        sigmas = np.zeros((k, d, d))
        for i in range(k):
            for j in range(n):
                ys = np.reshape(data[j] - mus[i], (d, 1))
                sigmas[i] += ws[i, j] * np.dot(ys, ys.T)
            sigmas[i] /= ws[i, :].sum()

        # re-estimate pis
        pis = np.zeros(k)
        for i in range(k):
            for j in range(n):
                pis[i] += ws[i, j]
        pis /= n

        # convergence test
        ll_new = 0.0
        for i in range(k):
            s = 0
            for j in range(n):
                s += pis[i] * mvn(mus[i], sigmas[i]).pdf(data[j])
            ll_new += np.log(s)

        if np.abs(ll_new - ll_old) < eps:   # if less than epsilon then stop iteration
            break

        ll_old = ll_new

    return ll_new, mus, sigmas, pis, iteration, ws
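A sketch of driving em_algorithm with a pandas DataFrame; the data and initial guesses are illustrative.

import numpy as np
import pandas as pd
from scipy.stats import multivariate_normal as mvn

np.random.seed(2)
raw = np.vstack([mvn([0, 0]).rvs(100), mvn([6, 6]).rvs(100)])
df2 = pd.DataFrame(raw)
mus0 = np.random.random((2, 2))
sigmas0 = np.array([np.eye(2)] * 2)
pis0 = np.array([0.5, 0.5])
ll, mus, sigmas, pis, it, ws = em_algorithm(df2, 2, mus0, sigmas0, pis0, 0.01, 50)
print(it, ll)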
Example #11
def gp_sampling(num_samples,
                num_agents,
                pred_len,
                gp_pred_x,
                gp_pred_x_cov,
                gp_pred_y,
                gp_pred_y_cov,
                samples_x,
                samples_y,
                include_mean=True):
    """
    generate samples from gp posteriors
    :param num_samples: number of samples for each agent
    :param include_mean: whether include gp mean as a sample
    :return: generated samples
    """
    time_seed = int(time.time() * 1000) % 1000
    print("random seed: ", time_seed)
    np.random.seed(time_seed)

    if include_mean is True:
        print("GP mean included as sample")
    else:
        print("GP mean NOT included as sample")
    samples_x = np.zeros((num_agents * num_samples, pred_len))
    samples_y = np.zeros((num_agents * num_samples, pred_len))
    # note: np.float128 is platform-dependent (not available on Windows)
    pdf_x = np.zeros((num_agents, num_samples), dtype=np.float128)
    pdf_y = np.zeros((num_agents, num_samples), dtype=np.float128)
    for i in range(num_agents):
        if pred_len > 1:
            rv_x = mvn(mean=gp_pred_x[i], cov=gp_pred_x_cov[i])
            samples_x[i * num_samples:(i + 1) *
                      num_samples] = rv_x.rvs(size=num_samples).copy()
            rv_y = mvn(mean=gp_pred_y[i], cov=gp_pred_y_cov[i])
            samples_y[i * num_samples:(i + 1) *
                      num_samples] = rv_y.rvs(size=num_samples).copy()
        else:
            rv_x = mvn(mean=gp_pred_x[i], cov=gp_pred_x_cov[i])
            samples_x[i * num_samples:(i + 1) *
                      num_samples] = rv_x.rvs(size=num_samples).copy()[:, None]
            rv_y = mvn(mean=gp_pred_y[i], cov=gp_pred_y_cov[i])
            samples_y[i * num_samples:(i + 1) *
                      num_samples] = rv_y.rvs(size=num_samples).copy()[:, None]
        if include_mean:
            # if include gp mean as sample, replace the first sample as gp mean
            samples_x[i * num_samples] = gp_pred_x[i]
            samples_y[i * num_samples] = gp_pred_y[i]
        pdf_x[i] = rv_x.pdf(samples_x[i * num_samples:(i + 1) * num_samples])
        pdf_y[i] = rv_y.pdf(samples_y[i * num_samples:(i + 1) * num_samples])
    scale = np.max(pdf_x[0])
    pdf_x /= scale
    pdf_y /= scale
    # print("sample_x", samples_x)
    # print("sample_y", samples_y)

    samples_pdf = pdf_x * pdf_y
    return samples_x, samples_y, samples_pdf
Example #12
    def __init__(self, mean1, mean2, cov1, cov2, prob1, prob2):

        self.mean1 = mean1
        self.mean2 = mean2
        self.cov1 = cov1
        self.cov2 = cov2
        self.prob1 = prob1
        self.prob2 = prob2
        self.gauss1 = mvn(mean=self.mean1, cov=self.cov1)
        self.gauss2 = mvn(mean=self.mean2, cov=self.cov2)
Example #13
def em(df, k, eps):
    # init some values
    (mus, sigmas, probs) = em_init(df, k)

    n = len(df)
    ll_old = 0

    i = 0
    diff = 1
    while diff > eps and i < 1000000:
        ws = np.zeros((k, n))

        # for each cluster get posterior probability
        for j in range(k):
            ws[j, :] = probs[j] * mvn(mus[j], sigmas[j]).pdf(df.loc[:,0:3])
        ws /= ws.sum(0)

        #print(f'ws: {ws[0,:]}')
        #print(f'sums: {ws.sum(axis=1)}')
        # update probabilities
        probs = ws.sum(axis=1)
        probs /= n

        # update means
        mus = np.dot(ws, df.loc[:,0:3])
        mus /= ws.sum(1)[:, None]

        #print(mus)
        # update sigmas
        sigmas = np.zeros((k, 4, 4))

        for j in range(k):
            # get values from data frame, subtract mean values and convert to numpy array
            ys = (df.loc[:,0:3] - mus[j, :]).to_numpy()

            # Calculate sigmas using batched matrix multiplication (mm);
            # this may emit a deprecation warning
            sigmas[j] = (ws[j, :, None, None] * mm(ys[:, :, None], ys[:, None, :])).sum(axis=0)
        sigmas /= ws.sum(axis=1)[:, None, None]

        # init temporary log likelihood variable
        ll_new = 0

        # calculate the mixture density at each point
        for p, mu, sigma in zip(probs, mus, sigmas):
            ll_new += p * mvn(mu, sigma).pdf(df.loc[:,0:3].to_numpy())

        ll_new = np.log(ll_new).sum()

        diff = np.abs(ll_new - ll_old)
        ll_old = ll_new

        # increment counter
        i += 1

    return diff, ll_new, probs, mus, sigmas, i, ws
Example #14
def EM_algo(x, pis, mus, sigmas, tol=0.0001, maxiter=100):
    # n denotes observation count, p denotes observation dimension
    n, p = x.shape
    # k denotes the number of normal distributions
    k = len(pis)
    # ll_old denotes the old log likelihood
    ll_old = 0
    iter_count = 0
    mu_vector = []
    mu_vector.append(mus)
    for i in range(maxiter):
        iter_count += 1
        # E-step
        ts = np.zeros((k, n))
        for j in range(k):
            for i in range(n):
                ts[j, i] = pis[j] * mvn(mus[j], sigmas[j]).pdf(x[i])
        ts /= ts.sum(0)

        # M-step
        # update pi
        pis = np.zeros(k)
        for j in range(k):
            for i in range(n):
                pis[j] += ts[j, i]
        pis /= n

        # update mu
        mus = np.zeros((k, p))
        for j in range(k):
            for i in range(n):
                mus[j] += ts[j, i] * x[i]
            mus[j] /= ts[j, :].sum()
        mu_vector.append(mus)
        # update sigma
        sigmas = np.zeros((k, p, p))
        for j in range(k):
            for i in range(n):
                ys = np.reshape(x[i] - mus[j], (p, 1))
                sigmas[j] += ts[j, i] * np.dot(ys, ys.T)
            sigmas[j] /= ts[j, :].sum()

        ll_new = 0.0
        for i in range(n):
            s = 0
            for j in range(k):
                s += pis[j] * mvn(mus[j], sigmas[j]).pdf(x[i])
            ll_new += np.log(s)

        if np.abs(ll_new - ll_old) < tol:
            break
        ll_old = ll_new
    mu_vector = np.array(mu_vector)
    # print(iter_count, mus)
    return iter_count, mu_vector, pis, mus, sigmas
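An illustrative run of EM_algo; mu_vector collects the trajectory of the means across iterations.

import numpy as np
from scipy.stats import multivariate_normal as mvn

np.random.seed(4)
x = np.vstack([mvn([0, 0]).rvs(100), mvn([4, 0]).rvs(100)])
pis = np.array([0.5, 0.5])
mus = np.random.random((2, 2))
sigmas = np.array([np.eye(2)] * 2)
n_iter, mu_traj, pis_hat, mus_hat, sigmas_hat = EM_algo(x, pis, mus, sigmas)
print(n_iter, mus_hat)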
Example #15
def gen_six_figures(n: int, m: int, V: float):
    """
    This function generates the following 6 figures:
        - Random point cloud in R3
        - circle
        - sphere
        - 3 clusters
        - 3 clusters each with 3 clusters
        - torus

    :param n: number of data points in each figure
    :param m: number of examples per figure
    :param V: Variance parameter for the error sampling
    :return: np.array with the data points dim: [V, m, n, 3
    """

    ## random circle centered at 0.5
    circ_points = mvn(mean=[0, 0]).rvs(size=(m, n))
    error = mvn(cov=np.diag([V**2, V**2])).rvs(size=(m, n))
    for i in range(m):
        norms_points = np.linalg.norm(circ_points[i, :, :], axis=1)
        circ_points[i, :, :] = circ_points[i, :, :] / (2 *
                                                       norms_points[:, None])
    circ_points = circ_points + error
    circ_points = circ_points + np.array([0.5, 0.5])

    ## random sphere centered at [0.5, 0.5, 0.5]
    sphere_points = mvn(mean=[0, 0, 0]).rvs(size=(m, n))
    error = mvn(cov=np.diag([V**2, V**2, V**2])).rvs(size=(m, n))
    for i in range(m):
        norms_points = np.linalg.norm(sphere_points[i, :, :], axis=1)
        sphere_points[
            i, :, :] = sphere_points[i, :, :] / (2 * norms_points[:, None])
    sphere_points = sphere_points + error
    sphere_points = sphere_points + np.array([0.5, 0.5, 0.5])

    ## sample points for the Torus centered at 0.5, 0.5 0.5
    # R=0.5, r=0.25
    theta = np.random.uniform(0, 2 * np.pi, size=(m, n))
    phi = np.random.uniform(0, 2 * np.pi, size=(m, n))
    R, r = 0.5, 0.25
    x = (R + r * np.cos(theta)) * np.cos(phi)
    y = (R + r * np.cos(theta)) * np.sin(phi)
    z = r * np.sin(theta)
    error = mvn(cov=np.diag([V**2, V**2, V**2])).rvs(size=(m, n))
    torus_points = np.transpose(np.array([x, y, z]), (1, 2, 0)) + error
    torus_points = torus_points + np.array([0.5, 0.5, 0.5])

    toy_shapes = {
        "circle": circ_points,
        "sphere": sphere_points,
        "torus": torus_points
    }
    return toy_shapes
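A usage sketch with illustrative parameter values; each returned array holds m example point clouds of n points.

import numpy as np
from scipy.stats import multivariate_normal as mvn

np.random.seed(6)
shapes = gen_six_figures(n=200, m=5, V=0.05)
print(shapes["circle"].shape)   # (5, 200, 2)
print(shapes["sphere"].shape)   # (5, 200, 3)
print(shapes["torus"].shape)    # (5, 200, 3)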
Example #16
def gdk(data: np.ndarray):

    # Returns array of gaussian kernel densities
    mus = data
    N = len(mus)

    lambdas = np.logspace(-6, 1, 21)  # Standard deviations
    # Performs leave-one-out cross-validation to find best lambdas
    ps = np.zeros_like(lambdas)
    for k, l in enumerate(lambdas):
        print("lambda: %.4f" % l)
        for i in range(N):
            s = np.zeros(N)
            for j in range(N):
                if i == j:
                    continue
                m = mvn(mean=mus[j],
                        cov=l**2 * np.diag(np.ones(data.shape[1])))
                s[j] = m.pdf(data[i])
            s = s.sum() / (N - 1)
            s = np.log(s)
            ps[k] += s
    ps /= N

    l = lambdas[np.argmax(ps)]
    print("Best lambda (std): %.4f" % l)

    print(ps)
    plt.semilogx(lambdas, ps, "ro-")
    plt.title("GDK Log Likelyhood")
    plt.xlabel(r"$\log_{10}(\lambda)$")
    plt.ylabel(r"$\log (P(X))$")
    plt.grid(True)
    plt.show()

    densities = np.zeros(N)
    for i in range(N):
        # leave-one-out density of point i under the selected bandwidth
        s = np.zeros(N)
        for j in range(N):
            if i == j:
                continue
            m = mvn(mean=mus[j], cov=l**2 * np.diag(np.ones(data.shape[1])))
            s[j] = m.pdf(data[i])
        densities[i] = s.sum() / (N - 1)

    densities.sort()
    print(densities)
    plt.bar(np.linspace(1, N, N), densities)
    plt.semilogy()
    plt.title("Gaussian Kernel Density Estimation")
    plt.xlabel("Observation")
    plt.ylabel("GKD")
    plt.show()
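An illustrative run of gdk, which relies on module-level np, mvn and plt. Keep N modest, since the cross-validation loops are O(N^2) per bandwidth, and expect log-of-zero warnings at the smallest bandwidths.

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal as mvn

np.random.seed(5)
gdk(np.random.randn(40, 2))     # 40 points in 2-D (illustrative data)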
Example #17
def test_pdf(model, x, y, cov_matrix_0, cov_matrix_1):
    if model == 0:
        rv = mvn([5, 5], cov_matrix_0)
        f = np.log(5) + rv.logpdf([x, y])
    if model == 1:
        rv = mvn([40, 40], cov_matrix_1)
        f = rv.logpdf([x, y])
#    if model == 2:
#        rv = mvn([20, 20], cov_matrix_1)
#        f = rv.logpdf([x, y])
    return f
Example #18
def create_xor_data(N):
    # np.random.seed(234)
    np.random.seed(0)  # note: a bare np.random.RandomState(0) call would not seed the global RNG
    C = 0.01*np.eye(2)
    Gs = [mvn(mean=[0.5,0.5], cov=C),
          mvn(mean=[-0.5,-0.5], cov=C),
          mvn(mean=[0.5,-0.5], cov=C),
          mvn(mean=[-0.5,0.5], cov=C)]
    X = np.concatenate([G.rvs(size=N) for G in Gs])
    y = np.concatenate((np.zeros(2*N), np.ones(2*N)))
    return X,y
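A sketch of visualizing the XOR layout this produces; the imports are included because the snippet assumes module-level np and mvn.

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal as mvn

X, y = create_xor_data(50)      # 4 blobs of 50 points each, two XOR classes
plt.scatter(X[:, 0], X[:, 1], c=y)
plt.show()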
Example #19
def Estep():
    #print("cov:",np.shape(cov),"mu:", np.shape(mu),"weights_pi:", np.shape(weight_pi))
    prob = np.zeros((clusters, size))
    for j in range(clusters):
        for i in range(size):
            #print(weight_pi[j])
            #print(mvn(mu[j], cov[j]).pdf(points_arr[i]))
            prob[j, i] = weight_pi[j] * mvn(mu[j], cov[j]).pdf(points_arr[i])
    #print(prob)
    prob /= prob.sum(0)
    #print("Prob:",prob)
    return prob
Example #20
def em_gmm_orig(xs, pis, mus, sigmas, tol=0.01, max_iter=300):
    #    data, label = xs
    #    xs = np.asarray(data)
    n, p = xs.shape
    k = len(pis)

    ll_old = 0
    for l in range(max_iter):

        # E-step
        ws = np.zeros((k, n))
        for j in range(len(mus)):
            for i in range(n):
                ws[j, i] = pis[j] * mvn(mus[j], sigmas[j]).pdf(xs[i])
        ws /= ws.sum(0)

        # M-step
        pis = np.zeros(k)
        for j in range(len(mus)):
            for i in range(n):
                pis[j] += ws[j, i]
        pis /= n

        mus = np.zeros((k, p))
        for j in range(k):
            for i in range(n):
                mus[j] += ws[j, i] * xs[i]
            mus[j] /= ws[j, :].sum()
        print(l, ' ', mus)
        sigmas = np.zeros((k, p, p))
        for j in range(k):
            for i in range(n):
                ys = np.reshape(xs[i] - mus[j], (p, 1))
                sigmas[j] += ws[j, i] * np.dot(ys, ys.T)
            sigmas[j] /= ws[j, :].sum()

        # update complete log likelihood
        ll_new = 0.0
        for i in range(n):
            s = 0
            for j in range(k):
                s += pis[j] * mvn(mus[j], sigmas[j]).pdf(xs[i])
            ll_new += np.log(s)

        if np.abs(ll_new - ll_old) < tol:
            break
        ll_old = ll_new

    return ll_new, pis, mus, sigmas
Example #21
def EM_MOG(x, fi, mu, sigma, tol=0.01, max_iter=100):
    # n: number of samples, d: dimension, k: number of Gaussian components
    n, d = x.shape
    k = len(fi)

    likelihood_old = 0
    iter = 0
    for i in range(max_iter):
        iter += 1
        likelihood_new = 0
        
        # E step:
        # k Gaussians, n samples; w[j, i] is the probability that sample i
        # was generated by Gaussian j:
        # w[j, i] = p(z(i) = j | x(i)) = p(x(i) | z(i)) * p(z(i)) / p(x(i))
        w = np.zeros((k,n))
        for j in range(k):
            for i in range(n):
                w[j,i] = fi[j] * mvn(mu[j],sigma[j]).pdf(x[i])
        w /= w.sum(0)
        
        
        #M step:
        fi = np.zeros(k)
        mu = np.zeros((k,d))
        sigma = np.zeros((k,d,d))
        for j in range(k):
            for i in range(n):
                fi[j] += w[j,i]
                mu[j] += w[j,i]*x[i]
            mu[j] /= w[j,:].sum()
        fi /= n
        
        for j in range(k):
            for i in range(n):
                ys = np.reshape(x[i] - mu[j], (d, 1))
                sigma[j] += w[j,i] * np.dot(ys,ys.T)
            sigma[j] /= w[j,:].sum()
        
        #update log likelihood
        for i in range(n):
            s = 0
            for j in range(k):
                s += fi[j] * mvn(mu[j],sigma[j]).pdf(x[i])
            likelihood_new += np.log(s)
        
        if np.abs(likelihood_new - likelihood_old) < tol:
            break
        likelihood_old = likelihood_new
    
    print "iteration count:", iter    
    
    return likelihood_new,fi,mu,sigma      
Example #22
def em_gmm(data, means, covs, pi, tol, max_iter):
    ll_old = 0.0
    [num_points, dim] = data.shape

    for l in range(max_iter):

        ### E step #####
        #### This part is for evaluating the current responsibilities

        points_classes = np.zeros((K, num_points))
        for i in range(K):
            for j in range(num_points):
                #print(pi[i])
                #print(data[j][0])
                points_classes[i][j] = pi[i] * mvn(means[i], covs[i]).pdf(
                    data[j])
        #print(points_classes[0][500])
        points_classes /= points_classes.sum(0)
        #print(points_classes[0][100])
        #print(points_classes[0][0])

        #### M step ############
        pi = np.zeros(K)
        for i in range(K):
            for j in range(num_points):
                pi[i] = pi[i] + points_classes[i][j]
        pi /= num_points

        means = np.zeros((K, dim))
        for i in range(K):
            for j in range(num_points):
                means[i] = means[i] + points_classes[i][j] * data[j]
            means[i] /= points_classes[i][:].sum()
            #print(means)

        covs = np.zeros((K, dim, dim))
        for i in range(K):
            for j in range(num_points):
                ys = np.reshape(data[j] - means[i], (dim, 1))
                covs[i] = covs[i] + points_classes[i][j] * np.dot(ys, ys.T)
            covs[i] /= points_classes[i][:].sum()

        ll_new = 0.0
        for i in range(num_points):
            s = 0
            for j in range(K):
                s = s + pi[j] * mvn(means[j], covs[j]).pdf(data[i])
            ll_new = ll_new + np.log(s)
        if np.abs(ll_new - ll_old) < tol:
            return pi, means, covs, ll_new
        ll_old = ll_new
    return pi, means, covs, ll_new
Example #23
    def compute_messages(self, x, obs_slice=None):
        N = x.shape[0]
        alpha = np.zeros((N, self.K))
        beta = np.zeros((N, self.K))
        gamma = np.zeros((N, self.K))
        zeta = np.zeros((N, self.K, self.K))
        B = np.zeros((N, self.K))
        c = np.zeros(N)

        #compute emission probabilities
        if obs_slice is not None:
            means_slice, covariances_slice = self.get_marginal(obs_slice)
            for k in range(self.K):
                B[:, k] = mvn(mean=means_slice[k],
                              cov=covariances_slice[k]).pdf(x)
        else:
            for k in range(self.K):
                B[:, k] = mvn(mean=self.means_[k],
                              cov=self.covariances_[k]).pdf(x)

        #compute alpha
        alpha[0, :] = self.weights_ * B[0, :]
        c[0] = 1. / (np.sum(alpha[0]) + realmin)
        alpha[0] *= c[0]
        for n in range(1, N):
            alpha[n, :] = B[n, :] * np.dot(alpha[n - 1, :], self.Trans_)
            c[n] = 1. / (np.sum(alpha[n]) + realmin)
            alpha[n] *= c[n]

        #compute beta
        beta[-1, :] = np.ones(self.K) * c[-1]
        for n in range(N - 1, 0, -1):
            beta[n - 1, :] = np.dot(beta[n, :] * B[n, :], self.Trans_.T)
            beta[n - 1, :] = (beta[n - 1, :] * c[n - 1])
            for i in range(len(beta[n - 1, :])):
                beta[n - 1, i] = np.min([beta[n - 1, i], realmax])

        #compute likelihood
        L = -np.sum(np.log(c))

        #compute gamma
        for n in range(N):
            gamma[n, :] = alpha[n, :] * beta[n, :] / (
                np.sum(alpha[n, :] * beta[n, :]) + realmin)

        #compute zeta
        for n in range(N - 1):
            for k in range(self.K):
                zeta[n][k, :] = alpha[n, k] * B[n + 1, :] * beta[
                    n + 1, :] * self.Trans_[k, :]

        return B, alpha, beta, gamma, zeta, L, c
Example #24
def em_gmm_orig(xs, pis, mus, sigmas, tol=0.01, max_iter=100):

    n, p = xs.shape
    k = len(pis)

    ll_old = 0
    for i in range(max_iter):

        # E-step
        ws = np.zeros((k, n))
        for j in range(len(mus)):
            for i in range(n):
                ws[j, i] = pis[j] * mvn(mus[j], sigmas[j]).pdf(xs[i])
        ws /= ws.sum(0)

        # M-step
        pis = np.zeros(k)
        for j in range(len(mus)):
            for i in range(n):
                pis[j] += ws[j, i]
        pis /= n

        mus = np.zeros((k, p))
        for j in range(k):
            for i in range(n):
                mus[j] += ws[j, i] * xs[i]
            mus[j] /= ws[j, :].sum()

        sigmas = np.zeros((k, p, p))
        for j in range(k):
            for i in range(n):
                ys = np.reshape(xs[i] - mus[j], (p, 1))
                sigmas[j] += ws[j, i] * np.dot(ys, ys.T)
            sigmas[j] /= ws[j,:].sum()

        # update complete log likelihood
        ll_new = 0.0
        for i in range(n):
            s = 0
            for j in range(k):
                s += pis[j] * mvn(mus[j], sigmas[j]).pdf(xs[i])
            ll_new += np.log(s)

        if np.abs(ll_new - ll_old) < tol:
            break
        ll_old = ll_new

    return ll_new,ws, pis, mus, sigmas
Example #25
def create_data(N):
    np.random.seed(234)
    # np.random.RandomState(0)
    C = 0.05*np.eye(2)
    Gs = [mvn(mean=[0.5, 0.5], cov=C),
          mvn(mean=[-0.5, -0.5], cov=C),
          mvn(mean=[0.5, -0.5], cov=C),
          mvn(mean=[-0.5, 0.5], cov=C),
          mvn(mean=[0, 0], cov=C)]
    X = np.concatenate([G.rvs(size=N) for G in Gs])
    y = np.concatenate((1*np.ones(N), 1*np.ones(N),
                        2*np.ones(N), 2*np.ones(N),
                        3*np.ones(N)))
    return X, y
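An illustrative call; a generator like this plausibly supplies the xs consumed by the em_gmm_orig driver in Example #39.

import numpy as np
from scipy.stats import multivariate_normal as mvn

X, y = create_data(100)         # 5 blobs of 100 points, labels 1/2/3
print(X.shape, np.unique(y))    # (500, 2) [1. 2. 3.]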
Example #26
 def create_cluster(centers, n=n, v=0.05):  # default n is captured from the enclosing scope
     a = max(np.random.choice(np.arange(0, int(0.45 * n))), int(0.1 * n))
     b = max(np.random.choice(np.arange(0, int(0.45 * (n - a)))),
             int(0.1 * (n - a)))
     c = n - a - b
     cluster_1 = mvn(mean=centers[0, :], cov=np.diag([v**2, v**2,
                                                      v**2])).rvs(size=a)
     cluster_2 = mvn(mean=centers[1, :], cov=np.diag([v**2, v**2,
                                                      v**2])).rvs(size=b)
     cluster_3 = mvn(mean=centers[2, :], cov=np.diag([v**2, v**2,
                                                      v**2])).rvs(size=c)
     clusters = np.vstack((cluster_1, cluster_2, cluster_3))
     np.random.shuffle(clusters)
     return clusters
Example #27
def em_gmm(xs, pis, mus, sigmas, tol=0.01, max_iter=100):

    n, p = xs.shape
    k = len(pis)

    ll_old = 0
    for idx in range(1, max_iter):
        # E-step
        print("第%d次EM算法迭代" % idx)
        ws = np.zeros((k, n))
        for j in range(k):
            for i in range(n):
                density = pis[j] * mvn(mus[j], sigmas[j]).pdf(xs[i])
                ws[j, i] = density
        ws /= ws.sum(0)

        # M-step
        pis = np.zeros(k)
        for j in range(k):
            for i in range(n):
                pis[j] += ws[j, i]
        pis /= n

        mus = np.zeros((k, p))
        for j in range(k):
            for i in range(n):
                mus[j] += ws[j, i] * xs[i]
            mus[j] /= ws[j, :].sum()

        sigmas = np.zeros((k, p, p))
        for j in range(k):
            for i in range(n):
                ys = np.reshape(xs[i] - mus[j], (p, 1))
                sigmas[j] += ws[j, i] * np.dot(ys, ys.T)
            sigmas[j] /= ws[j, :].sum()

        # update complete log likelihood
        ll_new = 0.0
        for i in range(n):
            s = 0
            for j in range(k):
                s += pis[j] * mvn(mus[j], sigmas[j]).pdf(xs[i])
            ll_new += np.log(s)

        if np.abs(ll_new - ll_old) < tol:
            break
        ll_old = ll_new

    return ws, pis, mus, sigmas
Example #28
    def em_gmm_orig(self):

        n, p = self.xs.shape
        k = 2

        # initial parameters (assumed to be stored on the instance)
        pis, mus, sigmas = self.pis, self.mus, self.sigmas

        ll_old = 0
        for it in range(self.max_iter):

            # E-step
            ws = np.zeros((k, n))
            for j in range(len(mus)):
                for i in range(n):
                    ws[j, i] = pis[j] * mvn(mus[j], sigmas[j]).pdf(self.xs[i])
            ws /= ws.sum(0)

            # M-step
            pis = np.zeros(k)
            for j in range(len(mus)):
                for i in range(n):
                    pis[j] += ws[j, i]
            pis /= n

            mus = np.zeros((k, p))
            for j in range(k):
                for i in range(n):
                    mus[j] += ws[j, i] * self.xs[i]
                mus[j] /= ws[j, :].sum()

            sigmas = np.zeros((k, p, p))
            for j in range(k):
                for i in range(n):
                    ys = np.reshape(self.xs[i] - mus[j], (p, 1))
                    sigmas[j] += ws[j, i] * np.dot(ys, ys.T)
                sigmas[j] /= ws[j, :].sum()

            # update complete log likelihood
            ll_new = 0.0
            for i in range(n):
                s = 0
                for j in range(k):
                    s += pis[j] * mvn(mus[j], sigmas[j]).pdf(self.xs[i])
                ll_new += np.log(s)

            if np.abs(ll_new - ll_old) < self.tol:
                break
            ll_old = ll_new

        return ll_new, pis, mus, sigmas
Example #29
    def __init__(self, mu, sigma, tilted=True, min_birth=0, fastQ=False):
        '''
        Parameters
        ----------
        mu : array-like, shape = (2,).
             Mean.
        sigma : float.
                Diagonal value of covariance matrix.
        tilted : bool, optional
                 Whether to use tilted coordinates. The default is True.
        min_birth : float, optional
                    Minimum allowable birth time; e.g., this value should be
                    set to 0 when used with diagrams created from Rips
                    filtrations. The default is 0.
        fastQ : bool, optional
                Whether to approximate the normalizing constant Q with 1.
                The default is False.

        Returns
        -------
        None.
        '''

        self.mu = mu
        self.sigma = sigma
        self.tilted = tilted
        self.min_birth = min_birth
        self.fastQ = fastQ

        mean = np.array(mu)
        covariance = (sigma**2) * np.eye(2)
        self.dist = mvn(mean=mean, cov=covariance)

        self._compute_normalizing_constant()
Example #30
def computing_errorbars(regr, dataset_errors, train_test_sets):
    """INPUTS: regr = random forest regression model
            dataset_errors = pandas dataframe with each feature
                             and their associated uncertainties
            train_test_sets = pandas dataframes with exoplanets
                              and features names
                              as well as the values

    OUTPUTS: radii_test_output_error = error on the predicted
                                       radius for the Test set
             radii_test_input_error = original uncertainty
                                      on the radius measurements"""



    # Original train and test sets
    X_train, X_test, y_train, y_test = train_test_sets

    # Cross matching the Test set with the dataset with errors
    # to compute error bars for the exoplanets which have input errors
    dataset_errors = dataset_errors.loc[X_test.index.values.tolist()]
    # Remove an exoplanet in case there is still a NaN
    # in one of the features
    dataset_errors = dataset_errors.dropna(axis=0, how='any')

    # Matrix with all the errors on the different features
    features_errors = dataset_errors.iloc[:, :-2].values
    # Radius vector
    radii_test = dataset_errors.iloc[:, -2].values
    # Error on the radius vector
    radii_test_input_error = dataset_errors.iloc[:, -1].values

    # Empty vector to store the error bars
    radii_test_output_error = np.zeros_like(radii_test_input_error)
    for i in range(radii_test.size):
        # print(i)
        # from each line in X_train generate new values for all parameters
        # with a multivariate gaussian which has
        # a vector of mean with the value columns and std with the error column
        # mean_values_0 = features_errors[i,0:-1:2]
        # >> takes the features : [mass0, temp_eq0, ...]
        # std_errors_0 = features_errors[0,1:-1:2]
        # >> takes the errors : [mass_err0, temp_eq_err0, ...]

        rerr = regr.predict(mvn(features_errors[i, ::2],
                                np.diag(features_errors[i, 1::2]),
                                allow_singular=True).rvs(1000)).std()
        radii_test_output_error[i] = rerr
        # print(radii_test_output_error[i], radii_test_input_error[i])

    # Save the errorbars in a txt file
    outdir = 'bem_output'
    if not os.path.exists(outdir):
        os.mkdir(outdir)

    filename = 'bem_output/test_radius_RF_errorbars.dat'
    print('Error bars of the test set are saved in: ', filename)
    np.savetxt(filename, radii_test_output_error)

    return radii_test_output_error, radii_test_input_error
Example #31
def e_step(x, params):
    n = len(params['phi'])
    dist = [mvn(params["mu"][i], params["sigma"][i]).pdf(x) for i in range(n)]
    log_p_y_x = np.log(params['phi'])[np.newaxis, ...] + \
                np.log(dist).T
    log_p_y_x_norm = logsumexp(log_p_y_x, axis=1)
    return log_p_y_x_norm, np.exp(log_p_y_x - log_p_y_x_norm[..., np.newaxis])
Example #32
    def sample_obs(self, n_samples):
        self.observations = []
        pts = np.zeros((n_samples, self.d))
        simplices = list(self.cmplx.simplices.values())  # np.random.choice needs a sequence, not a dict view
        p_simplex = [s.area() for s in simplices]
        total_area = np.sum(p_simplex)
        p_simplex =[p/total_area for p in p_simplex]
        chosen_simplices = []
        for i in range(n_samples):
            s = np.random.choice(simplices, p=p_simplex)
            chosen_simplices.append(s)
            lmbda = np.random.rand()
            pt_src = lmbda * s.vertices[0].v + (1-lmbda) * s.vertices[1].v
            # ## compute normal direction
            normal = s.vertices[0].v - s.vertices[1].v
            normal[0], normal[1] = normal[1], normal[0]
            normal = normal / np.linalg.norm(normal)
            normal[1] *= -1
            delta = self.obs_dist.rvs()
            pt = delta*normal + pt_src
            # print pt_src, pt, delta, normal
            pts[i, :] = pt

        C = np.eye(n_samples) * self.obs_sigma
        for i in range(n_samples):
            for j in range(n_samples):
                C[i, j] += tps_kernel(pts[i], pts[j])
        pts_obs = mvn(np.zeros(n_samples), C).rvs(size=2).T

        for i in range(n_samples):
            self.observations.append(Obs(pts_obs[i], self.cmplx, pts[i], s_source=chosen_simplices[i]))
        return pts, pts_obs, chosen_simplices
Example #33
    def update_prior(self, x, y):
        # Use posterior as new prior
        self.prior = self.posterior

        # Create design matrix
        phi = self.get_phi(x, self.base)

        # Update mean and covariance
        if self.iter == 0:
            new_precision = self.alpha * phi.T * phi + self.beta * np.identity(
                self.base)
            self.cov = matrix(pinv(new_precision))
            self.mean = self.cov * self.alpha * phi.T * y
        else:
            new_precision = self.alpha * phi.T * phi + pinv(self.prior.cov)
            self.cov = matrix(pinv(new_precision))
            self.mean = self.cov * (self.alpha * phi.T * y +
                                    pinv(self.prior.cov) * self.mean)

        new_mean = self.mean.reshape(phi.shape)
        new_mean = array(new_mean)

        self.posterior = mvn(new_mean[0], self.cov)
        self.iter += 1
        self.data[0].append(x)
        self.data[1].append(y)
Example #34
    def condition(self, x_in, dim_in, dim_out, h=None, return_gmm=False):
        mu_in, sigma_in = self.get_marginal(dim_in)
        mu_out, sigma_out = self.get_marginal(dim_out)
        _, sigma_in_out = self.get_marginal(dim=dim_in, dim_out=dim_out)

        if h is None:
            h = np.zeros(self.K)
            for k in range(self.K):
                h[k] = self.weights_[k] * mvn(mean=mu_in[k],
                                              cov=sigma_in[k]).pdf(x_in)
            h = h / np.sum(h)

        #compute mu and sigma
        mu = []
        sigma = []
        for k in range(self.K):
            mu += [
                mu_out[k] +
                np.dot(sigma_in_out[k].T,
                       np.dot(np.linalg.inv(sigma_in[k]),
                              (x_in - mu_in[k]).T)).flatten()
            ]
            sigma += [
                sigma_out[k] -
                np.dot(sigma_in_out[k].T,
                       np.dot(np.linalg.inv(sigma_in[k]), sigma_in_out[k]))
            ]

        mu, sigma = (np.asarray(mu), np.asarray(sigma))
        if return_gmm:
            return h, mu, sigma
        else:
            return self.moment_matching(h, mu, sigma)
Example #35
def E_step(X, m, n, weight, means, cov):
    resp = np.zeros((m, n))
    for j in range(m):
        for i in range(n):
            resp[j, i] = weight[j] * mvn(means[j], cov[j]).pdf(X[i])
    resp /= resp.sum(0)
    return resp
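A small illustrative check that the responsibilities returned by E_step sum to one over the components.

import numpy as np
from scipy.stats import multivariate_normal as mvn

np.random.seed(3)
X = np.vstack([mvn([0, 0]).rvs(50), mvn([3, 3]).rvs(50)])
weights = np.array([0.5, 0.5])
means = np.array([[0., 0.], [3., 3.]])
covs = np.array([np.eye(2)] * 2)
resp = E_step(X, m=2, n=len(X), weight=weights, means=means, cov=covs)
print(resp.sum(0))              # each column sums to 1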
Example #36
def EM(x):
    global n,d,k,iter,pi_em,sigma_em,miu_em,likelihood_new
    tol = 0.01
    likelihood_old = 0
    for ite in range(iter):
        #E_step:
        w=np.zeros((k,n))
        for i in range(k):
            for j in range(n):
                w[i,j]=pi_em[i]*mvn(miu_em[i],sigma_em[i]).pdf(x[j])
        w/=w.sum(0)

        #M_step
        pi_em=np.zeros(k)
        miu_em=np.zeros((k,d))
        sigma_em=np.zeros((k,d,d))
        for i in range(k):
            for j in range(n):
                pi_em[i]+=w[i,j]
                miu_em[i]+=w[i,j]*x[j]
            miu_em[i]/=pi_em[i]
        pi_em/=n

        for i in range(k):
            for j in range(n):
                tmp = np.reshape(x[j] - miu_em[i], (d, 1))
                sigma_em[i]+=w[i,j]*np.dot(tmp,tmp.T)
            sigma_em[i]/=w[i,:].sum()

        likelihood_new=0
        for i in range(n):
            s=0
            for j in range(k):
                s+=pi_em[j]*mvn(miu_em[j],sigma_em[j]).pdf(x[i])
            likelihood_new+=np.log(s)

        if np.abs(likelihood_new - likelihood_old) < tol:
            break
        likelihood_old = likelihood_new
        print(ite)
Example #37
 def fit(self, X, mu, sigma, pi, max_iter=100, tolerance=0.01):
     # Dimensions
     n, p = X.shape
     k = self.k
     
     # Keep track of log likelihood for convergence purposes
     log_likelihood_old, log_likelihood_new = 0, 0
     
     for i in range(max_iter):            
         # E-Step
         resp = np.zeros((k, n))
         for mode in range(k):
             resp[mode] = pi[mode] * mvn(mu[mode], sigma[mode]).pdf(X)
         resp /= resp.sum(0)
         
         # M-Step
         pi = resp.sum(axis=1) / n
         mu = np.asarray([np.dot(r,X) / r.sum() for r in resp])
         
         # Sigma implementation adapted from Ref.8
         sigma = np.zeros((k, p, p))
         for j in range(k):
             Y = X - mu[j, :]
             sigma[j] = (resp[j,:,None,None] * mm(Y[:,:,None], Y[:,None,:])).sum(axis=0)
         sigma /= resp.sum(axis=1)[:,None,None]
                     
         # Track trajectory of means against iteration
         self.trajectory.append(mu)
         
         # Update log likelihood and check for convergence
         log_likelihood_new =  np.sum([P * mvn(M, S).pdf(X) for P,M,S in zip(pi, mu, sigma)], axis=0)
         log_likelihood_new = np.log(log_likelihood_new).sum()
         if np.abs(log_likelihood_new - log_likelihood_old) < tolerance:
             break
         
         # Otherwise, keep the updated value for the next iteration
         log_likelihood_old = log_likelihood_new
         
     return [mu, sigma, pi]
Example #38
    def __init__(self, obs_pts=None, cmplx=None, 
                 gamma=.9, lmbda=.2, use_gp=True,
                 obs_sigma=OBS_SIGMA, propose_sigma=.0005, birth_sigma=.1,
                 d=2, obs=None, N=None, P=None, n_clusters_init=5):
        """
        gamma: geometric variable for prior on number of simplices
        sigma_sq: variance of 
        d: dimension of embedding space
        """

        assert not (obs_pts is None and cmplx is None)

        self.gamma = gamma
        self.N_prior = geom(gamma)

        self.d = d
        self.lmbda = lmbda
        self.len_prior = expon(self.lmbda)

        self.propose_mvn = mvn(np.zeros(self.d), propose_sigma*np.eye(self.d))
        self.obs_sigma=obs_sigma
        self.obs_dist = norm(loc=0, scale=obs_sigma)

        self.birth_proposal = norm(loc=0, scale=birth_sigma)

        self.use_gp = use_gp

        self.cmplx = cmplx
        if self.cmplx is None:
            # obs_pts is not None
            self.cmplx = SimplicialComplex()
            ## this is a 1d complex
            self.cmplx.initialize(obs_pts, 1, n_clusters=n_clusters_init)

        self.N = self.cmplx.simplex_count()

        if obs_pts is None:
#            self.sample_obs(self.N * 10)
            self.sample_obs(self.N * 100)
        else:
            self.observations = []
            for pt in obs_pts:
                self.observations.append(Obs(pt, self.cmplx))
Example #39
# initial guesses for parameters
pis = np.random.random(2)
pis /= pis.sum()
mus = np.random.random((2,2))
sigmas = np.array([np.eye(2)] * 2)

ll1, ws, pis1, mus1, sigmas1 = em_gmm_orig(xs, pis, mus, sigmas)

intervals = 101
ys = np.linspace(-8,8,intervals)
X, Y = np.meshgrid(ys, ys)
_ys = np.vstack([X.ravel(), Y.ravel()]).T

z = np.zeros(len(_ys))
for pi, mu, sigma in zip(pis1, mus1, sigmas1):
    z += pi*mvn(mu, sigma).pdf(_ys)
z = z.reshape((intervals, intervals))

ax = plt.subplot(111)
colors = ['' for x in range(len(xs))]
for i in range(len(xs)):
    if ws[0,i] < ws[1,i]:
        colors[i] = 'r'
    else:
        colors[i] = 'g'
plt.scatter(xs[:,0], xs[:,1], c = colors, alpha=0.2)
degree = np.arange(0, 2*np.pi, 2*np.pi/1000)
x1 = 3.0414 * np.cos(degree)
y1 = 3.0414 * np.sin(degree)+4
x2 = 2.2361 * np.cos(degree)-2
y2 = 2.2361 * np.sin(degree)
Example #40
def _multiway_refine(I, Response, Label, Background=1e-4, Smoothness=1e-4):

    # initialize output image
    Refined = Label.copy()

    # initialize cell count
    Total = 0

    # identify connected components
    Components, N = ms.label(Label > 0)

    # get locations of connected components
    Locations = ms.find_objects(Components)

    # process each connected component containing possibly multiple nuclei
    for i in np.arange(1, N+1):

        # extract label image and component mask
        Component = Label[Locations[i-1]].copy()
        ComponentMask = Components[Locations[i-1]] == i

        # zero out labels not in component
        Component[~ComponentMask] = 0

        # condense label image
        Component = lb.CondenseLabel(Component)

        # determine if more than one label value exists in Component
        if(Component.max() > 1):

            # generate region adjacency graph
            Adjacency = sg.LabelRegionAdjacency(Component, Neighbors=4)

            # layer the region adjacency graph
            Adjacency = sg.RegionAdjacencyLayer(Adjacency)

            # color the region adjacency graph
            RAG = sg.GraphColorSequential(Adjacency)

            # generate bounding box patch for graph cutting problem
            D = np.zeros((Component.shape[0], Component.shape[1],
                          np.max(RAG)+1), dtype=float)
            X, Y = np.meshgrid(range(0, Component.shape[1]),
                               range(0, Component.shape[0]))
            Pos = np.empty(X.shape + (2,))
            Pos[:, :, 1] = X
            Pos[:, :, 0] = Y

            # initialize list of channels containing no non-degenerate objects
            Delete = []

            # for each color in rag
            for j in np.arange(1, np.max(RAG)+1):

                # get indices of cells to model in color 'j'
                Indices = np.nonzero(RAG == j)[0]

                # initialize count of component objects that were modeled
                Count = 0

                # for each nucleus in color
                for k in Indices:

                    # define x, y coordinates of nucleus
                    cY, cX = np.nonzero(Component == k+1)

                    # model each nucleus with gaussian and add to component 'j'
                    Mean, Cov = _gaussian_model(Response[Locations[i-1]],
                                                cX, cY)
                    try:
                        Model = mvn(Mean.flatten(), Cov.squeeze())
                    except Exception:
                        continue

                    # increment counter
                    Count += 1

                    # add multivariate normal to channel 'j' of D
                    D[:, :, j] = np.maximum(D[:, :, j], Model.pdf(Pos))

                # add channel j to the delete list if no objects were added
                if(Count == 0):
                    Delete.append(j)

            # add background probability
            D[:, :, 0] = Background

            # keep channels containing non-degenerate objects
            if len(Delete):
                Temp = np.zeros((Component.shape[0], Component.shape[1],
                                 D.shape[2]-len(Delete)), dtype=float)
                Channel = 0
                for j in np.arange(D.shape[2]):
                    if j not in Delete:
                        Temp[:, :, Channel] = D[:, :, j]
                        Channel += 1
                D = Temp

            # component contains non-degenerate objects - cut
            if(D.shape[2] > 1):

                # score probabilities
                for j in np.arange(D.shape[2]):
                    D[:, :, j] = -np.log(D[:, :, j] + np.finfo(float).eps)

                # formulate image-based gradient costs
                Patch = I[Locations[i-1]].astype(np.float)
                Horizontal = np.exp(-np.abs(Patch[:, 0:-1] - Patch[:, 1:]))
                Vertical = np.exp(-np.abs(Patch[0:-1, :] - Patch[1:, :]))

                # formulate label cost
                V = 1 - np.identity(D.shape[2])
                V = Smoothness * V

                # cut the graph and reshape the output
                Cut = gc.cut_grid_graph(D, V, Vertical, Horizontal,
                                        n_iter=-1, algorithm='swap')
                Cut = Cut.reshape(Component.shape[0],
                                  Component.shape[1]).astype(np.uint32)

                # split the labels that were grouped during graph coloring
                Cut = lb.SplitLabel(Cut)

                # capture number of objects in cut result
                Max = Cut.max()

                # update the values in the cut
                Cut[Cut > 0] = Cut[Cut > 0] + Total

                # embed the resulting cut into the output label image
                Refined[Components == i] = Cut[ComponentMask]

                # update object count
                Total = Total + Max

            else:  # component is degenerate - contains no viable objects

                Refined[Components == i] = 0

        else:  # single object component - no refinement necessary

            # increment object count
            Total += 1

    return Refined
Example #41
mean = np.array([0.,0.])

covs = np.array(range(0,5)) /5.
covs2 = np.array(range(1,5))[::-1] /-5.
covs = np.concatenate([covs2, covs])

# covs = [.8]

for i in range(len(covs)):

    ax1 = plt.subplot2grid((1,9), (0,i))#, colspan=3)

    print(covs[i])
    
    cov = np.array([[1.,covs[i]],[covs[i],1.]])
    rv = mvn(mean, cov)


    # func = lambda x: np.exp(log_normal_pdf(x,cov=covs[i]))

    # plot_isocontours(ax1, func, xlimits=[-6, 6], ylimits=[-6, 6], numticks=101, cmap=None)
    # plot_isocontours(ax1, rv.pdf, numticks=101, cmap='Blues')
    plot_isocontours(ax1, rv.pdf, numticks=101, cmap=None)

    ax1.set_title(str(covs[i]))

plt.show()