def vonMisesFisher_pdf_log(X, theta_k, Cs_log=None):
    """Log-pdf of a single von Mises-Fisher distribution.
    theta_k = [mu, kappa]: mean direction (D values) and concentration (scalar).
    X: data with D rows, one column per sample. Returns an (N, 1) array of log-densities.
    If Cs_log (the log normalization constant) is given, it is not recomputed."""
    mu = theta_k[0]
    kappa = theta_k[1]

    ######## Make sure the parameters have proper dimensions #######
    mu = np.array(mu)
    mu = mu.flatten().reshape(mu.size, 1)

    X = np.array(X)
    X = X.reshape(mu.size, X.size // mu.size)  # integer division: (D, N)

    D, N = X.shape  # dimensions of the distribution, number of samples
    X = X.T

    if Cs_log is None:
        Cs_log = get_cp_log(D, kappa)
    log_pdf = Cs_log + np.dot(X, mu) * kappa

    return log_pdf
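
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the value returned above is
# log f(x; mu, kappa) = Cs_log + kappa * mu^T x, where for the von Mises-Fisher
# distribution the log normalization constant is
#   Cs_log = (D/2 - 1)*log(kappa) - (D/2)*log(2*pi) - log I_{D/2-1}(kappa).
# The helper below is only an assumption about what get_cp_log could compute for the
# vMF case; it needs just numpy/scipy and can be fed to vonMisesFisher_pdf_log through
# its Cs_log argument to sanity-check the density.
import numpy as np
from scipy.special import ive

def _vmf_log_norm_const_sketch(D, kappa):
    # log I_v(kappa) computed through the exponentially scaled Bessel function for stability
    v = D / 2.0 - 1.0
    log_bessel = np.log(ive(v, kappa)) + kappa
    return v * np.log(kappa) - (D / 2.0) * np.log(2 * np.pi) - log_bessel

# Usage sketch:
#   mu = np.array([0.0, 0.0, 1.0]); kappa = 5.0
#   X = np.random.randn(3, 10); X = X / np.linalg.norm(X, axis=0)
#   vonMisesFisher_pdf_log(X, [mu, kappa], Cs_log=_vmf_log_norm_const_sketch(3, kappa))
# ---------------------------------------------------------------------------
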
def Watson_K_pdf_log(X, theta, Cs_log=None, parameters=None):
    # Extension of Watson_pdf_log that accepts several clusters at once.
    # The shapes are stricter in this case and the parameters must be:
    # X (D,Nsamples)  mu(D,K) kappa(K) cp_log(K)
    # The result is (Nsamples, K)
    D, N = X.shape
    K = len(theta)

    ## TODO: better manage this preprocessing
    # For the combination of Gaussian and Watson, we need to do the preprocessing here
    X = gf.remove_module(X.T).T

    ########### Obtain parameters from theta list ############
    mus = []
    kappas = []
    for theta_k in theta:
        mus.append(theta_k[0])
        kappas.append(theta_k[1])

    mus = np.concatenate(mus, axis=1)
    kappas = np.array(kappas).reshape(K, 1)

    ## Obtain the normalization coefficients if needed
    if Cs_log is None:
        Cs_log = []
        for k in range(K):
            cp_log_k = get_cp_log(D, kappas[k, 0])
            # If the normalization constant cannot be computed for any of the clusters,
            # we signal it by returning None
            if cp_log_k is None:
                return None
            Cs_log.append(cp_log_k)


    ## Perform the computation for all clusters at once
    Cs_log = np.array(Cs_log).reshape(K, 1)

    aux1 = np.dot(mus.T, X)
    log_pdf = Cs_log + (kappas * np.power(aux1, 2))
    return log_pdf.T
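
# ---------------------------------------------------------------------------
# Illustrative sketch (plain numpy, no project dependencies): the vectorized K-cluster
# step above reduces to Cs_log (K,1) + kappas (K,1) * (mus^T X)^2 (K,N), transposed to
# (Nsamples, K). The helper below only reproduces that step with placeholder constants,
# as a quick standalone shape check; it is not part of the original code.
def _watson_K_shape_check_sketch():
    import numpy as np
    D, N, K = 3, 5, 2
    X = np.random.randn(D, N)
    X = X / np.linalg.norm(X, axis=0)          # unit-norm columns (axial data)
    mus = np.random.randn(D, K)
    mus = mus / np.linalg.norm(mus, axis=0)    # one mean direction per cluster
    kappas = np.array([[2.0], [10.0]])         # (K, 1)
    Cs_log = np.zeros((K, 1))                  # placeholder log normalization constants
    log_pdf = (Cs_log + kappas * np.dot(mus.T, X) ** 2).T
    return log_pdf.shape                       # (N, K) == (5, 2)
# ---------------------------------------------------------------------------
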
def Watson_pdf_log(X, theta, C_log=None):
    # Log-pdf of a single Watson distribution. We work in the log domain because the
    # normalization constant cp becomes very large for low dimensions and high kappa,
    # so probabilities can overflow or underflow even for a single sample.
    #
    # If C_log (the log normalization constant) is given, we do not recompute it.
    #
    # theta = [mu, kappa]:
    # mu: [mu0 mu1 ...] D weights of the main direction of the distribution
    # kappa: dispersion value
    # X: vectors we want to know the probability of
    ########### Obtain parameters from theta list ############
    mu = theta[0]
    kappa = theta[1]

    ######## Make sure the parameters have proper dimensions #######
    mu = np.array(mu)
    mu = mu.flatten().reshape(mu.size, 1)

    X = np.array(X)
    X = X.reshape(mu.size, X.size // mu.size)  # integer division: (D, N)

    D = mu.size    # Dimensions of the distribution
    N = X.shape[1] # Number of samples to compute the pdf

    ######### Check if we can save the computation of the norm constant #######
    if C_log is None:
        C_log = get_cp_log(D, kappa)  # Computes the log normalization constant
    
    ###### Compute the probability ###############
    aux1 = np.dot(mu.T, X)
    log_pdf = C_log + (kappa * np.power(aux1,2))
    
    if (log_pdf.size == 1):  # Turn it into a single number if appropriate
        log_pdf = float(log_pdf)
        
    return log_pdf
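
# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption, not the original get_cp_log): for the Watson
# distribution the log-density above is log f(x; mu, kappa) = C_log + kappa*(mu^T x)^2,
# with normalization constant
#   c_D(kappa) = Gamma(D/2) / (2 * pi^(D/2) * M(1/2, D/2, kappa)),
# where M is Kummer's confluent hypergeometric function. For moderate kappa it can be
# evaluated directly with scipy; the real get_cp_log may use a more robust scheme.
import numpy as np
from scipy.special import gammaln, hyp1f1

def _watson_log_norm_const_sketch(D, kappa):
    return (gammaln(D / 2.0) - np.log(2.0) - (D / 2.0) * np.log(np.pi)
            - np.log(hyp1f1(0.5, D / 2.0, kappa)))

# Usage sketch:
#   mu = np.array([0.0, 0.0, 1.0]); kappa = 5.0
#   Watson_pdf_log(np.array([0.0, 0.0, 1.0]), [mu, kappa],
#                  C_log=_watson_log_norm_const_sketch(3, kappa))
# ---------------------------------------------------------------------------
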
def vonMisesFisher_K_pdf_log(X, theta, Cs_log=None, parameters=None):
    # Extension of vonMisesFisher_pdf_log that accepts several clusters at once.
    # The shapes are stricter in this case and the parameters must be:
    # X (D,Nsamples)  mu(D,K) kappa(K) cp_log(K)
    # The result is (Nsamples, K)
    D, N = X.shape
    K = len(theta)

    # For the combination of Gaussian and von Mises-Fisher, we need to do the preprocessing here
    X = gf.remove_module(X.T).T

    if Cs_log is None:
        Cs_log = [None] * K
        
    log_pdfs = []
    for k in range(K):
        # TODO: Play with the normalization constant
        log_pdf_i = vonMisesFisher_pdf_log(X, theta[k], Cs_log=Cs_log[k])
        log_pdfs.append(log_pdf_i)

    log_pdfs = np.concatenate(log_pdfs, axis=1)

    return log_pdfs
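
# ---------------------------------------------------------------------------
# Illustrative note (plain numpy, not part of the original code): the Watson kernel
# kappa*(mu^T x)^2 used in Watson_K_pdf_log is antipodally symmetric (f(x) = f(-x),
# axial data), while the von Mises-Fisher kernel kappa*mu^T x used here is not.
# A quick standalone check of the two kernels:
def _axial_vs_directional_kernel_sketch():
    import numpy as np
    mu = np.array([0.0, 0.0, 1.0])
    x = np.array([0.6, 0.0, 0.8])          # unit vector
    kappa = 4.0
    watson = (kappa * np.dot(mu, x) ** 2, kappa * np.dot(mu, -x) ** 2)   # equal values
    vmf = (kappa * np.dot(mu, x), kappa * np.dot(mu, -x))                # differ in sign
    return watson, vmf
# ---------------------------------------------------------------------------
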
def generate_gaussian_data(folder_images, plot_original_data, N1 = 200, N2 = 300, N3 = 50):
    """Generate three 2-D Gaussian clusters, optionally plot them with their confidence
    ellipses, and sanity-check the Gaussian log-likelihood implementation against scipy."""

    mu1 = np.array([[0],[0]])
    cov1 = np.array([[0.8,-1.1],
                     [-1.1,1.6]])
    
    mu2 = np.array([[0],[0]])
    cov2 = np.array([[0.3,0.45],
                     [0.45,0.8]])
    mu3 = np.array([[0],[0]])
    cov3 = np.array([[0.1,0.0],
                     [0.0,0.1]])
    
    X1 = np.random.multivariate_normal(mu1.flatten(), cov1, N1).T
    X2 = np.random.multivariate_normal(mu2.flatten(), cov2, N2).T
    X3 = np.random.multivariate_normal(mu3.flatten(), cov3, N3).T

    
    ######## Plotting #####
    if (plot_original_data):
        gl.init_figure()
        ## First cluster
        ax1 = gl.scatter(X1[0,:],X1[1,:], labels = ["Gaussian Generated Data", "x1","x2"], 
                         legend = ["K = 1"], color = "r",alpha = 0.5)
        mean,w,h,theta = bMA.get_gaussian_ellipse_params( mu = mu1, Sigma = cov1, Chi2val = 2.4477)
        r_ellipse = bMA.get_ellipse_points(mean,w,h,theta)
        gl.plot(r_ellipse[:,0], r_ellipse[:,1], ax = ax1, ls = "--", lw = 2
                 ,AxesStyle = "Normal2", color = "r")
        
        ## Second cluster
        ax1 = gl.scatter(X2[0,:],X2[1,:], legend = ["K = 2"], color = "b", alpha = 0.5)
        mean,w,h,theta = bMA.get_gaussian_ellipse_params( mu = mu2, Sigma = cov2, Chi2val = 2.4477)
        r_ellipse = bMA.get_ellipse_points(mean,w,h,theta)
        gl.plot(r_ellipse[:,0], r_ellipse[:,1], ax = ax1, ls = "--", lw = 2,AxesStyle = "Normal2", color = "b")
        
        ## Third cluster
        ax1 = gl.scatter(X3[0,:],X3[1,:], legend = ["K = 3"], color = "g", alpha = 0.5)
        mean,w,h,theta = bMA.get_gaussian_ellipse_params( mu = mu3, Sigma = cov3, Chi2val = 2.4477)
        r_ellipse = bMA.get_ellipse_points(mean,w,h,theta)
        gl.plot(r_ellipse[:,0], r_ellipse[:,1], ax = ax1, ls = "--", lw = 2,AxesStyle = "Normal2", color = "g")
        
        ax1.axis('equal')

        gl.savefig(folder_images +'Original data.png', 
               dpi = 100, sizeInches = [12, 6])
    
    ############ ESTIMATE THEM ################
    theta1 = Gae.get_Gaussian_muSigma_ML(X1.T, parameters = dict([["Sigma","full"]]))
    print ("mu1:")
    print (theta1[0])
    print ("Sigma1")
    print(theta1[1])
    
    ############## Estimate Likelihood ###################
    ll = Gad.Gaussian_pdf_log (X1, [mu1,cov1])
    ll2 = []
    for i in range (ll.size):
        ll2.append( multivariate_normal.logpdf(X1[:,i], mean=mu1.flatten(), cov=cov1))
    ll2 = np.array(ll2).reshape(ll.shape)
    
    print ("ll ours")
    print (ll.T)
    print ("ll scipy")
    print (ll2.T)
    print ("Difference in ll")
    print ((ll - ll2).T)
    
    ###### Multiple clusters case
    ll_K = Gad.Gaussian_K_pdf_log(X1, [[mu1,cov1],[mu2,cov2]])
    
    if(0):
        X1 = gf.remove_module(X1.T).T
        X2 = gf.remove_module(X2.T).T
        X3 = gf.remove_module(X3.T).T
    Xdata = np.concatenate((X1, X2, X3), axis=1).T

    return X1,X2,X3,Xdata, mu1,mu2,mu3, cov1,cov2, cov3
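
# ---------------------------------------------------------------------------
# Side note (illustrative, scipy only): the Chi2val = 2.4477 used for the ellipses above
# appears to be sqrt(chi2.ppf(0.95, df=2)), i.e. the Mahalanobis radius of the ~95%
# confidence region of a 2-D Gaussian:
def _ellipse_radius_sketch(confidence=0.95, dof=2):
    import numpy as np
    from scipy.stats import chi2
    return np.sqrt(chi2.ppf(confidence, df=dof))   # ~2.4477 for (0.95, 2)
# ---------------------------------------------------------------------------
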
def get_Watson_muKappa_ML(X, rk=None, parameters=None):
    """ This function efficiently obtains the parameters (mu, kappa)
    of a single Watson cluster, given the weight vector rk.
    If the estimation is ill-posed (degenerated cluster), it will trigger a RuntimeError
    which is handled later to deal with the degenerated cluster and take
    an action, like deleting the cluster or changing its parameters.

    If we could not estimate the parameters we just return None.
    The entire computation is wrapped in a try/except so it cannot break the caller;
    our EM architecture also guards against this, so there is no problem.
    """

    Niter = parameters["Num_Newton_iterations"]
    try:
        # We compute the weighted correlation matrix for the component
        # Sk = (N*pimix_k) * B_ik (*) X*X.T
        # If no rk is specified, uniform weights are used
        N, D = X.shape
        if rk is None:
            rk = np.ones((N, 1)) * (1 / float(N))
        ## TODO: better manage this preprocessing
        # For the combination of Gaussian and Watson, we need to do the preprocessing here
        X = gf.remove_module(X)
        #################### Get the Mus from here !!
        Sk, EigenValues, V = get_eigenDV_ML(X, rk=rk)

        # Obtain the highest and smallest eigenvalue and associated eigenvectors
        d_max = np.argmax(EigenValues)
        d_min = np.argmin(EigenValues)
        mu_pos = V[:, d_max]
        mu_neg = V[:, d_min]

        ## We solve the positive and the negative situations and output the one with
        ## the highest likelihood
        eigenValue_pos = EigenValues[d_max]
        eigenValue_min = EigenValues[d_min]

        # Compute the explained variance of projections.
        r_neg = np.dot(mu_neg.T, Sk).dot(mu_neg)
        r_pos = np.dot(mu_pos.T, Sk).dot(mu_pos)
        """ If the explained variance of r_neg is too low, close to 0, then it is illposed
            and we cannot compute it. We choose the other one directly.
            If the explained variance of r_pos is too high, close to 1, then it is also illposed
            and we cannot compute it. We choose the other one directly.
            If both can be computed, then we choose the one with highest likelihood.
            If None can be computed then we have a degenerated cluster and we create the exeption.
        """

        tolerance = 1e-3

        if (parameters["Allow_negative_kappa"] == "yes"):
            if (r_neg < tolerance and r_pos > 1 - tolerance):
                # Case where we have a degenerated cluster
                raise RuntimeError(
                    'Degenerated cluster focused on one sample. Percentage_samples = %f',
                    "Degenerated_Cluster_Error",
                    np.sum(rk) / N)

            elif (r_neg < tolerance and r_pos < 1 - tolerance):
                # Case where the negative kappa solution is very unlikely (its explained variance is too low)
                kappa_pos = get_Watson_kappa_ML(X,
                                                mu_pos,
                                                Sk=Sk,
                                                rk=rk,
                                                Niter=Niter)
                kappa = kappa_pos
                mu = mu_pos
            elif (r_neg > tolerance and r_pos > 1 - tolerance):
                # Case where the positive kappa solution is very unlikely (its explained variance is too close to 1)
                kappa_neg = get_Watson_kappa_ML(X,
                                                mu_neg,
                                                Sk=Sk,
                                                rk=rk,
                                                Niter=Niter)
                kappa = kappa_neg
                mu = mu_neg
            else:
                # Case where both are possible.
                kappa_pos = get_Watson_kappa_ML(X,
                                                mu_pos,
                                                Sk=Sk,
                                                rk=rk,
                                                Niter=Niter)
                kappa_neg = get_Watson_kappa_ML(X,
                                                mu_neg,
                                                Sk=Sk,
                                                rk=rk,
                                                Niter=Niter)

                # The maximum weighted likelihood estimator

                likelihood_pos = np.sum(
                    Wad.Watson_pdf_log(X.T, [mu_pos, kappa_pos]) * rk.T)
                likelihood_neg = np.sum(
                    Wad.Watson_pdf_log(X.T, [mu_neg, kappa_neg]) * rk.T)


                ## Check that there are not duplicated eigenvalues
                if (likelihood_pos > likelihood_neg):
                    if (EigenValues[0] == EigenValues[1]):
                        print(
                            "Warning: Eigenvalue1 = EigenValue2 in MLmean estimation"
                        )
                    kappa = kappa_pos
                    mu = mu_pos
                else:
                    if (EigenValues[-1] == EigenValues[-2]):
                        print(
                            "Warning: Eigenvalue1 = EigenValue2 in MLmean estimation"
                        )
                    kappa = kappa_neg
                    mu = mu_neg
        else:
            if (r_pos > 1 - tolerance):
                # Case where we have a degenerated cluster
                raise RuntimeError(
                    'Degenerated cluster focused on one sample. Percentage_samples = %f',
                    "Degenerated_Cluster_Error",
                    np.sum(rk) / N)

            else:
                # Negative kappa is not allowed, so only the positive kappa solution is considered
                kappa_pos = get_Watson_kappa_ML(X,
                                                mu_pos,
                                                Sk=Sk,
                                                rk=rk,
                                                Niter=Niter)
                if (EigenValues[0] == EigenValues[1]):
                    print(
                        "Warning: Eigenvalue1 = EigenValue2 in MLmean estimation"
                    )
                kappa = kappa_pos
                mu = mu_pos

        theta = [mu.reshape(D, 1), kappa.reshape(1, 1)]

    except RuntimeError as err:
        theta = None

    return theta
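
# ---------------------------------------------------------------------------
# Illustrative sketch (plain numpy) of the decomposition that get_eigenDV_ML is assumed
# to provide above: the weighted scatter matrix S = sum_i r_i x_i x_i^T / sum_i r_i,
# whose eigenvector with the largest eigenvalue is the mu candidate for kappa > 0 and
# whose eigenvector with the smallest eigenvalue is the candidate for kappa < 0.
def _weighted_scatter_eig_sketch(X, rk):
    # X: (N, D) rows of unit-norm samples, rk: (N, 1) responsibilities
    import numpy as np
    Sk = np.dot(X.T, X * rk) / np.sum(rk)      # (D, D) weighted scatter matrix
    eig_vals, eig_vecs = np.linalg.eigh(Sk)    # eigenvalues in ascending order
    mu_pos = eig_vecs[:, -1]                   # candidate mean axis for kappa > 0
    mu_neg = eig_vecs[:, 0]                    # candidate mean axis for kappa < 0
    return Sk, eig_vals, mu_pos, mu_neg
# ---------------------------------------------------------------------------
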
def get_vonMissesFisher_muKappa_ML(X, rk=None, parameters=None):
    """Maximum likelihood estimator of (mu, kappa) for a single von Mises-Fisher
    cluster, given the weight vector rk."""
    
    Niter = parameters["Num_Newton_iterations"]
    try:
        # If no rk is specified, uniform weights are used
        N, D = X.shape
        if rk is None:
            rk = np.ones((N, 1)) * (1 / float(N))
        ## TODO: better manage this preprocessing
        # For the combination of Gaussian and von Mises-Fisher, we need to do the preprocessing here
        X = gf.remove_module(X)
        #################### Get the mu estimate from here !!
    
        N, p = X.shape
        sum_x = np.sum(X * rk, 0)        # weighted resultant vector
        sum_rk = np.sum(rk)
        norm_sum_x = np.linalg.norm(sum_x)
        
        if (norm_sum_x < 0.00001):
            # Case where we have a degenerated cluster
            raise RuntimeError('Degenerated cluster focused on one sample. Percentage_samples = %f',
                               "Degenerated_Cluster_Error", np.sum(rk) / N)

        mu = sum_x / norm_sum_x          # ML mean direction
        R = norm_sum_x / sum_rk          # mean resultant length
        
        tolerance = 1e-3

        if ((R > 1 - tolerance) or (R < tolerance)):
            # Case where we have a degenerated cluster
            raise RuntimeError('Degenerated cluster focused on one sample. Percentage_samples = %f',
                               "Degenerated_Cluster_Error", np.sum(rk) / N)

        # Closed-form approximation of kappa (Banerjee et al.), used as the Newton starting point.
        # The degenerate extremes R -> 0 and R -> 1 were already excluded above.
        kappa0 = (R * (p - np.power(R, 2))) / (1 - np.power(R, 2))

        kappa_opt = Newton_kappa_log(kappa0, D, R, Niter)

        theta = [mu.reshape(D, 1), kappa_opt.reshape(1, 1)]

    except RuntimeError as err:
        theta = None
            
    return theta
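
# ---------------------------------------------------------------------------
# Illustrative sketch (plain numpy) of the closed-form kappa initialisation used above:
# the Banerjee et al. approximation kappa0 = R*(p - R^2)/(1 - R^2), where R is the norm
# of the weighted resultant vector divided by the total weight. Newton_kappa_log is then
# assumed to refine this starting value.
def _vmf_kappa_approx_sketch(X, rk=None):
    # X: (N, p) rows of unit-norm samples, rk: (N, 1) responsibilities (uniform if None)
    import numpy as np
    N, p = X.shape
    if rk is None:
        rk = np.ones((N, 1)) / float(N)
    sum_x = np.sum(X * rk, axis=0)
    R = np.linalg.norm(sum_x) / np.sum(rk)
    return R * (p - R ** 2) / (1.0 - R ** 2)
# ---------------------------------------------------------------------------
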