Example #1
import pickle

import numpy as np
import ggmm.gpu as ggmm  # GPU backend of the ggmm package


def generate_gmm(image_features_list):
    # Stack the per-image descriptor arrays into a single (N, D) matrix.
    concatenated_descriptors = np.concatenate(image_features_list)
    gmm_filename = 'gmm.pkl'
    N, D = concatenated_descriptors.shape
    K = 128  # number of Gaussian components

    print("The sizes are {0} and {1}".format(N, D))

    # Cap the training batch at 3M descriptors so the GPU buffers stay manageable.
    batch_size = min(N, 3000000)

    ggmm.init(batch_size * D)
    gmm = ggmm.GMM(K, D)

    thresh = 1e-3  # convergence threshold
    n_iter = 500  # maximum number of EM iterations
    init_params = 'wmc'  # initialize weights, means, and covariances

    # train GMM
    converged = gmm.fit(concatenated_descriptors[:batch_size],
                        thresh,
                        n_iter,
                        init_params=init_params)

    print("GMM converged? ... {0}".format(converged))

    # Persist the learned parameters so they can be reloaded without retraining.
    with open(gmm_filename, 'wb') as f:
        pickle.dump((gmm.get_weights(), gmm.get_means(), gmm.get_covars()), f)

    return gmm
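
A minimal way to exercise the function above, continuing from the snippet (numpy is already imported) and assuming ggmm is installed with a working CUDA device; the descriptor shapes and the random data are purely illustrative:

# Hypothetical smoke test: a few "images" of random 64-D float32 descriptors.
features = [np.random.randn(5000, 64).astype(np.float32) for _ in range(4)]
gmm = generate_gmm(features)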
Example #2
import time

import ggmm.gpu as ggmm  # GPU backend of the ggmm package


def EMEngine(X, thresh_, n_iter_, timer, K_, init_='wmc'):
    N, D = X.shape
    K = K_

    start = time.time()

    # train GMM
    ggmm.init()
    gmm = ggmm.GMM(K, D)
    gmm.fit(X, thresh=thresh_, n_iter=n_iter_, init_params=init_,
            verbose=False)

    # retrieve parameters
    weights = gmm.get_weights()
    means = gmm.get_means()
    covars = gmm.get_covars()
    posteriors = gmm.compute_posteriors(X)  # computed here but not returned

    end = time.time() - start

    if timer:
        print('Max Iterations:', n_iter_)
        print('Convergence Threshold:', thresh_)
        print('Execution Time:', end)
        print('Number of Gaussians:', K)
        print('===========================')

    return (means, weights, covars)
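
A hypothetical invocation, assuming float32 input descriptors; the dimensionality, sample count, and number of components are arbitrary:

import numpy as np

X = np.random.randn(10000, 64).astype(np.float32)
means, weights, covars = EMEngine(X, 1e-3, 500, True, 64)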
Example #3
import pickle

import ggmm.gpu as ggmm  # GPU backend of the ggmm package


def load_gmm():
    # Load the (weights, means, covars) tuple saved by generate_gmm above.
    with open("gmm.pkl", "rb") as f:
        wmc = pickle.load(f)
    K = wmc[0].shape[0]  # number of components = length of the weight vector
    D = 64               # descriptor dimensionality (hard-coded to match training)
    ggmm.init(K * D)
    gmm = ggmm.GMM(K, D)

    gmm.set_weights(wmc[0])
    gmm.set_means(wmc[1])
    gmm.set_covars(wmc[2])
    print("Loaded GMM Info!")
    return gmm
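
Once reloaded, the model can be queried like a freshly trained one. A sketch, reusing compute_posteriors from Example #2; the saved gmm.pkl is assumed to exist and the input batch is hypothetical:

import numpy as np

gmm = load_gmm()
descriptors = np.random.randn(1000, 64).astype(np.float32)
posteriors = gmm.compute_posteriors(descriptors)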
Example #4
import ggmm.gpu as ggmm  # GPU backend of the ggmm package


def setup():
    ggmm.init()  # activates cublas
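
For completeness, a sketch of the intended call order; the constructor arguments are illustrative, taken from the examples above:

setup()                  # must run before any GMM is constructed
gmm = ggmm.GMM(128, 64)  # 128 components over 64-D descriptors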