Example #1
0
File: BEM.py  Project: fivejjs/dpmix
    def expected_labels(self):
        """E-step: compute per-observation component responsibilities.

        Sets ``self.ll`` (total log-likelihood), ``self.ct`` (expected
        per-component counts), ``self.xbar`` (responsibility-weighted data
        sums), and ``self.densities`` (normalized responsibilities).
        Dispatches to the GPU workers when ``self.gpu`` is set; otherwise
        runs the NumPy path below.
        """
        if self.gpu:
            (self.ll, self.ct,
             self.xbar, self.densities) = get_expected_labels_GPU(
                self.gpu_workers, self.weights, self.mu, self.Sigma)
        else:
            # Weighted log-densities of each observation under each component.
            log_dens = mvn_weighted_logged(self.data, self.mu, self.Sigma,
                                           self.weights)
            resp = np.exp(log_dens)
            row_totals = resp.sum(1)
            # Log-likelihood is the sum of log row-marginals.
            self.ll = np.log(row_totals).sum()
            # Normalize each row so responsibilities sum to one.
            resp = resp / row_totals[:, np.newaxis]
            self.ct = resp.sum(0)
            self.xbar = np.dot(resp.T, self.data)
            self.densities = resp
Example #2
0
    def expected_labels(self):
        """E-step of BEM: fill in ``ll``, ``ct``, ``xbar`` and ``densities``.

        On the GPU path all four quantities come back from the workers in
        one call; on the CPU path they are computed with NumPy from the
        weighted multivariate-normal log-densities.
        """
        if self.gpu:
            out = get_expected_labels_GPU(
                self.gpu_workers, self.weights, self.mu, self.Sigma)
            self.ll, self.ct, self.xbar, self.densities = out
            return
        # CPU fallback: exponentiate weighted log-densities ...
        w_logdens = mvn_weighted_logged(
            self.data, self.mu, self.Sigma, self.weights)
        dens = np.exp(w_logdens)
        # ... normalize per observation and accumulate sufficient statistics.
        totals = dens.sum(1)
        self.ll = np.sum(np.log(totals))
        dens = np.transpose(np.transpose(dens) / totals)
        self.ct = dens.sum(0)
        self.xbar = np.dot(dens.T, self.data)
        self.densities = dens
Example #3
0
    # NOTE(review): Python 2 code (print statements, `xrange`) — interior of a
    # function whose signature is outside this view; N, J, ncomps, data, gpus
    # are presumably its parameters/locals. TODO confirm against the full file.
    # Shuffle the row order of `data`; `all_data` holds the shuffled copy
    # (not used again in this fragment).
    ind = np.arange(N)
    np.random.shuffle(ind)
    all_data = data[ind].copy()

    # Initial mixture parameters: uniform (unnormalized) weights, zero means,
    # identity covariances — one per component.
    w = np.ones(ncomps)
    mu = np.zeros((ncomps, J))
    Sigma = np.zeros((ncomps, J, J))
    for i in range(ncomps):
        Sigma[i] = np.identity(J)

    # Spin up one worker per GPU holding a copy of the data.
    workers = multigpu.init_GPUWorkers(data, gpus)

    # Benchmark loop: 1000 rounds of the E-step helpers, with progress
    # printed every 100 iterations.
    starttime = time.time()
    for i in xrange(1000):
        if i % 100 == 0:
            print i
            #import pdb; pdb.set_trace()
        ll, ct, xbar, dens = multigpu.get_expected_labels_GPU(
            workers, w, mu, Sigma)
        labels = multigpu.get_labelsGPU(workers, w, mu, Sigma, True)

    ## make sure host GPU is ok ...
    # Sanity check that the host process can still run its own PyCUDA kernels
    # after the workers have been exercised.
    from pycuda.gpuarray import to_gpu
    from pycuda.gpuarray import sum as gsum
    test = to_gpu(np.ones(100000, dtype=np.int32))
    print gsum(test)

    # Tear down the worker processes before reporting elapsed time.
    multigpu.kill_GPUWorkers(workers)

    print "DONE! it took " + str(time.time() - starttime)
Example #4
0
    # NOTE(review): Python 2 code (print statements, `xrange`) — interior of a
    # function whose signature is outside this view; N, J, ncomps, data, gpus
    # are presumably its parameters/locals. TODO confirm against the full file.
    # Shuffle the row order of `data`; `all_data` is the shuffled copy
    # (unused later in this fragment).
    ind = np.arange(N); np.random.shuffle(ind);
    all_data = data[ind].copy()

    # Initial mixture parameters: uniform (unnormalized) weights, zero means,
    # identity covariances — one per component.
    w = np.ones(ncomps)
    mu = np.zeros((ncomps, J))
    Sigma = np.zeros((ncomps, J, J))
    for i in range(ncomps):
        Sigma[i] = np.identity(J)

    # One worker per GPU, each initialized with the data.
    workers = multigpu.init_GPUWorkers(data, gpus)

    # Timed loop: 1000 rounds of the GPU E-step helpers, progress every 100.
    starttime = time.time()
    for i in xrange(1000):
        if i % 100 == 0:
            print i
            #import pdb; pdb.set_trace()
        ll, ct, xbar, dens = multigpu.get_expected_labels_GPU(workers, w, mu, Sigma)
        labels = multigpu.get_labelsGPU(workers, w, mu, Sigma, True)

    ## make sure host GPU is ok ... 
    # Sanity check: the host process can still launch its own PyCUDA work
    # after driving the workers.
    from pycuda.gpuarray import to_gpu 
    from pycuda.gpuarray import sum as gsum
    test = to_gpu(np.ones(100000, dtype=np.int32))
    print gsum(test)

    # Shut the worker processes down before reporting elapsed time.
    multigpu.kill_GPUWorkers(workers)

    print "DONE! it took " + str(time.time() - starttime)