Example 1
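A Gibbs-style label update from a mixture sampler: the GPU path delegates the draw to worker processes, while the CPU path computes weighted log densities and samples component labels from them.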
def _update_labels(self, mu, Sigma, weights, ident=False):
    if self.gpu:
        # Delegate the label draw to the GPU worker processes;
        # relabel=ident also requests hard (argmax) assignments.
        return get_labelsGPU(self.gpu_workers, weights, mu, Sigma,
                             relabel=ident)
    else:
        # Weighted log densities of each observation under each
        # mixture component, shape (n_obs, ncomps).
        densities = mvn_weighted_logged(self.data, mu, Sigma, weights)
        if ident:
            # Hard assignments (most likely component), used for
            # relabeling/identification.
            Z = np.asarray(densities.argmax(1), dtype='i')
        else:
            Z = None
        # Draw one component label per observation from the discrete
        # distribution implied by the densities.
        return sample_discrete(densities).squeeze(), Z
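On the CPU path, sample_discrete draws one component index per row of the (logged) density matrix. The library's own implementation isn't shown here; below is a minimal self-contained sketch of that step using the Gumbel-max trick, an assumed stand-in for the method rather than the library's actual code:

import numpy as np

def sample_discrete_logged(log_dens, rng=None):
    # Gumbel-max trick: adding independent Gumbel(0, 1) noise to
    # unnormalized log probabilities and taking the row argmax is an
    # exact draw from the corresponding categorical distribution.
    rng = np.random.default_rng() if rng is None else rng
    gumbel = -np.log(-np.log(rng.random(log_dens.shape)))
    return np.argmax(log_dens + gumbel, axis=1)

# 5 observations, 3 components with weights 0.7 / 0.2 / 0.1
log_dens = np.log(np.tile([0.7, 0.2, 0.1], (5, 1)))
print(sample_discrete_logged(log_dens))   # e.g. [0 0 1 0 0]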
Example 2
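A benchmark script fragment: it initializes GPU workers, runs 1000 iterations of expected-label and sampled-label computations, sanity-checks that the host GPU context still works, and shuts the workers down.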
# Shuffle the observations (the shuffled copy is kept in all_data).
ind = np.arange(N)
np.random.shuffle(ind)
all_data = data[ind].copy()

# Flat initialization: equal weights, zero means, and identity
# covariances for each of the ncomps components.
w = np.ones(ncomps)
mu = np.zeros((ncomps, J))
Sigma = np.zeros((ncomps, J, J))
for i in range(ncomps):
    Sigma[i] = np.identity(J)

workers = multigpu.init_GPUWorkers(data, gpus)

starttime = time.time()
for i in range(1000):
    if i % 100 == 0:
        print(i)
    ll, ct, xbar, dens = multigpu.get_expected_labels_GPU(
        workers, w, mu, Sigma)
    labels = multigpu.get_labelsGPU(workers, w, mu, Sigma, True)

# Sanity check: the host GPU context should still be usable after
# the workers have been running.
from pycuda.gpuarray import to_gpu
from pycuda.gpuarray import sum as gsum
test = to_gpu(np.ones(100000, dtype=np.int32))
print(gsum(test))

multigpu.kill_GPUWorkers(workers)

print("DONE! it took " + str(time.time() - starttime))
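The fragment assumes N, J, ncomps, data, gpus, np, time, and multigpu are defined by the enclosing script. A hypothetical setup, with illustrative sizes (not taken from the original), under which the loop above would run:

import time
import numpy as np
import multigpu   # the surrounding project's multigpu module (assumed importable)

N, J, ncomps = 100000, 4, 8    # observations, dimension, mixture components
gpus = [0]                     # CUDA device ids handed to the workers
data = np.random.randn(N, J)   # synthetic standard-normal observations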