Example 1
import time

import numpy as np
from nose.tools import assert_greater  # assumed source of the assertion helper

# Assumed import layout: CPU and GPU GMM implementations from the ggmm package.
import ggmm.cpu as cgmm
import ggmm.gpu as ggmm


def check_time_do_mstep(N, K, D):
    # Generate reproducible random mixture parameters and data.
    random_state = np.random.RandomState(123)
    weights = random_state.rand(K)
    weights /= weights.sum()
    means = random_state.randn(K, D)
    covars = random_state.rand(K, D)
    X = random_state.randn(N, D)

    # CPU reference model.
    gmm_cpu = cgmm.GMM(K, D)
    gmm_cpu.set_weights(weights)
    gmm_cpu.set_means(means)
    gmm_cpu.set_covars(covars)

    # GPU model initialized with identical parameters.
    gmm_gpu = ggmm.GMM(K, D)
    gmm_gpu.set_weights(weights)
    gmm_gpu.set_means(means)
    gmm_gpu.set_covars(covars)

    # Upload the data once and compute posteriors to feed the M-step.
    X_gpu = ggmm.return_CUDAMatrix(X)
    temp_gpu_mem = ggmm.TempGPUMem()
    logprob_cpu, posterior_cpu = gmm_cpu.score_samples(X)
    logprob_gpu, posterior_gpu = gmm_gpu.score_samples(X_gpu, temp_gpu_mem)

    update_params = 'wmc'  # update weights, means, and covariances
    min_covar = 1e-3

    MAX_TRIALS = 10
    MAX_TIME = 1.0  # seconds

    # Time the GPU M-step: up to MAX_TRIALS iterations or MAX_TIME seconds.
    gpu_trials = 0
    t0 = time.time()
    for i in range(MAX_TRIALS):
        gmm_gpu._do_mstep(X_gpu, posterior_gpu, update_params, min_covar)
        gpu_time = time.time() - t0
        gpu_trials += 1
        if gpu_time > MAX_TIME:
            break
    avg_gpu_time = gpu_time / gpu_trials

    # Time the CPU M-step the same way.
    cpu_trials = 0
    t0 = time.time()
    for i in range(MAX_TRIALS):
        gmm_cpu._do_mstep(X, posterior_cpu, update_params, min_covar)
        cpu_time = time.time() - t0
        cpu_trials += 1
        if cpu_time > MAX_TIME:
            break
    avg_cpu_time = cpu_time / cpu_trials

    speedup = avg_cpu_time / avg_gpu_time

    # Report timings and require the GPU path to beat the CPU path.
    print('')
    print('------------------------------------')
    print('N=%u, K=%u, D=%u' % (N, K, D))
    print('avg_cpu_time (%u trials):' % cpu_trials, avg_cpu_time)
    print('avg_gpu_time (%u trials):' % gpu_trials, avg_gpu_time)
    print('speedup:', speedup)
    print('------------------------------------')
    print('')
    assert_greater(speedup, 1.0)
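
The source does not show how this check is invoked; a minimal, hypothetical driver is sketched below, assuming the imports added above. The problem sizes are illustrative only and not from the original code.

if __name__ == '__main__':
    # Illustrative sizes; larger N generally favors the GPU implementation.
    for N, K, D in [(10000, 32, 16), (100000, 64, 32)]:
        check_time_do_mstep(N, K, D)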
Example 2
def check_time_score_samples(N, K, D):
    # Requires the same imports as Example 1.
    # Generate reproducible random mixture parameters and data.
    random_state = np.random.RandomState(123)
    weights = random_state.rand(K)
    weights /= weights.sum()
    means = random_state.randn(K, D)
    covars = random_state.rand(K, D)
    X = random_state.randn(N, D)

    # CPU reference model.
    gmm_cpu = cgmm.GMM(K, D)
    gmm_cpu.set_weights(weights)
    gmm_cpu.set_means(means)
    gmm_cpu.set_covars(covars)

    # GPU model initialized with identical parameters.
    gmm_gpu = ggmm.GMM(K, D)
    gmm_gpu.set_weights(weights)
    gmm_gpu.set_means(means)
    gmm_gpu.set_covars(covars)

    MAX_TRIALS = 10
    MAX_TIME = 1.0  # seconds

    # Upload the data once, then time GPU scoring: up to MAX_TRIALS
    # iterations or MAX_TIME seconds.
    temp_gpu_mem = ggmm.TempGPUMem()
    X_gpu = ggmm.return_CUDAMatrix(X)
    gpu_trials = 0
    t0 = time.time()
    for i in range(MAX_TRIALS):
        logprob_gpu, posterior_gpu = gmm_gpu.score_samples(X_gpu, temp_gpu_mem)
        gpu_time = time.time() - t0
        gpu_trials += 1
        if gpu_time > MAX_TIME:
            break
    avg_gpu_time = gpu_time / gpu_trials

    # Time CPU scoring the same way.
    cpu_trials = 0
    t0 = time.time()
    for i in range(MAX_TRIALS):
        logprob_cpu, posterior_cpu = gmm_cpu.score_samples(X)
        cpu_time = time.time() - t0
        cpu_trials += 1
        if cpu_time > MAX_TIME:
            break
    avg_cpu_time = cpu_time / cpu_trials

    speedup = avg_cpu_time / avg_gpu_time

    # Report timings and require the GPU path to beat the CPU path
    # (Python 3 print calls, matching Example 1).
    print('')
    print('------------------------------------')
    print('N=%u, K=%u, D=%u' % (N, K, D))
    print('avg_cpu_time (%u trials):' % cpu_trials, avg_cpu_time)
    print('avg_gpu_time (%u trials):' % gpu_trials, avg_gpu_time)
    print('speedup:', speedup)
    print('------------------------------------')
    print('')
    assert_greater(speedup, 1.0)
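
As above, the calling code is not shown; in a nose-style test suite a parameter sweep over this check might be written as the hypothetical generator below. The sizes are illustrative and not from the original source.

def test_time_score_samples():
    # Hypothetical nose test generator: yields one timing check per configuration.
    for N, K, D in [(10000, 32, 16), (100000, 64, 32)]:
        yield check_time_score_samples, N, K, D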