# Sparsity weight and options for the ConvBPDN solver.
lmbda = 1e-2
opt = cbpdn.ConvBPDN.Options(
    {'Verbose': True, 'MaxMainIter': 250, 'HighMemSolve': True,
     'RelStopTol': 5e-3, 'AuxVarObj': False})

# If GPU available, run CUDA ConvBPDN solver, otherwise run standard
# Python version.
if cuda.device_count() > 0:
    print('%s GPU found: running CUDA solver' % cuda.device_name())
    solve_timer = util.Timer()
    with sys_pipes(), util.ContextTimer(solve_timer):
        X = cuda.cbpdn(D, sh, lmbda, opt)
    t = solve_timer.elapsed()
else:
    print('GPU not found: running Python solver')
    solver = cbpdn.ConvBPDN(D, sh, lmbda, opt)
    X = solver.solve().squeeze()
    t = solver.timer.elapsed('solve')
print('Solve time: %.2f s' % t)

# Reconstruct the image from the sparse representation: sum the per-filter
# convolutions to obtain the highpass reconstruction, then add back the
# lowpass component before computing the PSNR.
shr = np.sum(spl.fftconv(D, X), axis=2)
imgr = sl + shr
print("Reconstruction PSNR: %.2fdB\n" % spm.psnr(img, imgr))
# NOTE(review): this chunk begins mid-expression — the opening of the
# ConvBPDN.Options({...}) construction (and the other option entries)
# lies before the visible region.
        'LinSolveCheck': False, 'RelStopTol': 2e-3, 'AuxVarObj': False,
        'rho': 1.5e0, 'AutoRho': { 'Enabled': False } })

# Initialise and run AddMaskSim/ConvBPDN object
# (CPU reference solve; X1 is kept for comparison against the GPU result)
b = cbpdn.AddMaskSim(cbpdn.ConvBPDN, D, shw, msk, lmbda, opt=opt)
X1 = b.solve()
print("AddMaskSim/ConvBPDN solve time: %.2fs" % b.timer.elapsed('solve'))

# Time CUDA AddMaskSim/ConvBPDN solve
t = util.Timer()
with util.ContextTimer(t):
    X2 = cucbpdn.cbpdnmsk(D, shw, msk, lmbda, opt)

# Solve time comparison
print("GPU AddMaskSim/ConvBPDN solve time: %.2fs" % t.elapsed())
print("GPU time improvement factor: %.1f" %
      (b.timer.elapsed('solve') / t.elapsed()))

# Compare CPU and GPU solutions
# (min/max/l1 summaries plus MSE/SNR between the two coefficient arrays)
print("CPU solution: min: %.4e max: %.4e l1: %.4e" %
      (X1.min(), X1.max(), np.sum(np.abs(X1))))
print("GPU solution: min: %.4e max: %.4e l1: %.4e" %
      (X2.min(), X2.max(), np.sum(np.abs(X2))))
print("CPU/GPU MSE: %.2e SNR: %.2f dB" % (sm.mse(X1, X2), sm.snr(X1, X2)))
def test_21(self):
    """Elapsed time is non-negative both inside and after a ContextTimer.

    Fix: the original sampled ``t.elapsed()`` inside the context block
    into ``t0`` but never asserted it (dead store), so the in-context
    reading was not actually checked.
    """
    t = util.Timer()
    with util.ContextTimer(t):
        # Sample while the timer is running.
        t0 = t.elapsed()
    # Both the in-context sample and the final reading must be >= 0.
    assert t0 >= 0.0
    assert t.elapsed() >= 0.0
# Set up ConvBPDN options
lmbda = 1e-2
opt = cbpdn.ConvBPDN.Options(
    {'Verbose': False, 'MaxMainIter': 0, 'HighMemSolve': True,
     'LinSolveCheck': True, 'RelStopTol': 1e-5, 'AuxVarObj': False,
     'AutoRho': {'Enabled': False}})

# Compute initialisation time: solve with 0 iterations
init_timer = util.Timer()
with util.ContextTimer(init_timer):
    X = cucbpdn.cbpdn(D, sh, lmbda, opt)

# Solve with num_iter iterations; the per-iteration cost is the total
# time divided by the iteration count.
num_iter = 200
opt['MaxMainIter'] = num_iter
solve_timer = util.Timer()
with util.ContextTimer(solve_timer):
    X = cucbpdn.cbpdn(D, sh, lmbda, opt)

# Print run time information
print("GPU ConvBPDN init time: %.3fs" % init_timer.elapsed())
print("GPU ConvBPDN solve time per iteration: %.3fs" %
      (solve_timer.elapsed() / num_iter))