# NOTE(review): whitespace-mangled fragment of a QuTiP benchmark script — a
# Monte-Carlo (`mcsolve`) time evolution of a driven, damped cavity mode
# starting from the coherent state |alpha0>.  The opening of the file (the
# imports, `name`, `samples`, `evals`, `cutoffs`, the `setup(N)` definition,
# and the `def f(N, options):` header whose body begins at `alpha0 = ...`)
# lies outside this view, so the code is left byte-identical rather than
# reconstructed by guesswork.
# Visible behavior: f builds H = delta_c*n + eta*(a + a†) with collapse
# operator sqrt(kappa)*a, runs mcsolve with ntraj=evals, and returns the real
# part of <n>(t); the driver sums that trace into `checks[N]`, times f with
# run_benchmark (evals=1, dividing the result by `evals`), and saves/checks
# the series.  `delta_c`, `eta`, `kappa`, `setup` — presumably defined in the
# missing opening; TODO confirm against the full file.
alpha0 = 0.3 - 0.5j tspan = np.linspace(0, 10, 11) a = qt.destroy(N) at = qt.create(N) n = at * a H = delta_c * n + eta * (a + at) J = [np.sqrt(kappa) * a] psi0 = qt.coherent(N, alpha0) exp_n = qt.mcsolve(H, psi0, tspan, J, [n], ntraj=evals, options=options).expect[0] return np.real(exp_n) print("Benchmarking:", name) print("Cutoff: ", end="", flush=True) checks = {} results = [] for N in cutoffs: print(N, "", end="", flush=True) options = setup(N) checks[N] = sum(f(N, options)) t = benchmarkutils.run_benchmark(f, N, options, samples=samples, evals=1) results.append({"N": N, "t": t / evals}) print() benchmarkutils.check(name, checks, eps=0.05) benchmarkutils.save(name, results)
import qutip as qt
import benchmarkutils

# Benchmark: multiply a dense bra (row vector) by an operator.
name = "multiplication_bra_dense"
samples = 2
evals = 5
cutoffs = range(50, 501, 50)


def setup(N):
    """Build one random operator/bra pair for a benchmark run at cutoff N."""
    operator = qt.rand_dm(N, N) * 0.2j
    # Flatten the conjugated ket into a plain 1-D numpy array (the "bra").
    bra = qt.rand_ket(N).dag().full().ravel()
    return operator, bra


def f(op1, psi):
    """The timed kernel: bra-times-operator product."""
    return psi * op1


print("Benchmarking:", name)
print("Cutoff: ", end="", flush=True)
results = []
for cutoff in cutoffs:
    print(cutoff, "", end="", flush=True)
    operator, bra = setup(cutoff)
    elapsed = benchmarkutils.run_benchmark(f, operator, bra,
                                           samples=samples, evals=evals)
    results.append({"N": cutoff, "t": elapsed})
print()
benchmarkutils.save(name, results)
def setup(N, s):
    """Draw a fresh pair of random sparse matrices (full density vs. density s)."""
    full_op = sp.rand(N, N, 1., dtype=float) * 0.2j
    sparse_op = sp.rand(N, N, s, dtype=float) * 0.1j
    return full_op, sparse_op


def f(op1, op2):
    """The timed kernel: sparse-matrix addition."""
    return op1 + op2


for s in S:
    # One benchmark series per sparsity level; encode s in the saved name
    # (e.g. 0.1 -> "..._01").
    name = basename + "_" + str(s).replace(".", "")
    print("Benchmarking:", name)
    print("Cutoff: ", end="", flush=True)
    results = []
    for N in cutoffs:
        print(N, "", end="", flush=True)
        total = 0.
        # Average over Nrand random instances to smooth out RNG variation.
        for _ in range(Nrand):
            op1, op2 = setup(N, s)
            total += benchmarkutils.run_benchmark(f, op1, op2,
                                                  samples=samples, evals=evals)
        results.append({"N": N, "t": total / Nrand})
    print()
    benchmarkutils.save(name, results)
# NOTE(review): whitespace-mangled fragment of a QuTiP benchmark script — a
# master-equation (`mesolve`) time evolution.  The beginning of `setup(N)`
# (where `psi0`, `H`, `x`, and `p` are constructed) plus the file's imports
# and constants lie outside this view, so the code is left byte-identical.
# Visible behavior: setup finishes by choosing collapse operator x + i*p and
# tight solver tolerances (nsteps=1e6, atol=1e-8, rtol=1e-6); f evolves psi0
# over t in [0, 10] and returns <x>(t); the driver sums that trace into
# `checks[N]`, times f, and saves/checks the series with eps=1e-4.
J = [x + 1j * p] options = qt.Options() options.nsteps = 1000000 options.atol = 1e-8 options.rtol = 1e-6 return psi0, H, x, J, options def f(psi0, H, x, J, options): tlist = np.linspace(0, 10, 11) exp_x = qt.mesolve(H, psi0, tlist, J, [x], options=options).expect[0] return exp_x print("Benchmarking:", name) print("Cutoff: ", end="", flush=True) checks = {} results = [] for N in cutoffs: print(N, "", end="", flush=True) psi0, H, x, J, options = setup(N) checks[N] = sum(f(psi0, H, x, J, options)) t = benchmarkutils.run_benchmark(f, psi0, H, x, J, options, samples=samples, evals=evals) results.append({"N": N, "t": t}) print() benchmarkutils.check(name, checks, 1e-4) benchmarkutils.save(name, results)
# Benchmark: construction of the displacement operator D(alpha).
name = "displace"
samples = 3
evals = 10
cutoffs = range(10, 151, 10)


def setup(N):
    """Pick a displacement amplitude that grows slowly with the cutoff."""
    return np.log(N)


def f(N, alpha):
    """The timed kernel: build D(alpha) in an N-dimensional Fock space."""
    return qt.displace(N, alpha)


print("Benchmarking:", name)
print("Cutoff: ", end="", flush=True)
checks = {}
results = []
for cutoff in cutoffs:
    print(cutoff, "", end="", flush=True)
    alpha = setup(cutoff)
    # Cross-implementation sanity value for this cutoff.
    checks[cutoff] = qt.expect(qt.destroy(cutoff), f(cutoff, alpha))
    elapsed = benchmarkutils.run_benchmark(f, cutoff, alpha,
                                           samples=samples, evals=evals)
    results.append({"N": cutoff, "t": elapsed})
print()
benchmarkutils.check(name, checks, 0.05)
benchmarkutils.save(name, results)
# NOTE(review): whitespace-mangled fragment of a QuTiP benchmark script — the
# Wigner function of a coherent state.  The beginning of `setup(N)` (where
# `xvec` and `alpha` are defined) plus the file's imports and the `name`,
# `samples`, `evals`, `cutoffs` constants lie outside this view, so the code
# is left byte-identical.
# Visible behavior: setup builds the density operator |alpha><alpha| and the
# phase-space grid; f evaluates qt.wigner on that grid; the driver records
# W(0.6, 0.1) as the per-cutoff check value, times f, and saves/checks the
# series.
yvec = np.linspace(-50, 50, 100) state = qt.coherent(N, alpha) op = state * state.dag() return op, xvec, yvec def f(state, xvec, yvec): return qt.wigner(state, xvec, yvec) print("Benchmarking:", name) print("Cutoff: ", end="", flush=True) checks = {} results = [] for N in cutoffs: print(N, "", end="", flush=True) op, xvec, yvec = setup(N) alpha_check = 0.6 + 0.1j checks[N] = f(op, [alpha_check.real], [alpha_check.imag])[0, 0] t = benchmarkutils.run_benchmark(f, op, xvec, yvec, samples=samples, evals=evals) results.append({"N": N, "t": t}) print() benchmarkutils.check(name, checks) benchmarkutils.save(name, results)
# Benchmark: expectation value of an operator in a fixed density matrix.
name = "expect_operator"
samples = 5
evals = 100
cutoffs = range(100, 2501, 100)


def setup(N):
    """Build the position-like operator and a uniform-superposition state."""
    x_op = qt.destroy(N) + qt.create(N)
    # Normalized uniform superposition over all N Fock states.
    uniform = qt.Qobj(np.ones(N, complex) / (N ** (1 / 2)))
    rho = uniform * uniform.dag()
    return x_op, rho


def f(op, rho):
    """The timed kernel: Tr[op * rho] via qt.expect."""
    return qt.expect(op, rho)


print("Benchmarking:", name)
print("Cutoff: ", end="", flush=True)
checks = {}
results = []
for cutoff in cutoffs:
    print(cutoff, "", end="", flush=True)
    x_op, rho = setup(cutoff)
    # The expectation value itself doubles as the cross-check number.
    checks[cutoff] = f(x_op, rho)
    elapsed = benchmarkutils.run_benchmark(f, x_op, rho,
                                           samples=samples, evals=evals)
    results.append({"N": cutoff, "t": elapsed})
print()
benchmarkutils.check(name, checks)
benchmarkutils.save(name, results)