Example #1
0
    J = [x + 1j * p]

    options = qt.Options()
    options.nsteps = 1000000
    options.atol = 1e-8
    options.rtol = 1e-6

    return psi0, H, x, J, options


def f(psi0, H, x, J, options):
    """Evolve `psi0` under `H` with collapse operators `J` and return <x>(t).

    The master equation is integrated on 11 equally spaced points in
    [0, 10]; the single tracked expectation value (`x`) is returned as
    a 1-D array over those times.
    """
    times = np.linspace(0.0, 10.0, 11)
    result = qt.mesolve(H, psi0, times, J, [x], options=options)
    return result.expect[0]


print("Benchmarking:", name)
print("Cutoff: ", end="", flush=True)
# Checksums per cutoff (for cross-implementation validation) and timings.
checks = {}
results = []
for cutoff in cutoffs:
    print(cutoff, "", end="", flush=True)
    # setup() builds the state, Hamiltonian, operators and solver options
    # for this Hilbert-space cutoff; f() is then timed on those arguments.
    args = setup(cutoff)
    checks[cutoff] = sum(f(*args))
    elapsed = benchmarkutils.run_benchmark(f, *args, samples=samples, evals=evals)
    results.append({"N": cutoff, "t": elapsed})
print()

benchmarkutils.check(name, checks, 1e-4)
benchmarkutils.save(name, results)
Example #2
0
    alpha0 = 0.3 - 0.5j
    tspan = np.linspace(0, 10, 11)

    a = qt.destroy(N)
    at = qt.create(N)
    n = at * a

    H = delta_c * n + eta * (a + at)
    J = [np.sqrt(kappa) * a]

    psi0 = qt.coherent(N, alpha0)
    exp_n = qt.mcsolve(H, psi0, tspan, J, [n], ntraj=evals,
                       options=options).expect[0]
    return np.real(exp_n)


print("Benchmarking:", name)
print("Cutoff: ", end="", flush=True)
# Checksums per cutoff (for cross-implementation validation) and timings.
checks = {}
results = []
for cutoff in cutoffs:
    print(cutoff, "", end="", flush=True)
    opts = setup(cutoff)
    checks[cutoff] = sum(f(cutoff, opts))
    # f() runs all `evals` trajectories internally (ntraj=evals), so the
    # benchmark itself does a single evaluation and the per-trajectory
    # time is recovered by dividing afterwards.
    elapsed = benchmarkutils.run_benchmark(f, cutoff, opts, samples=samples, evals=1)
    results.append({"N": cutoff, "t": elapsed / evals})
print()

benchmarkutils.check(name, checks, eps=0.05)
benchmarkutils.save(name, results)
                  np.sqrt(gamma) * sm),
    ]

    psi0 = qt.tensor(qt.fock(N, 0), (qt.basis(2, 0) + qt.basis(2, 1)).unit())
    exp_n = qt.mesolve(H,
                       psi0,
                       tspan,
                       c_ops, [qt.tensor(n, Ia)],
                       options=options).expect[0]
    return exp_n


print("Benchmarking:", name)
print("Cutoff: ", end="", flush=True)
# Checksums per cutoff (for cross-implementation validation) and timings.
checks = {}
results = []
for cutoff in cutoffs:
    print(cutoff, "", end="", flush=True)
    opts = setup(cutoff)
    checks[cutoff] = sum(f(cutoff, opts))
    elapsed = benchmarkutils.run_benchmark(
        f, cutoff, opts, samples=samples, evals=evals)
    # Recorded dimension is 2 * cutoff: the mode is tensored with a
    # two-level system inside f(), doubling the total Hilbert space.
    results.append({"N": 2 * cutoff, "t": elapsed})
print()

benchmarkutils.check(name, checks)
benchmarkutils.save(name, results)