Пример #1
0
def test_complex128(highp):
    """Build a tiny two-qubit circuit under the tensorflow backend with
    complex128 precision and check that <Z> on qubit 1 vanishes.

    (``highp`` is presumably a high-precision setup fixture — its body is
    not visible here.)
    """
    tc.set_backend("tensorflow")
    tc.set_dtype("complex128")
    circuit = tc.Circuit(2)
    circuit.H(1)
    theta = tc.gates.num_to_tensor(1j)
    circuit.rx(0, theta=theta)
    circuit.wavefunction()
    z_on_q1 = circuit.expectation((tc.gates.z(), [1]))
    assert np.allclose(z_on_q1, 0)
Пример #2
0
def test_ad():
    """Run the same backend-agnostic AD routine under several ML engines.

    This amazingly shows how to code once and run in very different AD-ML
    engines: the identical ``universal_ad`` code path is exercised on each
    backend in turn.
    """
    for backend_name in ("tensorflow", "jax"):
        tc.set_backend(backend_name)
        universal_ad()
    # leave the default numpy backend active afterwards
    tc.set_backend("numpy")
Пример #3
0
from functools import partial
from collections import namedtuple
from pickle import dump
from matplotlib import pyplot as plt
import numpy as np
import tensorflow as tf
import tensorcircuit as tc
from tensorcircuit.applications.dqas import *
from tensorcircuit.applications.vags import *
from tensorcircuit.applications.layers import *
from tensorcircuit.applications.graphdata import regular_graph_generator

# qaoa_block_vag_energy = partial(qaoa_block_vag, f=(_identity, _neg))

tc.set_backend("tensorflow")


def main_layerwise_encoding():
    p = 5
    c = 7

    def noise():
        n = np.random.normal(loc=0.0, scale=0.002, size=[p, c])
        return tf.constant(n, dtype=tf.float32)

    def penalty_gradient(stp, nnp, lbd=0.15, lbd2=0.01):
        c = stp.shape[1]
        p = stp.shape[0]
        cost = tf.constant(
            [1.0, 1.0, 1.0, 1.0, 27 / 2 * 2.0, 27 / 2 * 2.0, 15 / 2.0],
Пример #4
0
def torchb():
    """Generator fixture: run the wrapped test on the pytorch backend with
    float64 dtype, then restore the numpy backend and complex64 dtype.

    NOTE(review): teardown after ``yield`` is skipped if the test body
    raises, since there is no try/finally — presumably acceptable here.
    """
    tc.set_backend("pytorch")
    tc.set_dtype("float64")
    yield  # test body executes here
    tc.set_backend("numpy")
    tc.set_dtype("complex64")
Пример #5
0
def jaxb():
    """Generator fixture: switch tensorcircuit to the jax backend for the
    duration of the test body, then restore the numpy backend."""
    tc.set_backend("jax")
    yield  # test body executes here
    tc.set_backend("numpy")
Пример #6
0
def tfb():
    """Generator fixture: switch tensorcircuit to the tensorflow backend
    for the test body, then restore the default numpy backend."""
    tc.set_backend("tensorflow")
    yield  # test body executes here
    tc.set_backend("numpy")  # default backend