Example #1
def test_chain_sharing(size, backend):
    xs = [np.random.rand(2, 2) for _ in range(size)]
    alphabet = ''.join(get_symbol(i) for i in range(size + 1))
    names = [alphabet[i:i+2] for i in range(size)]
    inputs = ','.join(names)

    num_exprs_nosharing = 0
    for i in range(size + 1):
        with shared_intermediates() as cache:
            target = alphabet[i]
            eq = '{}->{}'.format(inputs, target)
            expr = contract_expression(eq, *(x.shape for x in xs))
            expr(*xs, backend=backend)
            num_exprs_nosharing += _compute_cost(cache)

    with shared_intermediates() as cache:
        print(inputs)
        for i in range(size + 1):
            target = alphabet[i]
            eq = '{}->{}'.format(inputs, target)
            path_info = contract_path(eq, *xs)
            print(path_info[1])
            expr = contract_expression(eq, *(x.shape for x in xs))
            expr(*xs, backend=backend)
        num_exprs_sharing = _compute_cost(cache)

    print('-' * 40)
    print('Without sharing: {} expressions'.format(num_exprs_nosharing))
    print('With sharing: {} expressions'.format(num_exprs_sharing))
    assert num_exprs_nosharing > num_exprs_sharing
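The pattern this test exercises, reduced to a minimal sketch (assuming opt_einsum is importable as oe): every contraction evaluated inside one shared_intermediates() block can reuse the intermediate tensors cached by earlier contractions whose steps coincide.

import numpy as np
import opt_einsum as oe

x, y, z = (np.random.rand(2, 2) for _ in range(3))
with oe.shared_intermediates():
    left = oe.contract('ab,bc,cd->a', x, y, z)   # populates the cache
    right = oe.contract('ab,bc,cd->d', x, y, z)  # reuses any coinciding steps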
Example #2
def make_hmm_example(length,
                     latent_dim=32,
                     observed_dim=4,
                     batch_dim=10,
                     query=None):
    symbols = symbol_stream()
    b = next(symbols)
    shapes = []
    inputs = []
    xs = []
    ys = []
    for t in range(length):
        xs.append(next(symbols))
        ys.append(next(symbols))

        # Add observation matrix.
        shapes.append([batch_dim, latent_dim, observed_dim])
        inputs.append(b + xs[-1] + ys[-1])

        if t >= 1:
            # Add transition matrix.
            shapes.append([batch_dim, latent_dim, latent_dim])
            inputs.append(b + xs[-2] + xs[-1])

    inputs = ','.join(inputs)
    output = '' if query is None else b + xs[query]
    eq = inputs + '->' + output
    name = 'hmm_{}_{}_{}_{}_{}'.format(length, latent_dim, observed_dim,
                                       batch_dim,
                                       'total' if query is None else query)
    contract_expression(eq, *shapes, optimize='eager')  # smoke test
    save(name, eq, shapes)
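For orientation, a hedged trace of the equation this builds, assuming symbol_stream yields 'a', 'b', 'c', ... in order (so the batch symbol b is 'a'). With length=2 and query=None:

# t=0: emission factor   'abc'  shape [batch_dim, latent_dim, observed_dim]
# t=1: emission factor   'ade'  shape [batch_dim, latent_dim, observed_dim]
#      transition factor 'abd'  shape [batch_dim, latent_dim, latent_dim]
eq = 'abc,ade,abd->'  # empty output: every index, batch included, is summed out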
Example #3
def test_chain_sharing(size, backend):
    xs = [np.random.rand(2, 2) for _ in range(size)]
    alphabet = ''.join(get_symbol(i) for i in range(size + 1))
    names = [alphabet[i:i + 2] for i in range(size)]
    inputs = ','.join(names)

    num_exprs_nosharing = 0
    for i in range(size + 1):
        with shared_intermediates() as cache:
            target = alphabet[i]
            eq = '{}->{}'.format(inputs, target)
            expr = contract_expression(eq, *(x.shape for x in xs))
            expr(*xs, backend=backend)
            num_exprs_nosharing += _compute_cost(cache)

    with shared_intermediates() as cache:
        print(inputs)
        for i in range(size + 1):
            target = alphabet[i]
            eq = '{}->{}'.format(inputs, target)
            path_info = contract_path(eq, *xs)
            print(path_info[1])
            expr = contract_expression(eq, *(x.shape for x in xs))
            expr(*xs, backend=backend)
        num_exprs_sharing = _compute_cost(cache)

    print('-' * 40)
    print('Without sharing: {} expressions'.format(num_exprs_nosharing))
    print('With sharing: {} expressions'.format(num_exprs_sharing))
    assert num_exprs_nosharing > num_exprs_sharing
Example #4
def test_sharing_with_constants(backend):
    inputs = 'ij,jk,kl'
    outputs = 'ijkl'
    equations = ['{}->{}'.format(inputs, output) for output in outputs]
    shapes = (2, 3), (3, 4), (4, 5)
    constants = {0, 2}
    ops = [
        np.random.rand(*shp) if i in constants else shp
        for i, shp in enumerate(shapes)
    ]
    var = np.random.rand(*shapes[1])

    expected = [
        contract_expression(eq, *shapes)(ops[0], var, ops[2])
        for eq in equations
    ]

    with shared_intermediates():
        actual = [
            contract_expression(eq, *ops, constants=constants)(var)
            for eq in equations
        ]

    for dim, expected_dim, actual_dim in zip(outputs, expected, actual):
        assert np.allclose(expected_dim, actual_dim), 'error at {}'.format(dim)
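The constants mechanism used above, shown in isolation — a minimal sketch assuming opt_einsum is importable as oe. Operands listed in constants are passed as arrays when the expression is built and baked into it, so only the remaining operand is supplied per call:

import numpy as np
import opt_einsum as oe

a = np.random.rand(2, 3)          # constant operand 0
c = np.random.rand(4, 5)          # constant operand 2
expr = oe.contract_expression('ij,jk,kl->il', a, (3, 4), c, constants=[0, 2])
out = expr(np.random.rand(3, 4))  # only the non-constant operand; shape (2, 5)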
Example #5
def method2(views):
    with shared_intermediates():
        y = contract_expression(eqs[2], *shapes)(*views, backend=backend)
        z = contract_expression(eqs[3], *shapes)(*views, backend=backend)
        refs['y'] = y
        refs['z'] = z
        result = contract_expression('c,d->', y.shape, z.shape)(y, z, backend=backend)
        result = result + method1(views)  # nest method1 in method2
        del y, z
        assert 'y' in refs
        assert 'z' in refs
    assert 'y' not in refs
    assert 'z' not in refs
Example #6
def method1(views):
    with shared_intermediates():
        w = contract_expression(eqs[0], *shapes)(*views, backend=backend)
        x = contract_expression(eqs[2], *shapes)(*views, backend=backend)
        result = contract_expression('a,b->', w.shape, x.shape)(w, x, backend=backend)
        refs['w'] = w
        refs['x'] = x
        del w, x
        assert 'w' in refs
        assert 'x' in refs
    assert 'w' not in refs, 'cache leakage'
    assert 'x' not in refs, 'cache leakage'
    return result
Example #7
def method1(views):
    with shared_intermediates():
        w = contract_expression(eqs[0], *shapes)(*views, backend=backend)
        x = contract_expression(eqs[2], *shapes)(*views, backend=backend)
        result = contract_expression("a,b->", w.shape,
                                     x.shape)(w, x, backend=backend)
        refs["w"] = w
        refs["x"] = x
        del w, x
        assert "w" in refs
        assert "x" in refs
    assert "w" not in refs, "cache leakage"
    assert "x" not in refs, "cache leakage"
    return result
Example #8
def method2(views):
    with shared_intermediates():
        y = contract_expression(eqs[2], *shapes)(*views, backend=backend)
        z = contract_expression(eqs[3], *shapes)(*views, backend=backend)
        refs["y"] = y
        refs["z"] = z
        result = contract_expression("c,d->", y.shape,
                                     z.shape)(y, z, backend=backend)
        result = result + method1(views)  # nest method1 in method2
        del y, z
        assert "y" in refs
        assert "z" in refs
    assert "y" not in refs
    assert "z" not in refs
Example #9
def method1(views):
    with shared_intermediates():
        w = contract_expression(eqs[0], *shapes)(*views, backend=backend)
        x = contract_expression(eqs[2], *shapes)(*views, backend=backend)
        result = contract_expression('a,b->', w.shape,
                                     x.shape)(w, x, backend=backend)
        refs['w'] = w
        refs['x'] = x
        del w, x
        assert 'w' in refs
        assert 'x' in refs
    assert 'w' not in refs, 'cache leakage'
    assert 'x' not in refs, 'cache leakage'
    return result
Example #10
def method2(views):
    with shared_intermediates():
        y = contract_expression(eqs[2], *shapes)(*views, backend=backend)
        z = contract_expression(eqs[3], *shapes)(*views, backend=backend)
        refs['y'] = y
        refs['z'] = z
        result = contract_expression('c,d->', y.shape,
                                     z.shape)(y, z, backend=backend)
        result = result + method1(views)  # nest method1 in method2
        del y, z
        assert 'y' in refs
        assert 'z' in refs
    assert 'y' not in refs
    assert 'z' not in refs
Example #11
def test_tensorflow_with_constants(constants):
    eq = 'ij,jk,kl->li'
    shapes = (2, 3), (3, 4), (4, 5)
    non_const, = {0, 1, 2} - constants
    ops = [
        np.random.rand(*shp) if i in constants else shp
        for i, shp in enumerate(shapes)
    ]
    var = np.random.rand(*shapes[non_const])
    res_exp = contract(eq,
                       *(ops[i] if i in constants else var for i in range(3)))

    expr = contract_expression(eq, *ops, constants=constants)

    # check tensorflow
    with tf.Session(config=_TF_CONFIG).as_default():
        res_got = expr(var, backend='tensorflow')
    assert all(array is None or infer_backend(array) == 'tensorflow'
               for array in expr._evaluated_constants['tensorflow'])
    assert np.allclose(res_exp, res_got)

    # check can call with numpy still
    res_got2 = expr(var, backend='numpy')
    assert np.allclose(res_exp, res_got2)

    # check tensorflow call returns tensorflow still
    res_got3 = expr(backends.to_tensorflow(var))
    assert isinstance(res_got3, tf.Tensor)
Example #12
def test_torch_with_constants(constants):
    eq = 'ij,jk,kl->li'
    shapes = (2, 3), (3, 4), (4, 5)
    non_const, = {0, 1, 2} - constants
    ops = [
        np.random.rand(*shp) if i in constants else shp
        for i, shp in enumerate(shapes)
    ]
    var = np.random.rand(*shapes[non_const])
    res_exp = contract(eq,
                       *(ops[i] if i in constants else var for i in range(3)))

    expr = contract_expression(eq, *ops, constants=constants)

    # check torch
    res_got = expr(var, backend='torch')
    assert all(array is None or infer_backend(array) == 'torch'
               for array in expr._evaluated_constants['torch'])
    assert np.allclose(res_exp, res_got)

    # check can call with numpy still
    res_got2 = expr(var, backend='numpy')
    assert np.allclose(res_exp, res_got2)

    # check torch call returns torch still
    res_got3 = expr(backends.to_torch(var))
    assert isinstance(res_got3, torch.Tensor)
    res_got3 = (res_got3.numpy() if res_got3.device.type == 'cpu'
                else res_got3.cpu().numpy())
    assert np.allclose(res_exp, res_got3)
Example #13
def test_torch_with_constants():

    eq = 'ij,jk,kl->li'
    shapes = (2, 3), (3, 4), (4, 5)
    constants = {0, 2}
    ops = [
        np.random.rand(*shp) if i in constants else shp
        for i, shp in enumerate(shapes)
    ]
    var = np.random.rand(*shapes[1])

    res_exp = contract(eq, ops[0], var, ops[2])

    expr = contract_expression(eq, *ops, constants=constants)

    # check torch
    res_got = expr(var, backend='torch')
    assert 'torch' in expr._evaluated_constants
    assert np.allclose(res_exp, res_got)

    # check can call with numpy still
    res_got2 = expr(var, backend='numpy')
    assert np.allclose(res_exp, res_got2)

    # check torch call returns torch still
    res_got3 = expr(backends.to_torch(var), backend='torch')
    assert isinstance(res_got3, torch.Tensor)
    res_got3 = (res_got3.numpy() if res_got3.device.type == 'cpu'
                else res_got3.cpu().numpy())
    assert np.allclose(res_exp, res_got3)
Example #14
def test_cupy_with_constants(constants):  # pragma: no cover
    eq = 'ij,jk,kl->li'
    shapes = (2, 3), (3, 4), (4, 5)
    non_const, = {0, 1, 2} - constants
    ops = [
        np.random.rand(*shp) if i in constants else shp
        for i, shp in enumerate(shapes)
    ]
    var = np.random.rand(*shapes[non_const])
    res_exp = contract(eq,
                       *(ops[i] if i in constants else var for i in range(3)))

    expr = contract_expression(eq, *ops, constants=constants)

    # check cupy
    res_got = expr(var, backend='cupy')
    # check cupy versions of constants exist
    assert all(array is None or infer_backend(array) == 'cupy'
               for array in expr._evaluated_constants['cupy'])
    assert np.allclose(res_exp, res_got)

    # check can call with numpy still
    res_got2 = expr(var, backend='numpy')
    assert np.allclose(res_exp, res_got2)

    # check cupy call returns cupy still
    res_got3 = expr(cupy.asarray(var))
    assert isinstance(res_got3, cupy.ndarray)
    assert np.allclose(res_exp, res_got3.get())
Example #15
def test_sparse(string):
    views = helpers.build_views(string)

    # sparsify views so they don't become dense during contraction
    for view in views:
        np.random.seed(42)
        mask = np.random.choice([False, True], view.shape, True, [0.05, 0.95])
        view[mask] = 0

    ein = contract(string, *views, optimize=False, use_blas=False)
    shps = [v.shape for v in views]
    expr = contract_expression(string, *shps, optimize=True)

    # test non-conversion mode
    sparse_views = [sparse.COO.from_numpy(x) for x in views]
    sparse_opt = expr(*sparse_views, backend='sparse')

    # check type is maintained when not using numpy arrays
    assert isinstance(sparse_opt, sparse.COO)

    assert np.allclose(ein, sparse_opt.todense())

    # try raw contract
    sparse_opt = contract(string, *sparse_views, backend='sparse')
    assert isinstance(sparse_opt, sparse.COO)
    assert np.allclose(ein, sparse_opt.todense())
Example #16
def test_tensorflow_with_constants():
    eq = 'ij,jk,kl->li'
    shapes = (2, 3), (3, 4), (4, 5)
    constants = {0, 2}
    ops = [
        np.random.rand(*shp) if i in constants else shp
        for i, shp in enumerate(shapes)
    ]
    var = np.random.rand(*shapes[1])

    res_exp = contract(eq, ops[0], var, ops[2])

    expr = contract_expression(eq, *ops, constants=constants)

    # check tensorflow
    sess = tf.Session(config=_TF_CONFIG)
    with sess.as_default():
        res_got = expr(var, backend='tensorflow')
    sess.close()
    assert 'tensorflow' in expr._evaluated_constants
    assert np.allclose(res_exp, res_got)

    # check can call with numpy still
    res_got2 = expr(var, backend='numpy')
    assert np.allclose(res_exp, res_got2)

    # check tensorflow call returns tensorflow still
    res_got3 = expr(backends.to_tensorflow(var), backend='tensorflow')
    assert isinstance(res_got3, tf.Tensor)
Example #17
def create_diagonal_mask_operation(self):
    weights_index_names = self.WEIGHTS_INDEX_NAMES[:self.order]
    operands = ','.join(map(lambda x: 'b' + x, weights_index_names))
    expression = f"{operands}->{weights_index_names}"
    shapes = [(self.DEFAULT_JET_COUNT, self.DEFAULT_JET_COUNT)] * self.order
    return contract_expression(expression, *shapes)
Example #18
def test_no_sharing_separate_cache(backend):
    eq = 'ab,bc,cd->'
    views = helpers.build_views(eq)
    expr = contract_expression(eq, *(v.shape for v in views))

    print('-' * 40)
    print('Without sharing:')
    with shared_intermediates() as cache:
        expr(*views, backend=backend)
        expected = count_cached_ops(cache)
        expected.update(count_cached_ops(cache))  # we expect double

    print('-' * 40)
    print('With sharing:')
    with shared_intermediates() as cache1:
        expr(*views, backend=backend)
        actual = count_cached_ops(cache1)
    with shared_intermediates() as cache2:
        expr(*views, backend=backend)
        actual.update(count_cached_ops(cache2))

    print('-' * 40)
    print('Without sharing: {} expressions'.format(expected))
    print('With sharing: {} expressions'.format(actual))
    assert actual == expected
Example #19
def test_theano_with_constants(constants):
    eq = 'ij,jk,kl->li'
    shapes = (2, 3), (3, 4), (4, 5)
    non_const, = {0, 1, 2} - constants
    ops = [
        np.random.rand(*shp) if i in constants else shp
        for i, shp in enumerate(shapes)
    ]
    var = np.random.rand(*shapes[non_const])
    res_exp = contract(eq,
                       *(ops[i] if i in constants else var for i in range(3)))

    expr = contract_expression(eq, *ops, constants=constants)

    # check theano
    res_got = expr(var, backend='theano')
    assert all(array is None or infer_backend(array) == 'theano'
               for array in expr._evaluated_constants['theano'])
    assert np.allclose(res_exp, res_got)

    # check can call with numpy still
    res_got2 = expr(var, backend='numpy')
    assert np.allclose(res_exp, res_got2)

    # check theano call returns theano still
    res_got3 = expr(backends.to_theano(var))
    assert isinstance(res_got3, theano.tensor.TensorVariable)
Example #20
def test_contract_expression_interleaved_input():
    x, y, z = (np.random.randn(2, 2) for _ in 'xyz')
    expected = np.einsum(x, [0, 1], y, [1, 2], z, [2, 3], [3, 0])
    xshp, yshp, zshp = ((2, 2) for _ in 'xyz')
    expr = contract_expression(xshp, [0, 1], yshp, [1, 2], zshp, [2, 3], [3, 0])
    out = expr(x, y, z)
    assert np.allclose(out, expected)
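For comparison, the interleaved call above is equivalent to the subscript-string form, mapping 0→a, 1→b, 2→c, 3→d:

expr2 = contract_expression('ab,bc,cd->da', (2, 2), (2, 2), (2, 2))
assert np.allclose(expr2(x, y, z), expected)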
Example #21
def test_partial_sharing(backend):
    eq = 'ab,bc,de->'
    x, y, z1 = helpers.build_views(eq)
    z2 = 2.0 * z1 - 1.0
    expr = contract_expression(eq, x.shape, y.shape, z1.shape)

    print('-' * 40)
    print('Without sharing:')
    num_exprs_nosharing = Counter()
    with shared_intermediates() as cache:
        expr(x, y, z1, backend=backend)
        num_exprs_nosharing.update(count_cached_ops(cache))
    with shared_intermediates() as cache:
        expr(x, y, z2, backend=backend)
        num_exprs_nosharing.update(count_cached_ops(cache))

    print('-' * 40)
    print('With sharing:')
    with shared_intermediates() as cache:
        expr(x, y, z1, backend=backend)
        expr(x, y, z2, backend=backend)
        num_exprs_sharing = count_cached_ops(cache)

    print('-' * 40)
    print('Without sharing: {} expressions'.format(num_exprs_nosharing))
    print('With sharing: {} expressions'.format(num_exprs_sharing))
    assert num_exprs_nosharing['einsum'] > num_exprs_sharing['einsum']
Example #22
def test_no_sharing_separate_cache(backend):
    eq = 'ab,bc,cd->'
    views = helpers.build_views(eq)
    expr = contract_expression(eq, *(v.shape for v in views))

    print('-' * 40)
    print('Without sharing:')
    with shared_intermediates() as cache:
        expr(*views, backend=backend)
        expected = count_cached_ops(cache)
        expected.update(count_cached_ops(cache))  # we expect double

    print('-' * 40)
    print('With sharing:')
    with shared_intermediates() as cache1:
        expr(*views, backend=backend)
        actual = count_cached_ops(cache1)
    with shared_intermediates() as cache2:
        expr(*views, backend=backend)
        actual.update(count_cached_ops(cache2))

    print('-' * 40)
    print('Without sharing: {} expressions'.format(expected))
    print('With sharing: {} expressions'.format(actual))
    assert actual == expected
Example #23
def test_partial_sharing(backend):
    eq = 'ab,bc,de->'
    x, y, z1 = helpers.build_views(eq)
    z2 = 2.0 * z1 - 1.0
    expr = contract_expression(eq, x.shape, y.shape, z1.shape)

    print('-' * 40)
    print('Without sharing:')
    num_exprs_nosharing = Counter()
    with shared_intermediates() as cache:
        expr(x, y, z1, backend=backend)
        num_exprs_nosharing.update(count_cached_ops(cache))
    with shared_intermediates() as cache:
        expr(x, y, z2, backend=backend)
        num_exprs_nosharing.update(count_cached_ops(cache))

    print('-' * 40)
    print('With sharing:')
    with shared_intermediates() as cache:
        expr(x, y, z1, backend=backend)
        expr(x, y, z2, backend=backend)
        num_exprs_sharing = count_cached_ops(cache)

    print('-' * 40)
    print('Without sharing: {} expressions'.format(num_exprs_nosharing))
    print('With sharing: {} expressions'.format(num_exprs_sharing))
    assert num_exprs_nosharing['einsum'] > num_exprs_sharing['einsum']
Example #24
def partial_trace_v1(states: tf.Tensor, subsystem: Union[int, List[int]],
                     n_qubits: int):
    """
    Partial trace
    :param states: States in vector or density matrix from to trace
    :param subsystem: Subsystem to trace away
    :return: Remaining states
    """
    if isinstance(subsystem, int):
        subsystem = [subsystem]
    # Convert to density matrices
    if states.shape[-1] == 1:
        states = density_matrix(states)
    n_qubits = intlog2(states.shape[-1])
    # Construct Einstein sum-equation, inspired by:
    # https://github.com/rigetti/quantumflow/blob/bf965f0ca70cd69b387f9ca8407ab38da955e925/quantumflow/qubits.py#L201
    import string
    # EINSTEIN_SUBSCRIPTS = string.ascii_lowercase
    subscripts = list(string.ascii_lowercase)[0:n_qubits * 2]
    # Make a copy of the same index n_qubits later to trace out that entry
    for i in subsystem:
        subscripts[n_qubits + i] = subscripts[i]
    subscript_str = 'z' + ''.join(subscripts)
    batch_shape = states.shape[:-2]
    states_reshaped = tf.reshape(states, batch_shape + [2] * 2 * n_qubits)
    expr = oe.contract_expression(subscript_str, tf.shape(states_reshaped))
    result = expr(states_reshaped, backend='tensorflow')
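    # NOTE: the opt_einsum result above is immediately overwritten below.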
    result = tf.einsum(
        subscript_str,
        states_reshaped)  # FIXME: einsum in tf only supports up to rank 6!
    return tf.reshape(result, batch_shape + [2**n_qubits, 2**n_qubits])
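As a sanity check on the index convention, a minimal 2-qubit sketch in plain numpy (no TensorFlow): repeating a subscript between the row and column index groups traces that qubit out.

import numpy as np

rho = np.eye(4) / 4.0                  # maximally mixed 2-qubit density matrix
rho4 = np.reshape(rho, (2, 2, 2, 2))   # indices (a, b, c, d): rows ab, cols cd
reduced = np.einsum('abcb->ac', rho4)  # setting d = b traces out qubit 1
assert np.allclose(reduced, np.eye(2) / 2.0)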
Example #25
def test_cupy_with_constants():  # pragma: no cover

    eq = 'ij,jk,kl->li'
    shapes = (2, 3), (3, 4), (4, 5)
    constants = {0, 2}
    ops = [
        np.random.rand(*shp) if i in constants else shp
        for i, shp in enumerate(shapes)
    ]
    var = np.random.rand(*shapes[1])

    res_exp = contract(eq, ops[0], var, ops[2])

    expr = contract_expression(eq, *ops, constants=constants)

    # check cupy
    res_got = expr(var, backend='cupy')
    assert 'cupy' in expr._evaluated_constants
    assert np.allclose(res_exp, res_got)

    # check can call with numpy still
    res_got2 = expr(var, backend='numpy')
    assert np.allclose(res_exp, res_got2)

    # check cupy call returns cupy still
    res_got3 = expr(cupy.asarray(var), backend='cupy')
    assert isinstance(res_got3, cupy.ndarray)
    assert np.allclose(res_exp, res_got3.get())
Example #26
def test_contract_expression_interleaved_input():
    x, y, z = (np.random.randn(2, 2) for _ in 'xyz')
    expected = np.einsum(x, [0, 1], y, [1, 2], z, [2, 3], [3, 0])
    xshp, yshp, zshp = ((2, 2) for _ in 'xyz')
    expr = contract_expression(xshp, [0, 1], yshp, [1, 2], zshp, [2, 3], [3, 0])
    out = expr(x, y, z)
    assert np.allclose(out, expected)
Example #27
def test_theano_with_constants():
    eq = 'ij,jk,kl->li'
    shapes = (2, 3), (3, 4), (4, 5)
    constants = {0, 2}
    ops = [
        np.random.rand(*shp) if i in constants else shp
        for i, shp in enumerate(shapes)
    ]
    var = np.random.rand(*shapes[1])

    res_exp = contract(eq, ops[0], var, ops[2])

    expr = contract_expression(eq, *ops, constants=constants)

    # check theano
    res_got = expr(var, backend='theano')
    assert 'theano' in expr._evaluated_constants
    assert np.allclose(res_exp, res_got)

    # check can call with numpy still
    res_got2 = expr(var, backend='numpy')
    assert np.allclose(res_exp, res_got2)

    # check theano call returns theano still
    res_got3 = expr(backends.to_theano(var), backend='theano')
    assert isinstance(res_got3, theano.tensor.TensorVariable)
Example #28
def make_dbn_example(length,
                     global_dim=2,
                     latent_dim=32,
                     observed_dim=4,
                     batch_dim=10,
                     query=None):
    symbols = symbol_stream()
    b = next(symbols)
    w = next(symbols)
    shapes = []
    inputs = []
    xs = []
    ys = []
    zs = []
    for t in range(length):
        xs.append(next(symbols))
        ys.append(next(symbols))
        zs.append(next(symbols))

        # Add vertical dependencies.
        shapes.append([batch_dim, global_dim, latent_dim])
        inputs.append(b + w + xs[-1])

        shapes.append([batch_dim, latent_dim, latent_dim])
        inputs.append(b + xs[-1] + ys[-1])

        shapes.append([batch_dim, latent_dim, latent_dim])
        inputs.append(b + ys[-1] + zs[-1])

        if t >= 1:
            # Add horizontal dependencies.
            shapes.append([batch_dim, latent_dim, latent_dim])
            inputs.append(b + xs[-2] + xs[-1])

            shapes.append([batch_dim, latent_dim, latent_dim])
            inputs.append(b + ys[-2] + ys[-1])

    inputs = ','.join(inputs)
    output = '' if query is None else b + w + xs[query] + ys[query]
    eq = inputs + '->' + output
    name = 'dbn_{}_{}_{}_{}_{}'.format(length, latent_dim, observed_dim,
                                       batch_dim,
                                       'total' if query is None else query)
    contract_expression(eq, *shapes, optimize='eager')  # smoke test
    save(name, eq, shapes)
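Analogous to the HMM trace in Example #2 — a hedged trace for length=1 and query=None, again assuming symbol_stream yields 'a', 'b', 'c', ... in order (so b='a', w='b'):

# t=0: 'abc' (global->latent), 'acd' (latent->latent), 'ade' (latent->latent)
eq = 'abc,acd,ade->'  # horizontal transition factors only appear once t >= 1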
Example #29
def test_can_blas_on_healed_broadcast_dimensions():

    expr = contract_expression("ab,bc,bd->acd", (5, 4), (1, 5), (4, 20))
    # first contraction involves broadcasting
    assert expr.contraction_list[0][2] == "bc,ab->bca"
    assert expr.contraction_list[0][-1] is False
    # but then broadcasting is healed so GEMM is usable
    assert expr.contraction_list[1][2] == "bca,bd->acd"
    assert expr.contraction_list[1][-1] == "GEMM"
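The indexing above relies on the layout of expr.contraction_list: each entry is a per-step tuple whose third field is that step's einsum equation and whose last field is the kernel used (a BLAS name such as "GEMM", or False). A small inspection sketch:

expr = contract_expression("ab,bc,bd->acd", (5, 4), (1, 5), (4, 20))
for step in expr.contraction_list:
    print(step[2], step[-1])  # 'bc,ab->bca' False, then 'bca,bd->acd' 'GEMM'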
Example #30
def test_sharing_with_constants(backend):
    inputs = 'ij,jk,kl'
    outputs = 'ijkl'
    equations = ['{}->{}'.format(inputs, output) for output in outputs]
    shapes = (2, 3), (3, 4), (4, 5)
    constants = {0, 2}
    ops = [np.random.rand(*shp) if i in constants else shp for i, shp in enumerate(shapes)]
    var = np.random.rand(*shapes[1])

    expected = [contract_expression(eq, *shapes)(ops[0], var, ops[2])
                for eq in equations]

    with shared_intermediates():
        actual = [contract_expression(eq, *ops, constants=constants)(var)
                  for eq in equations]

    for dim, expected_dim, actual_dim in zip(outputs, expected, actual):
        assert np.allclose(expected_dim, actual_dim), 'error at {}'.format(dim)
Example #31
def test_can_blas_on_healed_broadcast_dimensions():

    expr = contract_expression("ab,bc,bd->acd", (5, 4), (1, 5), (4, 20))
    # first contraction involves broadcasting
    assert expr.contraction_list[0][2] == 'bc,ab->bca'
    assert expr.contraction_list[0][-1] is False
    # but then broadcasting is healed so GEMM is usable
    assert expr.contraction_list[1][2] == 'bca,bd->acd'
    assert expr.contraction_list[1][-1] == 'GEMM'
Example #32
def contract(self, *args: ContractArgs) -> Tensor:
    contract_expression_args = tensors_to_shapes(*args)
    if contract_expression_args not in self.paths:
        self.paths[contract_expression_args] = oe.contract_expression(
            *contract_expression_args, optimize="auto-hq"
        )
    return self.paths[contract_expression_args](
        *(x for x in args if isinstance(x, Tensor))
    )
Example #33
def test_sharing_value(eq, backend):
    views = helpers.build_views(eq)
    shapes = [v.shape for v in views]
    expr = contract_expression(eq, *shapes)

    expected = expr(*views, backend=backend)
    with shared_intermediates():
        actual = expr(*views, backend=backend)

    assert (actual == expected).all()
Example #34
def test_jax(string):  # pragma: no cover
    views = helpers.build_views(string)
    ein = contract(string, *views, optimize=False, use_blas=False)
    shps = [v.shape for v in views]

    expr = contract_expression(string, *shps, optimize=True)

    opt = expr(*views, backend='jax')
    assert np.allclose(ein, opt)
    assert isinstance(opt, np.ndarray)
Example #35
def test_sharing_value(eq, backend):
    views = helpers.build_views(eq)
    shapes = [v.shape for v in views]
    expr = contract_expression(eq, *shapes)

    expected = expr(*views, backend=backend)
    with shared_intermediates():
        actual = expr(*views, backend=backend)

    assert (actual == expected).all()
Example #36
def calculate_control_matrix_from_atomic(
        phases: ndarray,
        R_g: ndarray,
        Q_liouville: ndarray,
        show_progressbar: Optional[bool] = None) -> ndarray:
    r"""
    Calculate the control matrix from the control matrices of atomic segments.

    Parameters
    ----------
    phases: array_like, shape (n_dt, n_omega)
        The phase factors for :math:`l\in\{0, 1, \dots, n-1\}`.
    R_g: array_like, shape (n_dt, n_nops, d**2, n_omega)
        The pulse control matrices for :math:`l\in\{1, 2, \dots, n\}`.
    Q_liouville: array_like, shape (n_dt, d**2, d**2)
        The transfer matrices of the cumulative propagators for
        :math:`l\in\{0, 1, \dots, n-1\}`.
    show_progressbar: bool, optional
        Show a progress bar for the calculation.

    Returns
    -------
    R: ndarray, shape (n_nops, d**2, n_omega)
        The control matrix :math:`\mathcal{R}(\omega)`.

    Notes
    -----
    The control matrix is calculated by evaluating the sum

    .. math::

        \mathcal{R}(\omega) = \sum_{l=1}^n e^{i\omega t_{l-1}}
            \mathcal{R}^{(l)}(\omega)\mathcal{Q}^{(l-1)}.

    See Also
    --------
    calculate_control_matrix_from_scratch: Control matrix from scratch.
    liouville_representation: Liouville representation for a given basis.
    """
    n = len(R_g)
    # Allocate memory
    R = np.zeros(R_g.shape[1:], dtype=complex)

    # Set up a reusable contraction expression. In some cases it is faster to
    # also contract the time dimension in the same expression instead of
    # looping over it, but we don't distinguish here for readability.
    R_expr = oe.contract_expression('ijo,jk->iko', R_g.shape[1:],
                                    Q_liouville.shape[1:])

    for g in util.progressbar_range(n,
                                    show_progressbar=show_progressbar,
                                    desc='Calculating control matrix'):
        R += R_expr(phases[g] * R_g[g], Q_liouville[g])

    return R
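The build-once/call-many pattern in isolation — a minimal sketch with made-up toy dimensions: the contraction path is optimized a single time and the compiled expression is then applied per segment.

import numpy as np
import opt_einsum as oe

n_dt, n_nops, d2, n_omega = 5, 2, 4, 8            # assumed toy dimensions
R_g = np.random.rand(n_dt, n_nops, d2, n_omega).astype(complex)
Q = np.random.rand(n_dt, d2, d2)
expr = oe.contract_expression('ijo,jk->iko', R_g.shape[1:], Q.shape[1:])
R = sum(expr(R_g[g], Q[g]) for g in range(n_dt))  # same expression every step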
Example #37
    def make_contraction(self):
        input_index_names = np.array(list(self.INPUT_INDEX_NAMES))

        operations = map(lambda x: f"{x}bi", input_index_names)
        operations = ','.join(islice(operations, self.order))

        result = f"->b{''.join(input_index_names[:self.order])}"

        expression = operations + result
        shapes = [(self.batch_size, self.DEFAULT_JET_COUNT,
                   self.features)] * self.order
        return contract_expression(expression, *shapes, optimize='optimal')
Example #38
def partial_trace_last(states: tf.Tensor, n_qubits2trace: int, n_qubits: int):
    # Convert to density matrices
    if states.shape[-1] == 1:
        states = density_matrix(states)
    subscripts = 'xyaza'
    # Reshape to the form of subscripts
    n_static = n_qubits - n_qubits2trace
    states = tf.reshape(
        states,
        [-1, 2**n_static, 2**n_qubits2trace, 2**n_static, 2**n_qubits2trace])
    expr = oe.contract_expression(subscripts, tf.shape(states))
    return expr(states, backend='tensorflow')
Example #39
    def __init__(
        self,
        width=160,
        height=120,
        fps=100,
        freq_min=12,
        freq_max=15,
        num_base_functions=4,
    ):
        self.width = width
        self.height = height
        self.fps = np.float32(fps)

        self.frequencies = np.linspace(freq_min,
                                       freq_max + 1,
                                       num=num_base_functions)
        self.wavelets = [self._get_wavelet(f) for f in self.frequencies]
        self.max_wavelet_length = max([w.shape[0] for w in self.wavelets])
        self.wavelets = np.stack([
            np.pad(w, (self.max_wavelet_length - w.shape[0], 0))
            for w in self.wavelets
        ])

        self.buffer_size = int(2.5 * self.max_wavelet_length)
        self.buffer_time = (1 / fps) * self.buffer_size

        self.activity_long = np.zeros((height, width), dtype=np.float32)
        self.activity = np.zeros((height, width), dtype=np.float32)

        self.buffer = np.zeros((self.buffer_size, height, width),
                               dtype=np.float32)
        self.buffer_idx = 0

        self.frame_response = np.zeros(shape=(height, width), dtype=np.float32)

        # Decay background activity slower than 'current' activity.
        # The activity of 1 s ago will still contribute 5% to the 'current' activity.
        self.activity_decay = np.exp(np.log(0.05) / self.fps)
        self.activity_long_decay = np.exp(np.log(0.1) / self.fps)

        self.einsum_expression = opt_einsum.contract_expression(
            "ij,jkl->ikl", self.wavelets.shape,
            (self.wavelets.shape[1], height, width))

        self.warmup_period_over = False
        self.rolling_mean = np.zeros(shape=(height, width), dtype=np.float32)
        self.temp_full_buffer = np.zeros(shape=(self.wavelets.shape[1], height,
                                                width),
                                         dtype=np.float32)
        self.temp_one_frame_buffer = np.zeros(shape=(height, width),
                                              dtype=np.float32)
Example #40
def test_chain_2(size, backend):
    xs = [np.random.rand(2, 2) for _ in range(size)]
    shapes = [x.shape for x in xs]
    alphabet = ''.join(get_symbol(i) for i in range(size + 1))
    names = [alphabet[i:i+2] for i in range(size)]
    inputs = ','.join(names)

    with shared_intermediates():
        print(inputs)
        for i in range(size):
            target = alphabet[i:i+2]
            eq = '{}->{}'.format(inputs, target)
            path_info = contract_path(eq, *xs)
            print(path_info[1])
            expr = contract_expression(eq, *shapes)
            expr(*xs, backend=backend)
        print('-' * 40)
Example #41
def test_contract_expressions(string, optimize, use_blas, out_spec):
    views = helpers.build_views(string)
    shapes = [view.shape for view in views]
    expected = contract(string, *views, optimize=False, use_blas=False)

    expr = contract_expression(string, *shapes, optimize=optimize, use_blas=use_blas)

    if out_spec and ("->" in string) and (string[-2:] != "->"):
        out, = helpers.build_views(string.split('->')[1])
        expr(*views, out=out)
    else:
        out = expr(*views)

    assert np.allclose(out, expected)

    # check representations
    assert string in expr.__repr__()
    assert string in expr.__str__()
Example #42
def test_contract_expression_with_constants(string, constants):
    views = helpers.build_views(string)
    expected = contract(string, *views, optimize=False, use_blas=False)

    shapes = [view.shape for view in views]

    expr_args = []
    ctrc_args = []
    for i, (shape, view) in enumerate(zip(shapes, views)):
        if i in constants:
            expr_args.append(view)
        else:
            expr_args.append(shape)
            ctrc_args.append(view)

    expr = contract_expression(string, *expr_args, constants=constants)
    print(expr)
    out = expr(*ctrc_args)
    assert np.allclose(expected, out)
Example #43
def test_chain_2_growth(backend):
    sizes = list(range(1, 21))
    costs = []
    for size in sizes:
        xs = [np.random.rand(2, 2) for _ in range(size)]
        alphabet = ''.join(get_symbol(i) for i in range(size + 1))
        names = [alphabet[i:i+2] for i in range(size)]
        inputs = ','.join(names)

        with shared_intermediates() as cache:
            for i in range(size):
                target = alphabet[i:i+2]
                eq = '{}->{}'.format(inputs, target)
                expr = contract_expression(eq, *(x.shape for x in xs))
                expr(*xs, backend=backend)
            costs.append(_compute_cost(cache))

    print('sizes = {}'.format(repr(sizes)))
    print('costs = {}'.format(repr(costs)))
    for size, cost in zip(sizes, costs):
        print('{}\t{}'.format(size, cost))
Example #44
def test_contract_expression_checks():
    # check optimize needed
    with pytest.raises(ValueError):
        contract_expression("ab,bc->ac", (2, 3), (3, 4), optimize=False)

    # check sizes are still checked
    with pytest.raises(ValueError):
        contract_expression("ab,bc->ac", (2, 3), (3, 4), (42, 42))

    # check if out given
    out = np.empty((2, 4))
    with pytest.raises(ValueError):
        contract_expression("ab,bc->ac", (2, 3), (3, 4), out=out)

    # check still get errors when wrong ranks supplied to expression
    expr = contract_expression("ab,bc->ac", (2, 3), (3, 4))

    # too few arguments
    with pytest.raises(ValueError) as err:
        expr(np.random.rand(2, 3))
    assert "`ContractExpression` takes exactly 2" in str(err)

    # too many arguments
    with pytest.raises(ValueError) as err:
        expr(np.random.rand(2, 3), np.random.rand(2, 3), np.random.rand(2, 3))
    assert "`ContractExpression` takes exactly 2" in str(err)

    # wrong shapes
    with pytest.raises(ValueError) as err:
        expr(np.random.rand(2, 3, 4), np.random.rand(3, 4))
    assert "Internal error while evaluating `ContractExpression`" in str(err)
    with pytest.raises(ValueError) as err:
        expr(np.random.rand(2, 4), np.random.rand(3, 4, 5))
    assert "Internal error while evaluating `ContractExpression`" in str(err)
    with pytest.raises(ValueError) as err:
        expr(np.random.rand(2, 3), np.random.rand(3, 4), out=np.random.rand(2, 4, 6))
    assert "Internal error while evaluating `ContractExpression`" in str(err)

    # should only be able to specify out
    with pytest.raises(ValueError) as err:
        expr(np.random.rand(2, 3), np.random.rand(3, 4), order='F')
    assert "only valid keyword arguments to a `ContractExpression`" in str(err)
Example #45
def test_complete_sharing(backend):
    eq = 'ab,bc,cd->'
    views = helpers.build_views(eq)
    expr = contract_expression(eq, *(v.shape for v in views))

    print('-' * 40)
    print('Without sharing:')
    with shared_intermediates() as cache:
        expr(*views, backend=backend)
        expected = count_cached_ops(cache)

    print('-' * 40)
    print('With sharing:')
    with shared_intermediates() as cache:
        expr(*views, backend=backend)
        expr(*views, backend=backend)
        actual = count_cached_ops(cache)

    print('-' * 40)
    print('Without sharing: {} expressions'.format(expected))
    print('With sharing: {} expressions'.format(actual))
    assert actual == expected