# NOTE: These example snippets assume `torch` is available and that the
# helpers they exercise (near_eye_init, get_mat_slices, contract_matseq,
# slim_eval_fun, naive_contraction) are importable from the library / test
# module under test. Test arguments such as seq_len and bond_dim are
# presumably supplied by pytest parametrization, and `benchmark` by the
# pytest-benchmark fixture.
import torch

def test_get_mat_slices_shape(
    seq_len: int,
    bond_dim: int,
    input_dim: int,
    batch: int,
    vec_input: bool,
    is_complex: bool,
    uniform: bool,
):
    """
    Check that get_mat_slices returns output of the expected shape
    """
    if uniform:
        core_shape = (input_dim, bond_dim, bond_dim)
    else:
        core_shape = (seq_len, input_dim, bond_dim, bond_dim)
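    # near_eye_init is assumed to build a core tensor whose matrix slices are
    # close to the identity (exactly the identity when noise=0, which the
    # composite test below relies on), in a real or complex dtype per is_complex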
    core_tensor = near_eye_init(core_shape, is_complex)
    assert core_tensor.is_complex() == is_complex

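    # Inputs are either soft vectors over the input dimension, normalized to
    # sum to one (vec_input=True), or integer symbol indices (vec_input=False)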
    if vec_input:
        fake_data = torch.randn(batch, seq_len, input_dim).abs()
        fake_data /= fake_data.sum(dim=2, keepdim=True)
    else:
        fake_data = torch.randint(input_dim, (batch, seq_len))

    # Run get_mat_slices and verify that the output has the expected shape
    output = get_mat_slices(fake_data, core_tensor)
    assert output.shape == (batch, seq_len, bond_dim, bond_dim)
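
# For intuition only: a hedged, shape-compatible sketch of what get_mat_slices
# presumably computes. This is NOT the library implementation. For index
# inputs it looks up the matrix slice of each symbol; for vector inputs it
# mixes the slices with the input weights. Output: (batch, seq_len, D, D).
def _reference_mat_slices_sketch(data, core_tensor):
    uniform = core_tensor.ndim == 3
    if data.is_floating_point():
        # Soft inputs of shape (batch, seq_len, input_dim)
        data = data.to(core_tensor.dtype)
        if uniform:
            return torch.einsum("bsi,ijk->bsjk", data, core_tensor)
        return torch.einsum("bsi,sijk->bsjk", data, core_tensor)
    # Discrete inputs of shape (batch, seq_len) holding symbol indices
    if uniform:
        return core_tensor[data]
    positions = torch.arange(data.shape[1])
    return core_tensor[positions, data]
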
def get_mat_slices_runner(
    benchmark,
    vec_input: bool = False,
    uniform: bool = True,
    input_dim: int = 10,
    bond_dim: int = 10,
    seq_len: int = 100,
    batch: int = 100,
):
    # Create fake core tensor and input data
    if uniform:
        core_tensor = near_eye_init((input_dim, bond_dim, bond_dim))
    else:
        core_tensor = near_eye_init((seq_len, input_dim, bond_dim, bond_dim))
    if vec_input:
        fake_data = torch.randn(seq_len, batch, input_dim).abs()
    else:
        fake_data = torch.randint(input_dim, (seq_len, batch))

    # Benchmark get_mat_slices using the supplied benchmark fixture
    benchmark(get_mat_slices, fake_data, core_tensor)
def slim_eval_runner(
    benchmark,
    vec_input: bool = False,
    uniform: bool = True,
    input_dim: int = 10,
    bond_dim: int = 10,
    seq_len: int = 100,
    batch: int = 100,
):
    # Create fake input data, core tensor, and boundary vectors
    if uniform:
        core_tensor = near_eye_init((input_dim, bond_dim, bond_dim))
    else:
        core_tensor = near_eye_init((seq_len, input_dim, bond_dim, bond_dim))
    if vec_input:
        fake_data = torch.randn(seq_len, batch, input_dim).abs()
    else:
        fake_data = torch.randint(input_dim, (seq_len, batch))
    bound_vecs = torch.randn(2, bond_dim)

    # Benchmark slim_eval_fun using the supplied benchmark fixture
    benchmark(slim_eval_fun, fake_data, core_tensor, bound_vecs)
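
# A hedged usage sketch: under pytest with the pytest-benchmark plugin, the
# `benchmark` fixture can be passed straight through to the runners above.
# The argument values here are illustrative, not taken from the source.
def test_bench_get_mat_slices(benchmark):
    get_mat_slices_runner(benchmark, vec_input=True, uniform=False)


def test_bench_slim_eval(benchmark):
    slim_eval_runner(benchmark, bond_dim=20, seq_len=200)
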
def test_composite_init_mat_slice_contraction(
    seq_len: int,
    bond_dim: int,
    input_dim: int,
    batch: int,
    vec_input: bool,
    is_complex: bool,
    uniform: bool,
):
    """
    Verify that initializing an identity core, getting matrix slices, and then
    contracting the slices gives identity matrices
    """
    if uniform:
        core_shape = (input_dim, bond_dim, bond_dim)
    else:
        core_shape = (seq_len, input_dim, bond_dim, bond_dim)
    core_tensor = near_eye_init(core_shape, is_complex, noise=0)
    assert core_tensor.is_complex() == is_complex

    if vec_input:
        fake_data = torch.randn(batch, seq_len, input_dim).abs()
        fake_data /= fake_data.sum(dim=2, keepdim=True)
    else:
        fake_data = torch.randint(input_dim, (batch, seq_len))

    # Get matrix slices, then contract them all together
    mat_slices = get_mat_slices(fake_data, core_tensor)

    prod_mats = contract_matseq(mat_slices)

    # Verify that all contracted matrix slices are identities
    target_prods = torch.eye(bond_dim)
    assert torch.allclose(prod_mats.abs(), target_prods, atol=1e-4, rtol=1e-4)

    # Do the same thing for slim_eval_fun, but with boundary vectors
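    # With noise=0 every matrix slice is the identity, so contracting the full
    # sequence between two copies of ref_vec gives <v, I...I v> = ||v||^2,
    # which is the reference value computed below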
    ref_vec = torch.randn(bond_dim).to(core_tensor.dtype)
    ref_vals = ref_vec.norm()**2
    bound_vecs = torch.stack((ref_vec, ref_vec))
    prod_vals, log_scales = slim_eval_fun(fake_data, core_tensor, bound_vecs)
    prod_vals *= log_scales.exp()
    assert torch.allclose(prod_vals.abs(), ref_vals, atol=1e-4, rtol=1e-4)
def contract_matseq_runner(
    benchmark,
    boundaries: bool = True,
    parallel: bool = False,
    naive: bool = False,
    seq_len: int = 100,
    bond_dim: int = 10,
    batch: int = 100,
):
    # Create fake matrix slices and boundary vectors
    mat_slices = near_eye_init((seq_len, batch, bond_dim, bond_dim))
    if boundaries:
        left_vec, right_vec = torch.randn(2, bond_dim)
    else:
        left_vec, right_vec = None, None

    # Benchmark contract_matseq or naive_contraction using the supplied benchmark fixture
    if naive:
        benchmark(naive_contraction, mat_slices, left_vec, right_vec)
    else:
        benchmark(contract_matseq, mat_slices, left_vec, right_vec, parallel)
def test_contract_matseq_identity_batches(batch_shape, bond_dim, seq_len,
                                          use_lvec, use_rvec, parallel_eval):
    """
    Multiply sequences of identity matrices with a variable batch shape
    """
    # Generate identity matrices and boundary vectors
    shape = tuple(batch_shape) + (seq_len, bond_dim, bond_dim)
    eye_mats = near_eye_init(shape, noise=0)
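    # Same matrices passed as a Python list of per-position tensors, to check
    # that contract_matseq accepts either calling convention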
    eye_mats2 = [eye_mats[..., i, :, :] for i in range(seq_len)]
    left_vec, right_vec = torch.randn(2, bond_dim)
    lvec = left_vec if use_lvec else None
    rvec = right_vec if use_rvec else None

    # Contract with the naive algorithm, compare to contract_matseq output
    naive_result = naive_contraction(eye_mats, lvec, rvec)
    lib_result = contract_matseq(eye_mats, lvec, rvec, parallel_eval)
    lib_result2 = contract_matseq(eye_mats2, lvec, rvec, parallel_eval)

    # Both ways of calling contract_matseq should agree
    assert torch.equal(lib_result, lib_result2)
    assert torch.allclose(lib_result, naive_result)
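
# A hedged sketch of a naive reference contraction (the naive_contraction
# helper itself is not shown in these snippets): multiply the matrices in
# sequence order, then contract with the boundary vectors when given.
def _naive_contraction_sketch(mat_slices, left_vec=None, right_vec=None):
    mats = mat_slices if isinstance(mat_slices, (list, tuple)) else mat_slices.unbind(-3)
    prod = mats[0]
    for mat in mats[1:]:
        prod = prod @ mat
    if left_vec is not None:
        prod = left_vec @ prod  # contract the left bond index
    if right_vec is not None:
        prod = prod @ right_vec  # contract the right bond index
    return prod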