Example #1
def test_simplify():
    list_of_tensors = [
        make_tensor('H', "g0", "g0"),
        make_tensor('t', "h0", "p0"),
        make_tensor('t', "p1", "h1"),
        make_tensor('L', 'h0', 'g0'),
        make_tensor('L', 'g0, p1', 'p0, h1')
    ]
    a = Term(list_of_tensors, SecondQuantizedOperator.make_empty(), -1)
    a.simplify()
    assert a.is_void()

    list_of_tensors = [
        make_tensor('H', "g0", "g0"),
        make_tensor('t', "h0", "p0"),
        make_tensor('t', "p1", "h1"),
        make_tensor('L', 'g0', 'h1'),
        make_tensor('L', 'h0', 'g0'),
        make_tensor('K', 'p0', 'p1')
    ]
    a = Term(list_of_tensors, SecondQuantizedOperator.make_empty())
    a.simplify()
    assert a.list_of_tensors == [
        make_tensor('H', 'h6', 'h6'),
        make_tensor('t', 'p1', 'h3'),
        make_tensor('t', 'h4', 'p1'),
        make_tensor('L', 'h6', 'h3'),
        make_tensor('L', 'h4', 'h6')
    ]
    assert a.sq_op == SecondQuantizedOperator.make_empty()
    assert a.indices_set == {Index(i) for i in ['h6', 'p1', 'h3', 'h4']}
    assert a.diagonal_indices == {Index('h6'): 4}

    list_of_tensors = [
        make_tensor('H', "g0", "g0"),
        make_tensor('t', "h0", "p0"),
        make_tensor('t', "p1", "c1"),
        make_tensor('L', 'g0', 'c1'),
        make_tensor('K', 'p0', 'p1')
    ]
    a = Term(list_of_tensors, SecondQuantizedOperator('h0', 'g0'))
    a.simplify()
    assert a.list_of_tensors == [
        make_tensor('H', "c2", "c2"),
        make_tensor('t', "p1", "c2"),
        make_tensor('t', "h0", "p1")
    ]
    assert a.sq_op == SecondQuantizedOperator('h0', 'c2')
    assert a.indices_set == {Index(i) for i in ['c2', 'p1', 'h0']}
    assert a.diagonal_indices == {Index('c2'): 4}
Example #2
def test_canonicalize_2():
    # H^{ v_{0} }_{ v_{1} } T^{ v_{1} }_{ c_{1} } T^{ v_{2} }_{ c_{0} } T^{ c_{0} c_{1} }_{ v_{0} v_{2} }
    indices_type = 'so'
    list_of_tensors = [
        make_tensor('Hamiltonian', "v0", "v1", indices_type),
        make_tensor('cluster_amplitude', "v1", "c1", indices_type),
        make_tensor('cluster_amplitude', "v2", "c0", indices_type),
        make_tensor('cluster_amplitude', "c0,c1", "v0,v2", indices_type)
    ]
    sq_op = SecondQuantizedOperator.make_empty(indices_type)
    a = Term(list_of_tensors, sq_op)

    # H^{ v_{0} }_{ v_{1} } T^{ c_{0} }_{ v_{2} } T^{ c_{1} }_{ v_{0} } T^{ v_{1} v_{2} }_{ c_{0} c_{1} }
    list_of_tensors = [
        make_tensor('Hamiltonian', "v0", "v1", indices_type),
        make_tensor('cluster_amplitude', "c0", "v2", indices_type),
        make_tensor('cluster_amplitude', "c1", "v0", indices_type),
        make_tensor('cluster_amplitude', "v1,v2", "c0,c1", indices_type)
    ]
    b = Term(list_of_tensors, sq_op)

    ref = Term([
        make_tensor('Hamiltonian', "v1", "v0", indices_type),
        make_tensor('cluster_amplitude', "c0", "v0", indices_type),
        make_tensor('cluster_amplitude', "c1", "v2", indices_type),
        make_tensor('cluster_amplitude', "c0,c1", "v1,v2", indices_type)
    ], sq_op, -1)
    assert ref == a.canonicalize() == b.canonicalize()
Example #3
def process_composite_contractions(contraction, elementary_contractions,
                                   n_indices, expand_hole, base_order_map,
                                   upper_indices_set, lower_indices_set):
    """
    Process a single composite contraction expressed in terms of indices of elementary contractions.
    :param contraction: a composite contraction
    :param elementary_contractions: a list of density cumulants / hole densities
    :param n_indices: the total number of indices
    :param expand_hole: expand hole densities to Kronecker delta minus one density if True
    :param base_order_map: the index map to ordering index, e.g., {"ug0": 0, "lg0": 1, ...}, u/l for upper/lower
    :param upper_indices_set: the set of all creation operators
    :param lower_indices_set: the set of all annihilation operators
    :return: a list of contractions in terms of (sign, list_of_densities, sq_op)
    """
    list_of_densities = []
    current_order = []

    n_open = 0  # running count of contracted creation-annihilation pairs
    for con in contraction:
        ele_con = elementary_contractions[con]
        list_of_densities.append(ele_con)
        n_open += ele_con.n_upper

        if isinstance(ele_con, HoleDensity):
            current_order += [
                f"l{ele_con.lower_indices[0].name}",
                f"u{ele_con.upper_indices[0].name}"
            ]
        else:
            current_order += [f"u{i.name}" for i in ele_con.upper_indices]
            current_order += [
                f"l{i.name}" for i in ele_con.lower_indices[::-1]
            ]
    n_open = n_indices - 2 * n_open  # number of uncontracted (open) indices

    # sort the open indices
    if n_open != 0:
        contracted = set(current_order)
        open_upper_indices = IndicesSpinOrbital(
            sorted(Index(i[1:]) for i in upper_indices_set - contracted))
        open_lower_indices = IndicesSpinOrbital(
            sorted(Index(i[1:]) for i in lower_indices_set - contracted))
        current_order += [f"u{i.name}" for i in open_upper_indices]
        current_order += [f"l{i.name}" for i in open_lower_indices[::-1]]

        sq_op = SecondQuantizedOperator(open_upper_indices, open_lower_indices)
    else:
        sq_op = SecondQuantizedOperator.make_empty()

    # expand hole densities to delta - lambda_1
    sign_densities_pairs = expand_hole_densities(
        list_of_densities) if expand_hole else [(1, list_of_densities)]

    # determine sign
    sign = (-1)**Permutation([base_order_map[i]
                              for i in current_order]).inversions()

    return [(sign * _s, densities, sq_op)
            for _s, densities in sign_densities_pairs]
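The sign above is the parity of the permutation that maps current_order back to the base ordering, counted through inversions. A minimal sketch of that step, assuming Permutation is sympy.combinatorics.Permutation (the base_order_map and current_order values here are made up for illustration):

from sympy.combinatorics import Permutation

# hypothetical base ordering over two creation ("u") and two annihilation ("l") operators
base_order_map = {"ug0": 0, "uh0": 1, "lh1": 2, "lg1": 3}

# a made-up ordering produced by some contraction: "uh0" and "ug0" are swapped
current_order = ["uh0", "ug0", "lh1", "lg1"]

# one inversion relative to the base order, so the overall sign is -1
perm = Permutation([base_order_map[i] for i in current_order])
sign = (-1) ** perm.inversions()
print(sign)  # -1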
Example #4
def test_canonicalize_4():
    list_of_tensors = [
        make_tensor('Hamiltonian', "g0,g1,c0", "g2,p0,v0"),
        make_tensor('cluster_amplitude', "p0,p1,g3", "a0,h1,a1"),
        make_tensor('Kronecker', "v0", "p1"),
        make_tensor('cumulant', "h1", "c0"),
        make_tensor('cumulant', "a1", "g3"),
        make_tensor('cumulant', "g2,a0", "g0,g1")
    ]
    a = Term(list_of_tensors, SecondQuantizedOperator.make_empty())

    ref = Term([
        make_tensor('H', "c0,a1,a2", "p0,v0,a0"),
        make_tensor('t', "c0,a4,a5", "p0,v0,a3"),
        make_tensor('L', "a4", "a3"),
        make_tensor('L', "a1,a2", "a0,a5")
    ], SecondQuantizedOperator.make_empty())
    assert a.canonicalize() == ref
Example #5
def test_latex_1():
    list_of_tensors = [
        make_tensor('H', 'v0, v1', 'c0, c1'),
        make_tensor('t', 'c0, c1', 'v0, v1')
    ]
    sq_op = SecondQuantizedOperator.make_empty()
    a = Term(list_of_tensors, sq_op, 0.25)
    assert (a.latex() ==
            "1/4 H^{ v_{0} v_{1} }_{ c_{0} c_{1} } T^{ c_{0} c_{1} }_{ v_{0} v_{1} }")
    assert (a.latex(dollar=True) ==
            "$1/4 H^{ v_{0} v_{1} }_{ c_{0} c_{1} } T^{ c_{0} c_{1} }_{ v_{0} v_{1} }$")
Example #6
def test_ambit_1():
    list_of_tensors = [
        make_tensor('H', 'v0, v1', 'c0, c1'),
        make_tensor('t', 'c0, c1', 'v0, v1')
    ]
    sq_op = SecondQuantizedOperator.make_empty()
    a = Term(list_of_tensors, sq_op, 0.25)
    assert (a.ambit(name='X') ==
            'X0 += (1.0 / 4.0) * H2["v0,v1,c0,c1"] * T2["c0,c1,v0,v1"];')

    list_of_tensors = [
        make_tensor('H', 'v0, v1', 'g0, c1'),
        make_tensor('t', 'c0, c1', 'v0, v1')
    ]
    sq_op = make_sq('g0', 'c0')
    a = Term(list_of_tensors, sq_op, 0.5)
    assert (a.ambit() ==
            'C1["c0,g0"] += (1.0 / 2.0) * H2["v0,v1,g0,c1"] * T2["c0,c1,v0,v1"];')
Example #7
def process_composite_categorized(com_cat, ele_con, compatible,
                                  upper_indices_set, lower_indices_set,
                                  base_order_map, n_indices, expand_hole):
    """
    Process one composite categorized contraction.
    :param com_cat: a list of connected operator indices
    :param ele_con: an ElementaryContractionCategorized object
    :param compatible: compatible elementary contractions
    :param upper_indices_set: the set of all creation operators
    :param lower_indices_set: the set of all annihilation operators
    :param base_order_map: the Index map to ordering index
    :param n_indices: the total number of cre and ann operators
    :param expand_hole: expand hole densities to Kronecker delta minus one density if True
    :return: a list of contractions in terms of (sign, list_of_densities, sq_op)
    """
    n_open = n_indices - sum(len(i) for i in com_cat)  # number of uncontracted (open) indices
    out = []

    for coded_cons in ele_con.composite_contractions(Counter(com_cat),
                                                     compatible):
        contractions = [ele_con.decode(i) for i in coded_cons]

        # cre/ann ordering of the current composite contraction
        current_order = []
        for con in contractions:
            if isinstance(con, HoleDensity):
                current_order += [
                    f"l{con.lower_indices[0].name}",
                    f"u{con.upper_indices[0].name}"
                ]
            else:
                current_order += [f"u{i.name}" for i in con.upper_indices]
                current_order += [
                    f"l{i.name}" for i in con.lower_indices[::-1]
                ]

        # sort open indices
        if n_open != 0:
            contracted = set(current_order)
            open_upper = IndicesSpinOrbital(
                sorted(Index(i[1:]) for i in upper_indices_set - contracted))
            open_lower = IndicesSpinOrbital(
                sorted(Index(i[1:]) for i in lower_indices_set - contracted))
            current_order += [f"u{i.name}" for i in open_upper]
            current_order += [f"l{i.name}" for i in open_lower[::-1]]

            sq_op = SecondQuantizedOperator(open_upper, open_lower)
        else:
            sq_op = SecondQuantizedOperator.make_empty()

        # determine sign of current ordering
        sign = (-1)**Permutation([base_order_map[i]
                                  for i in current_order]).inversions()

        # expand hole density to delta - 1-cumulant
        sign_densities = expand_hole_densities(
            contractions) if expand_hole else [(1, contractions)]

        # append results
        out += [(sign * _s, cons, sq_op) for _s, cons in sign_densities]

    return out
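The expand_hole step relies on rewriting each hole density as a Kronecker delta minus the 1-body cumulant, so a product containing k hole densities expands into 2^k signed products of densities. A rough sketch of that bookkeeping, using invented tuple stand-ins for the densities (the real expand_hole_densities works on the package's tensor objects, not tuples):

from itertools import product

def expand_hole_densities_sketch(densities):
    # each ('eta', upper, lower) expands to +('delta', upper, lower) - ('lambda1', upper, lower);
    # any other density passes through unchanged with sign +1
    choices = []
    for d in densities:
        if d[0] == 'eta':
            choices.append([(1, ('delta',) + d[1:]), (-1, ('lambda1',) + d[1:])])
        else:
            choices.append([(1, d)])

    # distribute the products and collect (overall sign, list of densities) pairs
    expanded = []
    for combo in product(*choices):
        sign, terms = 1, []
        for s, t in combo:
            sign *= s
            terms.append(t)
        expanded.append((sign, terms))
    return expanded

# one hole density and one cumulant -> two signed products
print(expand_hole_densities_sketch([('eta', 'p0', 'p1'), ('lambda2', 'a0,a1', 'a2,a3')]))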
Example #8
def test_void():
    a = SecondQuantizedOperator("G2, p0, p1", "g0, A0, h2", 'si')
    b = a.void()
    assert a.indices_type == b.indices_type
    assert b.size == 0
    assert b is not SecondQuantizedOperator.make_empty('si')
Example #9
def test_generate_spin_cases_2():
    from collections import defaultdict

    # -0.25 * H^{aw}_{xy} * T^{uv}_{az} * L^{xyz}_{uvw}
    a = Term([
        make_tensor('H', 'a1,a2', 'p0,a0'),
        make_tensor('t', 'a4,a5', 'p0,a3'),
        make_tensor('L', 'a1,a2,a3', 'a0,a4,a5')
    ], SecondQuantizedOperator.make_empty(), -0.25)

    spin_combined = {}
    spin_coeff = defaultdict(list)
    for term in a.generate_spin_cases_naive():
        name = term.hash_term()
        spin_coeff[name].append(term.coeff)
        spin_combined[name] = term

    # iterate over a copy because entries with vanishing coefficients are removed
    for name, term in list(spin_combined.items()):
        term.coeff = sum(spin_coeff[name])
        if abs(term.coeff) < 1.0e-15:
            spin_combined.pop(name)

    ref = {
        Term([
            make_tensor('H', 'a1,a2', 'p0,a0', 'si'),
            make_tensor('t', 'a4,a5', 'p0,a3', 'si'),
            make_tensor('L', 'a1,a2,a3', 'a0,a4,a5', 'si')
        ], SecondQuantizedOperator.make_empty('si'), -0.25),
        Term([
            make_tensor('H', 'A1,A2', 'P0,A0', 'si'),
            make_tensor('t', 'A4,A5', 'P0,A3', 'si'),
            make_tensor('L', 'A1,A2,A3', 'A0,A4,A5', 'si')
        ], SecondQuantizedOperator.make_empty('si'), -0.25),
        Term([
            make_tensor('H', 'a1,A2', 'p0,A0', 'si'),
            make_tensor('t', 'a4,a5', 'p0,a3', 'si'),
            make_tensor('L', 'a1,A2,a3', 'A0,a4,a5', 'si')
        ], SecondQuantizedOperator.make_empty('si'), -0.5).canonicalize(),
        Term([
            make_tensor('H', 'a1,a2', 'p0,a0', 'si'),
            make_tensor('t', 'a4,A5', 'p0,A3', 'si'),
            make_tensor('L', 'a1,a2,A3', 'a0,a4,A5', 'si')
        ], SecondQuantizedOperator.make_empty('si'), -0.5).canonicalize(),
        Term([
            make_tensor('H', 'A1,a2', 'P0,a0', 'si'),
            make_tensor('t', 'A4,a5', 'P0,a3', 'si'),
            make_tensor('L', 'A1,a2,a3', 'a0,A4,a5', 'si')
        ], SecondQuantizedOperator.make_empty('si'), -1).canonicalize(),
        Term([
            make_tensor('H', 'a1,A2', 'P0,a0', 'si'),
            make_tensor('t', 'A4,A5', 'P0,A3', 'si'),
            make_tensor('L', 'a1,A2,A3', 'a0,A4,A5', 'si')
        ], SecondQuantizedOperator.make_empty('si'), -0.5).canonicalize(),
        Term([
            make_tensor('H', 'A1,A2', 'P0,A0', 'si'),
            make_tensor('t', 'A4,a5', 'P0,a3', 'si'),
            make_tensor('L', 'A1,A2,a3', 'A0,A4,a5', 'si')
        ], SecondQuantizedOperator.make_empty('si'), -0.5).canonicalize(),
        Term([
            make_tensor('H', 'a1,A2', 'p0,A0', 'si'),
            make_tensor('t', 'a4,A5', 'p0,A3', 'si'),
            make_tensor('L', 'a1,A2,A3', 'A0,a4,A5', 'si')
        ], SecondQuantizedOperator.make_empty('si'), -1).canonicalize()
    }

    for i in spin_combined.values():
        assert i in ref
    assert len(spin_combined) == len(ref)